Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Synced 2025-01-04 04:06:26 +00:00

commit e3afe5dd3a

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR. No conflicts.

Adjacent changes:

net/core/page_pool_user.c
  0b11b1c5c3 ("netdev: let netlink core handle -EMSGSIZE errors")
  429679dcf7 ("page_pool: fix netlink dump stop/resume")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -65,9 +65,11 @@ properties:

   rx-internal-delay-ps:
     enum: [0, 1800]
+    default: 0

   tx-internal-delay-ps:
     enum: [0, 2000]
+    default: 0

   '#address-cells':
     const: 1
@@ -545,7 +545,7 @@ In such scenario, dpll device input signal shall be also configurable
 to drive dpll with signal recovered from the PHY netdevice.
 This is done by exposing a pin to the netdevice - attaching pin to the
 netdevice itself with
-``netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)``.
+``dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)``.
 Exposed pin id handle ``DPLL_A_PIN_ID`` is then identifiable by the user
 as it is attached to rtnetlink respond to get ``RTM_NEWLINK`` command in
 nested attribute ``IFLA_DPLL_PIN``.
@@ -10,3 +10,4 @@ Hyper-V Enlightenments
    overview
    vmbus
    clocks
+   vpci

Documentation/virt/hyperv/vpci.rst (new file, 316 lines)

@@ -0,0 +1,316 @@
.. SPDX-License-Identifier: GPL-2.0

PCI pass-thru devices
=========================
In a Hyper-V guest VM, PCI pass-thru devices (also called virtual PCI devices, or vPCI devices) are physical PCI devices that are mapped directly into the VM's physical address space. Guest device drivers can interact directly with the hardware without intermediation by the host hypervisor. This approach provides higher bandwidth access to the device with lower latency, compared with devices that are virtualized by the hypervisor. The device should appear to the guest just as it would when running on bare metal, so no changes are required to the Linux device drivers for the device.

Hyper-V terminology for vPCI devices is "Discrete Device Assignment" (DDA). Public documentation for Hyper-V DDA is available here: `DDA`_

.. _DDA: https://learn.microsoft.com/en-us/windows-server/virtualization/hyper-v/plan/plan-for-deploying-devices-using-discrete-device-assignment

DDA is typically used for storage controllers, such as NVMe, and for GPUs. A similar mechanism for NICs is called SR-IOV and produces the same benefits by allowing a guest device driver to interact directly with the hardware. See Hyper-V public documentation here: `SR-IOV`_

.. _SR-IOV: https://learn.microsoft.com/en-us/windows-hardware/drivers/network/overview-of-single-root-i-o-virtualization--sr-iov-

This discussion of vPCI devices includes DDA and SR-IOV devices.

Device Presentation
-------------------
Hyper-V provides full PCI functionality for a vPCI device when it is operating, so the Linux device driver for the device can be used unchanged, provided it uses the correct Linux kernel APIs for accessing PCI config space and for other integration with Linux. But the initial detection of the PCI device and its integration with the Linux PCI subsystem must use Hyper-V specific mechanisms. Consequently, vPCI devices on Hyper-V have a dual identity. They are initially presented to Linux guests as VMBus devices via the standard VMBus "offer" mechanism, so they have a VMBus identity and appear under /sys/bus/vmbus/devices. The VMBus vPCI driver in Linux at drivers/pci/controller/pci-hyperv.c handles a newly introduced vPCI device by fabricating a PCI bus topology and creating all the normal PCI device data structures in Linux that would exist if the PCI device were discovered via ACPI on a bare-metal system. Once those data structures are set up, the device also has a normal PCI identity in Linux, and the normal Linux device driver for the vPCI device can function as if it were running in Linux on bare-metal. Because vPCI devices are presented dynamically through the VMBus offer mechanism, they do not appear in the Linux guest's ACPI tables. vPCI devices may be added to a VM or removed from a VM at any time during the life of the VM, and not just during initial boot.

With this approach, the vPCI device is a VMBus device and a PCI device at the same time. In response to the VMBus offer message, the hv_pci_probe() function runs and establishes a VMBus connection to the vPCI VSP on the Hyper-V host. That connection has a single VMBus channel. The channel is used to exchange messages with the vPCI VSP for the purpose of setting up and configuring the vPCI device in Linux. Once the device is fully configured in Linux as a PCI device, the VMBus channel is used only if Linux changes the vCPU to be interrupted in the guest, or if the vPCI device is removed from the VM while the VM is running. The ongoing operation of the device happens directly between the Linux device driver for the device and the hardware, with VMBus and the VMBus channel playing no role.

PCI Device Setup
----------------
PCI device setup follows a sequence that Hyper-V originally created for Windows guests, and that can be ill-suited for Linux guests due to differences in the overall structure of the Linux PCI subsystem compared with Windows. Nonetheless, with a bit of hackery in the Hyper-V virtual PCI driver for Linux, the virtual PCI device is set up in Linux so that generic Linux PCI subsystem code and the Linux driver for the device "just work".

Each vPCI device is set up in Linux to be in its own PCI domain with a host bridge. The PCI domainID is derived from bytes 4 and 5 of the instance GUID assigned to the VMBus vPCI device. The Hyper-V host does not guarantee that these bytes are unique, so hv_pci_probe() has an algorithm to resolve collisions. The collision resolution is intended to be stable across reboots of the same VM so that the PCI domainIDs don't change, as the domainID appears in the user space configuration of some devices.

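As a rough illustration of the derivation described above (a simplified sketch, not the exact pci-hyperv.c code), the 16-bit candidate domain could be formed from bytes 4 and 5 of the instance GUID like this:

.. code-block:: c

   #include <linux/types.h>
   #include <linux/uuid.h>

   /* Candidate PCI domain from bytes 4 and 5 of the VMBus instance GUID.
    * The real driver additionally resolves collisions so the value stays
    * stable across reboots; that logic is not shown here.
    */
   static u16 example_domain_from_instance_guid(const guid_t *instance)
   {
           return ((u16)instance->b[5] << 8) | instance->b[4];
   }
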
hv_pci_probe() allocates a guest MMIO range to be used as PCI config space for the device. This MMIO range is communicated to the Hyper-V host over the VMBus channel as part of telling the host that the device is ready to enter d0. See hv_pci_enter_d0(). When the guest subsequently accesses this MMIO range, the Hyper-V host intercepts the accesses and maps them to the physical device PCI config space.

hv_pci_probe() also gets BAR information for the device from the Hyper-V host, and uses this information to allocate MMIO space for the BARs. That MMIO space is then set up to be associated with the host bridge so that it works when generic PCI subsystem code in Linux processes the BARs.

Finally, hv_pci_probe() creates the root PCI bus. At this point the Hyper-V virtual PCI driver hackery is done, and the normal Linux PCI machinery for scanning the root bus works to detect the device, to perform driver matching, and to initialize the driver and device.

PCI Device Removal
------------------
A Hyper-V host may initiate removal of a vPCI device from a guest VM at any time during the life of the VM. The removal is instigated by an admin action taken on the Hyper-V host and is not under the control of the guest OS.

A guest VM is notified of the removal by an unsolicited "Eject" message sent from the host to the guest over the VMBus channel associated with the vPCI device. Upon receipt of such a message, the Hyper-V virtual PCI driver in Linux asynchronously invokes Linux kernel PCI subsystem calls to shut down and remove the device. When those calls are complete, an "Ejection Complete" message is sent back to Hyper-V over the VMBus channel indicating that the device has been removed. At this point, Hyper-V sends a VMBus rescind message to the Linux guest, which the VMBus driver in Linux processes by removing the VMBus identity for the device. Once that processing is complete, all vestiges of the device having been present are gone from the Linux kernel. The rescind message also indicates to the guest that Hyper-V has stopped providing support for the vPCI device in the guest. If the guest were to attempt to access that device's MMIO space, it would be an invalid reference. Hypercalls affecting the device return errors, and any further messages sent in the VMBus channel are ignored.

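The asynchronous teardown described above can be pictured as a small work item that removes the PCI device with the standard PCI core APIs and then reports completion. This is only an illustrative sketch with invented helper and structure names (example_*); the real handling lives in pci-hyperv.c:

.. code-block:: c

   #include <linux/kernel.h>
   #include <linux/pci.h>
   #include <linux/workqueue.h>

   /* Invented container; the real driver keeps equivalent per-device state. */
   struct example_vpci_dev {
           struct work_struct eject_work;
           struct pci_dev *pdev;
   };

   /* Hypothetical helper that sends "Ejection Complete" back to the host. */
   void example_send_ejection_complete(struct example_vpci_dev *vdev);

   static void example_eject_work(struct work_struct *work)
   {
           struct example_vpci_dev *vdev =
                   container_of(work, struct example_vpci_dev, eject_work);

           /* Tear the device down using the normal PCI core entry points. */
           pci_lock_rescan_remove();
           pci_stop_and_remove_bus_device(vdev->pdev);
           pci_unlock_rescan_remove();

           example_send_ejection_complete(vdev);
   }
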
After sending the Eject message, Hyper-V allows the guest VM 60 seconds to cleanly shut down the device and respond with Ejection Complete before sending the VMBus rescind message. If for any reason the Eject steps don't complete within the allowed 60 seconds, the Hyper-V host forcibly performs the rescind steps, which will likely result in cascading errors in the guest because the device is now no longer present from the guest standpoint and accessing the device MMIO space will fail.

Because ejection is asynchronous and can happen at any point during the guest VM lifecycle, proper synchronization in the Hyper-V virtual PCI driver is very tricky. Ejection has been observed even before a newly offered vPCI device has been fully set up. The Hyper-V virtual PCI driver has been updated several times over the years to fix race conditions when ejections happen at inopportune times. Care must be taken when modifying this code to prevent re-introducing such problems. See comments in the code.

Interrupt Assignment
--------------------
The Hyper-V virtual PCI driver supports vPCI devices using MSI, multi-MSI, or MSI-X. Assigning the guest vCPU that will receive the interrupt for a particular MSI or MSI-X message is complex because of the way the Linux setup of IRQs maps onto the Hyper-V interfaces. For the single-MSI and MSI-X cases, Linux calls hv_compose_msi_msg() twice, with the first call containing a dummy vCPU and the second call containing the real vCPU. Furthermore, hv_irq_unmask() is finally called (on x86) or the GICD registers are set (on arm64) to specify the real vCPU again. Each of these three calls interacts with Hyper-V, which must decide which physical CPU should receive the interrupt before it is forwarded to the guest VM. Unfortunately, the Hyper-V decision-making process is a bit limited, and can result in concentrating the physical interrupts on a single CPU, causing a performance bottleneck. See details about how this is resolved in the extensive comment above the function hv_compose_msi_req_get_cpu().

The Hyper-V virtual PCI driver implements the irq_chip.irq_compose_msi_msg function as hv_compose_msi_msg(). Unfortunately, on Hyper-V the implementation requires sending a VMBus message to the Hyper-V host and awaiting an interrupt indicating receipt of a reply message. Since irq_chip.irq_compose_msi_msg can be called with IRQ locks held, it doesn't work to do the normal sleep until awakened by the interrupt. Instead hv_compose_msi_msg() must send the VMBus message, and then poll for the completion message. As further complexity, the vPCI device could be ejected/rescinded while the polling is in progress, so this scenario must be detected as well. See comments in the code regarding this very tricky area.

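The "send, then busy-poll for the reply" pattern described above can be sketched roughly as follows. The helper names (example_*) and the completion structure are invented for illustration; the real logic, including detecting a rescind while polling, is in hv_compose_msi_msg():

.. code-block:: c

   #include <linux/compiler.h>
   #include <linux/delay.h>
   #include <linux/errno.h>
   #include <linux/types.h>

   struct example_completion {
           bool done;       /* set by the reply handler */
           bool rescinded;  /* set if the vPCI device goes away meanwhile */
   };

   /* Hypothetical helper that queues the request on the VMBus channel. */
   int example_send_vpci_request(struct example_completion *comp);

   static int example_send_and_poll(struct example_completion *comp)
   {
           int ret = example_send_vpci_request(comp);

           if (ret)
                   return ret;

           /* Cannot sleep here: irq_compose_msi_msg() may run with IRQ
            * locks held, so poll instead of waiting on a completion.
            */
           while (!READ_ONCE(comp->done)) {
                   if (READ_ONCE(comp->rescinded))
                           return -ENODEV;
                   udelay(100);
           }
           return 0;
   }
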
Most of the code in the Hyper-V virtual PCI driver (pci-hyperv.c) applies to Hyper-V and Linux guests running on x86 and on arm64 architectures. But there are differences in how interrupt assignments are managed. On x86, the Hyper-V virtual PCI driver in the guest must make a hypercall to tell Hyper-V which guest vCPU should be interrupted by each MSI/MSI-X interrupt, and the x86 interrupt vector number that the x86_vector IRQ domain has picked for the interrupt. This hypercall is made by hv_arch_irq_unmask(). On arm64, the Hyper-V virtual PCI driver manages the allocation of an SPI for each MSI/MSI-X interrupt. The Hyper-V virtual PCI driver stores the allocated SPI in the architectural GICD registers, which Hyper-V emulates, so no hypercall is necessary as with x86. Hyper-V does not support using LPIs for vPCI devices in arm64 guest VMs because it does not emulate a GICv3 ITS.

The Hyper-V virtual PCI driver in Linux supports vPCI devices whose drivers create managed or unmanaged Linux IRQs. If the smp_affinity for an unmanaged IRQ is updated via the /proc/irq interface, the Hyper-V virtual PCI driver is called to tell the Hyper-V host to change the interrupt targeting and everything works properly. However, on x86 if the x86_vector IRQ domain needs to reassign an interrupt vector due to running out of vectors on a CPU, there's no path to inform the Hyper-V host of the change, and things break. Fortunately, guest VMs operate in a constrained device environment where using all the vectors on a CPU doesn't happen. Since such a problem is only a theoretical concern rather than a practical concern, it has been left unaddressed.

DMA
---
By default, Hyper-V pins all guest VM memory in the host when the VM is created, and programs the physical IOMMU to allow the VM to have DMA access to all its memory. Hence it is safe to assign PCI devices to the VM, and allow the guest operating system to program the DMA transfers. The physical IOMMU prevents a malicious guest from initiating DMA to memory belonging to the host or to other VMs on the host. From the Linux guest standpoint, such DMA transfers are in "direct" mode since Hyper-V does not provide a virtual IOMMU in the guest.

Hyper-V assumes that physical PCI devices always perform cache-coherent DMA. When running on x86, this behavior is required by the architecture. When running on arm64, the architecture allows for both cache-coherent and non-cache-coherent devices, with the behavior of each device specified in the ACPI DSDT. But when a PCI device is assigned to a guest VM, that device does not appear in the DSDT, so the Hyper-V VMBus driver propagates cache-coherency information from the VMBus node in the ACPI DSDT to all VMBus devices, including vPCI devices (since they have a dual identity as a VMBus device and as a PCI device). See vmbus_dma_configure(). Current Hyper-V versions always indicate that the VMBus is cache coherent, so vPCI devices on arm64 always get marked as cache coherent and the CPU does not perform any sync operations as part of dma_map/unmap_*() calls.

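From a driver's point of view this is just the ordinary streaming DMA API; because the device is marked cache coherent, the dma_map/unmap calls do not trigger CPU cache maintenance. A minimal sketch (generic DMA API usage, not code from pci-hyperv.c):

.. code-block:: c

   #include <linux/dma-mapping.h>

   /* Map a buffer for a device-bound transfer; on a coherent vPCI device
    * this sets up the mapping without any CPU cache sync operations.
    */
   static int example_map_for_tx(struct device *dev, void *buf, size_t len,
                                 dma_addr_t *handle)
   {
           *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
           if (dma_mapping_error(dev, *handle))
                   return -ENOMEM;
           return 0;
   }
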
vPCI protocol versions
----------------------
As previously described, during vPCI device setup and teardown messages are passed over a VMBus channel between the Hyper-V host and the Hyper-V vPCI driver in the Linux guest. Some messages have been revised in newer versions of Hyper-V, so the guest and host must agree on the vPCI protocol version to be used. The version is negotiated when communication over the VMBus channel is first established. See hv_pci_protocol_negotiation(). Newer versions of the protocol extend support to VMs with more than 64 vCPUs, and provide additional information about the vPCI device, such as the guest virtual NUMA node to which it is most closely affined in the underlying hardware.

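Conceptually, the negotiation tries the protocol versions the guest knows about from newest to oldest until the host accepts one. The sketch below uses placeholder version values and an invented example_try_version() helper; the real table and message exchange are in hv_pci_protocol_negotiation():

.. code-block:: c

   #include <linux/errno.h>
   #include <linux/kernel.h>
   #include <linux/types.h>

   /* Placeholder values, newest first; not the real protocol numbers. */
   static const u32 example_versions[] = { 0x00010004, 0x00010003, 0x00010001 };

   /* Hypothetical helper: ask the host whether it accepts this version. */
   int example_try_version(u32 version);

   static int example_negotiate(void)
   {
           unsigned int i;

           for (i = 0; i < ARRAY_SIZE(example_versions); i++)
                   if (example_try_version(example_versions[i]) == 0)
                           return 0;       /* host accepted this version */

           return -EPROTO;                 /* no common protocol version */
   }
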
Guest NUMA node affinity
------------------------
When the vPCI protocol version provides it, the guest NUMA node affinity of the vPCI device is stored as part of the Linux device information for subsequent use by the Linux driver. See hv_pci_assign_numa_node(). If the negotiated protocol version does not support the host providing NUMA affinity information, the Linux guest defaults the device NUMA node to 0. But even when the negotiated protocol version includes NUMA affinity information, the ability of the host to provide such information depends on certain host configuration options. If the guest receives NUMA node value "0", it could mean NUMA node 0, or it could mean "no information is available". Unfortunately it is not possible to distinguish the two cases from the guest side.

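Applying the hint ultimately comes down to setting the NUMA node on the struct device, which drivers later read back with dev_to_node(). A simplified sketch of the idea (the real logic is in hv_pci_assign_numa_node()):

.. code-block:: c

   #include <linux/device.h>
   #include <linux/pci.h>

   /* Apply a NUMA hint from the host; node 0 is also what we fall back to
    * when the protocol version (or host configuration) provides no hint.
    */
   static void example_assign_numa_node(struct pci_dev *pdev,
                                        int node_from_host,
                                        bool host_provided_hint)
   {
           set_dev_node(&pdev->dev, host_provided_hint ? node_from_host : 0);
   }
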
PCI config space access in a CoCo VM
------------------------------------
Linux PCI device drivers access PCI config space using a standard set of functions provided by the Linux PCI subsystem. In Hyper-V guests these standard functions map to functions hv_pcifront_read_config() and hv_pcifront_write_config() in the Hyper-V virtual PCI driver. In normal VMs, these hv_pcifront_*() functions directly access the PCI config space, and the accesses trap to Hyper-V to be handled. But in CoCo VMs, memory encryption prevents Hyper-V from reading the guest instruction stream to emulate the access, so the hv_pcifront_*() functions must invoke hypercalls with explicit arguments describing the access to be made.

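None of this is visible to an ordinary driver: it keeps using the standard config accessors, and the routing to MMIO-with-trap or explicit hypercalls happens underneath. For example (standard Linux PCI API, nothing Hyper-V specific):

.. code-block:: c

   #include <linux/pci.h>

   /* On Hyper-V guests this read is routed through
    * hv_pcifront_read_config(); the driver does not care how the access
    * is actually performed.
    */
   static u16 example_read_vendor_id(struct pci_dev *pdev)
   {
           u16 vendor;

           pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
           return vendor;
   }
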
Config Block back-channel
-------------------------
The Hyper-V host and Hyper-V virtual PCI driver in Linux together implement a non-standard back-channel communication path between the host and guest. The back-channel path uses messages sent over the VMBus channel associated with the vPCI device. The functions hyperv_read_cfg_blk() and hyperv_write_cfg_blk() are the primary interfaces provided to other parts of the Linux kernel. As of this writing, these interfaces are used only by the Mellanox mlx5 driver to pass diagnostic data to a Hyper-V host running in the Azure public cloud. The functions hyperv_read_cfg_blk() and hyperv_write_cfg_blk() are implemented in a separate module (pci-hyperv-intf.c, under CONFIG_PCI_HYPERV_INTERFACE) that effectively stubs them out when running in non-Hyper-V environments.

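A caller elsewhere in the kernel would use the interface roughly as below. The argument list shown here is an assumption based on the description above (a buffer, its length, a block ID, and a returned byte count), not a verified copy of the prototype, so check include/linux/hyperv.h before relying on it:

.. code-block:: c

   #include <linux/hyperv.h>
   #include <linux/pci.h>

   /* Hedged sketch: read one diagnostic config block from the host via the
    * back-channel. The exact hyperv_read_cfg_blk() signature should be
    * taken from include/linux/hyperv.h.
    */
   static int example_read_diag_block(struct pci_dev *pdev, void *buf,
                                      unsigned int len, unsigned int block_id)
   {
           unsigned int bytes_returned;

           return hyperv_read_cfg_blk(pdev, buf, len, block_id,
                                      &bytes_returned);
   }
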
MAINTAINERS (49 lines changed)

@@ -1395,6 +1395,7 @@ F:	drivers/hwmon/max31760.c

 ANALOGBITS PLL LIBRARIES
 M:	Paul Walmsley <paul.walmsley@sifive.com>
+M:	Samuel Holland <samuel.holland@sifive.com>
 S:	Supported
 F:	drivers/clk/analogbits/*
 F:	include/linux/clk/analogbits*

@@ -2156,7 +2157,7 @@ M:	Shawn Guo <shawnguo@kernel.org>
 M:	Sascha Hauer <s.hauer@pengutronix.de>
 R:	Pengutronix Kernel Team <kernel@pengutronix.de>
 R:	Fabio Estevam <festevam@gmail.com>
-R:	NXP Linux Team <linux-imx@nxp.com>
+L:	imx@lists.linux.dev
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git

@@ -8505,7 +8506,7 @@ FREESCALE IMX / MXC FEC DRIVER
 M:	Wei Fang <wei.fang@nxp.com>
 R:	Shenwei Wang <shenwei.wang@nxp.com>
 R:	Clark Wang <xiaoning.wang@nxp.com>
-R:	NXP Linux Team <linux-imx@nxp.com>
+L:	imx@lists.linux.dev
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/net/fsl,fec.yaml

@@ -8540,7 +8541,7 @@ F:	drivers/i2c/busses/i2c-imx.c
 FREESCALE IMX LPI2C DRIVER
 M:	Dong Aisheng <aisheng.dong@nxp.com>
 L:	linux-i2c@vger.kernel.org
-L:	linux-imx@nxp.com
+L:	imx@lists.linux.dev
 S:	Maintained
 F:	Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
 F:	drivers/i2c/busses/i2c-imx-lpi2c.c

@@ -15747,7 +15748,7 @@ F:	drivers/iio/gyro/fxas21002c_spi.c
 NXP i.MX 7D/6SX/6UL/93 AND VF610 ADC DRIVER
 M:	Haibo Chen <haibo.chen@nxp.com>
 L:	linux-iio@vger.kernel.org
-L:	linux-imx@nxp.com
+L:	imx@lists.linux.dev
 S:	Maintained
 F:	Documentation/devicetree/bindings/iio/adc/fsl,imx7d-adc.yaml
 F:	Documentation/devicetree/bindings/iio/adc/fsl,vf610-adc.yaml

@@ -15784,7 +15785,7 @@ F:	drivers/gpu/drm/imx/dcss/
 NXP i.MX 8QXP ADC DRIVER
 M:	Cai Huoqing <cai.huoqing@linux.dev>
 M:	Haibo Chen <haibo.chen@nxp.com>
-L:	linux-imx@nxp.com
+L:	imx@lists.linux.dev
 L:	linux-iio@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml

@@ -15792,7 +15793,7 @@ F:	drivers/iio/adc/imx8qxp-adc.c

 NXP i.MX 8QXP/8QM JPEG V4L2 DRIVER
 M:	Mirela Rabulea <mirela.rabulea@nxp.com>
-R:	NXP Linux Team <linux-imx@nxp.com>
+L:	imx@lists.linux.dev
 L:	linux-media@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/media/nxp,imx8-jpeg.yaml

@@ -15802,7 +15803,7 @@ NXP i.MX CLOCK DRIVERS
 M:	Abel Vesa <abelvesa@kernel.org>
 R:	Peng Fan <peng.fan@nxp.com>
 L:	linux-clk@vger.kernel.org
-L:	linux-imx@nxp.com
+L:	imx@lists.linux.dev
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/abelvesa/linux.git clk/imx
 F:	Documentation/devicetree/bindings/clock/imx*

@@ -16763,6 +16764,7 @@ F:	drivers/pci/controller/dwc/*layerscape*
 PCI DRIVER FOR FU740
 M:	Paul Walmsley <paul.walmsley@sifive.com>
 M:	Greentime Hu <greentime.hu@sifive.com>
+M:	Samuel Holland <samuel.holland@sifive.com>
 L:	linux-pci@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml

@@ -19680,7 +19682,7 @@ F:	drivers/mmc/host/sdhci-of-at91.c

 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) NXP i.MX DRIVER
 M:	Haibo Chen <haibo.chen@nxp.com>
-L:	linux-imx@nxp.com
+L:	imx@lists.linux.dev
 L:	linux-mmc@vger.kernel.org
 S:	Maintained
 F:	drivers/mmc/host/sdhci-esdhc-imx.c

@@ -20015,35 +20017,14 @@ S:	Maintained
 F:	drivers/watchdog/simatic-ipc-wdt.c

 SIFIVE DRIVERS
-M:	Palmer Dabbelt <palmer@dabbelt.com>
 M:	Paul Walmsley <paul.walmsley@sifive.com>
+M:	Samuel Holland <samuel.holland@sifive.com>
 L:	linux-riscv@lists.infradead.org
 S:	Supported
-N:	sifive
-K:	[^@]sifive
-
-SIFIVE CACHE DRIVER
-M:	Conor Dooley <conor@kernel.org>
-L:	linux-riscv@lists.infradead.org
-S:	Maintained
-F:	Documentation/devicetree/bindings/cache/sifive,ccache0.yaml
-F:	drivers/cache/sifive_ccache.c
-
-SIFIVE FU540 SYSTEM-ON-CHIP
-M:	Paul Walmsley <paul.walmsley@sifive.com>
-M:	Palmer Dabbelt <palmer@dabbelt.com>
-L:	linux-riscv@lists.infradead.org
-S:	Supported
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pjw/sifive.git
-N:	fu540
-K:	fu540
-
-SIFIVE PDMA DRIVER
-M:	Green Wan <green.wan@sifive.com>
-S:	Maintained
-F:	Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
 F:	drivers/dma/sf-pdma/
+N:	sifive
+K:	fu[57]40
+K:	[^@]sifive

 SILEAD TOUCHSCREEN DRIVER
 M:	Hans de Goede <hdegoede@redhat.com>

@@ -20253,8 +20234,8 @@ F:	Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
 F:	drivers/net/ethernet/socionext/sni_ave.c

 SOCIONEXT (SNI) NETSEC NETWORK DRIVER
-M:	Jassi Brar <jaswinder.singh@linaro.org>
 M:	Ilias Apalodimas <ilias.apalodimas@linaro.org>
+M:	Masahisa Kojima <kojima.masahisa@socionext.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/net/socionext,synquacer-netsec.yaml
Makefile (2 lines changed)

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Hurr durr I'ma ninja sloth

 # *DOCUMENTATION*
@@ -834,16 +834,6 @@ lcdif: lcdif@30730000 {
				 <&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>;
			clock-names = "pix", "axi";
			status = "disabled";
-
-			port {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				lcdif_out_mipi_dsi: endpoint@0 {
-					reg = <0>;
-					remote-endpoint = <&mipi_dsi_in_lcdif>;
-				};
-			};
		};

		mipi_csi: mipi-csi@30750000 {

@@ -895,22 +885,6 @@ mipi_dsi: dsi@30760000 {
			samsung,esc-clock-frequency = <20000000>;
			samsung,pll-clock-frequency = <24000000>;
			status = "disabled";
-
-			ports {
-				#address-cells = <1>;
-				#size-cells = <0>;
-
-				port@0 {
-					reg = <0>;
-					#address-cells = <1>;
-					#size-cells = <0>;
-
-					mipi_dsi_in_lcdif: endpoint@0 {
-						reg = <0>;
-						remote-endpoint = <&lcdif_out_mipi_dsi>;
-					};
-				};
-			};
		};
	};

@@ -297,6 +297,7 @@ CONFIG_FB_MODE_HELPERS=y
 CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_L4F00242T03=y
 CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_PWM=y
 CONFIG_BACKLIGHT_GPIO=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -42,5 +42,6 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-bigtreetech-cb1-manta.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-bigtreetech-pi.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-orangepi-zero2.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-x96-mate.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-orangepi-zero2w.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-orangepi-zero3.dtb
 dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-transpeed-8k618-t.dtb
@@ -255,7 +255,7 @@ tc_bridge: bridge@f {
			 <&clk IMX8MP_AUDIO_PLL2_OUT>;
		assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
		assigned-clock-rates = <13000000>, <13000000>, <156000000>;
-		reset-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+		reset-gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
		status = "disabled";

		ports {

@@ -1820,7 +1820,7 @@ lvds_bridge: bridge@5c {
		compatible = "fsl,imx8mp-ldb";
		reg = <0x5c 0x4>, <0x128 0x4>;
		reg-names = "ldb", "lvds";
-		clocks = <&clk IMX8MP_CLK_MEDIA_LDB>;
+		clocks = <&clk IMX8MP_CLK_MEDIA_LDB_ROOT>;
		clock-names = "ldb";
		assigned-clocks = <&clk IMX8MP_CLK_MEDIA_LDB>;
		assigned-clock-parents = <&clk IMX8MP_VIDEO_PLL1_OUT>;
@@ -175,7 +175,7 @@ ethernet@6800000 {
		status = "okay";

		phy-handle = <&mgbe0_phy>;
-		phy-mode = "usxgmii";
+		phy-mode = "10gbase-r";

		mdio {
			#address-cells = <1>;

@@ -1459,7 +1459,7 @@ ethernet@6800000 {
				<&mc TEGRA234_MEMORY_CLIENT_MGBEAWR &emc>;
		interconnect-names = "dma-mem", "write";
		iommus = <&smmu_niso0 TEGRA234_SID_MGBE>;
-		power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEA>;
+		power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEB>;
		status = "disabled";
	};

@@ -1493,7 +1493,7 @@ ethernet@6900000 {
				<&mc TEGRA234_MEMORY_CLIENT_MGBEBWR &emc>;
		interconnect-names = "dma-mem", "write";
		iommus = <&smmu_niso0 TEGRA234_SID_MGBE_VF1>;
-		power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEB>;
+		power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEC>;
		status = "disabled";
	};

@@ -1527,7 +1527,7 @@ ethernet@6a00000 {
				<&mc TEGRA234_MEMORY_CLIENT_MGBECWR &emc>;
		interconnect-names = "dma-mem", "write";
		iommus = <&smmu_niso0 TEGRA234_SID_MGBE_VF2>;
-		power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEC>;
+		power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBED>;
		status = "disabled";
	};

@@ -457,25 +457,6 @@ modem_etm_out_funnel_in2: endpoint {
		};
	};

-	mpm: interrupt-controller {
-		compatible = "qcom,mpm";
-		qcom,rpm-msg-ram = <&apss_mpm>;
-		interrupts = <GIC_SPI 171 IRQ_TYPE_EDGE_RISING>;
-		mboxes = <&apcs_glb 1>;
-		interrupt-controller;
-		#interrupt-cells = <2>;
-		#power-domain-cells = <0>;
-		interrupt-parent = <&intc>;
-		qcom,mpm-pin-count = <96>;
-		qcom,mpm-pin-map = <2 184>,  /* TSENS1 upper_lower_int */
-				   <52 243>, /* DWC3_PRI ss_phy_irq */
-				   <79 347>, /* DWC3_PRI hs_phy_irq */
-				   <80 352>, /* DWC3_SEC hs_phy_irq */
-				   <81 347>, /* QUSB2_PHY_PRI DP+DM */
-				   <82 352>, /* QUSB2_PHY_SEC DP+DM */
-				   <87 326>; /* SPMI */
-	};
-
	psci {
		compatible = "arm,psci-1.0";
		method = "smc";

@@ -765,15 +746,8 @@ pciephy_2: phy@3000 {
		};

		rpm_msg_ram: sram@68000 {
-			compatible = "qcom,rpm-msg-ram", "mmio-sram";
+			compatible = "qcom,rpm-msg-ram";
			reg = <0x00068000 0x6000>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges = <0 0x00068000 0x7000>;
-
-			apss_mpm: sram@1b8 {
-				reg = <0x1b8 0x48>;
-			};
		};

		qfprom@74000 {

@@ -856,8 +830,8 @@ tsens1: thermal-sensor@4ad000 {
			reg = <0x004ad000 0x1000>, /* TM */
			      <0x004ac000 0x1000>; /* SROT */
			#qcom,sensors = <8>;
-			interrupts-extended = <&mpm 2 IRQ_TYPE_LEVEL_HIGH>,
-					      <&intc GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-names = "uplow", "critical";
			#thermal-sensor-cells = <1>;
		};

@@ -1363,7 +1337,6 @@ tlmm: pinctrl@1010000 {
			interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
			gpio-controller;
			gpio-ranges = <&tlmm 0 0 150>;
-			wakeup-parent = <&mpm>;
			#gpio-cells = <2>;
			interrupt-controller;
			#interrupt-cells = <2>;

@@ -1891,7 +1864,7 @@ spmi_bus: spmi@400f000 {
			      <0x0400a000 0x002100>;
			reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
			interrupt-names = "periph_irq";
-			interrupts-extended = <&mpm 87 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
			qcom,ee = <0>;
			qcom,channel = <0>;
			#address-cells = <2>;

@@ -3052,8 +3025,8 @@ usb3: usb@6af8800 {
			#size-cells = <1>;
			ranges;

-			interrupts-extended = <&mpm 79 IRQ_TYPE_LEVEL_HIGH>,
-					      <&mpm 52 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-names = "hs_phy_irq", "ss_phy_irq";

			clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
@@ -563,6 +563,8 @@ &pcie3a_phy {
 };

 &pcie4 {
+	max-link-speed = <2>;
+
	perst-gpios = <&tlmm 141 GPIO_ACTIVE_LOW>;
	wake-gpios = <&tlmm 139 GPIO_ACTIVE_LOW>;

@@ -722,6 +722,8 @@ &pcie3a_phy {
 };

 &pcie4 {
+	max-link-speed = <2>;
+
	perst-gpios = <&tlmm 141 GPIO_ACTIVE_LOW>;
	wake-gpios = <&tlmm 139 GPIO_ACTIVE_LOW>;

@@ -1304,6 +1304,9 @@ &clk_virt SLAVE_QUP_CORE_0 RPM_ALWAYS_TAG>,
				 &config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>,
				<&system_noc MASTER_QUP_0 RPM_ALWAYS_TAG
				 &bimc SLAVE_EBI_CH0 RPM_ALWAYS_TAG>;
+			interconnect-names = "qup-core",
+					     "qup-config",
+					     "qup-memory";
			#address-cells = <1>;
			#size-cells = <0>;
			status = "disabled";
@@ -622,7 +622,7 @@ right_spkr: speaker@0,1 {

 &tlmm {
	/* Reserved I/Os for NFC */
-	gpio-reserved-ranges = <32 8>;
+	gpio-reserved-ranges = <32 8>, <74 1>;

	disp0_reset_n_active: disp0-reset-n-active-state {
		pins = "gpio133";

@@ -659,7 +659,7 @@ touchscreen@0 {

 &tlmm {
	/* Reserved I/Os for NFC */
-	gpio-reserved-ranges = <32 8>;
+	gpio-reserved-ranges = <32 8>, <74 1>;

	bt_default: bt-default-state {
		bt-en-pins {
@@ -69,7 +69,7 @@ enum rtas_function_index {
	RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE,
	RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE2,
	RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW,
-	RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS,
+	RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW,
	RTAS_FNIDX__IBM_SCAN_LOG_DUMP,
	RTAS_FNIDX__IBM_SET_DYNAMIC_INDICATOR,
	RTAS_FNIDX__IBM_SET_EEH_OPTION,

@@ -164,7 +164,7 @@ typedef struct {
 #define RTAS_FN_IBM_READ_SLOT_RESET_STATE	rtas_fn_handle(RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE)
 #define RTAS_FN_IBM_READ_SLOT_RESET_STATE2	rtas_fn_handle(RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE2)
 #define RTAS_FN_IBM_REMOVE_PE_DMA_WINDOW	rtas_fn_handle(RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW)
-#define RTAS_FN_IBM_RESET_PE_DMA_WINDOWS	rtas_fn_handle(RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS)
+#define RTAS_FN_IBM_RESET_PE_DMA_WINDOW		rtas_fn_handle(RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW)
 #define RTAS_FN_IBM_SCAN_LOG_DUMP		rtas_fn_handle(RTAS_FNIDX__IBM_SCAN_LOG_DUMP)
 #define RTAS_FN_IBM_SET_DYNAMIC_INDICATOR	rtas_fn_handle(RTAS_FNIDX__IBM_SET_DYNAMIC_INDICATOR)
 #define RTAS_FN_IBM_SET_EEH_OPTION		rtas_fn_handle(RTAS_FNIDX__IBM_SET_EEH_OPTION)
@@ -375,8 +375,13 @@ static struct rtas_function rtas_function_table[] __ro_after_init = {
	[RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW] = {
		.name = "ibm,remove-pe-dma-window",
	},
-	[RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS] = {
-		.name = "ibm,reset-pe-dma-windows",
+	[RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW] = {
+		/*
+		 * Note: PAPR+ v2.13 7.3.31.4.1 spells this as
+		 * "ibm,reset-pe-dma-windows" (plural), but RTAS
+		 * implementations use the singular form in practice.
+		 */
+		.name = "ibm,reset-pe-dma-window",
	},
	[RTAS_FNIDX__IBM_SCAN_LOG_DUMP] = {
		.name = "ibm,scan-log-dump",
@@ -574,29 +574,6 @@ static void iommu_table_setparms(struct pci_controller *phb,

 struct iommu_table_ops iommu_table_lpar_multi_ops;

-/*
- * iommu_table_setparms_lpar
- *
- * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
- */
-static void iommu_table_setparms_lpar(struct pci_controller *phb,
-				      struct device_node *dn,
-				      struct iommu_table *tbl,
-				      struct iommu_table_group *table_group,
-				      const __be32 *dma_window)
-{
-	unsigned long offset, size, liobn;
-
-	of_parse_dma_window(dn, dma_window, &liobn, &offset, &size);
-
-	iommu_table_setparms_common(tbl, phb->bus->number, liobn, offset, size, IOMMU_PAGE_SHIFT_4K, NULL,
-				    &iommu_table_lpar_multi_ops);
-
-	table_group->tce32_start = offset;
-	table_group->tce32_size = size;
-}
-
 struct iommu_table_ops iommu_table_pseries_ops = {
	.set = tce_build_pSeries,
	.clear = tce_free_pSeries,

@@ -724,26 +701,71 @@ struct iommu_table_ops iommu_table_lpar_multi_ops = {
  * dynamic 64bit DMA window, walking up the device tree.
  */
 static struct device_node *pci_dma_find(struct device_node *dn,
-					const __be32 **dma_window)
+					struct dynamic_dma_window_prop *prop)
 {
-	const __be32 *dw = NULL;
+	const __be32 *default_prop = NULL;
+	const __be32 *ddw_prop = NULL;
+	struct device_node *rdn = NULL;
+	bool default_win = false, ddw_win = false;

	for ( ; dn && PCI_DN(dn); dn = dn->parent) {
-		dw = of_get_property(dn, "ibm,dma-window", NULL);
-		if (dw) {
-			if (dma_window)
-				*dma_window = dw;
-			return dn;
+		default_prop = of_get_property(dn, "ibm,dma-window", NULL);
+		if (default_prop) {
+			rdn = dn;
+			default_win = true;
		}
-		dw = of_get_property(dn, DIRECT64_PROPNAME, NULL);
-		if (dw)
-			return dn;
-		dw = of_get_property(dn, DMA64_PROPNAME, NULL);
-		if (dw)
-			return dn;
+		ddw_prop = of_get_property(dn, DIRECT64_PROPNAME, NULL);
+		if (ddw_prop) {
+			rdn = dn;
+			ddw_win = true;
+			break;
+		}
+		ddw_prop = of_get_property(dn, DMA64_PROPNAME, NULL);
+		if (ddw_prop) {
+			rdn = dn;
+			ddw_win = true;
+			break;
+		}
+
+		/* At least found default window, which is the case for normal boot */
+		if (default_win)
+			break;
	}

-	return NULL;
+	/* For PCI devices there will always be a DMA window, either on the device
+	 * or parent bus
+	 */
+	WARN_ON(!(default_win | ddw_win));
+
+	/* caller doesn't want to get DMA window property */
+	if (!prop)
+		return rdn;
+
+	/* parse DMA window property. During normal system boot, only default
+	 * DMA window is passed in OF. But, for kdump, a dedicated adapter might
+	 * have both default and DDW in FDT. In this scenario, DDW takes precedence
+	 * over default window.
+	 */
+	if (ddw_win) {
+		struct dynamic_dma_window_prop *p;
+
+		p = (struct dynamic_dma_window_prop *)ddw_prop;
+		prop->liobn = p->liobn;
+		prop->dma_base = p->dma_base;
+		prop->tce_shift = p->tce_shift;
+		prop->window_shift = p->window_shift;
+	} else if (default_win) {
+		unsigned long offset, size, liobn;
+
+		of_parse_dma_window(rdn, default_prop, &liobn, &offset, &size);
+
+		prop->liobn = cpu_to_be32((u32)liobn);
+		prop->dma_base = cpu_to_be64(offset);
+		prop->tce_shift = cpu_to_be32(IOMMU_PAGE_SHIFT_4K);
+		prop->window_shift = cpu_to_be32(order_base_2(size));
+	}
+
+	return rdn;
 }

 static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)

@@ -751,17 +773,20 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	struct pci_dn *ppci;
-	const __be32 *dma_window = NULL;
+	struct dynamic_dma_window_prop prop;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
		 dn);

-	pdn = pci_dma_find(dn, &dma_window);
+	pdn = pci_dma_find(dn, &prop);

-	if (dma_window == NULL)
-		pr_debug(" no ibm,dma-window property !\n");
+	/* In PPC architecture, there will always be DMA window on bus or one of the
+	 * parent bus. During reboot, there will be ibm,dma-window property to
+	 * define DMA window. For kdump, there will at least be default window or DDW
+	 * or both.
+	 */

	ppci = PCI_DN(pdn);

@@ -771,13 +796,24 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
	if (!ppci->table_group) {
		ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
		tbl = ppci->table_group->tables[0];
-		if (dma_window) {
-			iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
-						  ppci->table_group, dma_window);

-			if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
-				panic("Failed to initialize iommu table");
-		}
+		iommu_table_setparms_common(tbl, ppci->phb->bus->number,
+				be32_to_cpu(prop.liobn),
+				be64_to_cpu(prop.dma_base),
+				1ULL << be32_to_cpu(prop.window_shift),
+				be32_to_cpu(prop.tce_shift), NULL,
+				&iommu_table_lpar_multi_ops);
+
+		/* Only for normal boot with default window. Doesn't matter even
+		 * if we set these with DDW which is 64bit during kdump, since
+		 * these will not be used during kdump.
+		 */
+		ppci->table_group->tce32_start = be64_to_cpu(prop.dma_base);
+		ppci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift);
+
+		if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
+			panic("Failed to initialize iommu table");
+
		iommu_register_group(ppci->table_group,
				pci_domain_nr(bus), 0);
		pr_debug(" created table: %p\n", ppci->table_group);

@@ -968,6 +1004,12 @@ static void find_existing_ddw_windows_named(const char *name)
			continue;
		}
+
+		/* If at the time of system initialization, there are DDWs in OF,
+		 * it means this is during kexec. DDW could be direct or dynamic.
+		 * We will just mark DDWs as "dynamic" since this is kdump path,
+		 * no need to worry about perforance. ddw_list_new_entry() will
+		 * set window->direct = false.
+		 */
		window = ddw_list_new_entry(pdn, dma64);
		if (!window) {
			of_node_put(pdn);

@@ -1524,8 +1566,8 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
 {
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
-	const __be32 *dma_window = NULL;
	struct pci_dn *pci;
+	struct dynamic_dma_window_prop prop;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

@@ -1538,7 +1580,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
	dn = pci_device_to_OF_node(dev);
	pr_debug(" node is %pOF\n", dn);

-	pdn = pci_dma_find(dn, &dma_window);
+	pdn = pci_dma_find(dn, &prop);
	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%pOF\n",

@@ -1551,8 +1593,20 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
	if (!pci->table_group) {
		pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
		tbl = pci->table_group->tables[0];
-		iommu_table_setparms_lpar(pci->phb, pdn, tbl,
-				pci->table_group, dma_window);
+		iommu_table_setparms_common(tbl, pci->phb->bus->number,
+				be32_to_cpu(prop.liobn),
+				be64_to_cpu(prop.dma_base),
+				1ULL << be32_to_cpu(prop.window_shift),
+				be32_to_cpu(prop.tce_shift), NULL,
+				&iommu_table_lpar_multi_ops);
+
+		/* Only for normal boot with default window. Doesn't matter even
+		 * if we set these with DDW which is 64bit during kdump, since
+		 * these will not be used during kdump.
+		 */
+		pci->table_group->tce32_start = be64_to_cpu(prop.dma_base);
+		pci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift);
+
		iommu_init_table(tbl, pci->phb->node, 0, 0);
		iommu_register_group(pci->table_group,
@ -315,7 +315,6 @@ config AS_HAS_OPTION_ARCH
|
|||||||
# https://reviews.llvm.org/D123515
|
# https://reviews.llvm.org/D123515
|
||||||
def_bool y
|
def_bool y
|
||||||
depends on $(as-instr, .option arch$(comma) +m)
|
depends on $(as-instr, .option arch$(comma) +m)
|
||||||
depends on !$(as-instr, .option arch$(comma) -i)
|
|
||||||
|
|
||||||
source "arch/riscv/Kconfig.socs"
|
source "arch/riscv/Kconfig.socs"
|
||||||
source "arch/riscv/Kconfig.errata"
|
source "arch/riscv/Kconfig.errata"
|
||||||
|
@@ -424,6 +424,7 @@
 # define CSR_STATUS	CSR_MSTATUS
 # define CSR_IE		CSR_MIE
 # define CSR_TVEC	CSR_MTVEC
+# define CSR_ENVCFG	CSR_MENVCFG
 # define CSR_SCRATCH	CSR_MSCRATCH
 # define CSR_EPC	CSR_MEPC
 # define CSR_CAUSE	CSR_MCAUSE
@@ -448,6 +449,7 @@
 # define CSR_STATUS	CSR_SSTATUS
 # define CSR_IE		CSR_SIE
 # define CSR_TVEC	CSR_STVEC
+# define CSR_ENVCFG	CSR_SENVCFG
 # define CSR_SCRATCH	CSR_SSCRATCH
 # define CSR_EPC	CSR_SEPC
 # define CSR_CAUSE	CSR_SCAUSE
@@ -25,6 +25,11 @@

 #define ARCH_SUPPORTS_FTRACE_OPS 1
 #ifndef __ASSEMBLY__
+
+extern void *return_address(unsigned int level);
+
+#define ftrace_return_address(n) return_address(n)
+
 void MCOUNT_NAME(void);
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
@@ -11,8 +11,10 @@ static inline void arch_clear_hugepage_flags(struct page *page)
 }
 #define arch_clear_hugepage_flags arch_clear_hugepage_flags

+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 bool arch_hugetlb_migration_supported(struct hstate *h);
 #define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
+#endif

 #ifdef CONFIG_RISCV_ISA_SVNAPOT
 #define __HAVE_ARCH_HUGE_PTE_CLEAR
@@ -81,6 +81,8 @@
 #define RISCV_ISA_EXT_ZTSO 72
 #define RISCV_ISA_EXT_ZACAS 73

+#define RISCV_ISA_EXT_XLINUXENVCFG 127
+
 #define RISCV_ISA_EXT_MAX 128
 #define RISCV_ISA_EXT_INVALID U32_MAX

@@ -95,7 +95,13 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
		__pud_free(mm, pud);
 }

-#define __pud_free_tlb(tlb, pud, addr) pud_free((tlb)->mm, pud)
+#define __pud_free_tlb(tlb, pud, addr) \
+do { \
+	if (pgtable_l4_enabled) { \
+		pagetable_pud_dtor(virt_to_ptdesc(pud)); \
+		tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pud)); \
+	} \
+} while (0)

 #define p4d_alloc_one p4d_alloc_one
 static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -124,7 +130,11 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
		__p4d_free(mm, p4d);
 }

-#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d)
+#define __p4d_free_tlb(tlb, p4d, addr) \
+do { \
+	if (pgtable_l5_enabled) \
+		tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(p4d)); \
+} while (0)
 #endif /* __PAGETABLE_PMD_FOLDED */

 static inline void sync_kernel_mappings(pgd_t *pgd)
@@ -149,7 +159,11 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)

 #ifndef __PAGETABLE_PMD_FOLDED

-#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
+#define __pmd_free_tlb(tlb, pmd, addr) \
+do { \
+	pagetable_pmd_dtor(virt_to_ptdesc(pmd)); \
+	tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pmd)); \
+} while (0)

 #endif /* __PAGETABLE_PMD_FOLDED */

@@ -136,7 +136,7 @@ enum napot_cont_order {
  * 10010 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Shareable, Non-trustable
  */
 #define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60))
-#define _PAGE_NOCACHE_THEAD ((1UL < 61) | (1UL << 60))
+#define _PAGE_NOCACHE_THEAD ((1UL << 61) | (1UL << 60))
 #define _PAGE_IO_THEAD ((1UL << 63) | (1UL << 60))
 #define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))

@@ -84,7 +84,7 @@
  * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
  * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
  */
-#define vmemmap ((struct page *)VMEMMAP_START)
+#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))

 #define PCI_IO_SIZE SZ_16M
 #define PCI_IO_END VMEMMAP_START
@@ -439,6 +439,10 @@ static inline pte_t pte_mkhuge(pte_t pte)
	return pte;
 }

+#define pte_leaf_size(pte) (pte_napot(pte) ? \
+				napot_cont_size(napot_cont_order(pte)) :\
+				PAGE_SIZE)
+
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * See the comment in include/asm-generic/pgtable.h
@@ -14,6 +14,7 @@ struct suspend_context {
	struct pt_regs regs;
	/* Saved and restored by high-level functions */
	unsigned long scratch;
+	unsigned long envcfg;
	unsigned long tvec;
	unsigned long ie;
 #ifdef CONFIG_MMU
@@ -19,65 +19,6 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot)
	return true;
 }

-#ifdef CONFIG_RISCV_ISA_SVNAPOT
-#include <linux/pgtable.h>
-
-#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
-static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
-							  u64 pfn, unsigned int max_page_shift)
-{
-	unsigned long map_size = PAGE_SIZE;
-	unsigned long size, order;
-
-	if (!has_svnapot())
-		return map_size;
-
-	for_each_napot_order_rev(order) {
-		if (napot_cont_shift(order) > max_page_shift)
-			continue;
-
-		size = napot_cont_size(order);
-		if (end - addr < size)
-			continue;
-
-		if (!IS_ALIGNED(addr, size))
-			continue;
-
-		if (!IS_ALIGNED(PFN_PHYS(pfn), size))
-			continue;
-
-		map_size = size;
-		break;
-	}
-
-	return map_size;
-}
-
-#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
-static inline int arch_vmap_pte_supported_shift(unsigned long size)
-{
-	int shift = PAGE_SHIFT;
-	unsigned long order;
-
-	if (!has_svnapot())
-		return shift;
-
-	WARN_ON_ONCE(size >= PMD_SIZE);
-
-	for_each_napot_order_rev(order) {
-		if (napot_cont_size(order) > size)
-			continue;
-
-		if (!IS_ALIGNED(size, napot_cont_size(order)))
-			continue;
-
-		shift = napot_cont_shift(order);
-		break;
-	}
-
-	return shift;
-}
-
-#endif /* CONFIG_RISCV_ISA_SVNAPOT */
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+#endif
+
 #endif /* _ASM_RISCV_VMALLOC_H */
@@ -7,6 +7,7 @@ ifdef CONFIG_FTRACE
 CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
 endif
 CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
 CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
@@ -46,6 +47,7 @@ obj-y += irq.o
 obj-y += process.o
 obj-y += ptrace.o
 obj-y += reset.o
+obj-y += return_address.o
 obj-y += setup.o
 obj-y += signal.o
 obj-y += syscall_table.o
@@ -24,6 +24,7 @@
 #include <asm/hwprobe.h>
 #include <asm/patch.h>
 #include <asm/processor.h>
+#include <asm/sbi.h>
 #include <asm/vector.h>

 #include "copy-unaligned.h"
@@ -201,6 +202,16 @@ static const unsigned int riscv_zvbb_exts[] = {
	RISCV_ISA_EXT_ZVKB
 };

+/*
+ * While the [ms]envcfg CSRs were not defined until version 1.12 of the RISC-V
+ * privileged ISA, the existence of the CSRs is implied by any extension which
+ * specifies [ms]envcfg bit(s). Hence, we define a custom ISA extension for the
+ * existence of the CSR, and treat it as a subset of those other extensions.
+ */
+static const unsigned int riscv_xlinuxenvcfg_exts[] = {
+	RISCV_ISA_EXT_XLINUXENVCFG
+};
+
 /*
  * The canonical order of ISA extension names in the ISA string is defined in
  * chapter 27 of the unprivileged specification.
@@ -250,8 +261,8 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
	__RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
	__RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
	__RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
-	__RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
-	__RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
+	__RISCV_ISA_EXT_SUPERSET(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts),
+	__RISCV_ISA_EXT_SUPERSET(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts),
	__RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
	__RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
	__RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
@@ -538,6 +549,20 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
			set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
		}

+		/*
+		 * "V" in ISA strings is ambiguous in practice: it should mean
+		 * just the standard V-1.0 but vendors aren't well behaved.
+		 * Many vendors with T-Head CPU cores which implement the 0.7.1
+		 * version of the vector specification put "v" into their DTs.
+		 * CPU cores with the ratified spec will contain non-zero
+		 * marchid.
+		 */
+		if (acpi_disabled && riscv_cached_mvendorid(cpu) == THEAD_VENDOR_ID &&
+		    riscv_cached_marchid(cpu) == 0x0) {
+			this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v];
+			clear_bit(RISCV_ISA_EXT_v, isainfo->isa);
+		}
+
		/*
		 * All "okay" hart should have same isa. Set HWCAP based on
		 * common capabilities of every "okay" hart, in case they don't
@@ -950,7 +975,7 @@ arch_initcall(check_unaligned_access_all_cpus);
 void riscv_user_isa_enable(void)
 {
	if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ))
-		csr_set(CSR_SENVCFG, ENVCFG_CBZE);
+		csr_set(CSR_ENVCFG, ENVCFG_CBZE);
 }

 #ifdef CONFIG_RISCV_ALTERNATIVE
arch/riscv/kernel/return_address.c (new file, 48 lines)
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This code come from arch/arm64/kernel/return_address.c
+ *
+ * Copyright (C) 2023 SiFive.
+ */
+
+#include <linux/export.h>
+#include <linux/kprobes.h>
+#include <linux/stacktrace.h>
+
+struct return_address_data {
+	unsigned int level;
+	void *addr;
+};
+
+static bool save_return_addr(void *d, unsigned long pc)
+{
+	struct return_address_data *data = d;
+
+	if (!data->level) {
+		data->addr = (void *)pc;
+		return false;
+	}
+
+	--data->level;
+
+	return true;
+}
+NOKPROBE_SYMBOL(save_return_addr);
+
+noinline void *return_address(unsigned int level)
+{
+	struct return_address_data data;
+
+	data.level = level + 3;
+	data.addr = NULL;
+
+	arch_stack_walk(save_return_addr, &data, current, NULL);
+
+	if (!data.level)
+		return data.addr;
+	else
+		return NULL;
+
+}
+EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
@@ -15,6 +15,8 @@
 void suspend_save_csrs(struct suspend_context *context)
 {
	context->scratch = csr_read(CSR_SCRATCH);
+	if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
+		context->envcfg = csr_read(CSR_ENVCFG);
	context->tvec = csr_read(CSR_TVEC);
	context->ie = csr_read(CSR_IE);

@@ -36,6 +38,8 @@ void suspend_save_csrs(struct suspend_context *context)
 void suspend_restore_csrs(struct suspend_context *context)
 {
	csr_write(CSR_SCRATCH, context->scratch);
+	if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
+		csr_write(CSR_ENVCFG, context->envcfg);
	csr_write(CSR_TVEC, context->tvec);
	csr_write(CSR_IE, context->ie);

@@ -426,10 +426,12 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
	return __hugetlb_valid_size(size);
 }

+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 bool arch_hugetlb_migration_supported(struct hstate *h)
 {
	return __hugetlb_valid_size(huge_page_size(h));
 }
+#endif

 #ifdef CONFIG_CONTIG_ALLOC
 static __init int gigantic_pages_init(void)
@@ -16,6 +16,11 @@
 extern struct boot_params boot_params;
 static struct real_mode_header hv_vtl_real_mode_header;

+static bool __init hv_vtl_msi_ext_dest_id(void)
+{
+	return true;
+}
+
 void __init hv_vtl_init_platform(void)
 {
	pr_info("Linux runs in Hyper-V Virtual Trust Level\n");
@@ -38,6 +43,8 @@ void __init hv_vtl_init_platform(void)
	x86_platform.legacy.warm_reset = 0;
	x86_platform.legacy.reserve_bios_regions = 0;
	x86_platform.legacy.devices.pnpbios = 0;
+
+	x86_init.hyper.msi_ext_dest_id = hv_vtl_msi_ext_dest_id;
 }

 static inline u64 hv_vtl_system_desc_base(struct ldttss_desc *desc)
@@ -15,6 +15,7 @@
 #include <asm/io.h>
 #include <asm/coco.h>
 #include <asm/mem_encrypt.h>
+#include <asm/set_memory.h>
 #include <asm/mshyperv.h>
 #include <asm/hypervisor.h>
 #include <asm/mtrr.h>
@@ -502,6 +503,31 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
	return -EFAULT;
 }

+/*
+ * When transitioning memory between encrypted and decrypted, the caller
+ * of set_memory_encrypted() or set_memory_decrypted() is responsible for
+ * ensuring that the memory isn't in use and isn't referenced while the
+ * transition is in progress. The transition has multiple steps, and the
+ * memory is in an inconsistent state until all steps are complete. A
+ * reference while the state is inconsistent could result in an exception
+ * that can't be cleanly fixed up.
+ *
+ * But the Linux kernel load_unaligned_zeropad() mechanism could cause a
+ * stray reference that can't be prevented by the caller, so Linux has
+ * specific code to handle this case. But when the #VC and #VE exceptions
+ * routed to a paravisor, the specific code doesn't work. To avoid this
+ * problem, mark the pages as "not present" while the transition is in
+ * progress. If load_unaligned_zeropad() causes a stray reference, a normal
+ * page fault is generated instead of #VC or #VE, and the page-fault-based
+ * handlers for load_unaligned_zeropad() resolve the reference. When the
+ * transition is complete, hv_vtom_set_host_visibility() marks the pages
+ * as "present" again.
+ */
+static bool hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
+{
+	return !set_memory_np(kbuffer, pagecount);
+}
+
 /*
  * hv_vtom_set_host_visibility - Set specified memory visible to host.
  *
@@ -515,16 +541,28 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bo
	enum hv_mem_host_visibility visibility = enc ?
			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
	u64 *pfn_array;
+	phys_addr_t paddr;
+	void *vaddr;
	int ret = 0;
	bool result = true;
	int i, pfn;

	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
-	if (!pfn_array)
-		return false;
+	if (!pfn_array) {
+		result = false;
+		goto err_set_memory_p;
+	}

	for (i = 0, pfn = 0; i < pagecount; i++) {
-		pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
+		/*
+		 * Use slow_virt_to_phys() because the PRESENT bit has been
+		 * temporarily cleared in the PTEs. slow_virt_to_phys() works
+		 * without the PRESENT bit while virt_to_hvpfn() or similar
+		 * does not.
+		 */
+		vaddr = (void *)kbuffer + (i * HV_HYP_PAGE_SIZE);
+		paddr = slow_virt_to_phys(vaddr);
+		pfn_array[pfn] = paddr >> HV_HYP_PAGE_SHIFT;
		pfn++;

		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
@@ -538,14 +576,30 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bo
		}
	}

 err_free_pfn_array:
	kfree(pfn_array);
+
+err_set_memory_p:
+	/*
+	 * Set the PTE PRESENT bits again to revert what hv_vtom_clear_present()
+	 * did. Do this even if there is an error earlier in this function in
+	 * order to avoid leaving the memory range in a "broken" state. Setting
+	 * the PRESENT bits shouldn't fail, but return an error if it does.
+	 */
+	if (set_memory_p(kbuffer, pagecount))
+		result = false;
+
	return result;
 }

 static bool hv_vtom_tlb_flush_required(bool private)
 {
-	return true;
+	/*
+	 * Since hv_vtom_clear_present() marks the PTEs as "not present"
+	 * and flushes the TLB, they can't be in the TLB. That makes the
+	 * flush controlled by this function redundant, so return "false".
+	 */
+	return false;
 }

 static bool hv_vtom_cache_flush_required(void)
@@ -608,6 +662,7 @@ void __init hv_vtom_init(void)
	x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
	x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
+	x86_platform.guest.enc_status_change_prepare = hv_vtom_clear_present;
	x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;

	/* Set WB as the default cache mode. */
@@ -47,6 +47,7 @@ int set_memory_uc(unsigned long addr, int numpages);
 int set_memory_wc(unsigned long addr, int numpages);
 int set_memory_wb(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
+int set_memory_p(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
 int set_memory_encrypted(unsigned long addr, int numpages);
 int set_memory_decrypted(unsigned long addr, int numpages);
@@ -1589,6 +1589,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
		get_cpu_vendor(c);
		get_cpu_cap(c);
		setup_force_cpu_cap(X86_FEATURE_CPUID);
+		get_cpu_address_sizes(c);
		cpu_parse_early_param();

		if (this_cpu->c_early_init)
@@ -1601,10 +1602,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
			this_cpu->c_bsp_init(c);
	} else {
		setup_clear_cpu_cap(X86_FEATURE_CPUID);
+		get_cpu_address_sizes(c);
	}

-	get_cpu_address_sizes(c);
-
	setup_force_cpu_cap(X86_FEATURE_ALWAYS);

	cpu_set_bug_bits(c);
@@ -184,6 +184,90 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
	return false;
 }

+#define MSR_IA32_TME_ACTIVATE		0x982
+
+/* Helpers to access TME_ACTIVATE MSR */
+#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
+#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)
+
+#define TME_ACTIVATE_POLICY(x)		((x >> 4) & 0xf)	/* Bits 7:4 */
+#define TME_ACTIVATE_POLICY_AES_XTS_128	0
+
+#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */
+
+#define TME_ACTIVATE_CRYPTO_ALGS(x)	((x >> 48) & 0xffff)	/* Bits 63:48 */
+#define TME_ACTIVATE_CRYPTO_AES_XTS_128	1
+
+/* Values for mktme_status (SW only construct) */
+#define MKTME_ENABLED			0
+#define MKTME_DISABLED			1
+#define MKTME_UNINITIALIZED		2
+static int mktme_status = MKTME_UNINITIALIZED;
+
+static void detect_tme_early(struct cpuinfo_x86 *c)
+{
+	u64 tme_activate, tme_policy, tme_crypto_algs;
+	int keyid_bits = 0, nr_keyids = 0;
+	static u64 tme_activate_cpu0 = 0;
+
+	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
+
+	if (mktme_status != MKTME_UNINITIALIZED) {
+		if (tme_activate != tme_activate_cpu0) {
+			/* Broken BIOS? */
+			pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
+			pr_err_once("x86/tme: MKTME is not usable\n");
+			mktme_status = MKTME_DISABLED;
+
+			/* Proceed. We may need to exclude bits from x86_phys_bits. */
+		}
+	} else {
+		tme_activate_cpu0 = tme_activate;
+	}
+
+	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
+		pr_info_once("x86/tme: not enabled by BIOS\n");
+		mktme_status = MKTME_DISABLED;
+		return;
+	}
+
+	if (mktme_status != MKTME_UNINITIALIZED)
+		goto detect_keyid_bits;
+
+	pr_info("x86/tme: enabled by BIOS\n");
+
+	tme_policy = TME_ACTIVATE_POLICY(tme_activate);
+	if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
+		pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
+
+	tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
+	if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
+		pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
+		       tme_crypto_algs);
+		mktme_status = MKTME_DISABLED;
+	}
+detect_keyid_bits:
+	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
+	nr_keyids = (1UL << keyid_bits) - 1;
+	if (nr_keyids) {
+		pr_info_once("x86/mktme: enabled by BIOS\n");
+		pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
+	} else {
+		pr_info_once("x86/mktme: disabled by BIOS\n");
+	}
+
+	if (mktme_status == MKTME_UNINITIALIZED) {
+		/* MKTME is usable */
+		mktme_status = MKTME_ENABLED;
+	}
+
+	/*
+	 * KeyID bits effectively lower the number of physical address
+	 * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
+	 */
+	c->x86_phys_bits -= keyid_bits;
+}
+
 static void early_init_intel(struct cpuinfo_x86 *c)
 {
	u64 misc_enable;
@@ -322,6 +406,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
	 */
	if (detect_extended_topology_early(c) < 0)
		detect_ht_early(c);
+
+	/*
+	 * Adjust the number of physical bits early because it affects the
+	 * valid bits of the MTRR mask registers.
+	 */
+	if (cpu_has(c, X86_FEATURE_TME))
+		detect_tme_early(c);
 }

 static void bsp_init_intel(struct cpuinfo_x86 *c)
@@ -482,90 +573,6 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
 #endif
 }

-#define MSR_IA32_TME_ACTIVATE		0x982
-
-/* Helpers to access TME_ACTIVATE MSR */
-#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
-#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)
-
-#define TME_ACTIVATE_POLICY(x)		((x >> 4) & 0xf)	/* Bits 7:4 */
-#define TME_ACTIVATE_POLICY_AES_XTS_128	0
-
-#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */
-
-#define TME_ACTIVATE_CRYPTO_ALGS(x)	((x >> 48) & 0xffff)	/* Bits 63:48 */
-#define TME_ACTIVATE_CRYPTO_AES_XTS_128	1
-
-/* Values for mktme_status (SW only construct) */
-#define MKTME_ENABLED			0
-#define MKTME_DISABLED			1
-#define MKTME_UNINITIALIZED		2
-static int mktme_status = MKTME_UNINITIALIZED;
-
-static void detect_tme(struct cpuinfo_x86 *c)
-{
-	u64 tme_activate, tme_policy, tme_crypto_algs;
-	int keyid_bits = 0, nr_keyids = 0;
-	static u64 tme_activate_cpu0 = 0;
-
-	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
-
-	if (mktme_status != MKTME_UNINITIALIZED) {
-		if (tme_activate != tme_activate_cpu0) {
-			/* Broken BIOS? */
-			pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
-			pr_err_once("x86/tme: MKTME is not usable\n");
-			mktme_status = MKTME_DISABLED;
-
-			/* Proceed. We may need to exclude bits from x86_phys_bits. */
-		}
-	} else {
-		tme_activate_cpu0 = tme_activate;
-	}
-
-	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
-		pr_info_once("x86/tme: not enabled by BIOS\n");
-		mktme_status = MKTME_DISABLED;
-		return;
-	}
-
-	if (mktme_status != MKTME_UNINITIALIZED)
-		goto detect_keyid_bits;
-
-	pr_info("x86/tme: enabled by BIOS\n");
-
-	tme_policy = TME_ACTIVATE_POLICY(tme_activate);
-	if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
-		pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
-
-	tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
-	if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
-		pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
-		       tme_crypto_algs);
-		mktme_status = MKTME_DISABLED;
-	}
-detect_keyid_bits:
-	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
-	nr_keyids = (1UL << keyid_bits) - 1;
-	if (nr_keyids) {
-		pr_info_once("x86/mktme: enabled by BIOS\n");
-		pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
-	} else {
-		pr_info_once("x86/mktme: disabled by BIOS\n");
-	}
-
-	if (mktme_status == MKTME_UNINITIALIZED) {
-		/* MKTME is usable */
-		mktme_status = MKTME_ENABLED;
-	}
-
-	/*
-	 * KeyID bits effectively lower the number of physical address
-	 * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
-	 */
-	c->x86_phys_bits -= keyid_bits;
-}
-
 static void init_cpuid_fault(struct cpuinfo_x86 *c)
 {
	u64 msr;
@@ -702,9 +709,6 @@ static void init_intel(struct cpuinfo_x86 *c)

	init_ia32_feat_ctl(c);

-	if (cpu_has(c, X86_FEATURE_TME))
-		detect_tme(c);
-
	init_intel_misc_features(c);

	split_lock_init();
@@ -1017,10 +1017,12 @@ void __init e820__reserve_setup_data(void)
		e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);

		/*
-		 * SETUP_EFI and SETUP_IMA are supplied by kexec and do not need
-		 * to be reserved.
+		 * SETUP_EFI, SETUP_IMA and SETUP_RNG_SEED are supplied by
+		 * kexec and do not need to be reserved.
		 */
-		if (data->type != SETUP_EFI && data->type != SETUP_IMA)
+		if (data->type != SETUP_EFI &&
+		    data->type != SETUP_IMA &&
+		    data->type != SETUP_RNG_SEED)
			e820__range_update_kexec(pa_data,
						 sizeof(*data) + data->len,
						 E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
|
|||||||
* areas on 32-bit NUMA systems. The percpu areas can
|
* areas on 32-bit NUMA systems. The percpu areas can
|
||||||
* end up in this kind of memory, for instance.
|
* end up in this kind of memory, for instance.
|
||||||
*
|
*
|
||||||
* This could be optimized, but it is only intended to be
|
* Note that as long as the PTEs are well-formed with correct PFNs, this
|
||||||
* used at initialization time, and keeping it
|
* works without checking the PRESENT bit in the leaf PTE. This is unlike
|
||||||
* unoptimized should increase the testing coverage for
|
* the similar vmalloc_to_page() and derivatives. Callers may depend on
|
||||||
* the more obscure platforms.
|
* this behavior.
|
||||||
|
*
|
||||||
|
* This could be optimized, but it is only used in paths that are not perf
|
||||||
|
* sensitive, and keeping it unoptimized should increase the testing coverage
|
||||||
|
* for the more obscure platforms.
|
||||||
*/
|
*/
|
||||||
phys_addr_t slow_virt_to_phys(void *__virt_addr)
|
phys_addr_t slow_virt_to_phys(void *__virt_addr)
|
||||||
{
|
{
|
||||||
@ -2041,17 +2045,12 @@ int set_mce_nospec(unsigned long pfn)
|
|||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int set_memory_p(unsigned long *addr, int numpages)
|
|
||||||
{
|
|
||||||
return change_page_attr_set(addr, numpages, __pgprot(_PAGE_PRESENT), 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Restore full speculative operation to the pfn. */
|
/* Restore full speculative operation to the pfn. */
|
||||||
int clear_mce_nospec(unsigned long pfn)
|
int clear_mce_nospec(unsigned long pfn)
|
||||||
{
|
{
|
||||||
unsigned long addr = (unsigned long) pfn_to_kaddr(pfn);
|
unsigned long addr = (unsigned long) pfn_to_kaddr(pfn);
|
||||||
|
|
||||||
return set_memory_p(&addr, 1);
|
return set_memory_p(addr, 1);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(clear_mce_nospec);
|
EXPORT_SYMBOL_GPL(clear_mce_nospec);
|
||||||
#endif /* CONFIG_X86_64 */
|
#endif /* CONFIG_X86_64 */
|
||||||
@ -2104,6 +2103,11 @@ int set_memory_np_noalias(unsigned long addr, int numpages)
|
|||||||
CPA_NO_CHECK_ALIAS, NULL);
|
CPA_NO_CHECK_ALIAS, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int set_memory_p(unsigned long addr, int numpages)
|
||||||
|
{
|
||||||
|
return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
|
||||||
|
}
|
||||||
|
|
||||||
int set_memory_4k(unsigned long addr, int numpages)
|
int set_memory_4k(unsigned long addr, int numpages)
|
||||||
{
|
{
|
||||||
return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
|
return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
|
||||||
|
@@ -299,22 +299,6 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
	return err;
 }

-static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
-{
-	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
-	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
-	struct sun8i_ce_dev *ce = op->ce;
-	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
-	int flow, err;
-
-	flow = rctx->flow;
-	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
-	local_bh_disable();
-	crypto_finalize_skcipher_request(engine, breq, err);
-	local_bh_enable();
-}
-
 static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
				      void *async_req)
 {
@@ -360,6 +344,23 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
 }

+static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
+{
+	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
+	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun8i_ce_dev *ce = op->ce;
+	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
+	int flow, err;
+
+	flow = rctx->flow;
+	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
+	sun8i_ce_cipher_unprepare(engine, areq);
+	local_bh_disable();
+	crypto_finalize_skcipher_request(engine, breq, err);
+	local_bh_enable();
+}
+
 int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
 {
	int err = sun8i_ce_cipher_prepare(engine, areq);
@@ -368,7 +369,6 @@ int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
		return err;

	sun8i_ce_cipher_run(engine, areq);
-	sun8i_ce_cipher_unprepare(engine, areq);
	return 0;
 }

@@ -332,12 +332,12 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
 theend:
	pm_runtime_put_autosuspend(rkc->dev);

+	rk_hash_unprepare(engine, breq);
+
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();

-	rk_hash_unprepare(engine, breq);
-
	return 0;
 }

@@ -346,6 +346,20 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
		dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
 }

+static void dw_edma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+	/*
+	 * In case of remote eDMA engine setup, the DW PCIe RP/EP internal
+	 * configuration registers and application memory are normally accessed
+	 * over different buses. Ensure LL-data reaches the memory before the
+	 * doorbell register is toggled by issuing the dummy-read from the remote
+	 * LL memory in a hope that the MRd TLP will return only after the
+	 * last MWr TLP is completed
+	 */
+	if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+		readl(chunk->ll_region.vaddr.io);
+}
+
 static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 {
	struct dw_edma_chan *chan = chunk->chan;
@@ -412,6 +426,9 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
		SET_CH_32(dw, chan->dir, chan->id, llp.msb,
			  upper_32_bits(chunk->ll_region.paddr));
	}
+
+	dw_edma_v0_sync_ll_data(chunk);
+
	/* Doorbell */
	SET_RW_32(dw, chan->dir, doorbell,
		  FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
@@ -65,18 +65,12 @@ static void dw_hdma_v0_core_off(struct dw_edma *dw)

 static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
 {
-	u32 num_ch = 0;
-	int id;
-
-	for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
-		if (GET_CH_32(dw, id, dir, ch_en) & BIT(0))
-			num_ch++;
-	}
-
-	if (num_ch > HDMA_V0_MAX_NR_CH)
-		num_ch = HDMA_V0_MAX_NR_CH;
-
-	return (u16)num_ch;
+	/*
+	 * The HDMA IP have no way to know the number of hardware channels
+	 * available, we set it to maximum channels and let the platform
+	 * set the right number of channels.
+	 */
+	return HDMA_V0_MAX_NR_CH;
 }

 static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
@@ -228,6 +222,20 @@ static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
		dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
 }

+static void dw_hdma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+	/*
+	 * In case of remote HDMA engine setup, the DW PCIe RP/EP internal
+	 * configuration registers and application memory are normally accessed
+	 * over different buses. Ensure LL-data reaches the memory before the
+	 * doorbell register is toggled by issuing the dummy-read from the remote
+	 * LL memory in a hope that the MRd TLP will return only after the
+	 * last MWr TLP is completed
+	 */
+	if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+		readl(chunk->ll_region.vaddr.io);
+}
+
 static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 {
	struct dw_edma_chan *chan = chunk->chan;
@@ -242,7 +250,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
	/* Interrupt enable&unmask - done, abort */
	tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
	      HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
-	      HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_STOP_INT_EN;
+	      HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
+	if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+		tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
	SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
	/* Channel control */
	SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
@@ -256,6 +266,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
	/* Set consumer cycle */
	SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
		  HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+
+	dw_hdma_v0_sync_ll_data(chunk);
+
	/* Doorbell */
	SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
 }
@@ -15,7 +15,7 @@
 #define HDMA_V0_LOCAL_ABORT_INT_EN BIT(6)
 #define HDMA_V0_REMOTE_ABORT_INT_EN BIT(5)
 #define HDMA_V0_LOCAL_STOP_INT_EN BIT(4)
-#define HDMA_V0_REMOTEL_STOP_INT_EN BIT(3)
+#define HDMA_V0_REMOTE_STOP_INT_EN BIT(3)
 #define HDMA_V0_ABORT_INT_MASK BIT(2)
 #define HDMA_V0_STOP_INT_MASK BIT(0)
 #define HDMA_V0_LINKLIST_EN BIT(0)
@@ -503,7 +503,7 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
	if (fsl_chan->is_multi_fifo) {
		/* set mloff to support multiple fifo */
		burst = cfg->direction == DMA_DEV_TO_MEM ?
-				cfg->src_addr_width : cfg->dst_addr_width;
+				cfg->src_maxburst : cfg->dst_maxburst;
		nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
		/* enable DMLOE/SMLOE */
		if (cfg->direction == DMA_MEM_TO_DEV) {
@@ -30,8 +30,9 @@
 #define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
 #define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)

-#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
-#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
+#define EDMA_TCD_ITER_MASK GENMASK(14, 0)
+#define EDMA_TCD_CITER_CITER(x) ((x) & EDMA_TCD_ITER_MASK)
+#define EDMA_TCD_BITER_BITER(x) ((x) & EDMA_TCD_ITER_MASK)

 #define EDMA_TCD_CSR_START BIT(0)
 #define EDMA_TCD_CSR_INT_MAJOR BIT(1)
@@ -10,6 +10,7 @@
 */

 #include <dt-bindings/dma/fsl-edma.h>
+#include <linux/bitfield.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
@@ -582,7 +583,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
				  DMAENGINE_ALIGN_32_BYTES;

	/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
-	dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
+	dma_set_max_seg_size(fsl_edma->dma_dev.dev,
+			     FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK));

	fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

@@ -109,6 +109,7 @@
 #define FSL_QDMA_CMD_WTHROTL_OFFSET 20
 #define FSL_QDMA_CMD_DSEN_OFFSET 19
 #define FSL_QDMA_CMD_LWC_OFFSET 16
+#define FSL_QDMA_CMD_PF BIT(17)

 /* Field definition for Descriptor status */
 #define QDMA_CCDF_STATUS_RTE BIT(5)
@@ -160,6 +161,10 @@ struct fsl_qdma_format {
			u8 __reserved1[2];
			u8 cfg8b_w1;
		} __packed;
+		struct {
+			__le32 __reserved2;
+			__le32 cmd;
+		} __packed;
		__le64 data;
	};
 } __packed;
@@ -354,7 +359,6 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
 static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
				      dma_addr_t dst, dma_addr_t src, u32 len)
 {
-	u32 cmd;
	struct fsl_qdma_format *sdf, *ddf;
	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;

@@ -383,14 +387,11 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
	/* This entry is the last entry. */
	qdma_csgf_set_f(csgf_dest, len);
	/* Descriptor Buffer */
-	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
-			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
-	sdf->data = QDMA_SDDF_CMD(cmd);
-
-	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
-			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
-	cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
-	ddf->data = QDMA_SDDF_CMD(cmd);
+	sdf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+			       FSL_QDMA_CMD_PF);
+
+	ddf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+			       (FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET));
 }

 /*
@@ -624,7 +625,7 @@ static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)

 static int
 fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
-				 void *block,
+				 __iomem void *block,
				 int id)
 {
	bool duplicate;
@@ -1196,10 +1197,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
	if (!fsl_qdma->queue)
		return -ENOMEM;

-	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
-	if (ret)
-		return ret;
-
	fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
	if (fsl_qdma->irq_base < 0)
		return fsl_qdma->irq_base;
@@ -1238,19 +1235,22 @@ static int fsl_qdma_probe(struct platform_device *pdev)

	platform_set_drvdata(pdev, fsl_qdma);

-	ret = dma_async_device_register(&fsl_qdma->dma_dev);
-	if (ret) {
-		dev_err(&pdev->dev,
-			"Can't register NXP Layerscape qDMA engine.\n");
-		return ret;
-	}
-
	ret = fsl_qdma_reg_init(fsl_qdma);
	if (ret) {
		dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
		return ret;
	}

+	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+	if (ret)
+		return ret;
+
+	ret = dma_async_device_register(&fsl_qdma->dma_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
+		return ret;
+	}
+
	return 0;
 }

@@ -345,7 +345,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
	spin_lock(&evl->lock);
	status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
	t = status.tail;
-	h = evl->head;
+	h = status.head;
	size = evl->size;

	while (h != t) {
@@ -68,9 +68,9 @@ static int debugfs_evl_show(struct seq_file *s, void *d)

	spin_lock(&evl->lock);

-	h = evl->head;
	evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
	t = evl_status.tail;
+	h = evl_status.head;
	evl_size = evl->size;

	seq_printf(s, "Event Log head %u tail %u interrupt pending %u\n\n",
|
|||||||
unsigned int log_size;
|
unsigned int log_size;
|
||||||
/* The number of entries in the event log. */
|
/* The number of entries in the event log. */
|
||||||
u16 size;
|
u16 size;
|
||||||
u16 head;
|
|
||||||
unsigned long *bmap;
|
unsigned long *bmap;
|
||||||
bool batch_fail[IDXD_MAX_BATCH_IDENT];
|
bool batch_fail[IDXD_MAX_BATCH_IDENT];
|
||||||
};
|
};
|
||||||
|
@ -343,7 +343,9 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
|
|||||||
static int idxd_init_evl(struct idxd_device *idxd)
|
static int idxd_init_evl(struct idxd_device *idxd)
|
||||||
{
|
{
|
||||||
struct device *dev = &idxd->pdev->dev;
|
struct device *dev = &idxd->pdev->dev;
|
||||||
|
unsigned int evl_cache_size;
|
||||||
struct idxd_evl *evl;
|
struct idxd_evl *evl;
|
||||||
|
const char *idxd_name;
|
||||||
|
|
||||||
if (idxd->hw.gen_cap.evl_support == 0)
|
if (idxd->hw.gen_cap.evl_support == 0)
|
||||||
return 0;
|
return 0;
|
||||||
@ -355,9 +357,16 @@ static int idxd_init_evl(struct idxd_device *idxd)
|
|||||||
spin_lock_init(&evl->lock);
|
spin_lock_init(&evl->lock);
|
||||||
evl->size = IDXD_EVL_SIZE_MIN;
|
evl->size = IDXD_EVL_SIZE_MIN;
|
||||||
|
|
||||||
idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)),
|
idxd_name = dev_name(idxd_confdev(idxd));
|
||||||
sizeof(struct idxd_evl_fault) + evl_ent_size(idxd),
|
evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
|
||||||
0, 0, NULL);
|
/*
|
||||||
|
* Since completion record in evl_cache will be copied to user
|
||||||
|
* when handling completion record page fault, need to create
|
||||||
|
* the cache suitable for user copy.
|
||||||
|
*/
|
||||||
|
idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
|
||||||
|
0, 0, 0, evl_cache_size,
|
||||||
|
NULL);
|
||||||
if (!idxd->evl_cache) {
|
if (!idxd->evl_cache) {
|
||||||
kfree(evl);
|
kfree(evl);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
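Editor's note (illustrative, not part of the patch): kmem_cache_create_usercopy() takes the same arguments as kmem_cache_create() plus a user-copy window (useroffset, usersize); hardened usercopy then only allows copy_to_user()/copy_from_user() to touch that region of each object, which is why the cache above whitelists the full entry size. A minimal sketch of the pattern, using a hypothetical object type:

	#include <linux/slab.h>
	#include <linux/types.h>

	struct foo_entry { u64 payload[8]; };

	static struct kmem_cache *foo_cache;

	static int foo_cache_init(void)
	{
		/* Whitelist the whole object for user copies (offset 0, full size). */
		foo_cache = kmem_cache_create_usercopy("foo_cache",
						       sizeof(struct foo_entry),
						       0, 0,
						       0, sizeof(struct foo_entry),
						       NULL);
		return foo_cache ? 0 : -ENOMEM;
	}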
@@ -367,9 +367,9 @@ static void process_evl_entries(struct idxd_device *idxd)
 	/* Clear interrupt pending bit */
 	iowrite32(evl_status.bits_upper32,
 		  idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
-	h = evl->head;
 	evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
 	t = evl_status.tail;
+	h = evl_status.head;
 	size = idxd->evl->size;
 
 	while (h != t) {
@@ -378,7 +378,6 @@ static void process_evl_entries(struct idxd_device *idxd)
 		h = (h + 1) % size;
 	}
 
-	evl->head = h;
 	evl_status.head = h;
 	iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
 	spin_unlock(&evl->lock);
@@ -385,8 +385,6 @@ int pt_dmaengine_register(struct pt_device *pt)
 	chan->vc.desc_free = pt_do_cleanup;
 	vchan_init(&chan->vc, dma_dev);
 
-	dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
-
 	ret = dma_async_device_register(dma_dev);
 	if (ret)
 		goto err_reg;
@@ -44,11 +44,6 @@ struct dpll_pin_registration {
 	void *priv;
 };
 
-struct dpll_pin *netdev_dpll_pin(const struct net_device *dev)
-{
-	return rcu_dereference_rtnl(dev->dpll_pin);
-}
-
 struct dpll_device *dpll_device_get_by_id(int id)
 {
 	if (xa_get_mark(&dpll_device_xa, id, DPLL_REGISTERED))
@@ -515,6 +510,26 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
 	return ERR_PTR(ret);
 }
 
+static void dpll_netdev_pin_assign(struct net_device *dev, struct dpll_pin *dpll_pin)
+{
+	rtnl_lock();
+	rcu_assign_pointer(dev->dpll_pin, dpll_pin);
+	rtnl_unlock();
+}
+
+void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)
+{
+	WARN_ON(!dpll_pin);
+	dpll_netdev_pin_assign(dev, dpll_pin);
+}
+EXPORT_SYMBOL(dpll_netdev_pin_set);
+
+void dpll_netdev_pin_clear(struct net_device *dev)
+{
+	dpll_netdev_pin_assign(dev, NULL);
+}
+EXPORT_SYMBOL(dpll_netdev_pin_clear);
+
 /**
  * dpll_pin_get - find existing or create new dpll pin
  * @clock_id: clock_id of creator
@@ -8,6 +8,7 @@
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/netdevice.h>
 #include <net/genetlink.h>
 #include "dpll_core.h"
 #include "dpll_netlink.h"
@@ -47,18 +48,6 @@ dpll_msg_add_dev_parent_handle(struct sk_buff *msg, u32 id)
 	return 0;
 }
 
-/**
- * dpll_msg_pin_handle_size - get size of pin handle attribute for given pin
- * @pin: pin pointer
- *
- * Return: byte size of pin handle attribute for given pin.
- */
-size_t dpll_msg_pin_handle_size(struct dpll_pin *pin)
-{
-	return pin ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */
-}
-EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size);
-
 /**
  * dpll_msg_add_pin_handle - attach pin handle attribute to a given message
  * @msg: pointer to sk_buff message to attach a pin handle
@@ -68,7 +57,7 @@ EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size);
 * * 0 - success
 * * -EMSGSIZE - no space in message to attach pin handle
 */
-int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
+static int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
 {
 	if (!pin)
 		return 0;
@@ -76,7 +65,28 @@ int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
 		return -EMSGSIZE;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(dpll_msg_add_pin_handle);
+
+static struct dpll_pin *dpll_netdev_pin(const struct net_device *dev)
+{
+	return rcu_dereference_rtnl(dev->dpll_pin);
+}
+
+/**
+ * dpll_netdev_pin_handle_size - get size of pin handle attribute of a netdev
+ * @dev: netdev from which to get the pin
+ *
+ * Return: byte size of pin handle attribute, or 0 if @dev has no pin.
+ */
+size_t dpll_netdev_pin_handle_size(const struct net_device *dev)
+{
+	return dpll_netdev_pin(dev) ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */
+}
+
+int dpll_netdev_add_pin_handle(struct sk_buff *msg,
+			       const struct net_device *dev)
+{
+	return dpll_msg_add_pin_handle(msg, dpll_netdev_pin(dev));
+}
 
 static int
 dpll_msg_add_mode(struct sk_buff *msg, struct dpll_device *dpll,
@@ -500,7 +500,19 @@ static void bm_work(struct work_struct *work)
 		fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
 			  new_root_id, gap_count);
 		fw_send_phy_config(card, new_root_id, generation, gap_count);
-		reset_bus(card, true);
+		/*
+		 * Where possible, use a short bus reset to minimize
+		 * disruption to isochronous transfers. But in the event
+		 * of a gap count inconsistency, use a long bus reset.
+		 *
+		 * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
+		 * may set different gap counts after a bus reset. On a mixed
+		 * 1394/1394a bus, a short bus reset can get doubled. Some
+		 * nodes may treat the double reset as one bus reset and others
+		 * may treat it as two, causing a gap count inconsistency
+		 * again. Using a long bus reset prevents this.
+		 */
+		reset_bus(card, card->gap_count != 0);
 		/* Will allocate broadcast channel after the reset. */
 		goto out;
 	}
@@ -292,7 +292,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 	}
 
-	cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+	cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
 	if (!cap_info->phys) {
 		kfree(cap_info->pages);
 		kfree(cap_info);
@@ -384,7 +384,8 @@ static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv)
 	u32 *response_msg;
 	int ret;
 
-	response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(response_msg),
+	response_msg = devm_kzalloc(priv->dev,
+				    AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg),
 				    GFP_KERNEL);
 	if (!response_msg)
 		return -ENOMEM;
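Editor's note (illustrative): the two allocation fixes above are instances of the same sizing idiom — size a buffer from the pointed-to object, sizeof(*ptr), rather than from the pointer type, so the allocation stays correct if the element type ever changes. A small self-contained sketch with hypothetical names:

	#include <stdlib.h>

	struct record { unsigned long long a, b; };

	int main(void)
	{
		size_t n = 16;
		struct record *recs;

		/* Buggy: sizeof(recs) is the size of a pointer, not of a record. */
		recs = malloc(n * sizeof(recs));
		free(recs);

		/* Correct: sizeof(*recs) follows the element type automatically. */
		recs = malloc(n * sizeof(*recs));
		if (!recs)
			return 1;
		recs[n - 1].a = 42;
		free(recs);
		return 0;
	}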
@@ -127,8 +127,6 @@ static int gen_74x164_probe(struct spi_device *spi)
 	if (IS_ERR(chip->gpiod_oe))
 		return PTR_ERR(chip->gpiod_oe);
 
-	gpiod_set_value_cansleep(chip->gpiod_oe, 1);
-
 	spi_set_drvdata(spi, chip);
 
 	chip->gpio_chip.label = spi->modalias;
@@ -153,6 +151,8 @@ static int gen_74x164_probe(struct spi_device *spi)
 		goto exit_destroy;
 	}
 
+	gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+
 	ret = gpiochip_add_data(&chip->gpio_chip, chip);
 	if (!ret)
 		return 0;
@@ -968,11 +968,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
 
 	ret = gpiochip_irqchip_init_valid_mask(gc);
 	if (ret)
-		goto err_remove_acpi_chip;
+		goto err_free_hogs;
 
 	ret = gpiochip_irqchip_init_hw(gc);
 	if (ret)
-		goto err_remove_acpi_chip;
+		goto err_remove_irqchip_mask;
 
 	ret = gpiochip_add_irqchip(gc, lock_key, request_key);
 	if (ret)
@@ -997,13 +997,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
 	gpiochip_irqchip_remove(gc);
 err_remove_irqchip_mask:
 	gpiochip_irqchip_free_valid_mask(gc);
-err_remove_acpi_chip:
-	acpi_gpiochip_remove(gc);
-err_remove_of_chip:
+err_free_hogs:
 	gpiochip_free_hogs(gc);
+	acpi_gpiochip_remove(gc);
+	gpiochip_remove_pin_ranges(gc);
+err_remove_of_chip:
 	of_gpiochip_remove(gc);
 err_free_gpiochip_mask:
-	gpiochip_remove_pin_ranges(gc);
 	gpiochip_free_valid_mask(gc);
 err_remove_from_list:
 	spin_lock_irqsave(&gpio_lock, flags);
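Editor's note (illustrative sketch, not from the patch): the gpiolib change above restores the usual rule for goto-based error paths — unwind labels run in the reverse order of initialization, so each label undoes only what has already succeeded. A minimal standalone model of the idiom, with hypothetical names:

	#include <stdlib.h>

	struct ctx { void *a, *b; };

	static int ctx_init(struct ctx *c)
	{
		c->a = malloc(32);
		if (!c->a)
			goto err_out;

		c->b = malloc(64);
		if (!c->b)
			goto err_free_a;	/* undo only the step that already succeeded */

		return 0;

	err_free_a:
		free(c->a);		/* labels mirror init order, newest first */
	err_out:
		return -1;
	}

	int main(void)
	{
		struct ctx c;

		if (ctx_init(&c))
			return 1;
		free(c.b);
		free(c.a);
		return 0;
	}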
@@ -199,7 +199,7 @@ config DRM_TTM
 config DRM_TTM_KUNIT_TEST
 	tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
 	default n
-	depends on DRM && KUNIT && MMU
+	depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
 	select DRM_TTM
 	select DRM_EXPORT_FOR_TESTS if m
 	select DRM_KUNIT_TEST_HELPERS
@@ -207,7 +207,8 @@ config DRM_TTM_KUNIT_TEST
 	help
 	  Enables unit tests for TTM, a GPU memory manager subsystem used
 	  to manage memory buffers. This option is mostly useful for kernel
-	  developers.
+	  developers. It depends on (UML || COMPILE_TEST) since no other driver
+	  which uses TTM can be loaded while running the tests.
 
 	  If in doubt, say "N".
 
@@ -574,11 +574,34 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
 		return AMD_RESET_METHOD_MODE1;
 }
 
+static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
+{
+	u32 sol_reg;
+
+	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+	/* Will reset for the following suspend abort cases.
+	 * 1) Only reset limit on APU side, dGPU hasn't checked yet.
+	 * 2) S3 suspend abort and TOS already launched.
+	 */
+	if (adev->flags & AMD_IS_APU && adev->in_s3 &&
+	    !adev->suspend_complete &&
+	    sol_reg)
+		return true;
+
+	return false;
+}
+
 static int soc15_asic_reset(struct amdgpu_device *adev)
 {
 	/* original raven doesn't have full asic reset */
-	if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
-	    (adev->apu_flags & AMD_APU_IS_RAVEN2))
+	/* On the latest Raven, the GPU reset can be performed
+	 * successfully. So now, temporarily enable it for the
+	 * S3 suspend abort case.
+	 */
+	if (((adev->apu_flags & AMD_APU_IS_RAVEN) ||
+	     (adev->apu_flags & AMD_APU_IS_RAVEN2)) &&
+	    !soc15_need_reset_on_resume(adev))
 		return 0;
 
 	switch (soc15_asic_reset_method(adev)) {
@@ -1298,24 +1321,6 @@ static int soc15_common_suspend(void *handle)
 	return soc15_common_hw_fini(adev);
 }
 
-static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
-{
-	u32 sol_reg;
-
-	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
-
-	/* Will reset for the following suspend abort cases.
-	 * 1) Only reset limit on APU side, dGPU hasn't checked yet.
-	 * 2) S3 suspend abort and TOS already launched.
-	 */
-	if (adev->flags & AMD_IS_APU && adev->in_s3 &&
-	    !adev->suspend_complete &&
-	    sol_reg)
-		return true;
-
-	return false;
-}
-
 static int soc15_common_resume(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -67,6 +67,8 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
 	/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
 	case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
 	case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
+	case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
+	case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
 		DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
 		edid_caps->panel_patch.remove_sink_ext_caps = true;
 		break;
@@ -120,6 +122,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
 
 	edid_caps->edid_hdmi = connector->display_info.is_hdmi;
 
+	apply_edid_quirks(edid_buf, edid_caps);
+
 	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
 	if (sad_count <= 0)
 		return result;
@@ -146,8 +150,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
 	else
 		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
 
-	apply_edid_quirks(edid_buf, edid_caps);
-
 	kfree(sads);
 	kfree(sadb);
 
@@ -76,6 +76,11 @@ static void map_hw_resources(struct dml2_context *dml2,
 			in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;
 		}
 		for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) {
+			if (i >= __DML2_WRAPPER_MAX_STREAMS_PLANES__) {
+				dml_print("DML::%s: Index out of bounds: i=%d, __DML2_WRAPPER_MAX_STREAMS_PLANES__=%d\n",
+					  __func__, i, __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+				break;
+			}
 			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i];
 			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true;
 			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i];
@@ -6925,6 +6925,23 @@ static int si_dpm_enable(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int si_set_temperature_range(struct amdgpu_device *adev)
+{
+	int ret;
+
+	ret = si_thermal_enable_alert(adev, false);
+	if (ret)
+		return ret;
+	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+	if (ret)
+		return ret;
+	ret = si_thermal_enable_alert(adev, true);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
 static void si_dpm_disable(struct amdgpu_device *adev)
 {
 	struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7608,6 +7625,18 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
 
 static int si_dpm_late_init(void *handle)
 {
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (!adev->pm.dpm_enabled)
+		return 0;
+
+	ret = si_set_temperature_range(adev);
+	if (ret)
+		return ret;
+#if 0 //TODO ?
+	si_dpm_powergate_uvd(adev, true);
+#endif
 	return 0;
 }
 
@@ -1303,13 +1303,12 @@ static int arcturus_get_power_limit(struct smu_context *smu,
 	if (default_power_limit)
 		*default_power_limit = power_limit;
 
-	if (smu->od_enabled) {
+	if (smu->od_enabled)
 		od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-		od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-	} else {
+	else
 		od_percent_upper = 0;
-		od_percent_lower = 100;
-	}
+
+	od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
 
 	dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
 					od_percent_upper, od_percent_lower, power_limit);
@@ -2357,13 +2357,12 @@ static int navi10_get_power_limit(struct smu_context *smu,
 		*default_power_limit = power_limit;
 
 	if (smu->od_enabled &&
-	    navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+	    navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
 		od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-		od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-	} else {
+	else
 		od_percent_upper = 0;
-		od_percent_lower = 100;
-	}
+
+	od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
 
 	dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
 					od_percent_upper, od_percent_lower, power_limit);
@@ -640,13 +640,12 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
 	if (default_power_limit)
 		*default_power_limit = power_limit;
 
-	if (smu->od_enabled) {
+	if (smu->od_enabled)
 		od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
-		od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
-	} else {
+	else
 		od_percent_upper = 0;
-		od_percent_lower = 100;
-	}
+
+	od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
 
 	dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
 					od_percent_upper, od_percent_lower, power_limit);
@@ -2369,13 +2369,12 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
 	if (default_power_limit)
 		*default_power_limit = power_limit;
 
-	if (smu->od_enabled) {
+	if (smu->od_enabled)
 		od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
-		od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
-	} else {
+	else
 		od_percent_upper = 0;
-		od_percent_lower = 100;
-	}
+
+	od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
 
 	dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
 					od_percent_upper, od_percent_lower, power_limit);
@@ -2333,13 +2333,12 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
 	if (default_power_limit)
 		*default_power_limit = power_limit;
 
-	if (smu->od_enabled) {
+	if (smu->od_enabled)
 		od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
-		od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
-	} else {
+	else
 		od_percent_upper = 0;
-		od_percent_lower = 100;
-	}
+
+	od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
 
 	dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
 					od_percent_upper, od_percent_lower, power_limit);
@@ -25,20 +25,18 @@ static void drm_aux_hpd_bridge_release(struct device *dev)
 	ida_free(&drm_aux_hpd_bridge_ida, adev->id);
 
 	of_node_put(adev->dev.platform_data);
+	of_node_put(adev->dev.of_node);
 
 	kfree(adev);
 }
 
-static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
+static void drm_aux_hpd_bridge_free_adev(void *_adev)
 {
-	struct auxiliary_device *adev = _adev;
-
-	auxiliary_device_delete(adev);
-	auxiliary_device_uninit(adev);
+	auxiliary_device_uninit(_adev);
 }
 
 /**
- * drm_dp_hpd_bridge_register - Create a simple HPD DisplayPort bridge
+ * devm_drm_dp_hpd_bridge_alloc - allocate a HPD DisplayPort bridge
  * @parent: device instance providing this bridge
  * @np: device node pointer corresponding to this bridge instance
  *
@@ -46,11 +44,9 @@ static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
 * DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is
 * able to send the HPD events.
 *
- * Return: device instance that will handle created bridge or an error code
- * encoded into the pointer.
+ * Return: bridge auxiliary device pointer or an error pointer
 */
-struct device *drm_dp_hpd_bridge_register(struct device *parent,
-					  struct device_node *np)
+struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np)
 {
 	struct auxiliary_device *adev;
 	int ret;
@@ -74,18 +70,62 @@ struct device *drm_dp_hpd_bridge_register(struct device *parent,
 
 	ret = auxiliary_device_init(adev);
 	if (ret) {
+		of_node_put(adev->dev.platform_data);
+		of_node_put(adev->dev.of_node);
 		ida_free(&drm_aux_hpd_bridge_ida, adev->id);
 		kfree(adev);
 		return ERR_PTR(ret);
 	}
 
-	ret = auxiliary_device_add(adev);
-	if (ret) {
-		auxiliary_device_uninit(adev);
-		return ERR_PTR(ret);
-	}
-
-	ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_unregister_adev, adev);
+	ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_free_adev, adev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return adev;
+}
+EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_alloc);
+
+static void drm_aux_hpd_bridge_del_adev(void *_adev)
+{
+	auxiliary_device_delete(_adev);
+}
+
+/**
+ * devm_drm_dp_hpd_bridge_add - register a HDP DisplayPort bridge
+ * @dev: struct device to tie registration lifetime to
+ * @adev: bridge auxiliary device to be registered
+ *
+ * Returns: zero on success or a negative errno
+ */
+int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev)
+{
+	int ret;
+
+	ret = auxiliary_device_add(adev);
+	if (ret)
+		return ret;
+
+	return devm_add_action_or_reset(dev, drm_aux_hpd_bridge_del_adev, adev);
+}
+EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_add);
+
+/**
+ * drm_dp_hpd_bridge_register - allocate and register a HDP DisplayPort bridge
+ * @parent: device instance providing this bridge
+ * @np: device node pointer corresponding to this bridge instance
+ *
+ * Return: device instance that will handle created bridge or an error pointer
+ */
+struct device *drm_dp_hpd_bridge_register(struct device *parent, struct device_node *np)
+{
+	struct auxiliary_device *adev;
+	int ret;
+
+	adev = devm_drm_dp_hpd_bridge_alloc(parent, np);
+	if (IS_ERR(adev))
+		return ERR_CAST(adev);
+
+	ret = devm_drm_dp_hpd_bridge_add(parent, adev);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -332,6 +332,7 @@ alloc_range_bias(struct drm_buddy *mm,
 		 u64 start, u64 end,
 		 unsigned int order)
 {
+	u64 req_size = mm->chunk_size << order;
 	struct drm_buddy_block *block;
 	struct drm_buddy_block *buddy;
 	LIST_HEAD(dfs);
@@ -367,6 +368,15 @@ alloc_range_bias(struct drm_buddy *mm,
 		if (drm_buddy_block_is_allocated(block))
 			continue;
 
+		if (block_start < start || block_end > end) {
+			u64 adjusted_start = max(block_start, start);
+			u64 adjusted_end = min(block_end, end);
+
+			if (round_down(adjusted_end + 1, req_size) <=
+			    round_up(adjusted_start, req_size))
+				continue;
+		}
+
 		if (contains(start, end, block_start, block_end) &&
 		    order == drm_buddy_block_order(block)) {
 			/*
@@ -761,8 +771,12 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 		return -EINVAL;
 
 	/* Actual range allocation */
-	if (start + size == end)
+	if (start + size == end) {
+		if (!IS_ALIGNED(start | end, min_block_size))
+			return -EINVAL;
+
 		return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
+	}
 
 	original_size = size;
 	original_min_size = min_block_size;
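Editor's note (illustrative sketch, not from the patch): the new bias check above skips a candidate block when the part of it that overlaps the requested [start, end) window cannot hold even one properly aligned chunk of req_size. A standalone model of that predicate, with simple helpers standing in for the kernel's round_up()/round_down() macros:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t rdown(uint64_t x, uint64_t a) { return x - (x % a); }
	static uint64_t rup(uint64_t x, uint64_t a) { return rdown(x + a - 1, a); }

	/* Can [block_start, block_end] clipped to [start, end] hold an aligned chunk of sz? */
	static bool overlap_fits(uint64_t block_start, uint64_t block_end,
				 uint64_t start, uint64_t end, uint64_t sz)
	{
		uint64_t s = block_start > start ? block_start : start;
		uint64_t e = block_end < end ? block_end : end;

		return rdown(e + 1, sz) > rup(s, sz);
	}

	int main(void)
	{
		/* A 4 KiB overlap cannot hold an aligned 64 KiB chunk: prints 0. */
		printf("%d\n", overlap_fits(0x0, 0xffff, 0xf000, 0x10fff, 0x10000));
		return 0;
	}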
@@ -329,10 +329,26 @@ static const struct component_ops dp_display_comp_ops = {
 	.unbind = dp_display_unbind,
 };
 
+static void dp_display_send_hpd_event(struct msm_dp *dp_display)
+{
+	struct dp_display_private *dp;
+	struct drm_connector *connector;
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	connector = dp->dp_display.connector;
+	drm_helper_hpd_irq_event(connector->dev);
+}
+
 static int dp_display_send_hpd_notification(struct dp_display_private *dp,
 					    bool hpd)
 {
-	struct drm_bridge *bridge = dp->dp_display.bridge;
+	if ((hpd && dp->dp_display.link_ready) ||
+	    (!hpd && !dp->dp_display.link_ready)) {
+		drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
+				(hpd ? "on" : "off"));
+		return 0;
+	}
 
 	/* reset video pattern flag on disconnect */
 	if (!hpd) {
@@ -348,7 +364,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
 
 	drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
 			dp->dp_display.connector_type, hpd);
-	drm_bridge_hpd_notify(bridge, dp->dp_display.link_ready);
+	dp_display_send_hpd_event(&dp->dp_display);
 
 	return 0;
 }
@@ -269,7 +269,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		break;
 	case NOUVEAU_GETPARAM_VRAM_USED: {
 		struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
-		getparam->value = (u64)ttm_resource_manager_usage(vram_mgr) << PAGE_SHIFT;
+		getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
 		break;
 	}
 	default:
@@ -1054,8 +1054,6 @@ r535_gsp_postinit(struct nvkm_gsp *gsp)
 	/* Release the DMA buffers that were needed only for boot and init */
 	nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw);
 	nvkm_gsp_mem_dtor(gsp, &gsp->libos);
-	nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
-	nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
 
 	return ret;
 }
@@ -2163,6 +2161,8 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
 
 	r535_gsp_dtor_fws(gsp);
 
+	nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
+	nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
 	nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem);
 	nvkm_gsp_mem_dtor(gsp, &gsp->loginit);
 	nvkm_gsp_mem_dtor(gsp, &gsp->logintr);
@@ -1243,9 +1243,26 @@ static int host1x_drm_probe(struct host1x_device *dev)
 
 	drm_mode_config_reset(drm);
 
-	err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
-	if (err < 0)
-		goto hub;
+	/*
+	 * Only take over from a potential firmware framebuffer if any CRTCs
+	 * have been registered. This must not be a fatal error because there
+	 * are other accelerators that are exposed via this driver.
+	 *
+	 * Another case where this happens is on Tegra234 where the display
+	 * hardware is no longer part of the host1x complex, so this driver
+	 * will not expose any modesetting features.
+	 */
+	if (drm->mode_config.num_crtc > 0) {
+		err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
+		if (err < 0)
+			goto hub;
+	} else {
+		/*
+		 * Indicate to userspace that this doesn't expose any display
+		 * capabilities.
+		 */
+		drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+	}
 
 	err = drm_dev_register(drm, 0);
 	if (err < 0)
@@ -14,11 +14,216 @@
 
 #include "../lib/drm_random.h"
 
+static unsigned int random_seed;
+
 static inline u64 get_size(int order, u64 chunk_size)
 {
 	return (1 << order) * chunk_size;
 }
 
+static void drm_test_buddy_alloc_range_bias(struct kunit *test)
+{
+	u32 mm_size, ps, bias_size, bias_start, bias_end, bias_rem;
+	DRM_RND_STATE(prng, random_seed);
+	unsigned int i, count, *order;
+	struct drm_buddy mm;
+	LIST_HEAD(allocated);
+
+	bias_size = SZ_1M;
+	ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
+	ps = max(SZ_4K, ps);
+	mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */
+
+	kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);
+
+	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+			       "buddy_init failed\n");
+
+	count = mm_size / bias_size;
+	order = drm_random_order(count, &prng);
+	KUNIT_EXPECT_TRUE(test, order);
+
+	/*
+	 * Idea is to split the address space into uniform bias ranges, and then
+	 * in some random order allocate within each bias, using various
+	 * patterns within. This should detect if allocations leak out from a
+	 * given bias, for example.
+	 */
+
+	for (i = 0; i < count; i++) {
+		LIST_HEAD(tmp);
+		u32 size;
+
+		bias_start = order[i] * bias_size;
+		bias_end = bias_start + bias_size;
+		bias_rem = bias_size;
+
+		/* internal round_up too big */
+		KUNIT_ASSERT_TRUE_MSG(test,
+				      drm_buddy_alloc_blocks(&mm, bias_start,
+							     bias_end, bias_size + ps, bias_size,
+							     &allocated,
+							     DRM_BUDDY_RANGE_ALLOCATION),
+				      "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+				      bias_start, bias_end, bias_size, bias_size);
+
+		/* size too big */
+		KUNIT_ASSERT_TRUE_MSG(test,
+				      drm_buddy_alloc_blocks(&mm, bias_start,
+							     bias_end, bias_size + ps, ps,
+							     &allocated,
+							     DRM_BUDDY_RANGE_ALLOCATION),
+				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+				      bias_start, bias_end, bias_size + ps, ps);
+
+		/* bias range too small for size */
+		KUNIT_ASSERT_TRUE_MSG(test,
+				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
+							     bias_end, bias_size, ps,
+							     &allocated,
+							     DRM_BUDDY_RANGE_ALLOCATION),
+				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+				      bias_start + ps, bias_end, bias_size, ps);
+
+		/* bias misaligned */
+		KUNIT_ASSERT_TRUE_MSG(test,
+				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
+							     bias_end - ps,
+							     bias_size >> 1, bias_size >> 1,
+							     &allocated,
+							     DRM_BUDDY_RANGE_ALLOCATION),
+				      "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+				      bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);
+
+		/* single big page */
+		KUNIT_ASSERT_FALSE_MSG(test,
+				       drm_buddy_alloc_blocks(&mm, bias_start,
+							      bias_end, bias_size, bias_size,
+							      &tmp,
+							      DRM_BUDDY_RANGE_ALLOCATION),
+				       "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n",
+				       bias_start, bias_end, bias_size, bias_size);
+		drm_buddy_free_list(&mm, &tmp);
+
+		/* single page with internal round_up */
+		KUNIT_ASSERT_FALSE_MSG(test,
+				       drm_buddy_alloc_blocks(&mm, bias_start,
+							      bias_end, ps, bias_size,
+							      &tmp,
+							      DRM_BUDDY_RANGE_ALLOCATION),
+				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+				       bias_start, bias_end, ps, bias_size);
+		drm_buddy_free_list(&mm, &tmp);
+
+		/* random size within */
+		size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+		if (size)
+			KUNIT_ASSERT_FALSE_MSG(test,
+					       drm_buddy_alloc_blocks(&mm, bias_start,
+								      bias_end, size, ps,
+								      &tmp,
+								      DRM_BUDDY_RANGE_ALLOCATION),
+					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+					       bias_start, bias_end, size, ps);
+
+		bias_rem -= size;
+		/* too big for current avail */
+		KUNIT_ASSERT_TRUE_MSG(test,
+				      drm_buddy_alloc_blocks(&mm, bias_start,
+							     bias_end, bias_rem + ps, ps,
+							     &allocated,
+							     DRM_BUDDY_RANGE_ALLOCATION),
+				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+				      bias_start, bias_end, bias_rem + ps, ps);
+
+		if (bias_rem) {
+			/* random fill of the remainder */
+			size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+			size = max(size, ps);
+
+			KUNIT_ASSERT_FALSE_MSG(test,
+					       drm_buddy_alloc_blocks(&mm, bias_start,
+								      bias_end, size, ps,
+								      &allocated,
+								      DRM_BUDDY_RANGE_ALLOCATION),
+					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+					       bias_start, bias_end, size, ps);
+			/*
+			 * Intentionally allow some space to be left
+			 * unallocated, and ideally not always on the bias
+			 * boundaries.
+			 */
+			drm_buddy_free_list(&mm, &tmp);
+		} else {
+			list_splice_tail(&tmp, &allocated);
+		}
+	}
+
+	kfree(order);
+	drm_buddy_free_list(&mm, &allocated);
+	drm_buddy_fini(&mm);
+
+	/*
+	 * Something more free-form. Idea is to pick a random starting bias
+	 * range within the address space and then start filling it up. Also
+	 * randomly grow the bias range in both directions as we go along. This
+	 * should give us bias start/end which is not always uniform like above,
+	 * and in some cases will require the allocator to jump over already
+	 * allocated nodes in the middle of the address space.
+	 */
+
+	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+			       "buddy_init failed\n");
+
+	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
+	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
+	bias_end = max(bias_end, bias_start + ps);
+	bias_rem = bias_end - bias_start;
+
+	do {
+		u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+
+		KUNIT_ASSERT_FALSE_MSG(test,
+				       drm_buddy_alloc_blocks(&mm, bias_start,
+							      bias_end, size, ps,
+							      &allocated,
+							      DRM_BUDDY_RANGE_ALLOCATION),
+				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+				       bias_start, bias_end, size);
+		bias_rem -= size;
+
+		/*
+		 * Try to randomly grow the bias range in both directions, or
+		 * only one, or perhaps don't grow at all.
+		 */
+		do {
+			u32 old_bias_start = bias_start;
+			u32 old_bias_end = bias_end;
+
+			if (bias_start)
+				bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
+			if (bias_end != mm_size)
+				bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);
+
+			bias_rem += old_bias_start - bias_start;
+			bias_rem += bias_end - old_bias_end;
+		} while (!bias_rem && (bias_start || bias_end != mm_size));
+	} while (bias_rem);
+
+	KUNIT_ASSERT_EQ(test, bias_start, 0);
+	KUNIT_ASSERT_EQ(test, bias_end, mm_size);
+	KUNIT_ASSERT_TRUE_MSG(test,
+			      drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
+						     ps, ps,
+						     &allocated,
+						     DRM_BUDDY_RANGE_ALLOCATION),
+			      "buddy_alloc passed with bias(%x-%x), size=%u\n",
+			      bias_start, bias_end, ps);
+
+	drm_buddy_free_list(&mm, &allocated);
+	drm_buddy_fini(&mm);
+}
+
 static void drm_test_buddy_alloc_contiguous(struct kunit *test)
 {
 	const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
@@ -362,17 +567,30 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
 	drm_buddy_fini(&mm);
 }
 
+static int drm_buddy_suite_init(struct kunit_suite *suite)
+{
+	while (!random_seed)
+		random_seed = get_random_u32();
+
+	kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
+		   random_seed);
+
+	return 0;
+}
+
 static struct kunit_case drm_buddy_tests[] = {
 	KUNIT_CASE(drm_test_buddy_alloc_limit),
 	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
 	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
 	KUNIT_CASE(drm_test_buddy_alloc_pathological),
 	KUNIT_CASE(drm_test_buddy_alloc_contiguous),
+	KUNIT_CASE(drm_test_buddy_alloc_range_bias),
 	{}
 };
 
 static struct kunit_suite drm_buddy_test_suite = {
 	.name = "drm_buddy",
+	.suite_init = drm_buddy_suite_init,
 	.test_cases = drm_buddy_tests,
 };
 
@@ -28,6 +28,14 @@
 #include "xe_ttm_stolen_mgr.h"
 #include "xe_vm.h"
 
+const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
+	[XE_PL_SYSTEM] = "system",
+	[XE_PL_TT] = "gtt",
+	[XE_PL_VRAM0] = "vram0",
+	[XE_PL_VRAM1] = "vram1",
+	[XE_PL_STOLEN] = "stolen"
+};
+
 static const struct ttm_place sys_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -713,8 +721,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	migrate = xe->tiles[0].migrate;
 
 	xe_assert(xe, migrate);
-
-	trace_xe_bo_move(bo);
+	trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
 	xe_device_mem_access_get(xe);
 
 	if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
@@ -243,6 +243,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo);
 int xe_bo_restore_pinned(struct xe_bo *bo);
 
 extern struct ttm_device_funcs xe_ttm_funcs;
+extern const char *const xe_mem_type_to_name[];
 
 int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
@@ -131,14 +131,6 @@ static void bo_meminfo(struct xe_bo *bo,
 
 static void show_meminfo(struct drm_printer *p, struct drm_file *file)
 {
-	static const char *const mem_type_to_name[TTM_NUM_MEM_TYPES] = {
-		[XE_PL_SYSTEM] = "system",
-		[XE_PL_TT] = "gtt",
-		[XE_PL_VRAM0] = "vram0",
-		[XE_PL_VRAM1] = "vram1",
-		[4 ... 6] = NULL,
-		[XE_PL_STOLEN] = "stolen"
-	};
 	struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {};
 	struct xe_file *xef = file->driver_priv;
 	struct ttm_device *bdev = &xef->xe->ttm;
@@ -171,7 +163,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
 	spin_unlock(&client->bos_lock);
 
 	for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) {
-		if (!mem_type_to_name[mem_type])
+		if (!xe_mem_type_to_name[mem_type])
 			continue;
 
 		man = ttm_manager_type(bdev, mem_type);
@@ -182,7 +174,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
 					      DRM_GEM_OBJECT_RESIDENT |
 					      (mem_type != XE_PL_SYSTEM ? 0 :
 					       DRM_GEM_OBJECT_PURGEABLE),
-					      mem_type_to_name[mem_type]);
+					      xe_mem_type_to_name[mem_type]);
 		}
 	}
 }
@@ -309,85 +309,6 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
 	return q->ops->set_timeslice(q, value);
 }
 
-static int exec_queue_set_preemption_timeout(struct xe_device *xe,
-					     struct xe_exec_queue *q, u64 value,
-					     bool create)
-{
-	u32 min = 0, max = 0;
-
-	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
-				      XE_EXEC_QUEUE_PREEMPT_TIMEOUT, &min, &max);
-
-	if (xe_exec_queue_enforce_schedule_limit() &&
-	    !xe_hw_engine_timeout_in_range(value, min, max))
-		return -EINVAL;
-
-	return q->ops->set_preempt_timeout(q, value);
-}
-
-static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
-				      u64 value, bool create)
-{
-	u32 min = 0, max = 0;
-
-	if (XE_IOCTL_DBG(xe, !create))
-		return -EINVAL;
-
-	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
-				      XE_EXEC_QUEUE_JOB_TIMEOUT, &min, &max);
-
-	if (xe_exec_queue_enforce_schedule_limit() &&
-	    !xe_hw_engine_timeout_in_range(value, min, max))
-		return -EINVAL;
-
-	return q->ops->set_job_timeout(q, value);
-}
-
-static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q,
-				      u64 value, bool create)
-{
-	if (XE_IOCTL_DBG(xe, !create))
-		return -EINVAL;
-
-	if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
-		return -EINVAL;
-
-	q->usm.acc_trigger = value;
-
-	return 0;
-}
-
-static int exec_queue_set_acc_notify(struct xe_device *xe, struct xe_exec_queue *q,
-				     u64 value, bool create)
-{
-	if (XE_IOCTL_DBG(xe, !create))
-		return -EINVAL;
-
-	if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
-		return -EINVAL;
-
-	q->usm.acc_notify = value;
-
-	return 0;
-}
-
-static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_queue *q,
-					  u64 value, bool create)
-{
-	if (XE_IOCTL_DBG(xe, !create))
-		return -EINVAL;
-
-	if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
-		return -EINVAL;
-
-	if (value > DRM_XE_ACC_GRANULARITY_64M)
-		return -EINVAL;
-
-	q->usm.acc_granularity = value;
-
-	return 0;
-}
-
 typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
 					     struct xe_exec_queue *q,
 					     u64 value, bool create);
@@ -395,11 +316,6 @@ typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
 static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
-	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
-	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
-	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
-	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
-	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
 };
 
 static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -418,7 +334,9 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
 
 	if (XE_IOCTL_DBG(xe, ext.property >=
 			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
-	    XE_IOCTL_DBG(xe, ext.pad))
+	    XE_IOCTL_DBG(xe, ext.pad) ||
+	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
+			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
 		return -EINVAL;
 
 	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
@@ -150,16 +150,6 @@ struct xe_exec_queue {
		spinlock_t lock;
	} compute;
 
-	/** @usm: unified shared memory state */
-	struct {
-		/** @acc_trigger: access counter trigger */
-		u32 acc_trigger;
-		/** @acc_notify: access counter notify */
-		u32 acc_notify;
-		/** @acc_granularity: access counter granularity */
-		u32 acc_granularity;
-	} usm;
-
	/** @ops: submission backend exec queue operations */
	const struct xe_exec_queue_ops *ops;
 
@@ -212,7 +212,7 @@ static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
 static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl)
 {
	struct xe_execlist_port *port = exl->port;
-	enum xe_exec_queue_priority priority = exl->active_priority;
+	enum xe_exec_queue_priority priority = exl->q->sched_props.priority;
 
	XE_WARN_ON(priority == XE_EXEC_QUEUE_PRIORITY_UNSET);
	XE_WARN_ON(priority < 0);
@@ -247,6 +247,14 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
 
	xe_gt_assert(gt, vma);
 
+	/* Execlists not supported */
+	if (gt_to_xe(gt)->info.force_execlist) {
+		if (fence)
+			__invalidation_fence_signal(fence);
+
+		return 0;
+	}
+
	action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
	action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
	if (!xe->info.has_range_tlb_invalidation) {
@@ -317,6 +325,10 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
	struct drm_printer p = drm_err_printer(__func__);
	int ret;
 
+	/* Execlists not supported */
+	if (gt_to_xe(gt)->info.force_execlist)
+		return 0;
+
	/*
	 * XXX: See above, this algorithm only works if seqno are always in
	 * order
@@ -682,8 +682,6 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
 
 #define PVC_CTX_ASID		(0x2e + 1)
 #define PVC_CTX_ACC_CTR_THOLD	(0x2a + 1)
-#define ACC_GRANULARITY_S	20
-#define ACC_NOTIFY_S		16
 
 int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
		struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
@@ -754,13 +752,7 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
	xe_lrc_write_ctx_reg(lrc, CTX_RING_CTL,
			     RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
	if (xe->info.has_asid && vm)
-		xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID,
-				     (q->usm.acc_granularity <<
-				      ACC_GRANULARITY_S) | vm->usm.asid);
-	if (xe->info.has_usm && vm)
-		xe_lrc_write_ctx_reg(lrc, PVC_CTX_ACC_CTR_THOLD,
-				     (q->usm.acc_notify << ACC_NOTIFY_S) |
-				     q->usm.acc_trigger);
+		xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
 
	lrc->desc = LRC_VALID;
	lrc->desc |= LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT;
@@ -105,7 +105,7 @@ static void xe_resize_vram_bar(struct xe_device *xe)
 
	pci_bus_for_each_resource(root, root_res, i) {
		if (root_res && root_res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
-		    root_res->start > 0x100000000ull)
+		    (u64)root_res->start > 0x100000000ul)
			break;
	}
 
@@ -19,7 +19,7 @@
 #include "xe_macros.h"
 #include "xe_sched_job_types.h"
 
-struct user_fence {
+struct xe_user_fence {
	struct xe_device *xe;
	struct kref refcount;
	struct dma_fence_cb cb;
@@ -27,31 +27,32 @@ struct user_fence {
	struct mm_struct *mm;
	u64 __user *addr;
	u64 value;
+	int signalled;
 };
 
 static void user_fence_destroy(struct kref *kref)
 {
-	struct user_fence *ufence = container_of(kref, struct user_fence,
+	struct xe_user_fence *ufence = container_of(kref, struct xe_user_fence,
						 refcount);
 
	mmdrop(ufence->mm);
	kfree(ufence);
 }
 
-static void user_fence_get(struct user_fence *ufence)
+static void user_fence_get(struct xe_user_fence *ufence)
 {
	kref_get(&ufence->refcount);
 }
 
-static void user_fence_put(struct user_fence *ufence)
+static void user_fence_put(struct xe_user_fence *ufence)
 {
	kref_put(&ufence->refcount, user_fence_destroy);
 }
 
-static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
+static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
					    u64 value)
 {
-	struct user_fence *ufence;
+	struct xe_user_fence *ufence;
 
	ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
	if (!ufence)
@@ -69,7 +70,7 @@ static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
 
 static void user_fence_worker(struct work_struct *w)
 {
-	struct user_fence *ufence = container_of(w, struct user_fence, worker);
+	struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
 
	if (mmget_not_zero(ufence->mm)) {
		kthread_use_mm(ufence->mm);
@@ -80,10 +81,11 @@ static void user_fence_worker(struct work_struct *w)
	}
 
	wake_up_all(&ufence->xe->ufence_wq);
+	WRITE_ONCE(ufence->signalled, 1);
	user_fence_put(ufence);
 }
 
-static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
+static void kick_ufence(struct xe_user_fence *ufence, struct dma_fence *fence)
 {
	INIT_WORK(&ufence->worker, user_fence_worker);
	queue_work(ufence->xe->ordered_wq, &ufence->worker);
@@ -92,7 +94,7 @@ static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
 
 static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
-	struct user_fence *ufence = container_of(cb, struct user_fence, cb);
+	struct xe_user_fence *ufence = container_of(cb, struct xe_user_fence, cb);
 
	kick_ufence(ufence, fence);
 }
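The rename to struct xe_user_fence also introduces a signalled flag: the worker sets it with WRITE_ONCE() once the fence value has been written, and readers later poll it with READ_ONCE(). A rough userspace analogue of this refcount-plus-once-set-flag lifetime, using hypothetical names and C11 atomics in place of kref and WRITE_ONCE/READ_ONCE:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical analogue of a refcounted, once-signalled user fence. */
struct ufence {
	atomic_int refcount;
	atomic_int signalled;
};

static struct ufence *ufence_create(void)
{
	struct ufence *f = malloc(sizeof(*f));

	if (!f)
		return NULL;
	atomic_init(&f->refcount, 1);
	atomic_init(&f->signalled, 0);
	return f;
}

static void ufence_get(struct ufence *f) { atomic_fetch_add(&f->refcount, 1); }

static void ufence_put(struct ufence *f)
{
	if (atomic_fetch_sub(&f->refcount, 1) == 1)
		free(f);	/* last reference dropped */
}

/* Completion path: mark signalled, then drop the worker's reference. */
static void ufence_signal(struct ufence *f)
{
	atomic_store(&f->signalled, 1);
	ufence_put(f);
}

int main(void)
{
	struct ufence *f = ufence_create();

	ufence_get(f);		/* consumer holds its own reference */
	ufence_signal(f);	/* producer signals and drops its ref */
	printf("signalled=%d\n", atomic_load(&f->signalled));
	ufence_put(f);		/* consumer releases; fence is freed */
	return 0;
}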
@@ -340,3 +342,39 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
 
	return ERR_PTR(-ENOMEM);
 }
+
+/**
+ * xe_sync_ufence_get() - Get user fence from sync
+ * @sync: input sync
+ *
+ * Get a user fence reference from sync.
+ *
+ * Return: xe_user_fence pointer with reference
+ */
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync)
+{
+	user_fence_get(sync->ufence);
+
+	return sync->ufence;
+}
+
+/**
+ * xe_sync_ufence_put() - Put user fence reference
+ * @ufence: user fence reference
+ *
+ */
+void xe_sync_ufence_put(struct xe_user_fence *ufence)
+{
+	user_fence_put(ufence);
+}
+
+/**
+ * xe_sync_ufence_get_status() - Get user fence status
+ * @ufence: user fence
+ *
+ * Return: 1 if signalled, 0 not signalled, <0 on error
+ */
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence)
+{
+	return READ_ONCE(ufence->signalled);
+}
@@ -38,4 +38,8 @@ static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
	return !!sync->ufence;
 }
 
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync);
+void xe_sync_ufence_put(struct xe_user_fence *ufence);
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence);
+
 #endif
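Together the three exported helpers form a small lifetime API: take a reference to the user fence attached to a sync entry, poll whether it has signalled, and drop the reference when done. A hedged userspace sketch of the consumer side, where tear-down is refused with -EBUSY until the fence signals (hypothetical stand-ins, not the driver's actual structures):

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for the new helpers. */
struct ufence { int signalled; int refs; };

static int  ufence_get_status(struct ufence *f) { return f->signalled; }
static void ufence_put(struct ufence *f)        { f->refs--; }

struct mapping { struct ufence *ufence; };

/* Tear-down pattern: refuse to proceed until the fence has signalled. */
static int mapping_unbind(struct mapping *m)
{
	if (m->ufence) {
		if (!ufence_get_status(m->ufence))
			return -EBUSY;	/* caller retries later */

		ufence_put(m->ufence);
		m->ufence = NULL;
	}
	printf("unbound\n");
	return 0;
}

int main(void)
{
	struct ufence f = { .signalled = 0, .refs = 1 };
	struct mapping m = { .ufence = &f };

	printf("first try: %d\n", mapping_unbind(&m));	/* -EBUSY */
	f.signalled = 1;
	printf("second try: %d\n", mapping_unbind(&m));	/* 0 */
	return 0;
}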
@@ -18,7 +18,7 @@ struct xe_sync_entry {
	struct drm_syncobj *syncobj;
	struct dma_fence *fence;
	struct dma_fence_chain *chain_fence;
-	struct user_fence *ufence;
+	struct xe_user_fence *ufence;
	u64 addr;
	u64 timeline_value;
	u32 type;
@@ -12,6 +12,7 @@
 #include <linux/tracepoint.h>
 #include <linux/types.h>
 
+#include "xe_bo.h"
 #include "xe_bo_types.h"
 #include "xe_exec_queue_types.h"
 #include "xe_gpu_scheduler_types.h"
@@ -26,16 +27,16 @@ DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
		    TP_ARGS(fence),
 
		    TP_STRUCT__entry(
-			     __field(u64, fence)
+			     __field(struct xe_gt_tlb_invalidation_fence *, fence)
			     __field(int, seqno)
			     ),
 
		    TP_fast_assign(
-			   __entry->fence = (u64)fence;
+			   __entry->fence = fence;
			   __entry->seqno = fence->seqno;
			   ),
 
-		    TP_printk("fence=0x%016llx, seqno=%d",
+		    TP_printk("fence=%p, seqno=%d",
			      __entry->fence, __entry->seqno)
 );
 
@@ -82,16 +83,16 @@ DECLARE_EVENT_CLASS(xe_bo,
		    TP_STRUCT__entry(
			     __field(size_t, size)
			     __field(u32, flags)
-			     __field(u64, vm)
+			     __field(struct xe_vm *, vm)
			     ),
 
		    TP_fast_assign(
			   __entry->size = bo->size;
			   __entry->flags = bo->flags;
-			   __entry->vm = (unsigned long)bo->vm;
+			   __entry->vm = bo->vm;
			   ),
 
-		    TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx",
+		    TP_printk("size=%zu, flags=0x%02x, vm=%p",
			      __entry->size, __entry->flags, __entry->vm)
 );
 
@@ -100,9 +101,31 @@ DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
	     TP_ARGS(bo)
 );
 
-DEFINE_EVENT(xe_bo, xe_bo_move,
-	     TP_PROTO(struct xe_bo *bo),
-	     TP_ARGS(bo)
+TRACE_EVENT(xe_bo_move,
+	    TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
+		     bool move_lacks_source),
+	    TP_ARGS(bo, new_placement, old_placement, move_lacks_source),
+	    TP_STRUCT__entry(
+		     __field(struct xe_bo *, bo)
+		     __field(size_t, size)
+		     __field(u32, new_placement)
+		     __field(u32, old_placement)
+		     __array(char, device_id, 12)
+		     __field(bool, move_lacks_source)
+		     ),
+
+	    TP_fast_assign(
+		   __entry->bo      = bo;
+		   __entry->size = bo->size;
+		   __entry->new_placement = new_placement;
+		   __entry->old_placement = old_placement;
+		   strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12);
+		   __entry->move_lacks_source = move_lacks_source;
+		   ),
+	    TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
+		      __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
+		      xe_mem_type_to_name[__entry->old_placement],
+		      xe_mem_type_to_name[__entry->new_placement], __entry->device_id)
 );
 
 DECLARE_EVENT_CLASS(xe_exec_queue,
@@ -327,16 +350,16 @@ DECLARE_EVENT_CLASS(xe_hw_fence,
		    TP_STRUCT__entry(
			     __field(u64, ctx)
			     __field(u32, seqno)
-			     __field(u64, fence)
+			     __field(struct xe_hw_fence *, fence)
			     ),
 
		    TP_fast_assign(
			   __entry->ctx = fence->dma.context;
			   __entry->seqno = fence->dma.seqno;
-			   __entry->fence = (unsigned long)fence;
+			   __entry->fence = fence;
			   ),
 
-		    TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u",
+		    TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
			      __entry->ctx, __entry->fence, __entry->seqno)
 );
 
@@ -365,7 +388,7 @@ DECLARE_EVENT_CLASS(xe_vma,
		    TP_ARGS(vma),
 
		    TP_STRUCT__entry(
-			     __field(u64, vma)
+			     __field(struct xe_vma *, vma)
			     __field(u32, asid)
			     __field(u64, start)
			     __field(u64, end)
@@ -373,14 +396,14 @@ DECLARE_EVENT_CLASS(xe_vma,
			     ),
 
		    TP_fast_assign(
-			   __entry->vma = (unsigned long)vma;
+			   __entry->vma = vma;
			   __entry->asid = xe_vma_vm(vma)->usm.asid;
			   __entry->start = xe_vma_start(vma);
			   __entry->end = xe_vma_end(vma) - 1;
			   __entry->ptr = xe_vma_userptr(vma);
			   ),
 
-		    TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx,",
+		    TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
			      __entry->vma, __entry->asid, __entry->start,
			      __entry->end, __entry->ptr)
 )
@@ -465,16 +488,16 @@ DECLARE_EVENT_CLASS(xe_vm,
		    TP_ARGS(vm),
 
		    TP_STRUCT__entry(
-			     __field(u64, vm)
+			     __field(struct xe_vm *, vm)
			     __field(u32, asid)
			     ),
 
		    TP_fast_assign(
-			   __entry->vm = (unsigned long)vm;
+			   __entry->vm = vm;
			   __entry->asid = vm->usm.asid;
			   ),
 
-		    TP_printk("vm=0x%016llx, asid=0x%05x", __entry->vm,
+		    TP_printk("vm=%p, asid=0x%05x", __entry->vm,
			      __entry->asid)
 );
 
@@ -897,6 +897,11 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
	struct xe_device *xe = vm->xe;
	bool read_only = xe_vma_read_only(vma);
 
+	if (vma->ufence) {
+		xe_sync_ufence_put(vma->ufence);
+		vma->ufence = NULL;
+	}
+
	if (xe_vma_is_userptr(vma)) {
		struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
 
@@ -1608,6 +1613,16 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 
	trace_xe_vma_unbind(vma);
 
+	if (vma->ufence) {
+		struct xe_user_fence * const f = vma->ufence;
+
+		if (!xe_sync_ufence_get_status(f))
+			return ERR_PTR(-EBUSY);
+
+		vma->ufence = NULL;
+		xe_sync_ufence_put(f);
+	}
+
	if (number_tiles > 1) {
		fences = kmalloc_array(number_tiles, sizeof(*fences),
				       GFP_KERNEL);
@@ -1741,6 +1756,21 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
	return ERR_PTR(err);
 }
 
+static struct xe_user_fence *
+find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_syncs; i++) {
+		struct xe_sync_entry *e = &syncs[i];
+
+		if (xe_sync_is_ufence(e))
+			return xe_sync_ufence_get(e);
+	}
+
+	return NULL;
+}
+
 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
			struct xe_exec_queue *q, struct xe_sync_entry *syncs,
			u32 num_syncs, bool immediate, bool first_op,
@@ -1748,9 +1778,16 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
 {
	struct dma_fence *fence;
	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
+	struct xe_user_fence *ufence;
 
	xe_vm_assert_held(vm);
 
+	ufence = find_ufence_get(syncs, num_syncs);
+	if (vma->ufence && ufence)
+		xe_sync_ufence_put(vma->ufence);
+
+	vma->ufence = ufence ?: vma->ufence;
+
	if (immediate) {
		fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
				       last_op);
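__xe_vm_bind() above keeps the VMA's existing user fence unless the new bind supplies one, using GCC's binary conditional in "vma->ufence = ufence ?: vma->ufence;". A minimal sketch of that operator (a GNU C extension, equivalent to "a ? a : b" with "a" evaluated only once; needs GCC or Clang):

#include <stdio.h>

int main(void)
{
	const char *old_fence = "old";
	const char *new_fence = NULL;

	/* GNU C "elvis" operator: keep the current value when the new one
	 * is NULL, replace it otherwise. */
	const char *kept = new_fence ?: old_fence;

	printf("kept=%s\n", kept);	/* prints "old" */

	new_fence = "new";
	kept = new_fence ?: old_fence;
	printf("kept=%s\n", kept);	/* prints "new" */
	return 0;
}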
@@ -2117,10 +2154,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
 
		if (__op->op == DRM_GPUVA_OP_MAP) {
-			op->map.immediate =
-				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
-			op->map.read_only =
-				flags & DRM_XE_VM_BIND_FLAG_READONLY;
			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
			op->map.pat_index = pat_index;
		} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
@@ -2313,8 +2346,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
		switch (op->base.op) {
		case DRM_GPUVA_OP_MAP:
		{
-			flags |= op->map.read_only ?
-				VMA_CREATE_FLAG_READ_ONLY : 0;
			flags |= op->map.is_null ?
				VMA_CREATE_FLAG_IS_NULL : 0;
 
@@ -2445,7 +2476,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
	case DRM_GPUVA_OP_MAP:
		err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
				 op->syncs, op->num_syncs,
-				 op->map.immediate || !xe_vm_in_fault_mode(vm),
+				 !xe_vm_in_fault_mode(vm),
				 op->flags & XE_VMA_OP_FIRST,
				 op->flags & XE_VMA_OP_LAST);
		break;
@@ -2720,14 +2751,11 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
	return 0;
 }
 
-#define SUPPORTED_FLAGS	\
-	(DRM_XE_VM_BIND_FLAG_READONLY | \
-	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL)
+#define SUPPORTED_FLAGS	(DRM_XE_VM_BIND_FLAG_NULL | \
+			 DRM_XE_VM_BIND_FLAG_DUMPABLE)
 #define XE_64K_PAGE_MASK 0xffffull
 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
 
-#define MAX_BINDS	512	/* FIXME: Picking random upper limit */
-
 static int vm_bind_ioctl_check_args(struct xe_device *xe,
				    struct drm_xe_vm_bind *args,
				    struct drm_xe_vm_bind_op **bind_ops)
@@ -2739,16 +2767,16 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;
 
-	if (XE_IOCTL_DBG(xe, args->extensions) ||
-	    XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
+	if (XE_IOCTL_DBG(xe, args->extensions))
		return -EINVAL;
 
	if (args->num_binds > 1) {
		u64 __user *bind_user =
			u64_to_user_ptr(args->vector_of_binds);
 
-		*bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
-				    args->num_binds, GFP_KERNEL);
+		*bind_ops = kvmalloc_array(args->num_binds,
					   sizeof(struct drm_xe_vm_bind_op),
					   GFP_KERNEL | __GFP_ACCOUNT);
		if (!*bind_ops)
			return -ENOMEM;
 
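With the arbitrary MAX_BINDS cap dropped, the bind-op array is sized purely from the user-supplied count, so the allocation switches to kvmalloc_array(), which performs an overflow-checked multiply and can fall back to vmalloc for large requests, and is charged to the caller's memory cgroup via __GFP_ACCOUNT; the matching frees become kvfree(). A userspace sketch of the overflow-checked half of that, assuming GCC/Clang's __builtin_mul_overflow is available:

#include <stdio.h>
#include <stdlib.h>

/* Allocate count * size bytes, failing cleanly on multiplication overflow,
 * roughly what kvmalloc_array() guarantees before picking an allocator. */
static void *alloc_array(size_t count, size_t size)
{
	size_t bytes;

	if (__builtin_mul_overflow(count, size, &bytes))
		return NULL;
	return malloc(bytes);
}

int main(void)
{
	void *ok = alloc_array(512, 64);
	void *bad = alloc_array((size_t)-1, 64);	/* overflows -> NULL */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}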
@@ -2838,7 +2866,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
 
 free_bind_ops:
	if (args->num_binds > 1)
-		kfree(*bind_ops);
+		kvfree(*bind_ops);
	return err;
 }
 
@@ -2926,13 +2954,15 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	}
 
	if (args->num_binds) {
-		bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
+		bos = kvcalloc(args->num_binds, sizeof(*bos),
+			       GFP_KERNEL | __GFP_ACCOUNT);
		if (!bos) {
			err = -ENOMEM;
			goto release_vm_lock;
		}
 
-		ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
+		ops = kvcalloc(args->num_binds, sizeof(*ops),
+			       GFP_KERNEL | __GFP_ACCOUNT);
		if (!ops) {
			err = -ENOMEM;
			goto release_vm_lock;
@@ -3073,10 +3103,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	for (i = 0; bos && i < args->num_binds; ++i)
		xe_bo_put(bos[i]);
 
-	kfree(bos);
-	kfree(ops);
+	kvfree(bos);
+	kvfree(ops);
	if (args->num_binds > 1)
-		kfree(bind_ops);
+		kvfree(bind_ops);
 
	return err;
 
@@ -3100,10 +3130,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	if (q)
		xe_exec_queue_put(q);
 free_objs:
-	kfree(bos);
-	kfree(ops);
+	kvfree(bos);
+	kvfree(ops);
	if (args->num_binds > 1)
-		kfree(bind_ops);
+		kvfree(bind_ops);
	return err;
 }
 
@@ -19,6 +19,7 @@
 
 struct xe_bo;
 struct xe_sync_entry;
+struct xe_user_fence;
 struct xe_vm;
 
 #define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
@@ -104,6 +105,12 @@ struct xe_vma {
	 * @pat_index: The pat index to use when encoding the PTEs for this vma.
	 */
	u16 pat_index;
+
+	/**
+	 * @ufence: The user fence that was provided with MAP.
+	 * Needs to be signalled before UNMAP can be processed.
+	 */
+	struct xe_user_fence *ufence;
 };
 
 /**
@@ -288,10 +295,6 @@ struct xe_vm {
	struct xe_vma_op_map {
		/** @vma: VMA to map */
		struct xe_vma *vma;
-		/** @immediate: Immediate bind */
-		bool immediate;
-		/** @read_only: Read only */
-		bool read_only;
		/** @is_null: is NULL binding */
		bool is_null;
		/** @pat_index: The pat index to use for this operation. */
@@ -169,6 +169,7 @@ static const struct host1x_info host1x06_info = {
	.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
	.sid_table = tegra186_sid_table,
	.reserve_vblank_syncpts = false,
+	.skip_reset_assert = true,
 };
 
 static const struct host1x_sid_entry tegra194_sid_table[] = {
@@ -680,13 +681,15 @@ static int __maybe_unused host1x_runtime_suspend(struct device *dev)
	host1x_intr_stop(host);
	host1x_syncpt_save(host);
 
-	err = reset_control_bulk_assert(host->nresets, host->resets);
-	if (err) {
-		dev_err(dev, "failed to assert reset: %d\n", err);
-		goto resume_host1x;
-	}
+	if (!host->info->skip_reset_assert) {
+		err = reset_control_bulk_assert(host->nresets, host->resets);
+		if (err) {
+			dev_err(dev, "failed to assert reset: %d\n", err);
+			goto resume_host1x;
+		}
 
-	usleep_range(1000, 2000);
+		usleep_range(1000, 2000);
+	}
 
	clk_disable_unprepare(host->clk);
	reset_control_bulk_release(host->nresets, host->resets);
@@ -116,6 +116,12 @@ struct host1x_info {
	 * the display driver disables VBLANK increments.
	 */
	bool reserve_vblank_syncpts;
+	/*
+	 * On Tegra186, secure world applications may require access to
+	 * host1x during suspend/resume. To allow this, we need to leave
+	 * host1x not in reset.
+	 */
+	bool skip_reset_assert;
 };
 
 struct host1x {
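The host1x change keys the runtime-suspend path off a per-SoC flag in host1x_info: on Tegra186 the controller is left out of reset so secure-world clients can keep using it across suspend. A stripped-down userspace sketch of gating one step of a suspend sequence on such a capability flag (hypothetical names, printf standing in for the real hardware operations):

#include <stdbool.h>
#include <stdio.h>

/* Per-chip description, analogous to a flag in host1x_info. */
struct chip_info {
	const char *name;
	bool skip_reset_assert;
};

struct controller {
	const struct chip_info *info;
};

static void runtime_suspend(struct controller *ctrl)
{
	printf("%s: stop interrupts, save syncpoints\n", ctrl->info->name);

	/* Only assert the reset when the chip allows it. */
	if (!ctrl->info->skip_reset_assert)
		printf("%s: assert reset\n", ctrl->info->name);

	printf("%s: disable clock\n", ctrl->info->name);
}

int main(void)
{
	static const struct chip_info tegra186 = { "tegra186", true };
	static const struct chip_info tegra194 = { "tegra194", false };
	struct controller a = { &tegra186 }, b = { &tegra194 };

	runtime_suspend(&a);
	runtime_suspend(&b);
	return 0;
}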
Some files were not shown because too many files have changed in this diff