Merge branch 'kvm-docs-6.13' into HEAD

- Drop obsolete references to PPC970 KVM, which was removed 10 years ago.

- Fix incorrect references to non-existent ioctls

- List registers supported by KVM_GET/SET_ONE_REG on s390

- Use rST internal links

- Reorganize the introduction to the API document
Paolo Bonzini 2024-11-13 07:04:53 -05:00
commit 2e9a2c624e
405 changed files with 4595 additions and 1949 deletions

View File

@ -199,7 +199,8 @@ Elliot Berman <quic_eberman@quicinc.com> <eberman@codeaurora.org>
Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>
Enric Balletbo i Serra <eballetbo@kernel.org> <eballetbo@iseebcn.com>
Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
Eugen Hristev <eugen.hristev@collabora.com> <eugen.hristev@microchip.com>
Eugen Hristev <eugen.hristev@linaro.org> <eugen.hristev@microchip.com>
Eugen Hristev <eugen.hristev@linaro.org> <eugen.hristev@collabora.com>
Evgeniy Polyakov <johnpol@2ka.mipt.ru>
Ezequiel Garcia <ezequiel@vanguardiasur.com.ar> <ezequiel@collabora.com>
Faith Ekstrand <faith.ekstrand@collabora.com> <jason@jlekstrand.net>
@ -282,7 +283,7 @@ Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
Jan Kuliga <jtkuliga.kdev@gmail.com> <jankul@alatek.krakow.pl>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@linux.intel.com>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko@profian.com>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@tuni.fi>
Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@parity.io>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com>
Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>

View File

@ -63,6 +63,16 @@ properties:
- const: sleep
power-domains:
description: |
The MediaTek DPI module is typically associated with one of the
following multimedia power domains:
POWER_DOMAIN_DISPLAY
POWER_DOMAIN_VDOSYS
POWER_DOMAIN_MM
The specific power domain used varies depending on the SoC design.
It is recommended to explicitly add the appropriate power domain
property to the DPI node in the device tree.
maxItems: 1
port:
@ -79,20 +89,6 @@ required:
- clock-names
- port
allOf:
- if:
not:
properties:
compatible:
contains:
enum:
- mediatek,mt6795-dpi
- mediatek,mt8173-dpi
- mediatek,mt8186-dpi
then:
properties:
power-domains: false
additionalProperties: false
examples:

View File

@ -38,6 +38,7 @@ properties:
description: A phandle and PM domain specifier as defined by bindings of
the power controller specified by phandle. See
Documentation/devicetree/bindings/power/power-domain.yaml for details.
maxItems: 1
mediatek,gce-client-reg:
description:
@ -57,6 +58,9 @@ properties:
clocks:
items:
- description: SPLIT Clock
- description: Used for interfacing with the HDMI RX signal source.
- description: Paired with receiving HDMI RX metadata.
minItems: 1
required:
- compatible
@ -72,9 +76,24 @@ allOf:
const: mediatek,mt8195-mdp3-split
then:
properties:
clocks:
minItems: 3
required:
- mediatek,gce-client-reg
- if:
properties:
compatible:
contains:
const: mediatek,mt8173-disp-split
then:
properties:
clocks:
maxItems: 1
additionalProperties: false
examples:

View File

@ -67,6 +67,10 @@ properties:
A 2.5V to 3.3V supply for the external reference voltage. When omitted,
the internal 2.5V reference is used.
refin-supply:
description:
A 2.5V to 3.3V supply for external reference voltage, for ad7380-4 only.
aina-supply:
description:
The common mode voltage supply for the AINA- pin on pseudo-differential
@ -135,6 +139,23 @@ allOf:
ainc-supply: false
aind-supply: false
# ad7380-4 uses refin-supply as external reference.
# All other chips from ad738x family use refio as optional external reference.
# When refio-supply is omitted, internal reference is used.
- if:
properties:
compatible:
enum:
- adi,ad7380-4
then:
properties:
refio-supply: false
required:
- refin-supply
else:
properties:
refin-supply: false
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>

View File

@ -154,8 +154,6 @@ allOf:
- qcom,sm8550-qmp-gen4x2-pcie-phy
- qcom,sm8650-qmp-gen3x2-pcie-phy
- qcom,sm8650-qmp-gen4x2-pcie-phy
- qcom,x1e80100-qmp-gen3x2-pcie-phy
- qcom,x1e80100-qmp-gen4x2-pcie-phy
then:
properties:
clocks:
@ -171,6 +169,8 @@ allOf:
- qcom,sc8280xp-qmp-gen3x1-pcie-phy
- qcom,sc8280xp-qmp-gen3x2-pcie-phy
- qcom,sc8280xp-qmp-gen3x4-pcie-phy
- qcom,x1e80100-qmp-gen3x2-pcie-phy
- qcom,x1e80100-qmp-gen4x2-pcie-phy
- qcom,x1e80100-qmp-gen4x4-pcie-phy
then:
properties:
@ -201,6 +201,7 @@ allOf:
- qcom,sm8550-qmp-gen4x2-pcie-phy
- qcom,sm8650-qmp-gen4x2-pcie-phy
- qcom,x1e80100-qmp-gen4x2-pcie-phy
- qcom,x1e80100-qmp-gen4x4-pcie-phy
then:
properties:
resets:

View File

@ -48,6 +48,10 @@ properties:
- const: mclk_rx
- const: hclk
port:
$ref: audio-graph-port.yaml#
unevaluatedProperties: false
resets:
maxItems: 1

View File

@ -115,7 +115,7 @@ set up cache ready for use. The following script commands are available:
This mask can also be set through sysfs, eg::
echo 5 >/sys/modules/cachefiles/parameters/debug
echo 5 > /sys/module/cachefiles/parameters/debug
Starting the Cache

View File

@ -41,13 +41,22 @@ supports only 1 SDO line.
Reference voltage
-----------------
2 possible reference voltage sources are supported:
ad7380-4
~~~~~~~~
ad7380-4 supports only an external reference voltage (2.5V to 3.3V). It must be
declared in the device tree as ``refin-supply``.
All other devices from ad738x family
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
All other devices from ad738x support 2 possible reference voltage sources:
- Internal reference (2.5V)
- External reference (2.5V to 3.3V)
The source is determined by the device tree. If ``refio-supply`` is present,
then the external reference is used, else the internal reference is used.
then it is used as external reference, else the internal reference is used.
Oversampling and resolution boost
---------------------------------

View File

@ -16,7 +16,7 @@ ii) transmit network traffic, or any other that needs raw
Howto can be found at:
https://sites.google.com/site/packetmmap/
https://web.archive.org/web/20220404160947/https://sites.google.com/site/packetmmap/
Please send your comments to
- Ulisses Alonso Camaró <uaca@i.hate.spam.alumni.uv.es>
@ -166,7 +166,8 @@ As capture, each frame contains two parts::
/* bind socket to eth0 */
bind(this->socket, (struct sockaddr *)&my_addr, sizeof(struct sockaddr_ll));
A complete tutorial is available at: https://sites.google.com/site/packetmmap/
A complete tutorial is available at:
https://web.archive.org/web/20220404160947/https://sites.google.com/site/packetmmap/
By default, the user should put data at::

View File

@ -17,7 +17,7 @@ Architecture Level of support Constraints
============= ================ ==============================================
``arm64`` Maintained Little Endian only.
``loongarch`` Maintained \-
``riscv`` Maintained ``riscv64`` only.
``riscv`` Maintained ``riscv64`` and LLVM/Clang only.
``um`` Maintained \-
``x86`` Maintained ``x86_64`` only.
============= ================ ==============================================

View File

@ -23,177 +23,166 @@ applications can additionally seal security critical data at runtime.
A similar feature already exists in the XNU kernel with the
VM_FLAGS_PERMANENT flag [1] and on OpenBSD with the mimmutable syscall [2].
User API
========
mseal()
-----------
The mseal() syscall has the following signature:
SYSCALL
=======
mseal syscall signature
-----------------------
``int mseal(void \* addr, size_t len, unsigned long flags)``
``int mseal(void *addr, size_t len, unsigned long flags)``
**addr**/**len**: virtual memory address range.
The address range set by **addr**/**len** must meet:
- The start address must be in an allocated VMA.
- The start address must be page aligned.
- The end address (**addr** + **len**) must be in an allocated VMA.
- no gap (unallocated memory) between start and end address.
**addr/len**: virtual memory address range.
The ``len`` will be page aligned implicitly by the kernel.
The address range set by ``addr``/``len`` must meet:
- The start address must be in an allocated VMA.
- The start address must be page aligned.
- The end address (``addr`` + ``len``) must be in an allocated VMA.
- no gap (unallocated memory) between start and end address.
**flags**: reserved for future use.
The ``len`` will be page aligned implicitly by the kernel.
**Return values**:
- **0**: Success.
- **-EINVAL**:
* Invalid input ``flags``.
* The start address (``addr``) is not page aligned.
* Address range (``addr`` + ``len``) overflow.
- **-ENOMEM**:
* The start address (``addr``) is not allocated.
* The end address (``addr`` + ``len``) is not allocated.
* A gap (unallocated memory) between start and end address.
- **-EPERM**:
* sealing is supported only on 64-bit CPUs, 32-bit is not supported.
**flags**: reserved for future use.
**Note about error return**:
- For above error cases, users can expect the given memory range is
unmodified, i.e. no partial update.
- There might be other internal errors/cases not listed here, e.g.
error during merging/splitting VMAs, or the process reaching the maximum
number of supported VMAs. In those cases, partial updates to the given
memory range could happen. However, those cases should be rare.
**return values**:
**Architecture support**:
mseal only works on 64-bit CPUs, not 32-bit CPUs.
- ``0``: Success.
**Idempotent**:
users can call mseal multiple times. Calling mseal on already sealed memory
is a no-op (not an error).
- ``-EINVAL``:
- Invalid input ``flags``.
- The start address (``addr``) is not page aligned.
- Address range (``addr`` + ``len``) overflow.
**no munseal**
Once a mapping is sealed, it can't be unsealed. The kernel should never
have munseal; this is consistent with other sealing features, e.g.
F_SEAL_SEAL for files.
- ``-ENOMEM``:
- The start address (``addr``) is not allocated.
- The end address (``addr`` + ``len``) is not allocated.
- A gap (unallocated memory) between start and end address.
Blocked mm syscall for sealed mapping
-------------------------------------
It might be important to note: **once the mapping is sealed, it will
stay in the process's memory until the process terminates**.
- ``-EPERM``:
- sealing is supported only on 64-bit CPUs, 32-bit is not supported.
Example::
- For above error cases, users can expect the given memory range is
unmodified, i.e. no partial update.
*ptr = mmap(0, 4096, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
rc = mseal(ptr, 4096, 0);
/* munmap will fail */
rc = munmap(ptr, 4096);
assert(rc < 0);
- There might be other internal errors/cases not listed here, e.g.
error during merging/splitting VMAs, or the process reaching the max
number of supported VMAs. In those cases, partial updates to the given
memory range could happen. However, those cases should be rare.
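A minimal usage sketch of the syscall described above (not part of this
patch): it assumes ``__NR_mseal`` is 462, the number in the unified syscall
table, since libc may not provide a wrapper yet::

  #include <errno.h>
  #include <stdio.h>
  #include <sys/mman.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  #ifndef __NR_mseal
  #define __NR_mseal 462
  #endif

  int main(void)
  {
          size_t len = 4096;
          void *ptr = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

          if (ptr == MAP_FAILED)
                  return 1;

          /* flags must be 0; len is page aligned by the kernel */
          if (syscall(__NR_mseal, ptr, len, 0) != 0) {
                  perror("mseal");        /* e.g. EPERM on 32-bit kernels */
                  return 1;
          }

          /* the sealed mapping can no longer be unmapped */
          if (munmap(ptr, len) != 0 && errno == EPERM)
                  printf("munmap blocked by the seal\n");

          return 0;
  }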
Blocked mm syscall:
- munmap
- mmap
- mremap
- mprotect and pkey_mprotect
- some destructive madvise behaviors: MADV_DONTNEED, MADV_FREE,
MADV_DONTNEED_LOCKED, MADV_DONTFORK, MADV_WIPEONFORK
**Blocked operations after sealing**:
Unmapping, moving to another location, and shrinking the size,
via munmap() and mremap(), can leave an empty space, therefore
can be replaced with a VMA with a new set of attributes.
The first set of syscalls to block is munmap, mremap, mmap. They can
either leave an empty space in the address space, therefore allowing
replacement with a new mapping with new set of attributes, or can
overwrite the existing mapping with another mapping.
Moving or expanding a different VMA into the current location,
via mremap().
mprotect and pkey_mprotect are blocked because they change the
protection bits (RWX) of the mapping.
Modifying a VMA via mmap(MAP_FIXED).
Certain destructive madvise behaviors, specifically MADV_DONTNEED,
MADV_FREE, MADV_DONTNEED_LOCKED, and MADV_WIPEONFORK, can introduce
risks when applied to anonymous memory by threads lacking write
permissions. Consequently, these operations are prohibited under such
conditions. The aforementioned behaviors have the potential to modify
region contents by discarding pages, effectively performing a memset(0)
operation on the anonymous memory.
Size expansion, via mremap(), does not appear to pose any
specific risks to sealed VMAs. It is included anyway because
the use case is unclear. In any case, users can rely on
merging to expand a sealed VMA.
Kernel will return -EPERM for blocked syscalls.
mprotect() and pkey_mprotect().
When a blocked syscall returns -EPERM due to sealing, the memory regions may
or may not be changed, depending on the syscall being blocked:
Some destructive madvise() behaviors (e.g. MADV_DONTNEED)
for anonymous memory, when users don't have write permission to the
memory. Those behaviors can alter region contents by discarding pages,
effectively a memset(0) for anonymous memory.
- munmap: munmap is atomic. If one of the VMAs in the given range is
sealed, none of the VMAs are updated.
- mprotect, pkey_mprotect, madvise: partial update might happen, e.g.
when mprotect over multiple VMAs, mprotect might update the beginning
VMAs before reaching the sealed VMA and return -EPERM.
- mmap and mremap: undefined behavior.
Kernel will return -EPERM for blocked operations.
For blocked operations, one can expect the given address is unmodified,
i.e. no partial update. Note, this is different from existing mm
system call behaviors, where partial updates are made till an error is
found and returned to userspace. To give an example:
Assume following code sequence:
- ptr = mmap(null, 8192, PROT_NONE);
- munmap(ptr + 4096, 4096);
- ret1 = mprotect(ptr, 8192, PROT_READ);
- mseal(ptr, 4096);
- ret2 = mprotect(ptr, 8192, PROT_NONE);
ret1 will be -ENOMEM; the page at ptr is updated to PROT_READ.
ret2 will be -EPERM; the page remains PROT_READ.
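The same sequence as a hedged C sketch (assuming a ``mseal()`` wrapper,
e.g. built on the raw syscall shown earlier)::

  #include <assert.h>
  #include <errno.h>
  #include <sys/mman.h>

  void demo(void)
  {
          char *ptr = mmap(NULL, 8192, PROT_NONE,
                           MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
          int ret1, ret2;

          munmap(ptr + 4096, 4096);               /* punch a hole */

          ret1 = mprotect(ptr, 8192, PROT_READ);  /* fails with ENOMEM...  */
          assert(ret1 == -1 && errno == ENOMEM);  /* ...1st page is now RO */

          mseal(ptr, 4096, 0);                    /* seal the first page   */

          ret2 = mprotect(ptr, 8192, PROT_NONE);  /* fails with EPERM...   */
          assert(ret2 == -1 && errno == EPERM);   /* ...page stays RO      */
  }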
**Note**:
- mseal() only works on 64-bit CPUs, not 32-bit CPUs.
- users can call mseal() multiple times; mseal() on already sealed memory
is a no-op (not an error).
- munseal() is not supported.
Use cases:
==========
Use cases
=========
- glibc:
The dynamic linker, during loading ELF executables, can apply sealing to
non-writable memory segments.
mapping segments.
- Chrome browser: protect some security sensitive data-structures.
- Chrome browser: protect some security sensitive data structures.
Notes on which memory to seal:
==============================
It might be important to note that sealing changes the lifetime of a mapping,
i.e. the sealed mapping won't be unmapped until the process terminates or the
exec system call is invoked. Applications can apply sealing to any virtual
memory region from userspace, but it is crucial to thoroughly analyze the
mapping's lifetime prior to applying the sealing.
When not to use mseal
=====================
Applications can apply sealing to any virtual memory region from userspace,
but it is *crucial to thoroughly analyze the mapping's lifetime* prior to
applying the sealing. This is because the sealed mapping *won't be unmapped*
until the process terminates or the exec system call is invoked.
For example:
- aio/shm
aio/shm can call mmap and munmap on behalf of userspace, e.g.
ksys_shmdt() in shm.c. The lifetimes of those mappings are not tied to
the lifetime of the process. If those memories are sealed from userspace,
then munmap will fail, causing leaks in VMA address space during the
lifetime of the process.
- aio/shm
- ptr allocated by malloc (heap)
Don't use mseal on a memory ptr returned from malloc().
malloc() is implemented by an allocator, e.g. by glibc. The heap manager might
allocate a ptr from brk or from a mapping created by mmap.
If an app calls mseal on a ptr returned from malloc(), this can affect
the heap manager's ability to manage the mappings; the outcome is
non-deterministic.
aio/shm can call mmap()/munmap() on behalf of userspace, e.g. ksys_shmdt() in
shm.c. The lifetimes of those mappings are not tied to the lifetime of the
process. If those memories are sealed from userspace, then munmap() will fail,
causing leaks in VMA address space during the lifetime of the process.
Example::
- Brk (heap)
ptr = malloc(size);
/* don't call mseal on a ptr returned from malloc. */
mseal(ptr, size);
/* free will succeed, but the allocator can't shrink the heap below ptr */
free(ptr);
Currently, userspace applications can seal parts of the heap by calling
malloc() and mseal().
Let's assume the following calls from user space:
mseal doesn't block
===================
In a nutshell, mseal blocks certain mm syscalls from modifying some of a VMA's
attributes, such as the protection bits (RWX). A sealed mapping doesn't mean the
memory is immutable.
- ptr = malloc(size);
- mprotect(ptr, size, RO);
- mseal(ptr, size);
- free(ptr);
Technically, before mseal() is added, the user can change the protection of
the heap by calling mprotect(RO). As long as the user changes the protection
back to RW before free(), the memory range can be reused.
With mseal() in the picture, however, the heap is then partially sealed;
the user can still free it, but the memory remains RO. If the address
is re-used by the heap manager for another malloc, the process might crash
soon after. Therefore, it is important not to apply sealing to any memory
that might get recycled.
Furthermore, even if the application never calls free() on the ptr,
the heap manager may invoke the brk system call to shrink the size of the
heap. In the kernel, the brk-shrink will call munmap(). Consequently,
depending on the location of the ptr, the outcome of brk-shrink is
nondeterministic.
Additional notes:
=================
As Jann Horn pointed out in [3], there are still a few ways to write
to RO memory, which is, in a way, by design. Those cases are not covered
by mseal(). If applications want to block such cases, sandbox tools (such as
seccomp, LSM, etc) might be considered.
to RO memory, which is, in a way, by design. And those could be blocked
by different security measures.
Those cases are:
- Write to read-only memory through /proc/self/mem interface.
- Write to read-only memory through ptrace (such as PTRACE_POKETEXT).
- userfaultfd.
- Write to read-only memory through /proc/self/mem interface (FOLL_FORCE).
- Write to read-only memory through ptrace (such as PTRACE_POKETEXT).
- userfaultfd.
The idea that inspired this patch comes from Stephen Röttger's work in V8
CFI [4]. Chrome browser in ChromeOS will be the first user of this API.
Reference:
==========
[1] https://github.com/apple-oss-distributions/xnu/blob/1031c584a5e37aff177559b9f69dbd3c8c3fd30a/osfmk/mach/vm_statistics.h#L274
[2] https://man.openbsd.org/mimmutable.2
[3] https://lore.kernel.org/lkml/CAG48ez3ShUYey+ZAFsU2i1RpQn0a5eOs2hzQ426FkcgnfUGLvA@mail.gmail.com
[4] https://docs.google.com/document/d/1O2jwK4dxI3nRcOJuPYkonhTkNQfbmwdvxQMyXgeaRHo/edit#heading=h.bvaojj9fu6hc
Reference
=========
- [1] https://github.com/apple-oss-distributions/xnu/blob/1031c584a5e37aff177559b9f69dbd3c8c3fd30a/osfmk/mach/vm_statistics.h#L274
- [2] https://man.openbsd.org/mimmutable.2
- [3] https://lore.kernel.org/lkml/CAG48ez3ShUYey+ZAFsU2i1RpQn0a5eOs2hzQ426FkcgnfUGLvA@mail.gmail.com
- [4] https://docs.google.com/document/d/1O2jwK4dxI3nRcOJuPYkonhTkNQfbmwdvxQMyXgeaRHo/edit#heading=h.bvaojj9fu6hc

View File

@ -7,8 +7,19 @@ The Definitive KVM (Kernel-based Virtual Machine) API Documentation
1. General description
======================
The kvm API is a set of ioctls that are issued to control various aspects
of a virtual machine. The ioctls belong to the following classes:
The kvm API is centered around different kinds of file descriptors
and ioctls that can be issued to these file descriptors. An initial
open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
handle will create a VM file descriptor which can be used to issue VM
ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
create a virtual cpu or device and return a file descriptor pointing to
the new resource.
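A minimal sketch of this descriptor hierarchy (illustrative only, with all
error handling omitted)::

  #include <fcntl.h>
  #include <linux/kvm.h>
  #include <sys/ioctl.h>
  #include <unistd.h>

  int main(void)
  {
          int kvm  = open("/dev/kvm", O_RDWR | O_CLOEXEC); /* system fd  */
          int api  = ioctl(kvm, KVM_GET_API_VERSION, 0);   /* must be 12 */
          int vm   = ioctl(kvm, KVM_CREATE_VM, 0);         /* VM fd      */
          int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);        /* vCPU fd    */

          /* vcpu ioctls, e.g. KVM_RUN, are issued on the vcpu fd */
          close(vcpu);
          close(vm);
          close(kvm);
          return api == KVM_API_VERSION ? 0 : 1;
  }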
In other words, the kvm API is a set of ioctls that are issued to
different kinds of file descriptor in order to control various aspects of
a virtual machine. Depending on the file descriptor that accepts them,
ioctls belong to the following classes:
- System ioctls: These query and set global attributes which affect the
whole kvm subsystem. In addition a system ioctl is used to create
@ -35,18 +46,19 @@ of a virtual machine. The ioctls belong to the following classes:
device ioctls must be issued from the same process (address space) that
was used to create the VM.
2. File descriptors
===================
While most ioctls are specific to one kind of file descriptor, in some
cases the same ioctl can belong to more than one class.
The kvm API is centered around file descriptors. An initial
open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
handle will create a VM file descriptor which can be used to issue VM
ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
create a virtual cpu or device and return a file descriptor pointing to
the new resource. Finally, ioctls on a vcpu or device fd can be used
to control the vcpu or device. For vcpus, this includes the important
task of actually running guest code.
The KVM API grew over time. For this reason, KVM defines many constants
of the form ``KVM_CAP_*``, each corresponding to a set of functionality
provided by one or more ioctls. Availability of these "capabilities" can
be checked with :ref:`KVM_CHECK_EXTENSION <KVM_CHECK_EXTENSION>`. Some
capabilities also need to be enabled for VMs or VCPUs where their
functionality is desired (see :ref:`cap_enable` and :ref:`cap_enable_vm`).
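For illustration only (not part of this patch), probing and enabling a
capability might look like the sketch below; which file descriptor accepts
KVM_ENABLE_CAP depends on the capability in question::

  #include <linux/kvm.h>
  #include <string.h>
  #include <sys/ioctl.h>

  int enable_if_supported(int kvm_fd, int vm_fd, unsigned int capability)
  {
          struct kvm_enable_cap cap;

          /* 0 means unsupported; positive values may carry extra info */
          if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, capability) <= 0)
                  return -1;

          memset(&cap, 0, sizeof(cap));
          cap.cap = capability;   /* cap.args[] left at zero here */

          return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
  }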
2. Restrictions
===============
In general file descriptors can be migrated among processes by means
of fork() and the SCM_RIGHTS facility of unix domain socket. These
@ -96,12 +108,9 @@ description:
Capability:
which KVM extension provides this ioctl. Can be 'basic',
which means that it will be provided by any kernel that supports
API version 12 (see section 4.1), a KVM_CAP_xyz constant, which
means availability needs to be checked with KVM_CHECK_EXTENSION
(see section 4.4), or 'none' which means that while not all kernels
support this ioctl, there's no capability bit to check its
availability: for kernels that don't support the ioctl,
the ioctl returns -ENOTTY.
API version 12 (see :ref:`KVM_GET_API_VERSION <KVM_GET_API_VERSION>`),
or a KVM_CAP_xyz constant that can be checked with
:ref:`KVM_CHECK_EXTENSION <KVM_CHECK_EXTENSION>`.
Architectures:
which instruction set architectures provide this ioctl.
@ -118,6 +127,8 @@ description:
are not detailed, but errors with specific meanings are.
.. _KVM_GET_API_VERSION:
4.1 KVM_GET_API_VERSION
-----------------------
@ -246,6 +257,8 @@ This list also varies by kvm version and host processor, but does not change
otherwise.
.. _KVM_CHECK_EXTENSION:
4.4 KVM_CHECK_EXTENSION
-----------------------
@ -288,7 +301,7 @@ the VCPU file descriptor can be mmap-ed, including:
- if KVM_CAP_DIRTY_LOG_RING is available, a number of pages at
KVM_DIRTY_LOG_PAGE_OFFSET * PAGE_SIZE. For more information on
KVM_CAP_DIRTY_LOG_RING, see section 8.3.
KVM_CAP_DIRTY_LOG_RING, see :ref:`KVM_CAP_DIRTY_LOG_RING`.
4.7 KVM_CREATE_VCPU
@ -338,8 +351,8 @@ KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of the virtual
cpu's hardware control block.
4.8 KVM_GET_DIRTY_LOG (vm ioctl)
--------------------------------
4.8 KVM_GET_DIRTY_LOG
---------------------
:Capability: basic
:Architectures: all
@ -1298,7 +1311,7 @@ See KVM_GET_VCPU_EVENTS for the data structure.
:Capability: KVM_CAP_DEBUGREGS
:Architectures: x86
:Type: vm ioctl
:Type: vcpu ioctl
:Parameters: struct kvm_debugregs (out)
:Returns: 0 on success, -1 on error
@ -1320,7 +1333,7 @@ Reads debug registers from the vcpu.
:Capability: KVM_CAP_DEBUGREGS
:Architectures: x86
:Type: vm ioctl
:Type: vcpu ioctl
:Parameters: struct kvm_debugregs (in)
:Returns: 0 on success, -1 on error
@ -1429,6 +1442,8 @@ because of a quirk in the virtualization implementation (see the internals
documentation when it pops into existence).
.. _KVM_ENABLE_CAP:
4.37 KVM_ENABLE_CAP
-------------------
@ -2116,8 +2131,8 @@ TLB, prior to calling KVM_RUN on the associated vcpu.
The "bitmap" field is the userspace address of an array. This array
consists of a number of bits, equal to the total number of TLB entries as
determined by the last successful call to KVM_CONFIG_TLB, rounded up to the
nearest multiple of 64.
determined by the last successful call to ``KVM_ENABLE_CAP(KVM_CAP_SW_TLB)``,
rounded up to the nearest multiple of 64.
Each bit corresponds to one TLB entry, ordered the same as in the shared TLB
array.
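For example, a userspace sketch that sizes and allocates such a bitmap
(names are illustrative, not from this document)::

  #include <stdint.h>
  #include <stdlib.h>

  static uint64_t *alloc_tlb_bitmap(size_t num_tlb_entries)
  {
          /* round the bit count up to a multiple of 64, then to bytes */
          size_t nbits = (num_tlb_entries + 63) & ~(size_t)63;

          return calloc(nbits / 64, sizeof(uint64_t));
  }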
@ -2170,42 +2185,6 @@ userspace update the TCE table directly which is useful in some
circumstances.
4.63 KVM_ALLOCATE_RMA
---------------------
:Capability: KVM_CAP_PPC_RMA
:Architectures: powerpc
:Type: vm ioctl
:Parameters: struct kvm_allocate_rma (out)
:Returns: file descriptor for mapping the allocated RMA
This allocates a Real Mode Area (RMA) from the pool allocated at boot
time by the kernel. An RMA is a physically-contiguous, aligned region
of memory used on older POWER processors to provide the memory which
will be accessed by real-mode (MMU off) accesses in a KVM guest.
POWER processors support a set of sizes for the RMA that usually
includes 64MB, 128MB, 256MB and some larger powers of two.
::
/* for KVM_ALLOCATE_RMA */
struct kvm_allocate_rma {
__u64 rma_size;
};
The return value is a file descriptor which can be passed to mmap(2)
to map the allocated RMA into userspace. The mapped area can then be
passed to the KVM_SET_USER_MEMORY_REGION ioctl to establish it as the
RMA for a virtual machine. The size of the RMA in bytes (which is
fixed at host kernel boot time) is returned in the rma_size field of
the argument structure.
The KVM_CAP_PPC_RMA capability is 1 or 2 if the KVM_ALLOCATE_RMA ioctl
is supported; 2 if the processor requires all virtual machines to have
an RMA, or 1 if the processor can use an RMA but doesn't require it,
because it supports the Virtual RMA (VRMA) facility.
4.64 KVM_NMI
------------
@ -2602,7 +2581,7 @@ Specifically:
======================= ========= ===== =======================================
.. [1] These encodings are not accepted for SVE-enabled vcpus. See
KVM_ARM_VCPU_INIT.
:ref:`KVM_ARM_VCPU_INIT`.
The equivalent register content can be accessed via bits [127:0] of
the corresponding SVE Zn registers instead for vcpus that have SVE
@ -3593,6 +3572,27 @@ Errors:
This ioctl returns the guest registers that are supported for the
KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
Note that s390 does not support KVM_GET_REG_LIST for historical reasons
(read: nobody cared). The set of registers in kernels 4.x and newer is:
- KVM_REG_S390_TODPR
- KVM_REG_S390_EPOCHDIFF
- KVM_REG_S390_CPU_TIMER
- KVM_REG_S390_CLOCK_COMP
- KVM_REG_S390_PFTOKEN
- KVM_REG_S390_PFCOMPARE
- KVM_REG_S390_PFSELECT
- KVM_REG_S390_PP
- KVM_REG_S390_GBEA
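A hedged sketch of reading one of the registers listed above with
KVM_GET_ONE_REG on a vCPU fd (the KVM_REG_S390_* constants come from the
s390 ``asm/kvm.h`` pulled in by ``linux/kvm.h``)::

  #include <linux/kvm.h>
  #include <sys/ioctl.h>

  int read_cpu_timer(int vcpu_fd, __u64 *value)
  {
          struct kvm_one_reg reg = {
                  .id   = KVM_REG_S390_CPU_TIMER,
                  .addr = (__u64)(unsigned long)value,
          };

          return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
  }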
4.85 KVM_ARM_SET_DEVICE_ADDR (deprecated)
-----------------------------------------
@ -4956,8 +4956,8 @@ Coalesced pio is based on coalesced mmio. There is little difference
between coalesced mmio and pio except that coalesced pio records accesses
to I/O ports.
4.117 KVM_CLEAR_DIRTY_LOG (vm ioctl)
------------------------------------
4.117 KVM_CLEAR_DIRTY_LOG
-------------------------
:Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
:Architectures: x86, arm64, mips
@ -5093,8 +5093,8 @@ Recognised values for feature:
Finalizes the configuration of the specified vcpu feature.
The vcpu must already have been initialised, enabling the affected feature, by
means of a successful KVM_ARM_VCPU_INIT call with the appropriate flag set in
features[].
means of a successful :ref:`KVM_ARM_VCPU_INIT <KVM_ARM_VCPU_INIT>` call with the
appropriate flag set in features[].
For affected vcpu features, this is a mandatory step that must be performed
before the vcpu is fully usable.
@ -5266,7 +5266,7 @@ the cpu reset definition in the POP (Principles Of Operation).
4.123 KVM_S390_INITIAL_RESET
----------------------------
:Capability: none
:Capability: basic
:Architectures: s390
:Type: vcpu ioctl
:Parameters: none
@ -6205,7 +6205,7 @@ applied.
.. _KVM_ARM_GET_REG_WRITABLE_MASKS:
4.139 KVM_ARM_GET_REG_WRITABLE_MASKS
-------------------------------------------
------------------------------------
:Capability: KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES
:Architectures: arm64
@ -6443,6 +6443,8 @@ the capability to be present.
`flags` must currently be zero.
.. _kvm_run:
5. The kvm_run structure
========================
@ -7162,11 +7164,15 @@ primary storage for certain register types. Therefore, the kernel may use the
values in kvm_run even if the corresponding bit in kvm_dirty_regs is not set.
.. _cap_enable:
6. Capabilities that can be enabled on vCPUs
============================================
There are certain capabilities that change the behavior of the virtual CPU or
the virtual machine when enabled. To enable them, please see section 4.37.
the virtual machine when enabled. To enable them, please see
:ref:`KVM_ENABLE_CAP`.
Below you can find a list of capabilities and what their effect on the vCPU or
the virtual machine is when enabling them.
@ -7375,7 +7381,7 @@ KVM API and also from the guest.
sets are supported
(bitfields defined in arch/x86/include/uapi/asm/kvm.h).
As described above in the kvm_sync_regs struct info in section 5 (kvm_run):
As described above in the kvm_sync_regs struct info in section :ref:`kvm_run`,
KVM_CAP_SYNC_REGS "allow[s] userspace to access certain guest registers
without having to call SET/GET_*REGS". This reduces overhead by eliminating
repeated ioctl calls for setting and/or getting register values. This is
@ -7421,13 +7427,15 @@ Unused bitfields in the bitarrays must be set to zero.
This capability connects the vcpu to an in-kernel XIVE device.
.. _cap_enable_vm:
7. Capabilities that can be enabled on VMs
==========================================
There are certain capabilities that change the behavior of the virtual
machine when enabled. To enable them, please see section 4.37. Below
you can find a list of capabilities and what their effect on the VM
is when enabling them.
machine when enabled. To enable them, please see section
:ref:`KVM_ENABLE_CAP`. Below you can find a list of capabilities and
what their effect on the VM is when enabling them.
The following information is provided along with the description:
@ -8610,6 +8618,8 @@ guest according to the bits in the KVM_CPUID_FEATURES CPUID leaf
(0x40000001). Otherwise, a guest may use the paravirtual features
regardless of what has actually been exposed through the CPUID leaf.
.. _KVM_CAP_DIRTY_LOG_RING:
8.29 KVM_CAP_DIRTY_LOG_RING/KVM_CAP_DIRTY_LOG_RING_ACQ_REL
----------------------------------------------------------

View File

@ -14141,6 +14141,15 @@ S: Maintained
T: git git://linuxtv.org/media_tree.git
F: drivers/media/platform/nxp/imx-pxp.[ch]
MEDIA DRIVERS FOR ASCOT2E
M: Abylay Ospan <aospan@amazon.com>
L: linux-media@vger.kernel.org
S: Supported
W: https://linuxtv.org
W: http://netup.tv/
T: git git://linuxtv.org/media_tree.git
F: drivers/media/dvb-frontends/ascot2e*
MEDIA DRIVERS FOR CXD2099AR CI CONTROLLERS
M: Jasmin Jessich <jasmin@anw.at>
L: linux-media@vger.kernel.org
@ -14149,6 +14158,15 @@ W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: drivers/media/dvb-frontends/cxd2099*
MEDIA DRIVERS FOR CXD2841ER
M: Abylay Ospan <aospan@amazon.com>
L: linux-media@vger.kernel.org
S: Supported
W: https://linuxtv.org
W: http://netup.tv/
T: git git://linuxtv.org/media_tree.git
F: drivers/media/dvb-frontends/cxd2841er*
MEDIA DRIVERS FOR CXD2880
M: Yasunari Takiguchi <Yasunari.Takiguchi@sony.com>
L: linux-media@vger.kernel.org
@ -14193,6 +14211,33 @@ F: drivers/media/platform/nxp/imx-mipi-csis.c
F: drivers/media/platform/nxp/imx7-media-csi.c
F: drivers/media/platform/nxp/imx8mq-mipi-csi2.c
MEDIA DRIVERS FOR HELENE
M: Abylay Ospan <aospan@amazon.com>
L: linux-media@vger.kernel.org
S: Supported
W: https://linuxtv.org
W: http://netup.tv/
T: git git://linuxtv.org/media_tree.git
F: drivers/media/dvb-frontends/helene*
MEDIA DRIVERS FOR HORUS3A
M: Abylay Ospan <aospan@amazon.com>
L: linux-media@vger.kernel.org
S: Supported
W: https://linuxtv.org
W: http://netup.tv/
T: git git://linuxtv.org/media_tree.git
F: drivers/media/dvb-frontends/horus3a*
MEDIA DRIVERS FOR LNBH25
M: Abylay Ospan <aospan@amazon.com>
L: linux-media@vger.kernel.org
S: Supported
W: https://linuxtv.org
W: http://netup.tv/
T: git git://linuxtv.org/media_tree.git
F: drivers/media/dvb-frontends/lnbh25*
MEDIA DRIVERS FOR MXL5XX TUNER DEMODULATORS
L: linux-media@vger.kernel.org
S: Orphan
@ -14200,6 +14245,15 @@ W: https://linuxtv.org
T: git git://linuxtv.org/media_tree.git
F: drivers/media/dvb-frontends/mxl5xx*
MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices
M: Abylay Ospan <aospan@amazon.com>
L: linux-media@vger.kernel.org
S: Supported
W: https://linuxtv.org
W: http://netup.tv/
T: git git://linuxtv.org/media_tree.git
F: drivers/media/pci/netup_unidvb/*
MEDIA DRIVERS FOR NVIDIA TEGRA - VDE
M: Dmitry Osipenko <digetx@gmail.com>
L: linux-media@vger.kernel.org

View File

@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 12
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Baby Opossum Posse
# *DOCUMENTATION*

View File

@ -855,14 +855,14 @@ config HAVE_CFI_ICALL_NORMALIZE_INTEGERS_CLANG
def_bool y
depends on $(cc-option,-fsanitize=kcfi -fsanitize-cfi-icall-experimental-normalize-integers)
# With GCOV/KASAN we need this fix: https://github.com/llvm/llvm-project/pull/104826
depends on CLANG_VERSION >= 190000 || (!GCOV_KERNEL && !KASAN_GENERIC && !KASAN_SW_TAGS)
depends on CLANG_VERSION >= 190103 || (!GCOV_KERNEL && !KASAN_GENERIC && !KASAN_SW_TAGS)
config HAVE_CFI_ICALL_NORMALIZE_INTEGERS_RUSTC
def_bool y
depends on HAVE_CFI_ICALL_NORMALIZE_INTEGERS_CLANG
depends on RUSTC_VERSION >= 107900
# With GCOV/KASAN we need this fix: https://github.com/rust-lang/rust/pull/129373
depends on (RUSTC_LLVM_VERSION >= 190000 && RUSTC_VERSION >= 108200) || \
depends on (RUSTC_LLVM_VERSION >= 190103 && RUSTC_VERSION >= 108200) || \
(!GCOV_KERNEL && !KASAN_GENERIC && !KASAN_SW_TAGS)
config CFI_PERMISSIVE

View File

@ -19,6 +19,7 @@
#include <linux/ratelimit.h>
#include <linux/rseq.h>
#include <linux/syscalls.h>
#include <linux/pkeys.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
@ -66,10 +67,63 @@ struct rt_sigframe_user_layout {
unsigned long end_offset;
};
/*
* Holds any EL0-controlled state that influences unprivileged memory accesses.
* This includes both accesses done in userspace and uaccess done in the kernel.
*
* This state needs to be carefully managed to ensure that it doesn't cause
* uaccess to fail when setting up the signal frame, and the signal handler
* itself also expects a well-defined state when entered.
*/
struct user_access_state {
u64 por_el0;
};
#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
/*
* Save the user access state into ua_state and reset it to disable any
* restrictions.
*/
static void save_reset_user_access_state(struct user_access_state *ua_state)
{
if (system_supports_poe()) {
u64 por_enable_all = 0;
for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
por_enable_all |= POE_RXW << (pkey * POR_BITS_PER_PKEY);
ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
write_sysreg_s(por_enable_all, SYS_POR_EL0);
/* Ensure that any subsequent uaccess observes the updated value */
isb();
}
}
/*
* Set the user access state for invoking the signal handler.
*
* No uaccess should be done after that function is called.
*/
static void set_handler_user_access_state(void)
{
if (system_supports_poe())
write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
}
/*
* Restore the user access state to the values saved in ua_state.
*
* No uaccess should be done after that function is called.
*/
static void restore_user_access_state(const struct user_access_state *ua_state)
{
if (system_supports_poe())
write_sysreg_s(ua_state->por_el0, SYS_POR_EL0);
}
static void init_user_layout(struct rt_sigframe_user_layout *user)
{
const size_t reserved_size =
@ -261,18 +315,20 @@ static int restore_fpmr_context(struct user_ctxs *user)
return err;
}
static int preserve_poe_context(struct poe_context __user *ctx)
static int preserve_poe_context(struct poe_context __user *ctx,
const struct user_access_state *ua_state)
{
int err = 0;
__put_user_error(POE_MAGIC, &ctx->head.magic, err);
__put_user_error(sizeof(*ctx), &ctx->head.size, err);
__put_user_error(read_sysreg_s(SYS_POR_EL0), &ctx->por_el0, err);
__put_user_error(ua_state->por_el0, &ctx->por_el0, err);
return err;
}
static int restore_poe_context(struct user_ctxs *user)
static int restore_poe_context(struct user_ctxs *user,
struct user_access_state *ua_state)
{
u64 por_el0;
int err = 0;
@ -282,7 +338,7 @@ static int restore_poe_context(struct user_ctxs *user)
__get_user_error(por_el0, &(user->poe->por_el0), err);
if (!err)
write_sysreg_s(por_el0, SYS_POR_EL0);
ua_state->por_el0 = por_el0;
return err;
}
@ -850,7 +906,8 @@ static int parse_user_sigframe(struct user_ctxs *user,
}
static int restore_sigframe(struct pt_regs *regs,
struct rt_sigframe __user *sf)
struct rt_sigframe __user *sf,
struct user_access_state *ua_state)
{
sigset_t set;
int i, err;
@ -899,7 +956,7 @@ static int restore_sigframe(struct pt_regs *regs,
err = restore_zt_context(&user);
if (err == 0 && system_supports_poe() && user.poe)
err = restore_poe_context(&user);
err = restore_poe_context(&user, ua_state);
return err;
}
@ -908,6 +965,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
struct user_access_state ua_state;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
@ -924,12 +982,14 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (!access_ok(frame, sizeof (*frame)))
goto badframe;
if (restore_sigframe(regs, frame))
if (restore_sigframe(regs, frame, &ua_state))
goto badframe;
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
restore_user_access_state(&ua_state);
return regs->regs[0];
badframe:
@ -1035,7 +1095,8 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
}
static int setup_sigframe(struct rt_sigframe_user_layout *user,
struct pt_regs *regs, sigset_t *set)
struct pt_regs *regs, sigset_t *set,
const struct user_access_state *ua_state)
{
int i, err = 0;
struct rt_sigframe __user *sf = user->sigframe;
@ -1097,10 +1158,9 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
struct poe_context __user *poe_ctx =
apply_user_offset(user, user->poe_offset);
err |= preserve_poe_context(poe_ctx);
err |= preserve_poe_context(poe_ctx, ua_state);
}
/* ZA state if present */
if (system_supports_sme() && err == 0 && user->za_offset) {
struct za_context __user *za_ctx =
@ -1237,9 +1297,6 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
sme_smstop();
}
if (system_supports_poe())
write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0);
if (ka->sa.sa_flags & SA_RESTORER)
sigtramp = ka->sa.sa_restorer;
else
@ -1253,6 +1310,7 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
{
struct rt_sigframe_user_layout user;
struct rt_sigframe __user *frame;
struct user_access_state ua_state;
int err = 0;
fpsimd_signal_preserve_current_state();
@ -1260,13 +1318,14 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
if (get_sigframe(&user, ksig, regs))
return 1;
save_reset_user_access_state(&ua_state);
frame = user.sigframe;
__put_user_error(0, &frame->uc.uc_flags, err);
__put_user_error(NULL, &frame->uc.uc_link, err);
err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
err |= setup_sigframe(&user, regs, set);
err |= setup_sigframe(&user, regs, set, &ua_state);
if (err == 0) {
setup_return(regs, &ksig->ka, &user, usig);
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
@ -1276,6 +1335,11 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
}
}
if (err == 0)
set_handler_user_access_state();
else
restore_user_access_state(&ua_state);
return err;
}

View File

@ -102,3 +102,4 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
return old;
}
}
EXPORT_SYMBOL(__cmpxchg_small);

View File

@ -612,9 +612,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 8 | 4 | 2 | 1;
}
break;
case KVM_CAP_PPC_RMA:
r = 0;
break;
case KVM_CAP_PPC_HWRNG:
r = kvmppc_hwrng_present();
break;

View File

@ -177,7 +177,7 @@ config RISCV
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RETHOOK if !XIP_KERNEL
select HAVE_RSEQ
select HAVE_RUST if RUSTC_SUPPORTS_RISCV
select HAVE_RUST if RUSTC_SUPPORTS_RISCV && CC_IS_CLANG
select HAVE_SAMPLE_FTRACE_DIRECT
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_STACKPROTECTOR

View File

@ -2,6 +2,12 @@ ifdef CONFIG_RELOCATABLE
KBUILD_CFLAGS += -fno-pie
endif
ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
ifdef CONFIG_FORTIFY_SOURCE
KBUILD_CFLAGS += -D__NO_FORTIFY
endif
endif
obj-$(CONFIG_ERRATA_ANDES) += andes/
obj-$(CONFIG_ERRATA_SIFIVE) += sifive/
obj-$(CONFIG_ERRATA_THEAD) += thead/

View File

@ -36,6 +36,11 @@ KASAN_SANITIZE_alternative.o := n
KASAN_SANITIZE_cpufeature.o := n
KASAN_SANITIZE_sbi_ecall.o := n
endif
ifdef CONFIG_FORTIFY_SOURCE
CFLAGS_alternative.o += -D__NO_FORTIFY
CFLAGS_cpufeature.o += -D__NO_FORTIFY
CFLAGS_sbi_ecall.o += -D__NO_FORTIFY
endif
endif
extra-y += vmlinux.lds

View File

@ -210,7 +210,7 @@ void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
if (!size)
return NULL;
return early_ioremap(phys, size);
return early_memremap(phys, size);
}
void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
@ -218,7 +218,7 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
if (!map || !size)
return;
early_iounmap(map, size);
early_memunmap(map, size);
}
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)

View File

@ -4,8 +4,6 @@
* Copyright (C) 2017 SiFive
*/
#define GENERATING_ASM_OFFSETS
#include <linux/kbuild.h>
#include <linux/mm.h>
#include <linux/sched.h>

View File

@ -80,8 +80,7 @@ int populate_cache_leaves(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
struct device_node *np = of_cpu_device_node_get(cpu);
struct device_node *prev = NULL;
struct device_node *np, *prev;
int levels = 1, level = 1;
if (!acpi_disabled) {
@ -105,6 +104,10 @@ int populate_cache_leaves(unsigned int cpu)
return 0;
}
np = of_cpu_device_node_get(cpu);
if (!np)
return -ENOENT;
if (of_property_read_bool(np, "cache-size"))
ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
if (of_property_read_bool(np, "i-cache-size"))

View File

@ -58,7 +58,7 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
if (cpu_ops->cpu_is_stopped)
ret = cpu_ops->cpu_is_stopped(cpu);
if (ret)
pr_warn("CPU%d may not have stopped: %d\n", cpu, ret);
pr_warn("CPU%u may not have stopped: %d\n", cpu, ret);
}
/*

View File

@ -64,7 +64,7 @@ extra_header_fields:
.long efi_header_end - _start // SizeOfHeaders
.long 0 // CheckSum
.short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem
.short 0 // DllCharacteristics
.short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT // DllCharacteristics
.quad 0 // SizeOfStackReserve
.quad 0 // SizeOfStackCommit
.quad 0 // SizeOfHeapReserve

View File

@ -16,8 +16,12 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
KBUILD_CFLAGS += -mcmodel=medany
CFLAGS_cmdline_early.o += -D__NO_FORTIFY
CFLAGS_lib-fdt_ro.o += -D__NO_FORTIFY
CFLAGS_fdt_early.o += -D__NO_FORTIFY
# lib/string.c already defines __NO_FORTIFY
CFLAGS_ctype.o += -D__NO_FORTIFY
CFLAGS_lib-fdt.o += -D__NO_FORTIFY
CFLAGS_lib-fdt_ro.o += -D__NO_FORTIFY
CFLAGS_archrandom_early.o += -D__NO_FORTIFY
$(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_ \
--remove-section=.note.gnu.property \

View File

@ -136,8 +136,6 @@
#define REG_PTR(insn, pos, regs) \
(ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))
#define GET_RM(insn) (((insn) >> 12) & 7)
#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))

View File

@ -18,6 +18,7 @@ obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
ccflags-y := -fno-stack-protector
ccflags-y += -DDISABLE_BRANCH_PROFILING
ccflags-y += -fno-builtin
ifneq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y)

View File

@ -116,7 +116,10 @@ static inline bool amd_gart_present(void)
#define amd_nb_num(x) 0
#define amd_nb_has_feature(x) false
#define node_to_amd_nb(x) NULL
static inline struct amd_northbridge *node_to_amd_nb(int node)
{
return NULL;
}
#define amd_gart_present(x) false
#endif

View File

@ -261,12 +261,6 @@ static noinstr bool handle_bug(struct pt_regs *regs)
int ud_type;
u32 imm;
/*
* Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
* is a rare case that uses @regs without passing them to
* irqentry_enter().
*/
kmsan_unpoison_entry_regs(regs);
ud_type = decode_bug(regs->ip, &imm);
if (ud_type == BUG_NONE)
return handled;
@ -275,6 +269,12 @@ static noinstr bool handle_bug(struct pt_regs *regs)
* All lies, just get the WARN/BUG out.
*/
instrumentation_begin();
/*
* Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
* is a rare case that uses @regs without passing them to
* irqentry_enter().
*/
kmsan_unpoison_entry_regs(regs);
/*
* Since we're emulating a CALL with exceptions, restore the interrupt
* state to what it was at the exception site.

View File

@ -2654,19 +2654,26 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (apic->apicv_active) {
/* irr_pending is always true when apicv is activated. */
apic->irr_pending = true;
/*
* When APICv is enabled, KVM must always search the IRR for a pending
* IRQ, as other vCPUs and devices can set IRR bits even if the vCPU
* isn't running. If APICv is disabled, KVM _should_ search the IRR
* for a pending IRQ. But KVM currently doesn't ensure *all* hardware,
* e.g. CPUs and IOMMUs, has seen the change in state, i.e. searching
* the IRR at this time could race with IRQ delivery from hardware that
* still sees APICv as being enabled.
*
* FIXME: Ensure other vCPUs and devices observe the change in APICv
* state prior to updating KVM's metadata caches, so that KVM
* can safely search the IRR and set irr_pending accordingly.
*/
apic->irr_pending = true;
if (apic->apicv_active)
apic->isr_count = 1;
} else {
/*
* Don't clear irr_pending, searching the IRR can race with
* updates from the CPU as APICv is still active from hardware's
* perspective. The flag will be cleared as appropriate when
* KVM injects the interrupt.
*/
else
apic->isr_count = count_vectors(apic->regs + APIC_ISR);
}
apic->highest_isr_cache = -1;
}

View File

@ -450,8 +450,11 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
goto e_free;
/* This needs to happen after SEV/SNP firmware initialization. */
if (vm_type == KVM_X86_SNP_VM && snp_guest_req_init(kvm))
goto e_free;
if (vm_type == KVM_X86_SNP_VM) {
ret = snp_guest_req_init(kvm);
if (ret)
goto e_free;
}
INIT_LIST_HEAD(&sev->regions_list);
INIT_LIST_HEAD(&sev->mirror_vms);
@ -2212,10 +2215,6 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (sev->snp_context)
return -EINVAL;
sev->snp_context = snp_context_create(kvm, argp);
if (!sev->snp_context)
return -ENOTTY;
if (params.flags)
return -EINVAL;
@ -2230,6 +2229,10 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (params.policy & SNP_POLICY_MASK_SINGLE_SOCKET)
return -EINVAL;
sev->snp_context = snp_context_create(kvm, argp);
if (!sev->snp_context)
return -ENOTTY;
start.gctx_paddr = __psp_pa(sev->snp_context);
start.policy = params.policy;
memcpy(start.gosvw, params.gosvw, sizeof(params.gosvw));

View File

@ -1197,11 +1197,14 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
kvm_hv_nested_transtion_tlb_flush(vcpu, enable_ept);
/*
* If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
* for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
* full TLB flush from the guest's perspective. This is required even
* if VPID is disabled in the host as KVM may need to synchronize the
* MMU in response to the guest TLB flush.
* If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the
* same VPID as the host, and so architecturally, linear and combined
* mappings for VPID=0 must be flushed at VM-Enter and VM-Exit. KVM
* emulates L2 sharing L1's VPID=0 by using vpid01 while running L2,
* and so KVM must also emulate TLB flush of VPID=0, i.e. vpid01. This
* is required if VPID is disabled in KVM, as a TLB flush (there are no
* VPIDs) still occurs from L1's perspective, and KVM may need to
* synchronize the MMU in response to the guest TLB flush.
*
* Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
* EPT is a special snowflake, as guest-physical mappings aren't
@ -2315,6 +2318,17 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
/*
* If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the
* same VPID as the host. Emulate this behavior by using vpid01 for L2
* if VPID is disabled in vmcs12. Note, if VPID is disabled, VM-Enter
* and VM-Exit are architecturally required to flush VPID=0, but *only*
* VPID=0. I.e. using vpid02 would be ok (so long as KVM emulates the
* required flushes), but doing so would cause KVM to over-flush. E.g.
* if L1 runs L2 X with VPID12=1, then runs L2 Y with VPID12 disabled,
* and then runs L2 X again, then KVM can and should retain TLB entries
* for VPID12=1.
*/
if (enable_vpid) {
if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
@ -5957,6 +5971,12 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
return nested_vmx_fail(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
/*
* Always flush the effective vpid02, i.e. never flush the current VPID
* and never explicitly flush vpid01. INVVPID targets a VPID, not a
* VMCS, and so whether or not the current vmcs12 has VPID enabled is
* irrelevant (and there may not be a loaded vmcs12).
*/
vpid02 = nested_get_vpid02(vcpu);
switch (type) {
case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:

View File

@ -217,9 +217,11 @@ module_param(ple_window_shrink, uint, 0444);
static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, uint, 0444);
/* Default is SYSTEM mode, 1 for host-guest mode */
/* Default is SYSTEM mode, 1 for host-guest mode (which is BROKEN) */
int __read_mostly pt_mode = PT_MODE_SYSTEM;
#ifdef CONFIG_BROKEN
module_param(pt_mode, int, S_IRUGO);
#endif
struct x86_pmu_lbr __ro_after_init vmx_lbr_caps;
@ -3213,7 +3215,7 @@ void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu))
if (is_guest_mode(vcpu) && nested_cpu_has_vpid(get_vmcs12(vcpu)))
return nested_get_vpid02(vcpu);
return to_vmx(vcpu)->vpid;
}

View File

@ -561,55 +561,33 @@ EXPORT_SYMBOL(blk_rq_append_bio);
/* Prepare bio for passthrough IO given ITER_BVEC iter */
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
{
struct request_queue *q = rq->q;
size_t nr_iter = iov_iter_count(iter);
size_t nr_segs = iter->nr_segs;
struct bio_vec *bvecs, *bvprvp = NULL;
const struct queue_limits *lim = &q->limits;
unsigned int nsegs = 0, bytes = 0;
const struct queue_limits *lim = &rq->q->limits;
unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
unsigned int nsegs;
struct bio *bio;
size_t i;
int ret;
if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
return -EINVAL;
if (nr_segs > queue_max_segments(q))
if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
return -EINVAL;
/* no iovecs to alloc, as we already have a BVEC iterator */
/* reuse the bvecs from the iterator instead of allocating new ones */
bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
if (bio == NULL)
if (!bio)
return -ENOMEM;
bio_iov_bvec_set(bio, (struct iov_iter *)iter);
blk_rq_bio_prep(rq, bio, nr_segs);
/* loop to perform a bunch of sanity checks */
bvecs = (struct bio_vec *)iter->bvec;
for (i = 0; i < nr_segs; i++) {
struct bio_vec *bv = &bvecs[i];
/*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, fallback to copy.
*/
if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
blk_mq_map_bio_put(bio);
return -EREMOTEIO;
}
/* check full condition */
if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
goto put_bio;
if (bytes + bv->bv_len > nr_iter)
break;
nsegs++;
bytes += bv->bv_len;
bvprvp = bv;
/* check that the data layout matches the hardware restrictions */
ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
if (ret) {
/* if we would have to split the bio, copy instead */
if (ret > 0)
ret = -EREMOTEIO;
blk_mq_map_bio_put(bio);
return ret;
}
blk_rq_bio_prep(rq, bio, nsegs);
return 0;
put_bio:
blk_mq_map_bio_put(bio);
return -EINVAL;
}
/**

View File

@ -108,6 +108,14 @@ static int reset_pending_show(struct seq_file *s, void *v)
return 0;
}
static int firewall_irq_counter_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = seq_to_ivpu(s);
seq_printf(s, "%d\n", atomic_read(&vdev->hw->firewall_irq_counter));
return 0;
}
static const struct drm_debugfs_info vdev_debugfs_list[] = {
{"bo_list", bo_list_show, 0},
{"fw_name", fw_name_show, 0},
@ -116,6 +124,7 @@ static const struct drm_debugfs_info vdev_debugfs_list[] = {
{"last_bootmode", last_bootmode_show, 0},
{"reset_counter", reset_counter_show, 0},
{"reset_pending", reset_pending_show, 0},
{"firewall_irq_counter", firewall_irq_counter_show, 0},
};
static ssize_t

View File

@ -249,6 +249,7 @@ int ivpu_hw_init(struct ivpu_device *vdev)
platform_init(vdev);
wa_init(vdev);
timeouts_init(vdev);
atomic_set(&vdev->hw->firewall_irq_counter, 0);
return 0;
}

View File

@ -52,6 +52,7 @@ struct ivpu_hw_info {
int dma_bits;
ktime_t d0i3_entry_host_ts;
u64 d0i3_entry_vpu_ts;
atomic_t firewall_irq_counter;
};
int ivpu_hw_init(struct ivpu_device *vdev);

View File

@ -1062,7 +1062,10 @@ static void irq_wdt_mss_handler(struct ivpu_device *vdev)
static void irq_noc_firewall_handler(struct ivpu_device *vdev)
{
ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
atomic_inc(&vdev->hw->firewall_irq_counter);
ivpu_dbg(vdev, IRQ, "NOC Firewall interrupt detected, counter %d\n",
atomic_read(&vdev->hw->firewall_irq_counter));
}
/* Handler for IRQs from NPU core */

View File

@ -867,7 +867,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
/* Store CPU Logical ID */
cpc_ptr->cpu_id = pr->id;
spin_lock_init(&cpc_ptr->rmw_lock);
raw_spin_lock_init(&cpc_ptr->rmw_lock);
/* Parse PSD data for this CPU */
ret = acpi_get_psd(cpc_ptr, handle);
@ -1087,6 +1087,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
struct cpc_desc *cpc_desc;
unsigned long flags;
size = GET_BIT_WIDTH(reg);
@ -1126,7 +1127,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return -ENODEV;
}
spin_lock(&cpc_desc->rmw_lock);
raw_spin_lock_irqsave(&cpc_desc->rmw_lock, flags);
switch (size) {
case 8:
prev_val = readb_relaxed(vaddr);
@ -1141,7 +1142,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
prev_val = readq_relaxed(vaddr);
break;
default:
spin_unlock(&cpc_desc->rmw_lock);
raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
return -EFAULT;
}
val = MASK_VAL_WRITE(reg, prev_val, val);
@ -1174,7 +1175,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
}
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
spin_unlock(&cpc_desc->rmw_lock);
raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
return ret_val;
}

View File

@ -26,7 +26,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
@ -2634,7 +2633,6 @@ static const char *dev_uevent_name(const struct kobject *kobj)
static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
const struct device *dev = kobj_to_dev(kobj);
struct device_driver *driver;
int retval = 0;
/* add device node properties if present */
@ -2663,12 +2661,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
if (dev->type && dev->type->name)
add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
/* Synchronize with module_remove_driver() */
rcu_read_lock();
driver = READ_ONCE(dev->driver);
if (driver)
add_uevent_var(env, "DRIVER=%s", driver->name);
rcu_read_unlock();
if (dev->driver)
add_uevent_var(env, "DRIVER=%s", dev->driver->name);
/* Add common DT information about the device */
of_device_uevent(dev, env);
@ -2738,8 +2732,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
if (!env)
return -ENOMEM;
/* Synchronize with really_probe() */
device_lock(dev);
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
device_unlock(dev);
if (retval)
goto out;
@ -4037,6 +4034,41 @@ int device_for_each_child_reverse(struct device *parent, void *data,
}
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
/**
* device_for_each_child_reverse_from - device child iterator in reversed order.
* @parent: parent struct device.
* @from: optional starting point in child list
* @fn: function to be called for each device.
* @data: data for the callback.
*
* Iterate over @parent's child devices, starting at @from, and call @fn
* for each, passing it @data. This helper is identical to
* device_for_each_child_reverse() when @from is NULL.
*
* @fn is checked each iteration. If it returns anything other than 0,
* iteration stops and that value is returned to the caller of
* device_for_each_child_reverse_from().
*/
int device_for_each_child_reverse_from(struct device *parent,
struct device *from, const void *data,
int (*fn)(struct device *, const void *))
{
struct klist_iter i;
struct device *child;
int error = 0;
if (!parent->p)
return 0;
klist_iter_init_node(&parent->p->klist_children, &i,
(from ? &from->p->knode_parent : NULL));
while ((child = prev_device(&i)) && !error)
error = fn(child, data);
klist_iter_exit(&i);
return error;
}
EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);
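
A minimal usage sketch for the iterator documented above (illustrative only, not part of this patch; the callback and wrapper names are made up):

#include <linux/device.h>

/* Stop at the first earlier sibling that is already bound to a driver. */
static int bound_cb(struct device *child, const void *data)
{
	return child->driver ? 1 : 0;	/* a non-zero value stops the walk and is returned */
}

static bool has_bound_sibling_before(struct device *parent, struct device *from)
{
	/* walks @parent's children in reverse, starting just before @from */
	return device_for_each_child_reverse_from(parent, from, NULL, bound_cb) == 1;
}
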
/**
* device_find_child - device iterator for locating a particular device.
* @parent: parent struct device


@ -7,7 +7,6 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/rcupdate.h>
#include "base.h"
static char *make_driver_name(const struct device_driver *drv)
@ -102,9 +101,6 @@ void module_remove_driver(const struct device_driver *drv)
if (!drv)
return;
/* Synchronize with dev_uevent() */
synchronize_rcu();
sysfs_remove_link(&drv->p->kobj, "module");
if (drv->owner)


@ -674,6 +674,16 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
*/
void tpm_chip_unregister(struct tpm_chip *chip)
{
#ifdef CONFIG_TCG_TPM2_HMAC
int rc;
rc = tpm_try_get_ops(chip);
if (!rc) {
tpm2_end_auth_session(chip);
tpm_put_ops(chip);
}
#endif
tpm_del_legacy_sysfs(chip);
if (tpm_is_hwrng_enabled(chip))
hwrng_unregister(&chip->hwrng);


@ -27,6 +27,9 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
struct tpm_header *header = (void *)buf;
ssize_t ret, len;
if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_end_auth_session(chip);
ret = tpm2_prepare_space(chip, space, buf, bufsiz);
/* If the command is not implemented by the TPM, synthesize a
* response with a TPM2_RC_COMMAND_CODE return for user-space.


@ -379,10 +379,12 @@ int tpm_pm_suspend(struct device *dev)
rc = tpm_try_get_ops(chip);
if (!rc) {
if (chip->flags & TPM_CHIP_FLAG_TPM2)
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
tpm2_end_auth_session(chip);
tpm2_shutdown(chip, TPM2_SU_STATE);
else
} else {
rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
}
tpm_put_ops(chip);
}


@ -333,6 +333,9 @@ void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
}
#ifdef CONFIG_TCG_TPM2_HMAC
/* The first write to /dev/tpm{rm0} will flush the session. */
attributes |= TPM2_SA_CONTINUE_SESSION;
/*
* The Architecture Guide requires us to strip trailing zeros
* before computing the HMAC
@ -484,7 +487,8 @@ static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v,
sha256_final(&sctx, out);
}
static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip,
struct tpm2_auth *auth)
{
struct crypto_kpp *kpp;
struct kpp_request *req;
@ -543,7 +547,7 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
sg_set_buf(&s[0], chip->null_ec_key_x, EC_PT_SZ);
sg_set_buf(&s[1], chip->null_ec_key_y, EC_PT_SZ);
kpp_request_set_input(req, s, EC_PT_SZ*2);
sg_init_one(d, chip->auth->salt, EC_PT_SZ);
sg_init_one(d, auth->salt, EC_PT_SZ);
kpp_request_set_output(req, d, EC_PT_SZ);
crypto_kpp_compute_shared_secret(req);
kpp_request_free(req);
@ -554,8 +558,7 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
* This works because KDFe fully consumes the secret before it
* writes the salt
*/
tpm2_KDFe(chip->auth->salt, "SECRET", x, chip->null_ec_key_x,
chip->auth->salt);
tpm2_KDFe(auth->salt, "SECRET", x, chip->null_ec_key_x, auth->salt);
out:
crypto_free_kpp(kpp);
@ -853,7 +856,9 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
if (rc)
/* manually close the session if it wasn't consumed */
tpm2_flush_context(chip, auth->handle);
memzero_explicit(auth, sizeof(*auth));
kfree_sensitive(auth);
chip->auth = NULL;
} else {
/* reset for next use */
auth->session = TPM_HEADER_SIZE;
@ -881,7 +886,8 @@ void tpm2_end_auth_session(struct tpm_chip *chip)
return;
tpm2_flush_context(chip, auth->handle);
memzero_explicit(auth, sizeof(*auth));
kfree_sensitive(auth);
chip->auth = NULL;
}
EXPORT_SYMBOL(tpm2_end_auth_session);
@ -915,33 +921,37 @@ static int tpm2_parse_start_auth_session(struct tpm2_auth *auth,
static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
{
int rc;
unsigned int offset = 0; /* dummy offset for null seed context */
u8 name[SHA256_DIGEST_SIZE + 2];
u32 tmp_null_key;
int rc;
rc = tpm2_load_context(chip, chip->null_key_context, &offset,
null_key);
if (rc != -EINVAL)
return rc;
&tmp_null_key);
if (rc != -EINVAL) {
if (!rc)
*null_key = tmp_null_key;
goto err;
}
/* an integrity failure may mean the TPM has been reset */
dev_err(&chip->dev, "NULL key integrity failure!\n");
/* check the null name against what we know */
tpm2_create_primary(chip, TPM2_RH_NULL, NULL, name);
if (memcmp(name, chip->null_key_name, sizeof(name)) == 0)
/* name unchanged, assume transient integrity failure */
return rc;
/*
* Fatal TPM failure: the NULL seed has actually changed, so
* the TPM must have been illegally reset. All in-kernel TPM
* operations will fail because the NULL primary can't be
* loaded to salt the sessions, but disable the TPM anyway so
* userspace programmes can't be compromised by it.
*/
dev_err(&chip->dev, "NULL name has changed, disabling TPM due to interference\n");
/* Try to re-create null key, given the integrity failure: */
rc = tpm2_create_primary(chip, TPM2_RH_NULL, &tmp_null_key, name);
if (rc)
goto err;
/* Return null key if the name has not been changed: */
if (!memcmp(name, chip->null_key_name, sizeof(name))) {
*null_key = tmp_null_key;
return 0;
}
/* Deduce from the name change TPM interference: */
dev_err(&chip->dev, "null key integrity check failed\n");
tpm2_flush_context(chip, tmp_null_key);
chip->flags |= TPM_CHIP_FLAG_DISABLE;
return rc;
err:
return rc ? -ENODEV : 0;
}
/**
@ -958,16 +968,20 @@ static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
*/
int tpm2_start_auth_session(struct tpm_chip *chip)
{
struct tpm2_auth *auth;
struct tpm_buf buf;
struct tpm2_auth *auth = chip->auth;
int rc;
u32 null_key;
int rc;
if (!auth) {
dev_warn_once(&chip->dev, "auth session is not active\n");
if (chip->auth) {
dev_warn_once(&chip->dev, "auth session is active\n");
return 0;
}
auth = kzalloc(sizeof(*auth), GFP_KERNEL);
if (!auth)
return -ENOMEM;
rc = tpm2_load_null(chip, &null_key);
if (rc)
goto out;
@ -988,7 +1002,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
tpm_buf_append(&buf, auth->our_nonce, sizeof(auth->our_nonce));
/* append encrypted salt and squirrel away unencrypted in auth */
tpm_buf_append_salt(&buf, chip);
tpm_buf_append_salt(&buf, chip, auth);
/* session type (HMAC, audit or policy) */
tpm_buf_append_u8(&buf, TPM2_SE_HMAC);
@ -1010,10 +1024,13 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
tpm_buf_destroy(&buf);
if (rc)
goto out;
if (rc == TPM2_RC_SUCCESS) {
chip->auth = auth;
return 0;
}
out:
out:
kfree_sensitive(auth);
return rc;
}
EXPORT_SYMBOL(tpm2_start_auth_session);
@ -1347,18 +1364,21 @@ static int tpm2_create_null_primary(struct tpm_chip *chip)
*
* Derive and context save the null primary and allocate memory in the
* struct tpm_chip for the authorizations.
*
* Return:
* * 0 - OK
* * -errno - A system error
* * TPM_RC - A TPM error
*/
int tpm2_sessions_init(struct tpm_chip *chip)
{
int rc;
rc = tpm2_create_null_primary(chip);
if (rc)
dev_err(&chip->dev, "TPM: security failed (NULL seed derivation): %d\n", rc);
chip->auth = kmalloc(sizeof(*chip->auth), GFP_KERNEL);
if (!chip->auth)
return -ENOMEM;
if (rc) {
dev_err(&chip->dev, "null key creation failed with %d\n", rc);
return rc;
}
return rc;
}


@ -60,6 +60,7 @@ config CXL_ACPI
default CXL_BUS
select ACPI_TABLE_LIB
select ACPI_HMAT
select CXL_PORT
help
Enable support for host managed device memory (HDM) resources
published by a platform's ACPI CXL memory layout description. See


@ -1,13 +1,21 @@
# SPDX-License-Identifier: GPL-2.0
# Order is important here for the built-in case:
# - 'core' first for fundamental init
# - 'port' before platform root drivers like 'acpi' so that CXL-root ports
# are immediately enabled
# - 'mem' and 'pmem' before endpoint drivers so that memdevs are
# immediately enabled
# - 'pci' last, also mirrors the hardware enumeration hierarchy
obj-y += core/
obj-$(CONFIG_CXL_PCI) += cxl_pci.o
obj-$(CONFIG_CXL_MEM) += cxl_mem.o
obj-$(CONFIG_CXL_PORT) += cxl_port.o
obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o
obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o
obj-$(CONFIG_CXL_PORT) += cxl_port.o
obj-$(CONFIG_CXL_MEM) += cxl_mem.o
obj-$(CONFIG_CXL_PCI) += cxl_pci.o
cxl_mem-y := mem.o
cxl_pci-y := pci.o
cxl_port-y := port.o
cxl_acpi-y := acpi.o
cxl_pmem-y := pmem.o security.o
cxl_port-y := port.o
cxl_mem-y := mem.o
cxl_pci-y := pci.o


@ -924,6 +924,13 @@ static void __exit cxl_acpi_exit(void)
/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
subsys_initcall(cxl_acpi_init);
/*
* Arrange for host-bridge ports to be active synchronous with
* cxl_acpi_probe() exit.
*/
MODULE_SOFTDEP("pre: cxl_port");
module_exit(cxl_acpi_exit);
MODULE_DESCRIPTION("CXL ACPI: Platform Support");
MODULE_LICENSE("GPL v2");


@ -641,6 +641,9 @@ static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr,
void *ptr;
int rc;
if (!dev_is_pci(cxlds->dev))
return -ENODEV;
if (cxlds->rcd)
return -ENODEV;


@ -712,7 +712,44 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
return 0;
}
static int cxl_decoder_reset(struct cxl_decoder *cxld)
static int commit_reap(struct device *dev, const void *data)
{
struct cxl_port *port = to_cxl_port(dev->parent);
struct cxl_decoder *cxld;
if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev))
return 0;
cxld = to_cxl_decoder(dev);
if (port->commit_end == cxld->id &&
((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
port->commit_end--;
dev_dbg(&port->dev, "reap: %s commit_end: %d\n",
dev_name(&cxld->dev), port->commit_end);
}
return 0;
}
void cxl_port_commit_reap(struct cxl_decoder *cxld)
{
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
lockdep_assert_held_write(&cxl_region_rwsem);
/*
* Once the highest committed decoder is disabled, free any other
* decoders that were pinned allocated by out-of-order release.
*/
port->commit_end--;
dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev),
port->commit_end);
device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL,
commit_reap);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, CXL);
static void cxl_decoder_reset(struct cxl_decoder *cxld)
{
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
@ -721,14 +758,14 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
u32 ctrl;
if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
return 0;
return;
if (port->commit_end != id) {
if (port->commit_end == id)
cxl_port_commit_reap(cxld);
else
dev_dbg(&port->dev,
"%s: out of order reset, expected decoder%d.%d\n",
dev_name(&cxld->dev), port->id, port->commit_end);
return -EBUSY;
}
down_read(&cxl_dpa_rwsem);
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
@ -741,7 +778,6 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
up_read(&cxl_dpa_rwsem);
port->commit_end--;
cxld->flags &= ~CXL_DECODER_F_ENABLE;
/* Userspace is now responsible for reconfiguring this decoder */
@ -751,8 +787,6 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
cxled = to_cxl_endpoint_decoder(&cxld->dev);
cxled->state = CXL_DECODER_STATE_MANUAL;
}
return 0;
}
static int cxl_setup_hdm_decoder_from_dvsec(


@ -2084,11 +2084,18 @@ static void cxl_bus_remove(struct device *dev)
static struct workqueue_struct *cxl_bus_wq;
static int cxl_rescan_attach(struct device *dev, void *data)
{
int rc = device_attach(dev);
dev_vdbg(dev, "rescan: %s\n", rc ? "attach" : "detached");
return 0;
}
static void cxl_bus_rescan_queue(struct work_struct *w)
{
int rc = bus_rescan_devices(&cxl_bus_type);
pr_debug("CXL bus rescan result: %d\n", rc);
bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_rescan_attach);
}
void cxl_bus_rescan(void)


@ -232,8 +232,8 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
return 0;
} else {
dev_err(&cxlr->dev,
"Failed to synchronize CPU cache state\n");
dev_WARN(&cxlr->dev,
"Failed to synchronize CPU cache state\n");
return -ENXIO;
}
}
@ -242,19 +242,17 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
return 0;
}
static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
static void cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
struct cxl_region_params *p = &cxlr->params;
int i, rc = 0;
int i;
/*
* Before region teardown attempt to flush, and if the flush
* fails cancel the region teardown for data consistency
* concerns
* Before region teardown attempt to flush, evict any data cached for
* this region, or scream loudly about missing arch / platform support
* for CXL teardown.
*/
rc = cxl_region_invalidate_memregion(cxlr);
if (rc)
return rc;
cxl_region_invalidate_memregion(cxlr);
for (i = count - 1; i >= 0; i--) {
struct cxl_endpoint_decoder *cxled = p->targets[i];
@ -277,23 +275,17 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
if (cxld->reset)
rc = cxld->reset(cxld);
if (rc)
return rc;
cxld->reset(cxld);
set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
}
endpoint_reset:
rc = cxled->cxld.reset(&cxled->cxld);
if (rc)
return rc;
cxled->cxld.reset(&cxled->cxld);
set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
}
/* all decoders associated with this region have been torn down */
clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
return 0;
}
static int commit_decoder(struct cxl_decoder *cxld)
@ -409,16 +401,8 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
* still pending.
*/
if (p->state == CXL_CONFIG_RESET_PENDING) {
rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
/*
* Revert to committed since there may still be active
* decoders associated with this region, or move forward
* to active to mark the reset successful
*/
if (rc)
p->state = CXL_CONFIG_COMMIT;
else
p->state = CXL_CONFIG_ACTIVE;
cxl_region_decode_reset(cxlr, p->interleave_ways);
p->state = CXL_CONFIG_ACTIVE;
}
}
@ -794,26 +778,50 @@ static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
return rc;
}
static int check_commit_order(struct device *dev, const void *data)
{
struct cxl_decoder *cxld = to_cxl_decoder(dev);
/*
* if port->commit_end is not the only free decoder, then out of
* order shutdown has occurred, block further allocations until
* that is resolved
*/
if (((cxld->flags & CXL_DECODER_F_ENABLE) == 0))
return -EBUSY;
return 0;
}
static int match_free_decoder(struct device *dev, void *data)
{
struct cxl_port *port = to_cxl_port(dev->parent);
struct cxl_decoder *cxld;
int *id = data;
int rc;
if (!is_switch_decoder(dev))
return 0;
cxld = to_cxl_decoder(dev);
/* enforce ordered allocation */
if (cxld->id != *id)
if (cxld->id != port->commit_end + 1)
return 0;
if (!cxld->region)
return 1;
if (cxld->region) {
dev_dbg(dev->parent,
"next decoder to commit (%s) is already reserved (%s)\n",
dev_name(dev), dev_name(&cxld->region->dev));
return 0;
}
(*id)++;
return 0;
rc = device_for_each_child_reverse_from(dev->parent, dev, NULL,
check_commit_order);
if (rc) {
dev_dbg(dev->parent,
"unable to allocate %s due to out of order shutdown\n",
dev_name(dev));
return 0;
}
return 1;
}
static int match_auto_decoder(struct device *dev, void *data)
@ -840,7 +848,6 @@ cxl_region_find_decoder(struct cxl_port *port,
struct cxl_region *cxlr)
{
struct device *dev;
int id = 0;
if (port == cxled_to_port(cxled))
return &cxled->cxld;
@ -849,7 +856,7 @@ cxl_region_find_decoder(struct cxl_port *port,
dev = device_find_child(&port->dev, &cxlr->params,
match_auto_decoder);
else
dev = device_find_child(&port->dev, &id, match_free_decoder);
dev = device_find_child(&port->dev, NULL, match_free_decoder);
if (!dev)
return NULL;
/*
@ -2054,13 +2061,7 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
get_device(&cxlr->dev);
if (p->state > CXL_CONFIG_ACTIVE) {
/*
* TODO: tear down all impacted regions if a device is
* removed out of order
*/
rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
if (rc)
goto out;
cxl_region_decode_reset(cxlr, p->interleave_ways);
p->state = CXL_CONFIG_ACTIVE;
}


@ -279,7 +279,7 @@ TRACE_EVENT(cxl_generic_event,
#define CXL_GMER_MEM_EVT_TYPE_ECC_ERROR 0x00
#define CXL_GMER_MEM_EVT_TYPE_INV_ADDR 0x01
#define CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x02
#define show_mem_event_type(type) __print_symbolic(type, \
#define show_gmer_mem_event_type(type) __print_symbolic(type, \
{ CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
{ CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
{ CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
@ -373,7 +373,7 @@ TRACE_EVENT(cxl_general_media,
"hpa=%llx region=%s region_uuid=%pUb",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
show_mem_event_type(__entry->type),
show_gmer_mem_event_type(__entry->type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->device,
__print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
@ -391,6 +391,17 @@ TRACE_EVENT(cxl_general_media,
* DRAM Event Record defines many fields the same as the General Media Event
* Record. Reuse those definitions as appropriate.
*/
#define CXL_DER_MEM_EVT_TYPE_ECC_ERROR 0x00
#define CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x01
#define CXL_DER_MEM_EVT_TYPE_INV_ADDR 0x02
#define CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x03
#define show_dram_mem_event_type(type) __print_symbolic(type, \
{ CXL_DER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
{ CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \
{ CXL_DER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
{ CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
)
#define CXL_DER_VALID_CHANNEL BIT(0)
#define CXL_DER_VALID_RANK BIT(1)
#define CXL_DER_VALID_NIBBLE BIT(2)
@ -477,7 +488,7 @@ TRACE_EVENT(cxl_dram,
"hpa=%llx region=%s region_uuid=%pUb",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
show_mem_event_type(__entry->type),
show_dram_mem_event_type(__entry->type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->nibble_mask,
__entry->bank_group, __entry->bank,


@ -359,7 +359,7 @@ struct cxl_decoder {
struct cxl_region *region;
unsigned long flags;
int (*commit)(struct cxl_decoder *cxld);
int (*reset)(struct cxl_decoder *cxld);
void (*reset)(struct cxl_decoder *cxld);
};
/*
@ -730,6 +730,7 @@ static inline bool is_cxl_root(struct cxl_port *port)
int cxl_num_decoders_committed(struct cxl_port *port);
bool is_cxl_port(const struct device *dev);
struct cxl_port *to_cxl_port(const struct device *dev);
void cxl_port_commit_reap(struct cxl_decoder *cxld);
struct pci_bus;
int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
struct pci_bus *bus);


@ -208,7 +208,22 @@ static struct cxl_driver cxl_port_driver = {
},
};
module_cxl_driver(cxl_port_driver);
static int __init cxl_port_init(void)
{
return cxl_driver_register(&cxl_port_driver);
}
/*
* Be ready to immediately enable ports emitted by the platform CXL root
* (e.g. cxl_acpi) when CONFIG_CXL_PORT=y.
*/
subsys_initcall(cxl_port_init);
static void __exit cxl_port_exit(void)
{
cxl_driver_unregister(&cxl_port_driver);
}
module_exit(cxl_port_exit);
MODULE_DESCRIPTION("CXL: Port enumeration and services");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);


@ -601,22 +601,25 @@ static int rz_dmac_config(struct dma_chan *chan,
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
u32 val;
channel->src_per_address = config->src_addr;
channel->dst_per_address = config->dst_addr;
val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
if (val == CHCFG_DS_INVALID)
return -EINVAL;
channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
if (channel->dst_per_address) {
val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
if (val == CHCFG_DS_INVALID)
return -EINVAL;
val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
if (val == CHCFG_DS_INVALID)
return -EINVAL;
channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
}
channel->src_per_address = config->src_addr;
channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
if (channel->src_per_address) {
val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
if (val == CHCFG_DS_INVALID)
return -EINVAL;
channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
}
return 0;
}


@ -3185,27 +3185,40 @@ static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
d->static_tr.elcnt = elcnt;
/*
* PDMA must to close the packet when the channel is in packet mode.
* For TR mode when the channel is not cyclic we also need PDMA to close
* the packet otherwise the transfer will stall because PDMA holds on
* the data it has received from the peripheral.
*/
if (uc->config.pkt_mode || !uc->cyclic) {
/*
* PDMA must close the packet when the channel is in packet mode.
* For TR mode when the channel is not cyclic we also need PDMA
* to close the packet otherwise the transfer will stall because
* PDMA holds on the data it has received from the peripheral.
*/
unsigned int div = dev_width * elcnt;
if (uc->cyclic)
d->static_tr.bstcnt = d->residue / d->sglen / div;
else
d->static_tr.bstcnt = d->residue / div;
} else if (uc->ud->match_data->type == DMA_TYPE_BCDMA &&
uc->config.dir == DMA_DEV_TO_MEM &&
uc->cyclic) {
/*
* For cyclic mode with BCDMA we have to set EOP in each TR to
* prevent short packet errors seen on channel teardown. So the
* PDMA must close the packet after every TR transfer by setting
* burst count equal to the number of bytes transferred.
*/
struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base;
if (uc->config.dir == DMA_DEV_TO_MEM &&
d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
return -EINVAL;
d->static_tr.bstcnt =
(tr_req->icnt0 * tr_req->icnt1) / dev_width;
} else {
d->static_tr.bstcnt = 0;
}
if (uc->config.dir == DMA_DEV_TO_MEM &&
d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
return -EINVAL;
return 0;
}
@ -3450,8 +3463,9 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
__func__, d->static_tr.bstcnt);
"%s: StaticTR Z is limited to maximum %u (%u)\n",
__func__, uc->ud->match_data->statictr_z_mask,
d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
@ -3476,6 +3490,7 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
unsigned int i;
int num_tr;
u32 period_csf = 0;
num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
&tr0_cnt1, &tr1_cnt0);
@ -3498,6 +3513,20 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
period_addr = buf_addr |
((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
/*
* For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the
* last TR of a descriptor, to mark the packet as complete.
* This is required for getting the teardown completion message in case
* of TX, and to avoid short-packet error in case of RX.
*
* As we are in cyclic mode, we do not know which period might be the
* last one, so set the flag for each period.
*/
if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
uc->ud->match_data->type == DMA_TYPE_BCDMA) {
period_csf = CPPI5_TR_CSF_EOP;
}
for (i = 0; i < periods; i++) {
int tr_idx = i * num_tr;
@ -3525,8 +3554,10 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
}
if (!(flags & DMA_PREP_INTERRUPT))
cppi5_tr_csf_set(&tr_req[tr_idx].flags,
CPPI5_TR_CSF_SUPR_EVT);
period_csf |= CPPI5_TR_CSF_SUPR_EVT;
if (period_csf)
cppi5_tr_csf_set(&tr_req[tr_idx].flags, period_csf);
period_addr += period_len;
}
@ -3655,8 +3686,9 @@ udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
"%s: StaticTR Z is limited to maximum 4095 (%u)\n",
__func__, d->static_tr.bstcnt);
"%s: StaticTR Z is limited to maximum %u (%u)\n",
__func__, uc->ud->match_data->statictr_z_mask,
d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);


@ -763,7 +763,7 @@ static int sdei_device_freeze(struct device *dev)
int err;
/* unregister private events */
cpuhp_remove_state(sdei_entry_point);
cpuhp_remove_state(sdei_hp_state);
err = sdei_unregister_shared();
if (err)


@ -234,7 +234,9 @@ static int gpio_la_poll_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
devm_mutex_init(dev, &priv->blob_lock);
ret = devm_mutex_init(dev, &priv->blob_lock);
if (ret)
return ret;
fops_buf_size_set(priv, GPIO_LA_DEFAULT_BUF_SIZE);


@ -64,7 +64,7 @@ struct gpio_desc *swnode_find_gpio(struct fwnode_handle *fwnode,
struct fwnode_reference_args args;
struct gpio_desc *desc;
char propname[32]; /* 32 is max size of property name */
int ret;
int ret = 0;
swnode = to_software_node(fwnode);
if (!swnode)


@ -4926,6 +4926,8 @@ static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
return NULL;
s->private = priv;
if (*pos > 0)
priv->newline = true;
priv->idx = srcu_read_lock(&gpio_devices_srcu);
list_for_each_entry_srcu(gdev, &gpio_devices, list,
@ -4969,7 +4971,7 @@ static int gpiolib_seq_show(struct seq_file *s, void *v)
gc = srcu_dereference(gdev->chip, &gdev->srcu);
if (!gc) {
seq_printf(s, "%s%s: (dangling chip)",
seq_printf(s, "%s%s: (dangling chip)\n",
priv->newline ? "\n" : "",
dev_name(&gdev->dev));
return 0;


@ -303,6 +303,7 @@ void build_unoptimized_policy_settings(enum dml_project_id project, struct dml_m
if (project == dml_project_dcn35 ||
project == dml_project_dcn351) {
policy->DCCProgrammingAssumesScanDirectionUnknownFinal = false;
policy->EnhancedPrefetchScheduleAccelerationFinal = 0;
policy->AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter_if_possible; /*new*/
policy->UseOnlyMaxPrefetchModes = 1;
}


@ -242,7 +242,9 @@ static int vangogh_tables_init(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2));
smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3));
smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4));
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;


@ -2485,7 +2485,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;
u32 workload_mask;
u32 workload_mask, selected_workload_mask;
smu->power_profile_mode = input[size];
@ -2552,7 +2552,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
if (workload_type < 0)
return -EINVAL;
workload_mask = 1 << workload_type;
selected_workload_mask = workload_mask = 1 << workload_type;
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
@ -2572,7 +2572,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
workload_mask,
NULL);
if (!ret)
smu->workload_mask = workload_mask;
smu->workload_mask = selected_workload_mask;
return ret;
}


@ -127,9 +127,8 @@ static void mtk_crtc_destroy(struct drm_crtc *crtc)
mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle);
if (mtk_crtc->cmdq_client.chan) {
cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle);
mbox_free_channel(mtk_crtc->cmdq_client.chan);
mtk_crtc->cmdq_client.chan = NULL;
}
@ -913,6 +912,7 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
BIT(pipe),
mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes),
mtk_ddp_comp_supported_rotations(comp),
mtk_ddp_comp_get_blend_modes(comp),
mtk_ddp_comp_get_formats(comp),
mtk_ddp_comp_get_num_formats(comp), i);
if (ret)


@ -363,6 +363,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = {
.layer_config = mtk_ovl_layer_config,
.bgclr_in_on = mtk_ovl_bgclr_in_on,
.bgclr_in_off = mtk_ovl_bgclr_in_off,
.get_blend_modes = mtk_ovl_get_blend_modes,
.get_formats = mtk_ovl_get_formats,
.get_num_formats = mtk_ovl_get_num_formats,
};
@ -416,6 +417,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = {
.disconnect = mtk_ovl_adaptor_disconnect,
.add = mtk_ovl_adaptor_add_comp,
.remove = mtk_ovl_adaptor_remove_comp,
.get_blend_modes = mtk_ovl_adaptor_get_blend_modes,
.get_formats = mtk_ovl_adaptor_get_formats,
.get_num_formats = mtk_ovl_adaptor_get_num_formats,
.mode_valid = mtk_ovl_adaptor_mode_valid,


@ -80,6 +80,7 @@ struct mtk_ddp_comp_funcs {
void (*ctm_set)(struct device *dev,
struct drm_crtc_state *state);
struct device * (*dma_dev_get)(struct device *dev);
u32 (*get_blend_modes)(struct device *dev);
const u32 *(*get_formats)(struct device *dev);
size_t (*get_num_formats)(struct device *dev);
void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next);
@ -266,6 +267,15 @@ static inline struct device *mtk_ddp_comp_dma_dev_get(struct mtk_ddp_comp *comp)
return comp->dev;
}
static inline
u32 mtk_ddp_comp_get_blend_modes(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->get_blend_modes)
return comp->funcs->get_blend_modes(comp->dev);
return 0;
}
static inline
const u32 *mtk_ddp_comp_get_formats(struct mtk_ddp_comp *comp)
{


@ -103,6 +103,7 @@ void mtk_ovl_register_vblank_cb(struct device *dev,
void mtk_ovl_unregister_vblank_cb(struct device *dev);
void mtk_ovl_enable_vblank(struct device *dev);
void mtk_ovl_disable_vblank(struct device *dev);
u32 mtk_ovl_get_blend_modes(struct device *dev);
const u32 *mtk_ovl_get_formats(struct device *dev);
size_t mtk_ovl_get_num_formats(struct device *dev);
@ -131,6 +132,7 @@ void mtk_ovl_adaptor_start(struct device *dev);
void mtk_ovl_adaptor_stop(struct device *dev);
unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev);
struct device *mtk_ovl_adaptor_dma_dev_get(struct device *dev);
u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev);
const u32 *mtk_ovl_adaptor_get_formats(struct device *dev);
size_t mtk_ovl_adaptor_get_num_formats(struct device *dev);
enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev,


@ -65,8 +65,8 @@
#define OVL_CON_CLRFMT_RGB (1 << 12)
#define OVL_CON_CLRFMT_ARGB8888 (2 << 12)
#define OVL_CON_CLRFMT_RGBA8888 (3 << 12)
#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_UYVY (4 << 12)
#define OVL_CON_CLRFMT_YUYV (5 << 12)
#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
@ -146,6 +146,7 @@ struct mtk_disp_ovl_data {
bool fmt_rgb565_is_0;
bool smi_id_en;
bool supports_afbc;
const u32 blend_modes;
const u32 *formats;
size_t num_formats;
bool supports_clrfmt_ext;
@ -214,6 +215,13 @@ void mtk_ovl_disable_vblank(struct device *dev)
writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN);
}
u32 mtk_ovl_get_blend_modes(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
return ovl->data->blend_modes;
}
const u32 *mtk_ovl_get_formats(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
@ -386,14 +394,27 @@ void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
DISP_REG_OVL_RDMA_CTRL(idx));
}
static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt,
unsigned int blend_mode)
static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl,
struct mtk_plane_state *state)
{
/* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX"
* is defined in mediatek HW data sheet.
* The alphabet order in XXX is no relation to data
* arrangement in memory.
unsigned int fmt = state->pending.format;
unsigned int blend_mode = DRM_MODE_BLEND_COVERAGE;
/*
* For the platforms where OVL_CON_CLRFMT_MAN is defined in the hardware data sheet
* and supports premultiplied color formats, such as OVL_CON_CLRFMT_PARGB8888.
*
* Check blend_modes in the driver data to see if premultiplied mode is supported.
* If not, use coverage mode instead to set it to the supported color formats.
*
* Current DRM assumption is that alpha is default premultiplied, so the bitmask of
* blend_modes must include BIT(DRM_MODE_BLEND_PREMULTI). Otherwise, mtk_plane_init()
* will get an error return from drm_plane_create_blend_mode_property() and
* state->base.pixel_blend_mode should not be used.
*/
if (ovl->data->blend_modes & BIT(DRM_MODE_BLEND_PREMULTI))
blend_mode = state->base.pixel_blend_mode;
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
@ -471,20 +492,26 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
return;
}
con = ovl_fmt_convert(ovl, fmt, blend_mode);
con = mtk_ovl_fmt_convert(ovl, state);
if (state->base.fb) {
con |= OVL_CON_AEN;
con |= state->base.alpha & OVL_CON_ALPHA;
}
/* CONST_BLD must be enabled for XRGB formats although the alpha channel
* can be ignored, or OVL will still read the value from memory.
* For RGB888 related formats, whether CONST_BLD is enabled or not won't
* affect the result. Therefore we use !has_alpha as the condition.
*/
if ((state->base.fb && !state->base.fb->format->has_alpha) ||
blend_mode == DRM_MODE_BLEND_PIXEL_NONE)
ignore_pixel_alpha = OVL_CONST_BLEND;
/*
* For blend_modes supported SoCs, always enable alpha blending.
* For blend_modes unsupported SoCs, enable alpha blending when has_alpha is set.
*/
if (blend_mode || state->base.fb->format->has_alpha)
con |= OVL_CON_AEN;
/*
* Although the alpha channel can be ignored, CONST_BLD must be enabled
* for XRGB format, otherwise OVL will still read the value from memory.
* For RGB888 related formats, whether CONST_BLD is enabled or not won't
* affect the result. Therefore we use !has_alpha as the condition.
*/
if (blend_mode == DRM_MODE_BLEND_PIXEL_NONE || !state->base.fb->format->has_alpha)
ignore_pixel_alpha = OVL_CONST_BLEND;
}
if (pending->rotation & DRM_MODE_REFLECT_Y) {
con |= OVL_CON_VIRT_FLIP;
@ -663,6 +690,9 @@ static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = {
.layer_nr = 4,
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
.blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE) |
BIT(DRM_MODE_BLEND_PIXEL_NONE),
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
@ -673,6 +703,9 @@ static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = {
.layer_nr = 2,
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
.blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE) |
BIT(DRM_MODE_BLEND_PIXEL_NONE),
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
@ -684,6 +717,9 @@ static const struct mtk_disp_ovl_data mt8195_ovl_driver_data = {
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
.supports_afbc = true,
.blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE) |
BIT(DRM_MODE_BLEND_PIXEL_NONE),
.formats = mt8195_formats,
.num_formats = ARRAY_SIZE(mt8195_formats),
.supports_clrfmt_ext = true,


@ -400,6 +400,13 @@ void mtk_ovl_adaptor_disable_vblank(struct device *dev)
mtk_ethdr_disable_vblank(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
}
u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
return mtk_ethdr_get_blend_modes(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
}
const u32 *mtk_ovl_adaptor_get_formats(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);


@ -145,6 +145,89 @@ struct mtk_dp_data {
u16 audio_m_div2_bit;
};
static const struct mtk_dp_efuse_fmt mt8188_dp_efuse_fmt[MTK_DP_CAL_MAX] = {
[MTK_DP_CAL_GLB_BIAS_TRIM] = {
.idx = 0,
.shift = 10,
.mask = 0x1f,
.min_val = 1,
.max_val = 0x1e,
.default_val = 0xf,
},
[MTK_DP_CAL_CLKTX_IMPSE] = {
.idx = 0,
.shift = 15,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = {
.idx = 1,
.shift = 0,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = {
.idx = 1,
.shift = 8,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = {
.idx = 1,
.shift = 16,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = {
.idx = 1,
.shift = 24,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = {
.idx = 1,
.shift = 4,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = {
.idx = 1,
.shift = 12,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = {
.idx = 1,
.shift = 20,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = {
.idx = 1,
.shift = 28,
.mask = 0xf,
.min_val = 1,
.max_val = 0xe,
.default_val = 0x8,
},
};
static const struct mtk_dp_efuse_fmt mt8195_edp_efuse_fmt[MTK_DP_CAL_MAX] = {
[MTK_DP_CAL_GLB_BIAS_TRIM] = {
.idx = 3,
@ -2771,7 +2854,7 @@ static SIMPLE_DEV_PM_OPS(mtk_dp_pm_ops, mtk_dp_suspend, mtk_dp_resume);
static const struct mtk_dp_data mt8188_dp_data = {
.bridge_type = DRM_MODE_CONNECTOR_DisplayPort,
.smc_cmd = MTK_DP_SIP_ATF_VIDEO_UNMUTE,
.efuse_fmt = mt8195_dp_efuse_fmt,
.efuse_fmt = mt8188_dp_efuse_fmt,
.audio_supported = true,
.audio_pkt_in_hblank_area = true,
.audio_m_div2_bit = MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2,


@ -145,6 +145,13 @@ static irqreturn_t mtk_ethdr_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
u32 mtk_ethdr_get_blend_modes(struct device *dev)
{
return BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE) |
BIT(DRM_MODE_BLEND_PIXEL_NONE);
}
void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt)


@ -13,6 +13,7 @@ void mtk_ethdr_clk_disable(struct device *dev);
void mtk_ethdr_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
u32 mtk_ethdr_get_blend_modes(struct device *dev);
void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt);


@ -320,8 +320,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 *formats,
size_t num_formats, unsigned int plane_idx)
unsigned int supported_rotations, const u32 blend_modes,
const u32 *formats, size_t num_formats, unsigned int plane_idx)
{
int err;
@ -366,12 +366,11 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (err)
DRM_ERROR("failed to create property: alpha\n");
err = drm_plane_create_blend_mode_property(plane,
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE) |
BIT(DRM_MODE_BLEND_PIXEL_NONE));
if (err)
DRM_ERROR("failed to create property: blend_mode\n");
if (blend_modes) {
err = drm_plane_create_blend_mode_property(plane, blend_modes);
if (err)
DRM_ERROR("failed to create property: blend_mode\n");
}
drm_plane_helper_add(plane, &mtk_plane_helper_funcs);


@ -48,6 +48,6 @@ to_mtk_plane_state(struct drm_plane_state *state)
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 *formats,
size_t num_formats, unsigned int plane_idx);
unsigned int supported_rotations, const u32 blend_modes,
const u32 *formats, size_t num_formats, unsigned int plane_idx);
#endif


@ -487,6 +487,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
struct panthor_fw_binary_iter *iter,
u32 ehdr)
{
ssize_t vm_pgsz = panthor_vm_page_size(ptdev->fw->vm);
struct panthor_fw_binary_section_entry_hdr hdr;
struct panthor_fw_section *section;
u32 section_size;
@ -515,8 +516,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
return -EINVAL;
}
if ((hdr.va.start & ~PAGE_MASK) != 0 ||
(hdr.va.end & ~PAGE_MASK) != 0) {
if (!IS_ALIGNED(hdr.va.start, vm_pgsz) || !IS_ALIGNED(hdr.va.end, vm_pgsz)) {
drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n",
hdr.va.start, hdr.va.end);
return -EINVAL;


@ -44,8 +44,7 @@ void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
goto out_free_bo;
ret = panthor_vm_unmap_range(vm, bo->va_node.start,
panthor_kernel_bo_size(bo));
ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size);
if (ret)
goto out_free_bo;
@ -95,10 +94,16 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
}
bo = to_panthor_bo(&obj->base);
size = obj->base.size;
kbo->obj = &obj->base;
bo->flags = bo_flags;
/* The system and GPU MMU page size might differ, which becomes a
* problem for FW sections that need to be mapped at an explicit address
* since our PAGE_SIZE alignment might cover a VA range that's
* expected to be used for another section.
* Make sure we never map more than we need.
*/
size = ALIGN(size, panthor_vm_page_size(vm));
ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node);
if (ret)
goto err_put_obj;
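
The comment above is the rationale for sizing with the GPU VM page size instead of the CPU PAGE_SIZE; a purely illustrative sketch of the over-mapping this avoids, with assumed sizes (16 KiB CPU pages, 4 KiB GPU VM pages):

#include <linux/align.h>
#include <linux/sizes.h>
#include <linux/types.h>

/* Hypothetical helper, not from the patch. */
static size_t example_fw_section_map_size(size_t section_size)
{
	/*
	 * ALIGN() to a 16 KiB CPU page would turn an 8 KiB section into 16 KiB,
	 * spilling into VA that another explicitly placed section may expect.
	 * Aligning to the 4 KiB GPU VM page maps only what is actually needed.
	 */
	return ALIGN(section_size, SZ_4K);
}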


@ -826,6 +826,14 @@ void panthor_vm_idle(struct panthor_vm *vm)
mutex_unlock(&ptdev->mmu->as.slots_lock);
}
u32 panthor_vm_page_size(struct panthor_vm *vm)
{
const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);
u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;
return 1u << pg_shift;
}
static void panthor_vm_stop(struct panthor_vm *vm)
{
drm_sched_stop(&vm->sched, NULL);
@ -1025,12 +1033,13 @@ int
panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
struct drm_mm_node *va_node)
{
ssize_t vm_pgsz = panthor_vm_page_size(vm);
int ret;
if (!size || (size & ~PAGE_MASK))
if (!size || !IS_ALIGNED(size, vm_pgsz))
return -EINVAL;
if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK))
if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz))
return -EINVAL;
mutex_lock(&vm->mm_lock);
@ -2366,11 +2375,12 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
const struct drm_panthor_vm_bind_op *op,
struct panthor_vm_op_ctx *op_ctx)
{
ssize_t vm_pgsz = panthor_vm_page_size(vm);
struct drm_gem_object *gem;
int ret;
/* Aligned on page size. */
if ((op->va | op->size) & ~PAGE_MASK)
if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
return -EINVAL;
switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {


@ -30,6 +30,7 @@ panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset);
int panthor_vm_active(struct panthor_vm *vm);
void panthor_vm_idle(struct panthor_vm *vm);
u32 panthor_vm_page_size(struct panthor_vm *vm);
int panthor_vm_as(struct panthor_vm *vm);
int panthor_vm_flush_all(struct panthor_vm *vm);


@ -589,10 +589,11 @@ struct panthor_group {
* @timedout: True when a timeout occurred on any of the queues owned by
* this group.
*
* Timeouts can be reported by drm_sched or by the FW. In any case, any
* timeout situation is unrecoverable, and the group becomes useless.
* We simply wait for all references to be dropped so we can release the
* group object.
* Timeouts can be reported by drm_sched or by the FW. If a reset is required,
* and the group can't be suspended, this also leads to a timeout. In any case,
* any timeout situation is unrecoverable, and the group becomes useless. We
* simply wait for all references to be dropped so we can release the group
* object.
*/
bool timedout;
@ -2640,6 +2641,12 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
csgs_upd_ctx_init(&upd_ctx);
while (slot_mask) {
u32 csg_id = ffs(slot_mask) - 1;
struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
/* We consider group suspension failures as fatal and flag the
* group as unusable by setting timedout=true.
*/
csg_slot->group->timedout = true;
csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
CSG_STATE_TERMINATE,
@ -3409,6 +3416,11 @@ panthor_job_create(struct panthor_file *pfile,
goto err_put_job;
}
if (!group_can_run(job->group)) {
ret = -EINVAL;
goto err_put_job;
}
if (job->queue_idx >= job->group->queue_count ||
!job->group->queues[job->queue_idx]) {
ret = -EINVAL;


@ -1276,10 +1276,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->own_submit_wq = false;
} else {
#ifdef CONFIG_LOCKDEP
sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0,
sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name,
WQ_MEM_RECLAIM,
&drm_sched_lockdep_map);
#else
sched->submit_wq = alloc_ordered_workqueue(name, 0);
sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
#endif
if (!sched->submit_wq)
return -ENOMEM;


@ -1153,8 +1153,8 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (host1x_drm_wants_iommu(dev) && device_iommu_mapped(dma_dev)) {
tegra->domain = iommu_paging_domain_alloc(dma_dev);
if (!tegra->domain) {
err = -ENOMEM;
if (IS_ERR(tegra->domain)) {
err = PTR_ERR(tegra->domain);
goto free;
}


@ -996,7 +996,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
mode = drm_display_mode_from_cea_vic(drm, 16);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@ -1017,7 +1017,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
mode = drm_display_mode_from_cea_vic(drm, 16);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@ -1038,7 +1038,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc_vic_1(struct kunit *t
unsigned long long rate;
struct drm_device *drm = &priv->drm;
mode = drm_display_mode_from_cea_vic(drm, 1);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
rate = drm_hdmi_compute_mode_clock(mode, 10, HDMI_COLORSPACE_RGB);
@ -1056,7 +1056,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
mode = drm_display_mode_from_cea_vic(drm, 16);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@ -1077,7 +1077,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc_vic_1(struct kunit *t
unsigned long long rate;
struct drm_device *drm = &priv->drm;
mode = drm_display_mode_from_cea_vic(drm, 1);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
rate = drm_hdmi_compute_mode_clock(mode, 12, HDMI_COLORSPACE_RGB);
@ -1095,7 +1095,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_double(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
mode = drm_display_mode_from_cea_vic(drm, 6);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 6);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_TRUE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@ -1118,7 +1118,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_valid(struct kunit
unsigned long long rate;
unsigned int vic = *(unsigned int *)test->param_value;
mode = drm_display_mode_from_cea_vic(drm, vic);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@ -1155,7 +1155,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_10_bpc(struct kuni
drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0];
unsigned long long rate;
mode = drm_display_mode_from_cea_vic(drm, vic);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@ -1180,7 +1180,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_12_bpc(struct kuni
drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0];
unsigned long long rate;
mode = drm_display_mode_from_cea_vic(drm, vic);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@ -1203,7 +1203,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_8_bpc(struct kunit
struct drm_device *drm = &priv->drm;
unsigned long long rate;
mode = drm_display_mode_from_cea_vic(drm, 16);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@ -1225,7 +1225,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_10_bpc(struct kuni
struct drm_device *drm = &priv->drm;
unsigned long long rate;
mode = drm_display_mode_from_cea_vic(drm, 16);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@ -1247,7 +1247,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_12_bpc(struct kuni
struct drm_device *drm = &priv->drm;
unsigned long long rate;
mode = drm_display_mode_from_cea_vic(drm, 16);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);


@ -441,7 +441,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
mode = drm_display_mode_from_cea_vic(drm, 1);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
drm = &priv->drm;
@ -555,7 +555,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
mode = drm_display_mode_from_cea_vic(drm, 1);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
drm = &priv->drm;
@ -671,7 +671,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
mode = drm_display_mode_from_cea_vic(drm, 1);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
drm = &priv->drm;
@ -1263,7 +1263,7 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
mode = drm_display_mode_from_cea_vic(drm, 1);
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
/*


@ -3,6 +3,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_managed.h>
@ -311,6 +312,47 @@ drm_kunit_helper_create_crtc(struct kunit *test,
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc);
static void kunit_action_drm_mode_destroy(void *ptr)
{
struct drm_display_mode *mode = ptr;
drm_mode_destroy(NULL, mode);
}
/**
* drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
* @test: The test context object
* @dev: DRM device
* @video_code: CEA VIC of the mode
*
* Creates a new mode matching the specified CEA VIC for a KUnit test.
*
* Resources will be cleaned up automatically.
*
* Returns: A new drm_display_mode on success or NULL on failure
*/
struct drm_display_mode *
drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev,
u8 video_code)
{
struct drm_display_mode *mode;
int ret;
mode = drm_display_mode_from_cea_vic(dev, video_code);
if (!mode)
return NULL;
ret = kunit_add_action_or_reset(test,
kunit_action_drm_mode_destroy,
mode);
if (ret)
return NULL;
return mode;
}
EXPORT_SYMBOL_GPL(drm_kunit_display_mode_from_cea_vic);
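
An illustrative KUnit usage sketch for the helper above (not part of this patch; the test name and the assumption that the DRM device sits in test->priv are made up):

#include <kunit/test.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_modes.h>

/* Hypothetical test: request VIC 16 (1080p60) and rely on the automatic cleanup action. */
static void example_cea_vic_mode_test(struct kunit *test)
{
	struct drm_device *drm = test->priv;	/* assumes an earlier helper stored the device here */
	struct drm_display_mode *mode;

	mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
	KUNIT_ASSERT_NOT_NULL(test, mode);
	KUNIT_EXPECT_EQ(test, mode->hdisplay, 1920);
	/* no drm_mode_destroy() needed: the registered kunit action frees the mode */
}
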
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_DESCRIPTION("KUnit test suite helper functions");
MODULE_LICENSE("GPL");


@ -309,18 +309,7 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe)
}
/* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
if (!xe->info.probe_display)
return;
if (xe->d3cold.allowed)
xe_display_pm_suspend(xe, true);
intel_hpd_poll_enable(xe);
}
void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
{
struct intel_display *display = &xe->display;
bool s2idle = suspend_to_idle();
@ -353,6 +342,27 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
intel_dmc_suspend(xe);
if (runtime && has_display(xe))
intel_hpd_poll_enable(xe);
}
void xe_display_pm_suspend(struct xe_device *xe)
{
__xe_display_pm_suspend(xe, false);
}
void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
if (!xe->info.probe_display)
return;
if (xe->d3cold.allowed) {
__xe_display_pm_suspend(xe, true);
return;
}
intel_hpd_poll_enable(xe);
}
void xe_display_pm_suspend_late(struct xe_device *xe)
@ -366,17 +376,6 @@ void xe_display_pm_suspend_late(struct xe_device *xe)
intel_display_power_suspend_late(xe);
}
void xe_display_pm_runtime_resume(struct xe_device *xe)
{
if (!xe->info.probe_display)
return;
intel_hpd_poll_disable(xe);
if (xe->d3cold.allowed)
xe_display_pm_resume(xe, true);
}
void xe_display_pm_resume_early(struct xe_device *xe)
{
if (!xe->info.probe_display)
@ -387,7 +386,7 @@ void xe_display_pm_resume_early(struct xe_device *xe)
intel_power_domains_resume(xe);
}
void xe_display_pm_resume(struct xe_device *xe, bool runtime)
static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
{
struct intel_display *display = &xe->display;
@ -411,9 +410,11 @@ void xe_display_pm_resume(struct xe_device *xe, bool runtime)
intel_display_driver_resume(xe);
drm_kms_helper_poll_enable(&xe->drm);
intel_display_driver_enable_user_access(xe);
intel_hpd_poll_disable(xe);
}
if (has_display(xe))
intel_hpd_poll_disable(xe);
intel_opregion_resume(display);
intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
@ -421,6 +422,26 @@ void xe_display_pm_resume(struct xe_device *xe, bool runtime)
intel_power_domains_enable(xe);
}
void xe_display_pm_resume(struct xe_device *xe)
{
__xe_display_pm_resume(xe, false);
}
void xe_display_pm_runtime_resume(struct xe_device *xe)
{
if (!xe->info.probe_display)
return;
if (xe->d3cold.allowed) {
__xe_display_pm_resume(xe, true);
return;
}
intel_hpd_init(xe);
intel_hpd_poll_disable(xe);
}
static void display_device_remove(struct drm_device *dev, void *arg)
{
struct xe_device *xe = arg;

View File

@ -34,10 +34,10 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir);
void xe_display_irq_reset(struct xe_device *xe);
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt);
void xe_display_pm_suspend(struct xe_device *xe, bool runtime);
void xe_display_pm_suspend(struct xe_device *xe);
void xe_display_pm_suspend_late(struct xe_device *xe);
void xe_display_pm_resume_early(struct xe_device *xe);
void xe_display_pm_resume(struct xe_device *xe, bool runtime);
void xe_display_pm_resume(struct xe_device *xe);
void xe_display_pm_runtime_suspend(struct xe_device *xe);
void xe_display_pm_runtime_resume(struct xe_device *xe);
@ -65,10 +65,10 @@ static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
static inline void xe_display_irq_reset(struct xe_device *xe) {}
static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {}
static inline void xe_display_pm_suspend(struct xe_device *xe, bool runtime) {}
static inline void xe_display_pm_suspend(struct xe_device *xe) {}
static inline void xe_display_pm_suspend_late(struct xe_device *xe) {}
static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
static inline void xe_display_pm_resume(struct xe_device *xe, bool runtime) {}
static inline void xe_display_pm_resume(struct xe_device *xe) {}
static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {}
static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {}

View File

@ -397,6 +397,16 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
{
struct xe_device *xe = tile_to_xe(ggtt->tile);
/*
* XXX: Barrier for GGTT pages. Unsure exactly why this is required, but
* without it LNL has issues with the GuC reading the scratch page
* instead of the correct GGTT page. Not a particularly hot code path, so
* blindly do a mmio read here, which results in the GuC reading the
* correct GGTT page.
*/
xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG);
/* Each GT in a tile has its own TLB to cache GGTT lookups */
ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);

View File

@ -916,12 +916,22 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
{
struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q));
u32 ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
u32 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
u32 ctx_timestamp, ctx_job_timestamp;
u32 timeout_ms = q->sched_props.job_timeout_ms;
u32 diff;
u64 running_time_ms;
if (!xe_sched_job_started(job)) {
xe_gt_warn(gt, "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, not started",
xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
q->guc->id);
return xe_sched_invalidate_job(job, 2);
}
ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
/*
* Counter wraps at ~223s at the usual 19.2MHz; be paranoid and catch
* possible overflows with a high timeout.
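For context, the ~223 s figure above follows directly from a 32-bit timestamp counter ticking at 19.2 MHz:

	2^32 / 19 200 000 Hz = 4 294 967 296 / 19 200 000 ≈ 223.7 s

so a job timeout approaching that bound could see a wrapped, and therefore misleading, timestamp difference.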
@ -1049,10 +1059,6 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
exec_queue_killed_or_banned_or_wedged(q) ||
exec_queue_destroyed(q);
/* Job hasn't started, can't be timed out */
if (!skip_timeout_check && !xe_sched_job_started(job))
goto rearm;
/*
* XXX: Sampling timeout doesn't work in wedged mode as we have to
* modify scheduling state to read timestamp. We could read the

View File

@ -123,7 +123,7 @@ int xe_pm_suspend(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
xe_display_pm_suspend(xe, false);
xe_display_pm_suspend(xe);
/* FIXME: Super racey... */
err = xe_bo_evict_all(xe);
@ -133,7 +133,7 @@ int xe_pm_suspend(struct xe_device *xe)
for_each_gt(gt, xe, id) {
err = xe_gt_suspend(gt);
if (err) {
xe_display_pm_resume(xe, false);
xe_display_pm_resume(xe);
goto err;
}
}
@ -187,7 +187,7 @@ int xe_pm_resume(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_resume(gt);
xe_display_pm_resume(xe, false);
xe_display_pm_resume(xe);
err = xe_bo_restore_user(xe);
if (err)

View File

@ -637,7 +637,7 @@ static int ad7124_write_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_SAMP_FREQ:
if (val2 != 0) {
if (val2 != 0 || val == 0) {
ret = -EINVAL;
break;
}

View File

@ -75,6 +75,7 @@
#define T_CONVERT_NS 190 /* conversion time */
#define T_CONVERT_0_NS 10 /* 1st conversion start time (oversampling) */
#define T_CONVERT_X_NS 500 /* xth conversion start time (oversampling) */
#define T_POWERUP_US 5000 /* Power up */
struct ad7380_timing_specs {
const unsigned int t_csh_ns; /* CS minimum high time */
@ -86,6 +87,9 @@ struct ad7380_chip_info {
unsigned int num_channels;
unsigned int num_simult_channels;
bool has_mux;
const char * const *supplies;
unsigned int num_supplies;
bool external_ref_only;
const char * const *vcm_supplies;
unsigned int num_vcm_supplies;
const unsigned long *available_scan_masks;
@ -243,6 +247,10 @@ DEFINE_AD7380_8_CHANNEL(ad7386_4_channels, 16, 0, u);
DEFINE_AD7380_8_CHANNEL(ad7387_4_channels, 14, 0, u);
DEFINE_AD7380_8_CHANNEL(ad7388_4_channels, 12, 0, u);
static const char * const ad7380_supplies[] = {
"vcc", "vlogic",
};
static const char * const ad7380_2_channel_vcm_supplies[] = {
"aina", "ainb",
};
@ -338,6 +346,8 @@ static const struct ad7380_chip_info ad7380_chip_info = {
.channels = ad7380_channels,
.num_channels = ARRAY_SIZE(ad7380_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
};
@ -347,6 +357,8 @@ static const struct ad7380_chip_info ad7381_chip_info = {
.channels = ad7381_channels,
.num_channels = ARRAY_SIZE(ad7381_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
};
@ -356,6 +368,8 @@ static const struct ad7380_chip_info ad7383_chip_info = {
.channels = ad7383_channels,
.num_channels = ARRAY_SIZE(ad7383_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.vcm_supplies = ad7380_2_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
@ -367,6 +381,8 @@ static const struct ad7380_chip_info ad7384_chip_info = {
.channels = ad7384_channels,
.num_channels = ARRAY_SIZE(ad7384_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.vcm_supplies = ad7380_2_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
@ -378,6 +394,8 @@ static const struct ad7380_chip_info ad7386_chip_info = {
.channels = ad7386_channels,
.num_channels = ARRAY_SIZE(ad7386_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
@ -388,6 +406,8 @@ static const struct ad7380_chip_info ad7387_chip_info = {
.channels = ad7387_channels,
.num_channels = ARRAY_SIZE(ad7387_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
@ -398,6 +418,8 @@ static const struct ad7380_chip_info ad7388_chip_info = {
.channels = ad7388_channels,
.num_channels = ARRAY_SIZE(ad7388_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
@ -408,6 +430,9 @@ static const struct ad7380_chip_info ad7380_4_chip_info = {
.channels = ad7380_4_channels,
.num_channels = ARRAY_SIZE(ad7380_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.external_ref_only = true,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
};
@ -417,6 +442,8 @@ static const struct ad7380_chip_info ad7381_4_chip_info = {
.channels = ad7381_4_channels,
.num_channels = ARRAY_SIZE(ad7381_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
};
@ -426,6 +453,8 @@ static const struct ad7380_chip_info ad7383_4_chip_info = {
.channels = ad7383_4_channels,
.num_channels = ARRAY_SIZE(ad7383_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.vcm_supplies = ad7380_4_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
@ -437,6 +466,8 @@ static const struct ad7380_chip_info ad7384_4_chip_info = {
.channels = ad7384_4_channels,
.num_channels = ARRAY_SIZE(ad7384_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.vcm_supplies = ad7380_4_channel_vcm_supplies,
.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
@ -448,6 +479,8 @@ static const struct ad7380_chip_info ad7386_4_chip_info = {
.channels = ad7386_4_channels,
.num_channels = ARRAY_SIZE(ad7386_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
@ -458,6 +491,8 @@ static const struct ad7380_chip_info ad7387_4_chip_info = {
.channels = ad7387_4_channels,
.num_channels = ARRAY_SIZE(ad7387_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
@ -468,6 +503,8 @@ static const struct ad7380_chip_info ad7388_4_chip_info = {
.channels = ad7388_4_channels,
.num_channels = ARRAY_SIZE(ad7388_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
@ -956,7 +993,7 @@ static const struct iio_info ad7380_info = {
.debugfs_reg_access = &ad7380_debugfs_reg_access,
};
static int ad7380_init(struct ad7380_state *st, struct regulator *vref)
static int ad7380_init(struct ad7380_state *st, bool external_ref_en)
{
int ret;
@ -968,13 +1005,13 @@ static int ad7380_init(struct ad7380_state *st, struct regulator *vref)
if (ret < 0)
return ret;
/* select internal or external reference voltage */
ret = regmap_update_bits(st->regmap, AD7380_REG_ADDR_CONFIG1,
AD7380_CONFIG1_REFSEL,
FIELD_PREP(AD7380_CONFIG1_REFSEL,
vref ? 1 : 0));
if (ret < 0)
return ret;
if (external_ref_en) {
/* select external reference voltage */
ret = regmap_set_bits(st->regmap, AD7380_REG_ADDR_CONFIG1,
AD7380_CONFIG1_REFSEL);
if (ret < 0)
return ret;
}
/* This is the default value after reset. */
st->oversampling_ratio = 1;
@ -987,16 +1024,11 @@ static int ad7380_init(struct ad7380_state *st, struct regulator *vref)
FIELD_PREP(AD7380_CONFIG2_SDO, 1));
}
static void ad7380_regulator_disable(void *p)
{
regulator_disable(p);
}
static int ad7380_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct ad7380_state *st;
struct regulator *vref;
bool external_ref_en;
int ret, i;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@ -1009,36 +1041,38 @@ static int ad7380_probe(struct spi_device *spi)
if (!st->chip_info)
return dev_err_probe(&spi->dev, -EINVAL, "missing match data\n");
vref = devm_regulator_get_optional(&spi->dev, "refio");
if (IS_ERR(vref)) {
if (PTR_ERR(vref) != -ENODEV)
return dev_err_probe(&spi->dev, PTR_ERR(vref),
"Failed to get refio regulator\n");
ret = devm_regulator_bulk_get_enable(&spi->dev, st->chip_info->num_supplies,
st->chip_info->supplies);
vref = NULL;
}
if (ret)
return dev_err_probe(&spi->dev, ret,
"Failed to enable power supplies\n");
fsleep(T_POWERUP_US);
/*
* If there is no REFIO supply, then it means that we are using
* the internal 2.5V reference; otherwise REFIO is the reference voltage.
*/
if (vref) {
ret = regulator_enable(vref);
if (ret)
return ret;
ret = devm_add_action_or_reset(&spi->dev,
ad7380_regulator_disable, vref);
if (ret)
return ret;
ret = regulator_get_voltage(vref);
if (st->chip_info->external_ref_only) {
ret = devm_regulator_get_enable_read_voltage(&spi->dev,
"refin");
if (ret < 0)
return ret;
return dev_err_probe(&spi->dev, ret,
"Failed to get refin regulator\n");
st->vref_mv = ret / 1000;
/* these chips don't have a register bit for this */
external_ref_en = false;
} else {
st->vref_mv = AD7380_INTERNAL_REF_MV;
/*
* If there is no REFIO supply, then it means that we are using
* the internal reference; otherwise REFIO is the reference voltage.
*/
ret = devm_regulator_get_enable_read_voltage(&spi->dev,
"refio");
if (ret < 0 && ret != -ENODEV)
return dev_err_probe(&spi->dev, ret,
"Failed to get refio regulator\n");
external_ref_en = ret != -ENODEV;
st->vref_mv = external_ref_en ? ret / 1000 : AD7380_INTERNAL_REF_MV;
}
if (st->chip_info->num_vcm_supplies > ARRAY_SIZE(st->vcm_mv))
@ -1050,27 +1084,13 @@ static int ad7380_probe(struct spi_device *spi)
* input pin.
*/
for (i = 0; i < st->chip_info->num_vcm_supplies; i++) {
struct regulator *vcm;
const char *vcm = st->chip_info->vcm_supplies[i];
vcm = devm_regulator_get(&spi->dev,
st->chip_info->vcm_supplies[i]);
if (IS_ERR(vcm))
return dev_err_probe(&spi->dev, PTR_ERR(vcm),
"Failed to get %s regulator\n",
st->chip_info->vcm_supplies[i]);
ret = regulator_enable(vcm);
if (ret)
return ret;
ret = devm_add_action_or_reset(&spi->dev,
ad7380_regulator_disable, vcm);
if (ret)
return ret;
ret = regulator_get_voltage(vcm);
ret = devm_regulator_get_enable_read_voltage(&spi->dev, vcm);
if (ret < 0)
return ret;
return dev_err_probe(&spi->dev, ret,
"Failed to get %s regulator\n",
vcm);
st->vcm_mv[i] = ret / 1000;
}
@ -1135,7 +1155,7 @@ static int ad7380_probe(struct spi_device *spi)
if (ret)
return ret;
ret = ad7380_init(st, vref);
ret = ad7380_init(st, external_ref_en);
if (ret)
return ret;
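The probe rework above centers on two managed regulator helpers; a condensed sketch of the resulting pattern (hypothetical example_probe(), assuming only the standard regulator consumer API, not code from this patch):

#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

static const char * const example_supplies[] = { "vcc", "vlogic" };

static int example_probe(struct spi_device *spi)
{
	bool external_ref;
	int ret;

	/* Get and enable all always-on supplies; devm handles disable/put on teardown. */
	ret = devm_regulator_bulk_get_enable(&spi->dev,
					     ARRAY_SIZE(example_supplies),
					     example_supplies);
	if (ret)
		return dev_err_probe(&spi->dev, ret,
				     "Failed to enable power supplies\n");

	/* Optional reference: -ENODEV just means "fall back to the internal reference". */
	ret = devm_regulator_get_enable_read_voltage(&spi->dev, "refio");
	if (ret < 0 && ret != -ENODEV)
		return dev_err_probe(&spi->dev, ret,
				     "Failed to get refio regulator\n");

	external_ref = ret != -ENODEV;
	/* When present, ret holds the reference voltage in microvolts. */
	dev_dbg(&spi->dev, "using %s reference\n",
		external_ref ? "external" : "internal");

	return 0;
}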

View File

@ -380,7 +380,7 @@ config LTC2632
config LTC2664
tristate "Analog Devices LTC2664 and LTC2672 DAC SPI driver"
depends on SPI
select REGMAP
select REGMAP_SPI
help
Say yes here to build support for Analog Devices
LTC2664 and LTC2672 converters (DAC).

View File

@ -307,13 +307,15 @@ static int iio_gts_build_avail_scale_table(struct iio_gts *gts)
if (ret)
goto err_free_out;
for (i = 0; i < gts->num_itime; i++)
kfree(per_time_gains[i]);
kfree(per_time_gains);
gts->per_time_avail_scale_tables = per_time_scales;
return 0;
err_free_out:
for (i--; i; i--) {
for (i--; i >= 0; i--) {
kfree(per_time_scales[i]);
kfree(per_time_gains[i]);
}

View File

@ -522,7 +522,7 @@ static int veml6030_read_raw(struct iio_dev *indio_dev,
}
if (mask == IIO_CHAN_INFO_PROCESSED) {
*val = (reg * data->cur_resolution) / 10000;
*val2 = (reg * data->cur_resolution) % 10000;
*val2 = (reg * data->cur_resolution) % 10000 * 100;
return IIO_VAL_INT_PLUS_MICRO;
}
*val = reg;
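To see why the extra *100 matters: the driver divides by 10000, so cur_resolution is kept in units of 1/10000 lx, while IIO_VAL_INT_PLUS_MICRO expects the fractional part in millionths. With illustrative values reg = 100 and cur_resolution = 5376 (0.5376 lx per count):

	reg * cur_resolution = 537600
	val  = 537600 / 10000        = 53
	val2 = 537600 % 10000 * 100  = 7600 * 100 = 760000	-> reported as 53.760000 lx

Without the scaling, the same reading would have been reported as 53.007600 lx.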

View File

@ -1532,9 +1532,11 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
u32 tbl_indx;
int rc;
spin_lock_bh(&rcfw->tbl_lock);
tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
spin_unlock_bh(&rcfw->tbl_lock);
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_DESTROY_QP,
@ -1545,8 +1547,10 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc) {
spin_lock_bh(&rcfw->tbl_lock);
rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
rcfw->qp_tbl[tbl_indx].qp_handle = qp;
spin_unlock_bh(&rcfw->tbl_lock);
return rc;
}

View File

@ -290,7 +290,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_hwq *hwq;
u32 sw_prod, cmdq_prod;
struct pci_dev *pdev;
unsigned long flags;
u16 cookie;
u8 *preq;
@ -301,7 +300,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
/* Cmdq are in 16-byte units, each request can consume 1 or more
* cmdqe
*/
spin_lock_irqsave(&hwq->lock, flags);
spin_lock_bh(&hwq->lock);
required_slots = bnxt_qplib_get_cmd_slots(msg->req);
free_slots = HWQ_FREE_SLOTS(hwq);
cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
@ -311,7 +310,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
dev_info_ratelimited(&pdev->dev,
"CMDQ is full req/free %d/%d!",
required_slots, free_slots);
spin_unlock_irqrestore(&hwq->lock, flags);
spin_unlock_bh(&hwq->lock);
return -EAGAIN;
}
if (msg->block)
@ -367,7 +366,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
wmb();
writel(cmdq_prod, cmdq->cmdq_mbox.prod);
writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
spin_unlock_irqrestore(&hwq->lock, flags);
spin_unlock_bh(&hwq->lock);
/* Return the CREQ response pointer */
return 0;
}
@ -486,7 +485,6 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
{
struct creq_qp_event *evnt = (struct creq_qp_event *)msg->resp;
struct bnxt_qplib_crsqe *crsqe;
unsigned long flags;
u16 cookie;
int rc;
u8 opcode;
@ -512,12 +510,12 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
rc = __poll_for_resp(rcfw, cookie);
if (rc) {
spin_lock_irqsave(&rcfw->cmdq.hwq.lock, flags);
spin_lock_bh(&rcfw->cmdq.hwq.lock);
crsqe = &rcfw->crsqe_tbl[cookie];
crsqe->is_waiter_alive = false;
if (rc == -ENODEV)
set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags);
spin_unlock_irqrestore(&rcfw->cmdq.hwq.lock, flags);
spin_unlock_bh(&rcfw->cmdq.hwq.lock);
return -ETIMEDOUT;
}
@ -628,7 +626,6 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
u16 cookie, blocked = 0;
bool is_waiter_alive;
struct pci_dev *pdev;
unsigned long flags;
u32 wait_cmds = 0;
int rc = 0;
@ -637,17 +634,21 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
err_event = (struct creq_qp_error_notification *)qp_event;
qp_id = le32_to_cpu(err_event->xid);
spin_lock(&rcfw->tbl_lock);
tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw);
qp = rcfw->qp_tbl[tbl_indx].qp_handle;
if (!qp) {
spin_unlock(&rcfw->tbl_lock);
break;
}
bnxt_qplib_mark_qp_error(qp);
rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
spin_unlock(&rcfw->tbl_lock);
dev_dbg(&pdev->dev, "Received QP error notification\n");
dev_dbg(&pdev->dev,
"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
qp_id, err_event->req_err_state_reason,
err_event->res_err_state_reason);
if (!qp)
break;
bnxt_qplib_mark_qp_error(qp);
rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
break;
default:
/*
@ -659,8 +660,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
*
*/
spin_lock_irqsave_nested(&hwq->lock, flags,
SINGLE_DEPTH_NESTING);
spin_lock_nested(&hwq->lock, SINGLE_DEPTH_NESTING);
cookie = le16_to_cpu(qp_event->cookie);
blocked = cookie & RCFW_CMD_IS_BLOCKING;
cookie &= RCFW_MAX_COOKIE_VALUE;
@ -672,7 +672,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
dev_info(&pdev->dev,
"rcfw timedout: cookie = %#x, free_slots = %d",
cookie, crsqe->free_slots);
spin_unlock_irqrestore(&hwq->lock, flags);
spin_unlock(&hwq->lock);
return rc;
}
@ -720,7 +720,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
__destroy_timedout_ah(rcfw,
(struct creq_create_ah_resp *)
qp_event);
spin_unlock_irqrestore(&hwq->lock, flags);
spin_unlock(&hwq->lock);
}
*num_wait += wait_cmds;
return rc;
@ -734,12 +734,11 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
struct bnxt_qplib_hwq *hwq = &creq->hwq;
struct creq_base *creqe;
unsigned long flags;
u32 num_wakeup = 0;
u32 hw_polled = 0;
/* Service the CREQ until budget is over */
spin_lock_irqsave(&hwq->lock, flags);
spin_lock_bh(&hwq->lock);
while (budget > 0) {
creqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
if (!CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags))
@ -782,7 +781,7 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
if (hw_polled)
bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo,
rcfw->res->cctx, true);
spin_unlock_irqrestore(&hwq->lock, flags);
spin_unlock_bh(&hwq->lock);
if (num_wakeup)
wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
}
@ -978,6 +977,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
GFP_KERNEL);
if (!rcfw->qp_tbl)
goto fail;
spin_lock_init(&rcfw->tbl_lock);
rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;

View File

@ -224,6 +224,8 @@ struct bnxt_qplib_rcfw {
struct bnxt_qplib_crsqe *crsqe_tbl;
int qp_tbl_size;
struct bnxt_qplib_qp_node *qp_tbl;
/* To synchronize the qp-handle hash table */
spinlock_t tbl_lock;
u64 oos_prev;
u32 init_oos_stats;
u32 cmdq_depth;

Some files were not shown because too many files have changed in this diff.