mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-15 11:37:47 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR (net-6.13-rc6). No conflicts. Adjacent changes: include/linux/if_vlan.h f91a5b808938 ("af_packet: fix vlan_get_protocol_dgram() vs MSG_PEEK") 3f330db30638 ("net: reformat kdoc return statements") Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
commit
385f186aba
1
.mailmap
1
.mailmap
@ -735,6 +735,7 @@ Wolfram Sang <wsa@kernel.org> <w.sang@pengutronix.de>
|
|||||||
Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
|
Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
|
||||||
Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
|
Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
|
||||||
Yanteng Si <si.yanteng@linux.dev> <siyanteng@loongson.cn>
|
Yanteng Si <si.yanteng@linux.dev> <siyanteng@loongson.cn>
|
||||||
|
Ying Huang <huang.ying.caritas@gmail.com> <ying.huang@intel.com>
|
||||||
Yusuke Goda <goda.yusuke@renesas.com>
|
Yusuke Goda <goda.yusuke@renesas.com>
|
||||||
Zack Rusin <zack.rusin@broadcom.com> <zackr@vmware.com>
|
Zack Rusin <zack.rusin@broadcom.com> <zackr@vmware.com>
|
||||||
Zhu Yanjun <zyjzyj2000@gmail.com> <yanjunz@nvidia.com>
|
Zhu Yanjun <zyjzyj2000@gmail.com> <yanjunz@nvidia.com>
|
||||||
|
@ -445,8 +445,10 @@ event code Key Notes
|
|||||||
0x1008 0x07 FN+F8 IBM: toggle screen expand
|
0x1008 0x07 FN+F8 IBM: toggle screen expand
|
||||||
Lenovo: configure UltraNav,
|
Lenovo: configure UltraNav,
|
||||||
or toggle screen expand.
|
or toggle screen expand.
|
||||||
On newer platforms (2024+)
|
On 2024 platforms replaced by
|
||||||
replaced by 0x131f (see below)
|
0x131f (see below) and on newer
|
||||||
|
platforms (2025 +) keycode is
|
||||||
|
replaced by 0x1401 (see below).
|
||||||
|
|
||||||
0x1009 0x08 FN+F9 -
|
0x1009 0x08 FN+F9 -
|
||||||
|
|
||||||
@ -506,9 +508,11 @@ event code Key Notes
|
|||||||
|
|
||||||
0x1019 0x18 unknown
|
0x1019 0x18 unknown
|
||||||
|
|
||||||
0x131f ... FN+F8 Platform Mode change.
|
0x131f ... FN+F8 Platform Mode change (2024 systems).
|
||||||
Implemented in driver.
|
Implemented in driver.
|
||||||
|
|
||||||
|
0x1401 ... FN+F8 Platform Mode change (2025 + systems).
|
||||||
|
Implemented in driver.
|
||||||
... ... ...
|
... ... ...
|
||||||
|
|
||||||
0x1020 0x1F unknown
|
0x1020 0x1F unknown
|
||||||
|
@ -251,9 +251,7 @@ performance supported in `AMD CPPC Performance Capability <perf_cap_>`_).
|
|||||||
In some ASICs, the highest CPPC performance is not the one in the ``_CPC``
|
In some ASICs, the highest CPPC performance is not the one in the ``_CPC``
|
||||||
table, so we need to expose it to sysfs. If boost is not active, but
|
table, so we need to expose it to sysfs. If boost is not active, but
|
||||||
still supported, this maximum frequency will be larger than the one in
|
still supported, this maximum frequency will be larger than the one in
|
||||||
``cpuinfo``. On systems that support preferred core, the driver will have
|
``cpuinfo``.
|
||||||
different values for some cores than others and this will reflect the values
|
|
||||||
advertised by the platform at bootup.
|
|
||||||
This attribute is read-only.
|
This attribute is read-only.
|
||||||
|
|
||||||
``amd_pstate_lowest_nonlinear_freq``
|
``amd_pstate_lowest_nonlinear_freq``
|
||||||
|
@ -114,8 +114,9 @@ patternProperties:
|
|||||||
table that specifies the PPID to LIODN mapping. Needed if the PAMU is
|
table that specifies the PPID to LIODN mapping. Needed if the PAMU is
|
||||||
used. Value is a 12 bit value where value is a LIODN ID for this JR.
|
used. Value is a 12 bit value where value is a LIODN ID for this JR.
|
||||||
This property is normally set by boot firmware.
|
This property is normally set by boot firmware.
|
||||||
$ref: /schemas/types.yaml#/definitions/uint32
|
$ref: /schemas/types.yaml#/definitions/uint32-array
|
||||||
maximum: 0xfff
|
items:
|
||||||
|
- maximum: 0xfff
|
||||||
|
|
||||||
'^rtic@[0-9a-f]+$':
|
'^rtic@[0-9a-f]+$':
|
||||||
type: object
|
type: object
|
||||||
@ -186,8 +187,9 @@ patternProperties:
|
|||||||
Needed if the PAMU is used. Value is a 12 bit value where value
|
Needed if the PAMU is used. Value is a 12 bit value where value
|
||||||
is a LIODN ID for this JR. This property is normally set by boot
|
is a LIODN ID for this JR. This property is normally set by boot
|
||||||
firmware.
|
firmware.
|
||||||
$ref: /schemas/types.yaml#/definitions/uint32
|
$ref: /schemas/types.yaml#/definitions/uint32-array
|
||||||
maximum: 0xfff
|
items:
|
||||||
|
- maximum: 0xfff
|
||||||
|
|
||||||
fsl,rtic-region:
|
fsl,rtic-region:
|
||||||
description:
|
description:
|
||||||
|
@ -90,7 +90,7 @@ properties:
|
|||||||
adi,dsi-lanes:
|
adi,dsi-lanes:
|
||||||
description: Number of DSI data lanes connected to the DSI host.
|
description: Number of DSI data lanes connected to the DSI host.
|
||||||
$ref: /schemas/types.yaml#/definitions/uint32
|
$ref: /schemas/types.yaml#/definitions/uint32
|
||||||
enum: [ 1, 2, 3, 4 ]
|
enum: [ 2, 3, 4 ]
|
||||||
|
|
||||||
"#sound-dai-cells":
|
"#sound-dai-cells":
|
||||||
const: 0
|
const: 0
|
||||||
|
@ -82,7 +82,7 @@ examples:
|
|||||||
|
|
||||||
uimage@100000 {
|
uimage@100000 {
|
||||||
reg = <0x0100000 0x200000>;
|
reg = <0x0100000 0x200000>;
|
||||||
compress = "lzma";
|
compression = "lzma";
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -35,6 +35,7 @@ properties:
|
|||||||
|
|
||||||
fsl,liodn:
|
fsl,liodn:
|
||||||
$ref: /schemas/types.yaml#/definitions/uint32-array
|
$ref: /schemas/types.yaml#/definitions/uint32-array
|
||||||
|
maxItems: 2
|
||||||
description: See pamu.txt. Two LIODN(s). DQRR LIODN (DLIODN) and Frame LIODN
|
description: See pamu.txt. Two LIODN(s). DQRR LIODN (DLIODN) and Frame LIODN
|
||||||
(FLIODN)
|
(FLIODN)
|
||||||
|
|
||||||
@ -69,6 +70,7 @@ patternProperties:
|
|||||||
type: object
|
type: object
|
||||||
properties:
|
properties:
|
||||||
fsl,liodn:
|
fsl,liodn:
|
||||||
|
$ref: /schemas/types.yaml#/definitions/uint32-array
|
||||||
description: See pamu.txt, PAMU property used for static LIODN assignment
|
description: See pamu.txt, PAMU property used for static LIODN assignment
|
||||||
|
|
||||||
fsl,iommu-parent:
|
fsl,iommu-parent:
|
||||||
|
@ -51,7 +51,7 @@ properties:
|
|||||||
description: Power supply for AVDD, providing 1.8V.
|
description: Power supply for AVDD, providing 1.8V.
|
||||||
|
|
||||||
cpvdd-supply:
|
cpvdd-supply:
|
||||||
description: Power supply for CPVDD, providing 3.5V.
|
description: Power supply for CPVDD, providing 1.8V.
|
||||||
|
|
||||||
hp-detect-gpios:
|
hp-detect-gpios:
|
||||||
description:
|
description:
|
||||||
|
@ -3,3 +3,853 @@
|
|||||||
=================
|
=================
|
||||||
Process Addresses
|
Process Addresses
|
||||||
=================
|
=================
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:maxdepth: 3
|
||||||
|
|
||||||
|
|
||||||
|
Userland memory ranges are tracked by the kernel via Virtual Memory Areas or
|
||||||
|
'VMA's of type :c:struct:`!struct vm_area_struct`.
|
||||||
|
|
||||||
|
Each VMA describes a virtually contiguous memory range with identical
|
||||||
|
attributes, each described by a :c:struct:`!struct vm_area_struct`
|
||||||
|
object. Userland access outside of VMAs is invalid except in the case where an
|
||||||
|
adjacent stack VMA could be extended to contain the accessed address.
|
||||||
|
|
||||||
|
All VMAs are contained within one and only one virtual address space, described
|
||||||
|
by a :c:struct:`!struct mm_struct` object which is referenced by all tasks (that is,
|
||||||
|
threads) which share the virtual address space. We refer to this as the
|
||||||
|
:c:struct:`!mm`.
|
||||||
|
|
||||||
|
Each mm object contains a maple tree data structure which describes all VMAs
|
||||||
|
within the virtual address space.
|
||||||
|
|
||||||
|
.. note:: An exception to this is the 'gate' VMA which is provided by
|
||||||
|
architectures which use :c:struct:`!vsyscall` and is a global static
|
||||||
|
object which does not belong to any specific mm.
|
||||||
|
|
||||||
|
-------
|
||||||
|
Locking
|
||||||
|
-------
|
||||||
|
|
||||||
|
The kernel is designed to be highly scalable against concurrent read operations
|
||||||
|
on VMA **metadata** so a complicated set of locks are required to ensure memory
|
||||||
|
corruption does not occur.
|
||||||
|
|
||||||
|
.. note:: Locking VMAs for their metadata does not have any impact on the memory
|
||||||
|
they describe nor the page tables that map them.
|
||||||
|
|
||||||
|
Terminology
|
||||||
|
-----------
|
||||||
|
|
||||||
|
* **mmap locks** - Each MM has a read/write semaphore :c:member:`!mmap_lock`
|
||||||
|
which locks at a process address space granularity which can be acquired via
|
||||||
|
:c:func:`!mmap_read_lock`, :c:func:`!mmap_write_lock` and variants.
|
||||||
|
* **VMA locks** - The VMA lock is at VMA granularity (of course) which behaves
|
||||||
|
as a read/write semaphore in practice. A VMA read lock is obtained via
|
||||||
|
:c:func:`!lock_vma_under_rcu` (and unlocked via :c:func:`!vma_end_read`) and a
|
||||||
|
write lock via :c:func:`!vma_start_write` (all VMA write locks are unlocked
|
||||||
|
automatically when the mmap write lock is released). To take a VMA write lock
|
||||||
|
you **must** have already acquired an :c:func:`!mmap_write_lock`.
|
||||||
|
* **rmap locks** - When trying to access VMAs through the reverse mapping via a
|
||||||
|
:c:struct:`!struct address_space` or :c:struct:`!struct anon_vma` object
|
||||||
|
(reachable from a folio via :c:member:`!folio->mapping`). VMAs must be stabilised via
|
||||||
|
:c:func:`!anon_vma_[try]lock_read` or :c:func:`!anon_vma_[try]lock_write` for
|
||||||
|
anonymous memory and :c:func:`!i_mmap_[try]lock_read` or
|
||||||
|
:c:func:`!i_mmap_[try]lock_write` for file-backed memory. We refer to these
|
||||||
|
locks as the reverse mapping locks, or 'rmap locks' for brevity.
|
||||||
|
|
||||||
|
We discuss page table locks separately in the dedicated section below.
|
||||||
|
|
||||||
|
The first thing **any** of these locks achieve is to **stabilise** the VMA
|
||||||
|
within the MM tree. That is, guaranteeing that the VMA object will not be
|
||||||
|
deleted from under you nor modified (except for some specific fields
|
||||||
|
described below).
|
||||||
|
|
||||||
|
Stabilising a VMA also keeps the address space described by it around.
|
||||||
|
|
||||||
|
Lock usage
|
||||||
|
----------
|
||||||
|
|
||||||
|
If you want to **read** VMA metadata fields or just keep the VMA stable, you
|
||||||
|
must do one of the following:
|
||||||
|
|
||||||
|
* Obtain an mmap read lock at the MM granularity via :c:func:`!mmap_read_lock` (or a
|
||||||
|
suitable variant), unlocking it with a matching :c:func:`!mmap_read_unlock` when
|
||||||
|
you're done with the VMA, *or*
|
||||||
|
* Try to obtain a VMA read lock via :c:func:`!lock_vma_under_rcu`. This tries to
|
||||||
|
acquire the lock atomically so might fail, in which case fall-back logic is
|
||||||
|
required to instead obtain an mmap read lock if this returns :c:macro:`!NULL`,
|
||||||
|
*or*
|
||||||
|
* Acquire an rmap lock before traversing the locked interval tree (whether
|
||||||
|
anonymous or file-backed) to obtain the required VMA.
|
||||||
|
|
||||||
|
If you want to **write** VMA metadata fields, then things vary depending on the
|
||||||
|
field (we explore each VMA field in detail below). For the majority you must:
|
||||||
|
|
||||||
|
* Obtain an mmap write lock at the MM granularity via :c:func:`!mmap_write_lock` (or a
|
||||||
|
suitable variant), unlocking it with a matching :c:func:`!mmap_write_unlock` when
|
||||||
|
you're done with the VMA, *and*
|
||||||
|
* Obtain a VMA write lock via :c:func:`!vma_start_write` for each VMA you wish to
|
||||||
|
modify, which will be released automatically when :c:func:`!mmap_write_unlock` is
|
||||||
|
called.
|
||||||
|
* If you want to be able to write to **any** field, you must also hide the VMA
|
||||||
|
from the reverse mapping by obtaining an **rmap write lock**.
|
||||||
|
|
||||||
|
VMA locks are special in that you must obtain an mmap **write** lock **first**
|
||||||
|
in order to obtain a VMA **write** lock. A VMA **read** lock however can be
|
||||||
|
obtained without any other lock (:c:func:`!lock_vma_under_rcu` will acquire then
|
||||||
|
release an RCU lock to lookup the VMA for you).
|
||||||
|
|
||||||
|
This constrains the impact of writers on readers, as a writer can interact with
|
||||||
|
one VMA while a reader interacts with another simultaneously.
|
||||||
|
|
||||||
|
.. note:: The primary users of VMA read locks are page fault handlers, which
|
||||||
|
means that without a VMA write lock, page faults will run concurrent with
|
||||||
|
whatever you are doing.
|
||||||
|
|
||||||
|
Examining all valid lock states:
|
||||||
|
|
||||||
|
.. table::
|
||||||
|
|
||||||
|
========= ======== ========= ======= ===== =========== ==========
|
||||||
|
mmap lock VMA lock rmap lock Stable? Read? Write most? Write all?
|
||||||
|
========= ======== ========= ======= ===== =========== ==========
|
||||||
|
\- \- \- N N N N
|
||||||
|
\- R \- Y Y N N
|
||||||
|
\- \- R/W Y Y N N
|
||||||
|
R/W \-/R \-/R/W Y Y N N
|
||||||
|
W W \-/R Y Y Y N
|
||||||
|
W W W Y Y Y Y
|
||||||
|
========= ======== ========= ======= ===== =========== ==========
|
||||||
|
|
||||||
|
.. warning:: While it's possible to obtain a VMA lock while holding an mmap read lock,
|
||||||
|
attempting to do the reverse is invalid as it can result in deadlock - if
|
||||||
|
another task already holds an mmap write lock and attempts to acquire a VMA
|
||||||
|
write lock that will deadlock on the VMA read lock.
|
||||||
|
|
||||||
|
All of these locks behave as read/write semaphores in practice, so you can
|
||||||
|
obtain either a read or a write lock for each of these.
|
||||||
|
|
||||||
|
.. note:: Generally speaking, a read/write semaphore is a class of lock which
|
||||||
|
permits concurrent readers. However a write lock can only be obtained
|
||||||
|
once all readers have left the critical region (and pending readers
|
||||||
|
made to wait).
|
||||||
|
|
||||||
|
This renders read locks on a read/write semaphore concurrent with other
|
||||||
|
readers and write locks exclusive against all others holding the semaphore.
|
||||||
|
|
||||||
|
VMA fields
|
||||||
|
^^^^^^^^^^
|
||||||
|
|
||||||
|
We can subdivide :c:struct:`!struct vm_area_struct` fields by their purpose, which makes it
|
||||||
|
easier to explore their locking characteristics:
|
||||||
|
|
||||||
|
.. note:: We exclude VMA lock-specific fields here to avoid confusion, as these
|
||||||
|
are in effect an internal implementation detail.
|
||||||
|
|
||||||
|
.. table:: Virtual layout fields
|
||||||
|
|
||||||
|
===================== ======================================== ===========
|
||||||
|
Field Description Write lock
|
||||||
|
===================== ======================================== ===========
|
||||||
|
:c:member:`!vm_start` Inclusive start virtual address of range mmap write,
|
||||||
|
VMA describes. VMA write,
|
||||||
|
rmap write.
|
||||||
|
:c:member:`!vm_end` Exclusive end virtual address of range mmap write,
|
||||||
|
VMA describes. VMA write,
|
||||||
|
rmap write.
|
||||||
|
:c:member:`!vm_pgoff` Describes the page offset into the file, mmap write,
|
||||||
|
the original page offset within the VMA write,
|
||||||
|
virtual address space (prior to any rmap write.
|
||||||
|
:c:func:`!mremap`), or PFN if a PFN map
|
||||||
|
and the architecture does not support
|
||||||
|
:c:macro:`!CONFIG_ARCH_HAS_PTE_SPECIAL`.
|
||||||
|
===================== ======================================== ===========
|
||||||
|
|
||||||
|
These fields describes the size, start and end of the VMA, and as such cannot be
|
||||||
|
modified without first being hidden from the reverse mapping since these fields
|
||||||
|
are used to locate VMAs within the reverse mapping interval trees.
|
||||||
|
|
||||||
|
.. table:: Core fields
|
||||||
|
|
||||||
|
============================ ======================================== =========================
|
||||||
|
Field Description Write lock
|
||||||
|
============================ ======================================== =========================
|
||||||
|
:c:member:`!vm_mm` Containing mm_struct. None - written once on
|
||||||
|
initial map.
|
||||||
|
:c:member:`!vm_page_prot` Architecture-specific page table mmap write, VMA write.
|
||||||
|
protection bits determined from VMA
|
||||||
|
flags.
|
||||||
|
:c:member:`!vm_flags` Read-only access to VMA flags describing N/A
|
||||||
|
attributes of the VMA, in union with
|
||||||
|
private writable
|
||||||
|
:c:member:`!__vm_flags`.
|
||||||
|
:c:member:`!__vm_flags` Private, writable access to VMA flags mmap write, VMA write.
|
||||||
|
field, updated by
|
||||||
|
:c:func:`!vm_flags_*` functions.
|
||||||
|
:c:member:`!vm_file` If the VMA is file-backed, points to a None - written once on
|
||||||
|
struct file object describing the initial map.
|
||||||
|
underlying file, if anonymous then
|
||||||
|
:c:macro:`!NULL`.
|
||||||
|
:c:member:`!vm_ops` If the VMA is file-backed, then either None - Written once on
|
||||||
|
the driver or file-system provides a initial map by
|
||||||
|
:c:struct:`!struct vm_operations_struct` :c:func:`!f_ops->mmap()`.
|
||||||
|
object describing callbacks to be
|
||||||
|
invoked on VMA lifetime events.
|
||||||
|
:c:member:`!vm_private_data` A :c:member:`!void *` field for Handled by driver.
|
||||||
|
driver-specific metadata.
|
||||||
|
============================ ======================================== =========================
|
||||||
|
|
||||||
|
These are the core fields which describe the MM the VMA belongs to and its attributes.
|
||||||
|
|
||||||
|
.. table:: Config-specific fields
|
||||||
|
|
||||||
|
================================= ===================== ======================================== ===============
|
||||||
|
Field Configuration option Description Write lock
|
||||||
|
================================= ===================== ======================================== ===============
|
||||||
|
:c:member:`!anon_name` CONFIG_ANON_VMA_NAME A field for storing a mmap write,
|
||||||
|
:c:struct:`!struct anon_vma_name` VMA write.
|
||||||
|
object providing a name for anonymous
|
||||||
|
mappings, or :c:macro:`!NULL` if none
|
||||||
|
is set or the VMA is file-backed. The
|
||||||
|
underlying object is reference counted
|
||||||
|
and can be shared across multiple VMAs
|
||||||
|
for scalability.
|
||||||
|
:c:member:`!swap_readahead_info` CONFIG_SWAP Metadata used by the swap mechanism mmap read,
|
||||||
|
to perform readahead. This field is swap-specific
|
||||||
|
accessed atomically. lock.
|
||||||
|
:c:member:`!vm_policy` CONFIG_NUMA :c:type:`!mempolicy` object which mmap write,
|
||||||
|
describes the NUMA behaviour of the VMA write.
|
||||||
|
VMA. The underlying object is reference
|
||||||
|
counted.
|
||||||
|
:c:member:`!numab_state` CONFIG_NUMA_BALANCING :c:type:`!vma_numab_state` object which mmap read,
|
||||||
|
describes the current state of numab-specific
|
||||||
|
NUMA balancing in relation to this VMA. lock.
|
||||||
|
Updated under mmap read lock by
|
||||||
|
:c:func:`!task_numa_work`.
|
||||||
|
:c:member:`!vm_userfaultfd_ctx` CONFIG_USERFAULTFD Userfaultfd context wrapper object of mmap write,
|
||||||
|
type :c:type:`!vm_userfaultfd_ctx`, VMA write.
|
||||||
|
either of zero size if userfaultfd is
|
||||||
|
disabled, or containing a pointer
|
||||||
|
to an underlying
|
||||||
|
:c:type:`!userfaultfd_ctx` object which
|
||||||
|
describes userfaultfd metadata.
|
||||||
|
================================= ===================== ======================================== ===============
|
||||||
|
|
||||||
|
These fields are present or not depending on whether the relevant kernel
|
||||||
|
configuration option is set.
|
||||||
|
|
||||||
|
.. table:: Reverse mapping fields
|
||||||
|
|
||||||
|
=================================== ========================================= ============================
|
||||||
|
Field Description Write lock
|
||||||
|
=================================== ========================================= ============================
|
||||||
|
:c:member:`!shared.rb` A red/black tree node used, if the mmap write, VMA write,
|
||||||
|
mapping is file-backed, to place the VMA i_mmap write.
|
||||||
|
in the
|
||||||
|
:c:member:`!struct address_space->i_mmap`
|
||||||
|
red/black interval tree.
|
||||||
|
:c:member:`!shared.rb_subtree_last` Metadata used for management of the mmap write, VMA write,
|
||||||
|
interval tree if the VMA is file-backed. i_mmap write.
|
||||||
|
:c:member:`!anon_vma_chain` List of pointers to both forked/CoW’d mmap read, anon_vma write.
|
||||||
|
:c:type:`!anon_vma` objects and
|
||||||
|
:c:member:`!vma->anon_vma` if it is
|
||||||
|
non-:c:macro:`!NULL`.
|
||||||
|
:c:member:`!anon_vma` :c:type:`!anon_vma` object used by When :c:macro:`NULL` and
|
||||||
|
anonymous folios mapped exclusively to setting non-:c:macro:`NULL`:
|
||||||
|
this VMA. Initially set by mmap read, page_table_lock.
|
||||||
|
:c:func:`!anon_vma_prepare` serialised
|
||||||
|
by the :c:macro:`!page_table_lock`. This When non-:c:macro:`NULL` and
|
||||||
|
is set as soon as any page is faulted in. setting :c:macro:`NULL`:
|
||||||
|
mmap write, VMA write,
|
||||||
|
anon_vma write.
|
||||||
|
=================================== ========================================= ============================
|
||||||
|
|
||||||
|
These fields are used to both place the VMA within the reverse mapping, and for
|
||||||
|
anonymous mappings, to be able to access both related :c:struct:`!struct anon_vma` objects
|
||||||
|
and the :c:struct:`!struct anon_vma` in which folios mapped exclusively to this VMA should
|
||||||
|
reside.
|
||||||
|
|
||||||
|
.. note:: If a file-backed mapping is mapped with :c:macro:`!MAP_PRIVATE` set
|
||||||
|
then it can be in both the :c:type:`!anon_vma` and :c:type:`!i_mmap`
|
||||||
|
trees at the same time, so all of these fields might be utilised at
|
||||||
|
once.
|
||||||
|
|
||||||
|
Page tables
|
||||||
|
-----------
|
||||||
|
|
||||||
|
We won't speak exhaustively on the subject but broadly speaking, page tables map
|
||||||
|
virtual addresses to physical ones through a series of page tables, each of
|
||||||
|
which contain entries with physical addresses for the next page table level
|
||||||
|
(along with flags), and at the leaf level the physical addresses of the
|
||||||
|
underlying physical data pages or a special entry such as a swap entry,
|
||||||
|
migration entry or other special marker. Offsets into these pages are provided
|
||||||
|
by the virtual address itself.
|
||||||
|
|
||||||
|
In Linux these are divided into five levels - PGD, P4D, PUD, PMD and PTE. Huge
|
||||||
|
pages might eliminate one or two of these levels, but when this is the case we
|
||||||
|
typically refer to the leaf level as the PTE level regardless.
|
||||||
|
|
||||||
|
.. note:: In instances where the architecture supports fewer page tables than
|
||||||
|
five the kernel cleverly 'folds' page table levels, that is stubbing
|
||||||
|
out functions related to the skipped levels. This allows us to
|
||||||
|
conceptually act as if there were always five levels, even if the
|
||||||
|
compiler might, in practice, eliminate any code relating to missing
|
||||||
|
ones.
|
||||||
|
|
||||||
|
There are four key operations typically performed on page tables:
|
||||||
|
|
||||||
|
1. **Traversing** page tables - Simply reading page tables in order to traverse
|
||||||
|
them. This only requires that the VMA is kept stable, so a lock which
|
||||||
|
establishes this suffices for traversal (there are also lockless variants
|
||||||
|
which eliminate even this requirement, such as :c:func:`!gup_fast`).
|
||||||
|
2. **Installing** page table mappings - Whether creating a new mapping or
|
||||||
|
modifying an existing one in such a way as to change its identity. This
|
||||||
|
requires that the VMA is kept stable via an mmap or VMA lock (explicitly not
|
||||||
|
rmap locks).
|
||||||
|
3. **Zapping/unmapping** page table entries - This is what the kernel calls
|
||||||
|
clearing page table mappings at the leaf level only, whilst leaving all page
|
||||||
|
tables in place. This is a very common operation in the kernel performed on
|
||||||
|
file truncation, the :c:macro:`!MADV_DONTNEED` operation via
|
||||||
|
:c:func:`!madvise`, and others. This is performed by a number of functions
|
||||||
|
including :c:func:`!unmap_mapping_range` and :c:func:`!unmap_mapping_pages`.
|
||||||
|
The VMA need only be kept stable for this operation.
|
||||||
|
4. **Freeing** page tables - When finally the kernel removes page tables from a
|
||||||
|
userland process (typically via :c:func:`!free_pgtables`) extreme care must
|
||||||
|
be taken to ensure this is done safely, as this logic finally frees all page
|
||||||
|
tables in the specified range, ignoring existing leaf entries (it assumes the
|
||||||
|
caller has both zapped the range and prevented any further faults or
|
||||||
|
modifications within it).
|
||||||
|
|
||||||
|
.. note:: Modifying mappings for reclaim or migration is performed under rmap
|
||||||
|
lock as it, like zapping, does not fundamentally modify the identity
|
||||||
|
of what is being mapped.
|
||||||
|
|
||||||
|
**Traversing** and **zapping** ranges can be performed holding any one of the
|
||||||
|
locks described in the terminology section above - that is the mmap lock, the
|
||||||
|
VMA lock or either of the reverse mapping locks.
|
||||||
|
|
||||||
|
That is - as long as you keep the relevant VMA **stable** - you are good to go
|
||||||
|
ahead and perform these operations on page tables (though internally, kernel
|
||||||
|
operations that perform writes also acquire internal page table locks to
|
||||||
|
serialise - see the page table implementation detail section for more details).
|
||||||
|
|
||||||
|
When **installing** page table entries, the mmap or VMA lock must be held to
|
||||||
|
keep the VMA stable. We explore why this is in the page table locking details
|
||||||
|
section below.
|
||||||
|
|
||||||
|
.. warning:: Page tables are normally only traversed in regions covered by VMAs.
|
||||||
|
If you want to traverse page tables in areas that might not be
|
||||||
|
covered by VMAs, heavier locking is required.
|
||||||
|
See :c:func:`!walk_page_range_novma` for details.
|
||||||
|
|
||||||
|
**Freeing** page tables is an entirely internal memory management operation and
|
||||||
|
has special requirements (see the page freeing section below for more details).
|
||||||
|
|
||||||
|
.. warning:: When **freeing** page tables, it must not be possible for VMAs
|
||||||
|
containing the ranges those page tables map to be accessible via
|
||||||
|
the reverse mapping.
|
||||||
|
|
||||||
|
The :c:func:`!free_pgtables` function removes the relevant VMAs
|
||||||
|
from the reverse mappings, but no other VMAs can be permitted to be
|
||||||
|
accessible and span the specified range.
|
||||||
|
|
||||||
|
Lock ordering
|
||||||
|
-------------
|
||||||
|
|
||||||
|
As we have multiple locks across the kernel which may or may not be taken at the
|
||||||
|
same time as explicit mm or VMA locks, we have to be wary of lock inversion, and
|
||||||
|
the **order** in which locks are acquired and released becomes very important.
|
||||||
|
|
||||||
|
.. note:: Lock inversion occurs when two threads need to acquire multiple locks,
|
||||||
|
but in doing so inadvertently cause a mutual deadlock.
|
||||||
|
|
||||||
|
For example, consider thread 1 which holds lock A and tries to acquire lock B,
|
||||||
|
while thread 2 holds lock B and tries to acquire lock A.
|
||||||
|
|
||||||
|
Both threads are now deadlocked on each other. However, had they attempted to
|
||||||
|
acquire locks in the same order, one would have waited for the other to
|
||||||
|
complete its work and no deadlock would have occurred.
|
||||||
|
|
||||||
|
The opening comment in :c:macro:`!mm/rmap.c` describes in detail the required
|
||||||
|
ordering of locks within memory management code:
|
||||||
|
|
||||||
|
.. code-block::
|
||||||
|
|
||||||
|
inode->i_rwsem (while writing or truncating, not reading or faulting)
|
||||||
|
mm->mmap_lock
|
||||||
|
mapping->invalidate_lock (in filemap_fault)
|
||||||
|
folio_lock
|
||||||
|
hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
|
||||||
|
vma_start_write
|
||||||
|
mapping->i_mmap_rwsem
|
||||||
|
anon_vma->rwsem
|
||||||
|
mm->page_table_lock or pte_lock
|
||||||
|
swap_lock (in swap_duplicate, swap_info_get)
|
||||||
|
mmlist_lock (in mmput, drain_mmlist and others)
|
||||||
|
mapping->private_lock (in block_dirty_folio)
|
||||||
|
i_pages lock (widely used)
|
||||||
|
lruvec->lru_lock (in folio_lruvec_lock_irq)
|
||||||
|
inode->i_lock (in set_page_dirty's __mark_inode_dirty)
|
||||||
|
bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
|
||||||
|
sb_lock (within inode_lock in fs/fs-writeback.c)
|
||||||
|
i_pages lock (widely used, in set_page_dirty,
|
||||||
|
in arch-dependent flush_dcache_mmap_lock,
|
||||||
|
within bdi.wb->list_lock in __sync_single_inode)
|
||||||
|
|
||||||
|
There is also a file-system specific lock ordering comment located at the top of
|
||||||
|
:c:macro:`!mm/filemap.c`:
|
||||||
|
|
||||||
|
.. code-block::
|
||||||
|
|
||||||
|
->i_mmap_rwsem (truncate_pagecache)
|
||||||
|
->private_lock (__free_pte->block_dirty_folio)
|
||||||
|
->swap_lock (exclusive_swap_page, others)
|
||||||
|
->i_pages lock
|
||||||
|
|
||||||
|
->i_rwsem
|
||||||
|
->invalidate_lock (acquired by fs in truncate path)
|
||||||
|
->i_mmap_rwsem (truncate->unmap_mapping_range)
|
||||||
|
|
||||||
|
->mmap_lock
|
||||||
|
->i_mmap_rwsem
|
||||||
|
->page_table_lock or pte_lock (various, mainly in memory.c)
|
||||||
|
->i_pages lock (arch-dependent flush_dcache_mmap_lock)
|
||||||
|
|
||||||
|
->mmap_lock
|
||||||
|
->invalidate_lock (filemap_fault)
|
||||||
|
->lock_page (filemap_fault, access_process_vm)
|
||||||
|
|
||||||
|
->i_rwsem (generic_perform_write)
|
||||||
|
->mmap_lock (fault_in_readable->do_page_fault)
|
||||||
|
|
||||||
|
bdi->wb.list_lock
|
||||||
|
sb_lock (fs/fs-writeback.c)
|
||||||
|
->i_pages lock (__sync_single_inode)
|
||||||
|
|
||||||
|
->i_mmap_rwsem
|
||||||
|
->anon_vma.lock (vma_merge)
|
||||||
|
|
||||||
|
->anon_vma.lock
|
||||||
|
->page_table_lock or pte_lock (anon_vma_prepare and various)
|
||||||
|
|
||||||
|
->page_table_lock or pte_lock
|
||||||
|
->swap_lock (try_to_unmap_one)
|
||||||
|
->private_lock (try_to_unmap_one)
|
||||||
|
->i_pages lock (try_to_unmap_one)
|
||||||
|
->lruvec->lru_lock (follow_page_mask->mark_page_accessed)
|
||||||
|
->lruvec->lru_lock (check_pte_range->folio_isolate_lru)
|
||||||
|
->private_lock (folio_remove_rmap_pte->set_page_dirty)
|
||||||
|
->i_pages lock (folio_remove_rmap_pte->set_page_dirty)
|
||||||
|
bdi.wb->list_lock (folio_remove_rmap_pte->set_page_dirty)
|
||||||
|
->inode->i_lock (folio_remove_rmap_pte->set_page_dirty)
|
||||||
|
bdi.wb->list_lock (zap_pte_range->set_page_dirty)
|
||||||
|
->inode->i_lock (zap_pte_range->set_page_dirty)
|
||||||
|
->private_lock (zap_pte_range->block_dirty_folio)
|
||||||
|
|
||||||
|
Please check the current state of these comments which may have changed since
|
||||||
|
the time of writing of this document.
|
||||||
|
|
||||||
|
------------------------------
|
||||||
|
Locking Implementation Details
|
||||||
|
------------------------------
|
||||||
|
|
||||||
|
.. warning:: Locking rules for PTE-level page tables are very different from
|
||||||
|
locking rules for page tables at other levels.
|
||||||
|
|
||||||
|
Page table locking details
|
||||||
|
--------------------------
|
||||||
|
|
||||||
|
In addition to the locks described in the terminology section above, we have
|
||||||
|
additional locks dedicated to page tables:
|
||||||
|
|
||||||
|
* **Higher level page table locks** - Higher level page tables, that is PGD, P4D
|
||||||
|
and PUD each make use of the process address space granularity
|
||||||
|
:c:member:`!mm->page_table_lock` lock when modified.
|
||||||
|
|
||||||
|
* **Fine-grained page table locks** - PMDs and PTEs each have fine-grained locks
|
||||||
|
either kept within the folios describing the page tables or allocated
|
||||||
|
separated and pointed at by the folios if :c:macro:`!ALLOC_SPLIT_PTLOCKS` is
|
||||||
|
set. The PMD spin lock is obtained via :c:func:`!pmd_lock`, however PTEs are
|
||||||
|
mapped into higher memory (if a 32-bit system) and carefully locked via
|
||||||
|
:c:func:`!pte_offset_map_lock`.
|
||||||
|
|
||||||
|
These locks represent the minimum required to interact with each page table
|
||||||
|
level, but there are further requirements.
|
||||||
|
|
||||||
|
Importantly, note that on a **traversal** of page tables, sometimes no such
|
||||||
|
locks are taken. However, at the PTE level, at least concurrent page table
|
||||||
|
deletion must be prevented (using RCU) and the page table must be mapped into
|
||||||
|
high memory, see below.
|
||||||
|
|
||||||
|
Whether care is taken on reading the page table entries depends on the
|
||||||
|
architecture, see the section on atomicity below.
|
||||||
|
|
||||||
|
Locking rules
|
||||||
|
^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
We establish basic locking rules when interacting with page tables:
|
||||||
|
|
||||||
|
* When changing a page table entry the page table lock for that page table
|
||||||
|
**must** be held, except if you can safely assume nobody can access the page
|
||||||
|
tables concurrently (such as on invocation of :c:func:`!free_pgtables`).
|
||||||
|
* Reads from and writes to page table entries must be *appropriately*
|
||||||
|
atomic. See the section on atomicity below for details.
|
||||||
|
* Populating previously empty entries requires that the mmap or VMA locks are
|
||||||
|
held (read or write), doing so with only rmap locks would be dangerous (see
|
||||||
|
the warning below).
|
||||||
|
* As mentioned previously, zapping can be performed while simply keeping the VMA
|
||||||
|
stable, that is holding any one of the mmap, VMA or rmap locks.
|
||||||
|
|
||||||
|
.. warning:: Populating previously empty entries is dangerous as, when unmapping
|
||||||
|
VMAs, :c:func:`!vms_clear_ptes` has a window of time between
|
||||||
|
zapping (via :c:func:`!unmap_vmas`) and freeing page tables (via
|
||||||
|
:c:func:`!free_pgtables`), where the VMA is still visible in the
|
||||||
|
rmap tree. :c:func:`!free_pgtables` assumes that the zap has
|
||||||
|
already been performed and removes PTEs unconditionally (along with
|
||||||
|
all other page tables in the freed range), so installing new PTE
|
||||||
|
entries could leak memory and also cause other unexpected and
|
||||||
|
dangerous behaviour.
|
||||||
|
|
||||||
|
There are additional rules applicable when moving page tables, which we discuss
|
||||||
|
in the section on this topic below.
|
||||||
|
|
||||||
|
PTE-level page tables are different from page tables at other levels, and there
|
||||||
|
are extra requirements for accessing them:
|
||||||
|
|
||||||
|
* On 32-bit architectures, they may be in high memory (meaning they need to be
|
||||||
|
mapped into kernel memory to be accessible).
|
||||||
|
* When empty, they can be unlinked and RCU-freed while holding an mmap lock or
|
||||||
|
rmap lock for reading in combination with the PTE and PMD page table locks.
|
||||||
|
In particular, this happens in :c:func:`!retract_page_tables` when handling
|
||||||
|
:c:macro:`!MADV_COLLAPSE`.
|
||||||
|
So accessing PTE-level page tables requires at least holding an RCU read lock;
|
||||||
|
but that only suffices for readers that can tolerate racing with concurrent
|
||||||
|
page table updates such that an empty PTE is observed (in a page table that
|
||||||
|
has actually already been detached and marked for RCU freeing) while another
|
||||||
|
new page table has been installed in the same location and filled with
|
||||||
|
entries. Writers normally need to take the PTE lock and revalidate that the
|
||||||
|
PMD entry still refers to the same PTE-level page table.
|
||||||
|
|
||||||
|
To access PTE-level page tables, a helper like :c:func:`!pte_offset_map_lock` or
|
||||||
|
:c:func:`!pte_offset_map` can be used depending on stability requirements.
|
||||||
|
These map the page table into kernel memory if required, take the RCU lock, and
|
||||||
|
depending on variant, may also look up or acquire the PTE lock.
|
||||||
|
See the comment on :c:func:`!__pte_offset_map_lock`.
|
||||||
|
|
||||||
|
Atomicity
|
||||||
|
^^^^^^^^^
|
||||||
|
|
||||||
|
Regardless of page table locks, the MMU hardware concurrently updates accessed
|
||||||
|
and dirty bits (perhaps more, depending on architecture). Additionally, page
|
||||||
|
table traversal operations in parallel (though holding the VMA stable) and
|
||||||
|
functionality like GUP-fast locklessly traverses (that is reads) page tables,
|
||||||
|
without even keeping the VMA stable at all.
|
||||||
|
|
||||||
|
When performing a page table traversal and keeping the VMA stable, whether a
|
||||||
|
read must be performed once and only once or not depends on the architecture
|
||||||
|
(for instance x86-64 does not require any special precautions).
|
||||||
|
|
||||||
|
If a write is being performed, or if a read informs whether a write takes place
|
||||||
|
(on an installation of a page table entry say, for instance in
|
||||||
|
:c:func:`!__pud_install`), special care must always be taken. In these cases we
|
||||||
|
can never assume that page table locks give us entirely exclusive access, and
|
||||||
|
must retrieve page table entries once and only once.
|
||||||
|
|
||||||
|
If we are reading page table entries, then we need only ensure that the compiler
|
||||||
|
does not rearrange our loads. This is achieved via :c:func:`!pXXp_get`
|
||||||
|
functions - :c:func:`!pgdp_get`, :c:func:`!p4dp_get`, :c:func:`!pudp_get`,
|
||||||
|
:c:func:`!pmdp_get`, and :c:func:`!ptep_get`.
|
||||||
|
|
||||||
|
Each of these uses :c:func:`!READ_ONCE` to guarantee that the compiler reads
|
||||||
|
the page table entry only once.
|
||||||
|
|
||||||
|
However, if we wish to manipulate an existing page table entry and care about
|
||||||
|
the previously stored data, we must go further and use an hardware atomic
|
||||||
|
operation as, for example, in :c:func:`!ptep_get_and_clear`.
|
||||||
|
|
||||||
|
Equally, operations that do not rely on the VMA being held stable, such as
|
||||||
|
GUP-fast (see :c:func:`!gup_fast` and its various page table level handlers like
|
||||||
|
:c:func:`!gup_fast_pte_range`), must very carefully interact with page table
|
||||||
|
entries, using functions such as :c:func:`!ptep_get_lockless` and equivalent for
|
||||||
|
higher level page table levels.
|
||||||
|
|
||||||
|
Writes to page table entries must also be appropriately atomic, as established
|
||||||
|
by :c:func:`!set_pXX` functions - :c:func:`!set_pgd`, :c:func:`!set_p4d`,
|
||||||
|
:c:func:`!set_pud`, :c:func:`!set_pmd`, and :c:func:`!set_pte`.
|
||||||
|
|
||||||
|
Equally functions which clear page table entries must be appropriately atomic,
|
||||||
|
as in :c:func:`!pXX_clear` functions - :c:func:`!pgd_clear`,
|
||||||
|
:c:func:`!p4d_clear`, :c:func:`!pud_clear`, :c:func:`!pmd_clear`, and
|
||||||
|
:c:func:`!pte_clear`.
|
||||||
|
|
||||||
|
Page table installation
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Page table installation is performed with the VMA held stable explicitly by an
|
||||||
|
mmap or VMA lock in read or write mode (see the warning in the locking rules
|
||||||
|
section for details as to why).
|
||||||
|
|
||||||
|
When allocating a P4D, PUD or PMD and setting the relevant entry in the above
|
||||||
|
PGD, P4D or PUD, the :c:member:`!mm->page_table_lock` must be held. This is
|
||||||
|
acquired in :c:func:`!__p4d_alloc`, :c:func:`!__pud_alloc` and
|
||||||
|
:c:func:`!__pmd_alloc` respectively.
|
||||||
|
|
||||||
|
.. note:: :c:func:`!__pmd_alloc` actually invokes :c:func:`!pud_lock` and
|
||||||
|
:c:func:`!pud_lockptr` in turn, however at the time of writing it ultimately
|
||||||
|
references the :c:member:`!mm->page_table_lock`.
|
||||||
|
|
||||||
|
Allocating a PTE will either use the :c:member:`!mm->page_table_lock` or, if
|
||||||
|
:c:macro:`!USE_SPLIT_PMD_PTLOCKS` is defined, a lock embedded in the PMD
|
||||||
|
physical page metadata in the form of a :c:struct:`!struct ptdesc`, acquired by
|
||||||
|
:c:func:`!pmd_ptdesc` called from :c:func:`!pmd_lock` and ultimately
|
||||||
|
:c:func:`!__pte_alloc`.
|
||||||
|
|
||||||
|
Finally, modifying the contents of the PTE requires special treatment, as the
|
||||||
|
PTE page table lock must be acquired whenever we want stable and exclusive
|
||||||
|
access to entries contained within a PTE, especially when we wish to modify
|
||||||
|
them.
|
||||||
|
|
||||||
|
This is performed via :c:func:`!pte_offset_map_lock` which carefully checks to
|
||||||
|
ensure that the PTE hasn't changed from under us, ultimately invoking
|
||||||
|
:c:func:`!pte_lockptr` to obtain a spin lock at PTE granularity contained within
|
||||||
|
the :c:struct:`!struct ptdesc` associated with the physical PTE page. The lock
|
||||||
|
must be released via :c:func:`!pte_unmap_unlock`.
|
||||||
|
|
||||||
|
.. note:: There are some variants on this, such as
|
||||||
|
:c:func:`!pte_offset_map_rw_nolock` when we know we hold the PTE stable but
|
||||||
|
for brevity we do not explore this. See the comment for
|
||||||
|
:c:func:`!__pte_offset_map_lock` for more details.
|
||||||
|
|
||||||
|
When modifying data in ranges we typically only wish to allocate higher page
|
||||||
|
tables as necessary, using these locks to avoid races or overwriting anything,
|
||||||
|
and set/clear data at the PTE level as required (for instance when page faulting
|
||||||
|
or zapping).
|
||||||
|
|
||||||
|
A typical pattern taken when traversing page table entries to install a new
|
||||||
|
mapping is to optimistically determine whether the page table entry in the table
|
||||||
|
above is empty, if so, only then acquiring the page table lock and checking
|
||||||
|
again to see if it was allocated underneath us.
|
||||||
|
|
||||||
|
This allows for a traversal with page table locks only being taken when
|
||||||
|
required. An example of this is :c:func:`!__pud_alloc`.
|
||||||
|
|
||||||
|
At the leaf page table, that is the PTE, we can't entirely rely on this pattern
|
||||||
|
as we have separate PMD and PTE locks and a THP collapse for instance might have
|
||||||
|
eliminated the PMD entry as well as the PTE from under us.
|
||||||
|
|
||||||
|
This is why :c:func:`!__pte_offset_map_lock` locklessly retrieves the PMD entry
|
||||||
|
for the PTE, carefully checking it is as expected, before acquiring the
|
||||||
|
PTE-specific lock, and then *again* checking that the PMD entry is as expected.
|
||||||
|
|
||||||
|
If a THP collapse (or similar) were to occur then the lock on both pages would
|
||||||
|
be acquired, so we can ensure this is prevented while the PTE lock is held.
|
||||||
|
|
||||||
|
Installing entries this way ensures mutual exclusion on write.
|
||||||
|
|
||||||
|
Page table freeing
|
||||||
|
^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Tearing down page tables themselves is something that requires significant
|
||||||
|
care. There must be no way that page tables designated for removal can be
|
||||||
|
traversed or referenced by concurrent tasks.
|
||||||
|
|
||||||
|
It is insufficient to simply hold an mmap write lock and VMA lock (which will
|
||||||
|
prevent racing faults, and rmap operations), as a file-backed mapping can be
|
||||||
|
truncated under the :c:struct:`!struct address_space->i_mmap_rwsem` alone.
|
||||||
|
|
||||||
|
As a result, no VMA which can be accessed via the reverse mapping (either
|
||||||
|
through the :c:struct:`!struct anon_vma->rb_root` or the :c:member:`!struct
|
||||||
|
address_space->i_mmap` interval trees) can have its page tables torn down.
|
||||||
|
|
||||||
|
The operation is typically performed via :c:func:`!free_pgtables`, which assumes
|
||||||
|
either the mmap write lock has been taken (as specified by its
|
||||||
|
:c:member:`!mm_wr_locked` parameter), or that the VMA is already unreachable.
|
||||||
|
|
||||||
|
It carefully removes the VMA from all reverse mappings, however it's important
|
||||||
|
that no new ones overlap these or any route remain to permit access to addresses
|
||||||
|
within the range whose page tables are being torn down.
|
||||||
|
|
||||||
|
Additionally, it assumes that a zap has already been performed and steps have
|
||||||
|
been taken to ensure that no further page table entries can be installed between
|
||||||
|
the zap and the invocation of :c:func:`!free_pgtables`.
|
||||||
|
|
||||||
|
Since it is assumed that all such steps have been taken, page table entries are
|
||||||
|
cleared without page table locks (in the :c:func:`!pgd_clear`, :c:func:`!p4d_clear`,
|
||||||
|
:c:func:`!pud_clear`, and :c:func:`!pmd_clear` functions.
|
||||||
|
|
||||||
|
.. note:: It is possible for leaf page tables to be torn down independent of
|
||||||
|
the page tables above it as is done by
|
||||||
|
:c:func:`!retract_page_tables`, which is performed under the i_mmap
|
||||||
|
read lock, PMD, and PTE page table locks, without this level of care.
|
||||||
|
|
||||||
|
Page table moving
|
||||||
|
^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Some functions manipulate page table levels above PMD (that is PUD, P4D and PGD
|
||||||
|
page tables). Most notable of these is :c:func:`!mremap`, which is capable of
|
||||||
|
moving higher level page tables.
|
||||||
|
|
||||||
|
In these instances, it is required that **all** locks are taken, that is
|
||||||
|
the mmap lock, the VMA lock and the relevant rmap locks.
|
||||||
|
|
||||||
|
You can observe this in the :c:func:`!mremap` implementation in the functions
|
||||||
|
:c:func:`!take_rmap_locks` and :c:func:`!drop_rmap_locks` which perform the rmap
|
||||||
|
side of lock acquisition, invoked ultimately by :c:func:`!move_page_tables`.
|
||||||
|
|
||||||
|
VMA lock internals
|
||||||
|
------------------
|
||||||
|
|
||||||
|
Overview
|
||||||
|
^^^^^^^^
|
||||||
|
|
||||||
|
VMA read locking is entirely optimistic - if the lock is contended or a competing
|
||||||
|
write has started, then we do not obtain a read lock.
|
||||||
|
|
||||||
|
A VMA **read** lock is obtained by :c:func:`!lock_vma_under_rcu`, which first
|
||||||
|
calls :c:func:`!rcu_read_lock` to ensure that the VMA is looked up in an RCU
|
||||||
|
critical section, then attempts to VMA lock it via :c:func:`!vma_start_read`,
|
||||||
|
before releasing the RCU lock via :c:func:`!rcu_read_unlock`.
|
||||||
|
|
||||||
|
VMA read locks hold the read lock on the :c:member:`!vma->vm_lock` semaphore for
|
||||||
|
their duration and the caller of :c:func:`!lock_vma_under_rcu` must release it
|
||||||
|
via :c:func:`!vma_end_read`.
|
||||||
|
|
||||||
|
VMA **write** locks are acquired via :c:func:`!vma_start_write` in instances where a
|
||||||
|
VMA is about to be modified, unlike :c:func:`!vma_start_read` the lock is always
|
||||||
|
acquired. An mmap write lock **must** be held for the duration of the VMA write
|
||||||
|
lock, releasing or downgrading the mmap write lock also releases the VMA write
|
||||||
|
lock so there is no :c:func:`!vma_end_write` function.
|
||||||
|
|
||||||
|
Note that a semaphore write lock is not held across a VMA lock. Rather, a
|
||||||
|
sequence number is used for serialisation, and the write semaphore is only
|
||||||
|
acquired at the point of write lock to update this.
|
||||||
|
|
||||||
|
This ensures the semantics we require - VMA write locks provide exclusive write
|
||||||
|
access to the VMA.
|
||||||
|
|
||||||
|
Implementation details
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
The VMA lock mechanism is designed to be a lightweight means of avoiding the use
|
||||||
|
of the heavily contended mmap lock. It is implemented using a combination of a
|
||||||
|
read/write semaphore and sequence numbers belonging to the containing
|
||||||
|
:c:struct:`!struct mm_struct` and the VMA.
|
||||||
|
|
||||||
|
Read locks are acquired via :c:func:`!vma_start_read`, which is an optimistic
|
||||||
|
operation, i.e. it tries to acquire a read lock but returns false if it is
|
||||||
|
unable to do so. At the end of the read operation, :c:func:`!vma_end_read` is
|
||||||
|
called to release the VMA read lock.
|
||||||
|
|
||||||
|
Invoking :c:func:`!vma_start_read` requires that :c:func:`!rcu_read_lock` has
|
||||||
|
been called first, establishing that we are in an RCU critical section upon VMA
|
||||||
|
read lock acquisition. Once acquired, the RCU lock can be released as it is only
|
||||||
|
required for lookup. This is abstracted by :c:func:`!lock_vma_under_rcu` which
|
||||||
|
is the interface a user should use.
|
||||||
|
|
||||||
|
Writing requires the mmap to be write-locked and the VMA lock to be acquired via
|
||||||
|
:c:func:`!vma_start_write`, however the write lock is released by the termination or
|
||||||
|
downgrade of the mmap write lock so no :c:func:`!vma_end_write` is required.
|
||||||
|
|
||||||
|
All this is achieved by the use of per-mm and per-VMA sequence counts, which are
|
||||||
|
used in order to reduce complexity, especially for operations which write-lock
|
||||||
|
multiple VMAs at once.
|
||||||
|
|
||||||
|
If the mm sequence count, :c:member:`!mm->mm_lock_seq` is equal to the VMA
|
||||||
|
sequence count :c:member:`!vma->vm_lock_seq` then the VMA is write-locked. If
|
||||||
|
they differ, then it is not.
|
||||||
|
|
||||||
|
Each time the mmap write lock is released in :c:func:`!mmap_write_unlock` or
|
||||||
|
:c:func:`!mmap_write_downgrade`, :c:func:`!vma_end_write_all` is invoked which
|
||||||
|
also increments :c:member:`!mm->mm_lock_seq` via
|
||||||
|
:c:func:`!mm_lock_seqcount_end`.
|
||||||
|
|
||||||
|
This way, we ensure that, regardless of the VMA's sequence number, a write lock
|
||||||
|
is never incorrectly indicated and that when we release an mmap write lock we
|
||||||
|
efficiently release **all** VMA write locks contained within the mmap at the
|
||||||
|
same time.
|
||||||
|
|
||||||
|
Since the mmap write lock is exclusive against others who hold it, the automatic
|
||||||
|
release of any VMA locks on its release makes sense, as you would never want to
|
||||||
|
keep VMAs locked across entirely separate write operations. It also maintains
|
||||||
|
correct lock ordering.
|
||||||
|
|
||||||
|
Each time a VMA read lock is acquired, we acquire a read lock on the
|
||||||
|
:c:member:`!vma->vm_lock` read/write semaphore and hold it, while checking that
|
||||||
|
the sequence count of the VMA does not match that of the mm.
|
||||||
|
|
||||||
|
If it does, the read lock fails. If it does not, we hold the lock, excluding
|
||||||
|
writers, but permitting other readers, who will also obtain this lock under RCU.
|
||||||
|
|
||||||
|
Importantly, maple tree operations performed in :c:func:`!lock_vma_under_rcu`
|
||||||
|
are also RCU safe, so the whole read lock operation is guaranteed to function
|
||||||
|
correctly.
|
||||||
|
|
||||||
|
On the write side, we acquire a write lock on the :c:member:`!vma->vm_lock`
|
||||||
|
read/write semaphore, before setting the VMA's sequence number under this lock,
|
||||||
|
also simultaneously holding the mmap write lock.
|
||||||
|
|
||||||
|
This way, if any read locks are in effect, :c:func:`!vma_start_write` will sleep
|
||||||
|
until these are finished and mutual exclusion is achieved.
|
||||||
|
|
||||||
|
After setting the VMA's sequence number, the lock is released, avoiding
|
||||||
|
complexity with a long-term held write lock.
|
||||||
|
|
||||||
|
This clever combination of a read/write semaphore and sequence count allows for
|
||||||
|
fast RCU-based per-VMA lock acquisition (especially on page fault, though
|
||||||
|
utilised elsewhere) with minimal complexity around lock ordering.
|
||||||
|
|
||||||
|
mmap write lock downgrading
|
||||||
|
---------------------------
|
||||||
|
|
||||||
|
When an mmap write lock is held one has exclusive access to resources within the
|
||||||
|
mmap (with the usual caveats about requiring VMA write locks to avoid races with
|
||||||
|
tasks holding VMA read locks).
|
||||||
|
|
||||||
|
It is then possible to **downgrade** from a write lock to a read lock via
|
||||||
|
:c:func:`!mmap_write_downgrade` which, similar to :c:func:`!mmap_write_unlock`,
|
||||||
|
implicitly terminates all VMA write locks via :c:func:`!vma_end_write_all`, but
|
||||||
|
importantly does not relinquish the mmap lock while downgrading, therefore
|
||||||
|
keeping the locked virtual address space stable.
|
||||||
|
|
||||||
|
An interesting consequence of this is that downgraded locks are exclusive
|
||||||
|
against any other task possessing a downgraded lock (since a racing task would
|
||||||
|
have to acquire a write lock first to downgrade it, and the downgraded lock
|
||||||
|
prevents a new write lock from being obtained until the original lock is
|
||||||
|
released).
|
||||||
|
|
||||||
|
For clarity, we map read (R)/downgraded write (D)/write (W) locks against one
|
||||||
|
another showing which locks exclude the others:
|
||||||
|
|
||||||
|
.. list-table:: Lock exclusivity
|
||||||
|
:widths: 5 5 5 5
|
||||||
|
:header-rows: 1
|
||||||
|
:stub-columns: 1
|
||||||
|
|
||||||
|
* -
|
||||||
|
- R
|
||||||
|
- D
|
||||||
|
- W
|
||||||
|
* - R
|
||||||
|
- N
|
||||||
|
- N
|
||||||
|
- Y
|
||||||
|
* - D
|
||||||
|
- N
|
||||||
|
- Y
|
||||||
|
- Y
|
||||||
|
* - W
|
||||||
|
- Y
|
||||||
|
- Y
|
||||||
|
- Y
|
||||||
|
|
||||||
|
Here a Y indicates the locks in the matching row/column are mutually exclusive,
|
||||||
|
and N indicates that they are not.
|
||||||
|
|
||||||
|
Stack expansion
|
||||||
|
---------------
|
||||||
|
|
||||||
|
Stack expansion throws up additional complexities in that we cannot permit there
|
||||||
|
to be racing page faults, as a result we invoke :c:func:`!vma_start_write` to
|
||||||
|
prevent this in :c:func:`!expand_downwards` or :c:func:`!expand_upwards`.
|
||||||
|
@@ -22,65 +22,67 @@ definitions:
       doc: unused event
     -
       name: created
-      doc:
-        token, family, saddr4 | saddr6, daddr4 | daddr6, sport, dport
+      doc: >-
         A new MPTCP connection has been created. It is the good time to
         allocate memory and send ADD_ADDR if needed. Depending on the
         traffic-patterns it can take a long time until the
         MPTCP_EVENT_ESTABLISHED is sent.
+
+        Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
+        dport, server-side.
     -
       name: established
-      doc:
-        token, family, saddr4 | saddr6, daddr4 | daddr6, sport, dport
+      doc: >-
         A MPTCP connection is established (can start new subflows).
+
+        Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
+        dport, server-side.
     -
       name: closed
-      doc:
-        token
+      doc: >-
         A MPTCP connection has stopped.
+
+        Attribute: token.
     -
       name: announced
       value: 6
-      doc:
-        token, rem_id, family, daddr4 | daddr6 [, dport]
+      doc: >-
         A new address has been announced by the peer.
+
+        Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
     -
       name: removed
-      doc:
-        token, rem_id
+      doc: >-
         An address has been lost by the peer.
+
+        Attributes: token, rem_id.
     -
       name: sub-established
       value: 10
-      doc:
-        token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
-        dport, backup, if_idx [, error]
+      doc: >-
         A new subflow has been established. 'error' should not be set.
+
+        Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+        daddr6, sport, dport, backup, if_idx [, error].
     -
       name: sub-closed
-      doc:
-        token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
-        dport, backup, if_idx [, error]
+      doc: >-
         A subflow has been closed. An error (copy of sk_err) could be set if an
         error has been detected for this subflow.
+
+        Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+        daddr6, sport, dport, backup, if_idx [, error].
     -
       name: sub-priority
       value: 13
-      doc:
-        token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
-        dport, backup, if_idx [, error]
+      doc: >-
         The priority of a subflow has changed. 'error' should not be set.
+
+        Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+        daddr6, sport, dport, backup, if_idx [, error].
     -
       name: listener-created
       value: 15
-      doc:
-        family, sport, saddr4 | saddr6
+      doc: >-
         A new PM listener is created.
+
+        Attributes: family, sport, saddr4 | saddr6.
     -
       name: listener-closed
-      doc:
-        family, sport, saddr4 | saddr6
+      doc: >-
         A PM listener is closed.
+
+        Attributes: family, sport, saddr4 | saddr6.

 attribute-sets:
   -
@@ -306,8 +308,8 @@ operations:
       attributes:
         - addr
     -
       name: flush-addrs
-      doc: flush addresses
+      doc: Flush addresses
       attribute-set: endpoint
       dont-validate: [ strict ]
       flags: [ uns-admin-perm ]
@@ -351,7 +353,7 @@ operations:
         - addr-remote
     -
       name: announce
-      doc: announce new sf
+      doc: Announce new address
       attribute-set: attr
       dont-validate: [ strict ]
       flags: [ uns-admin-perm ]
@@ -362,7 +364,7 @@ operations:
         - token
     -
       name: remove
-      doc: announce removal
+      doc: Announce removal
       attribute-set: attr
       dont-validate: [ strict ]
       flags: [ uns-admin-perm ]
@@ -373,7 +375,7 @@ operations:
         - loc-id
     -
       name: subflow-create
-      doc: todo
+      doc: Create subflow
       attribute-set: attr
       dont-validate: [ strict ]
       flags: [ uns-admin-perm ]
@@ -385,7 +387,7 @@ operations:
         - addr-remote
     -
       name: subflow-destroy
-      doc: todo
+      doc: Destroy subflow
       attribute-set: attr
       dont-validate: [ strict ]
       flags: [ uns-admin-perm ]
@@ -1797,7 +1797,6 @@ F:	include/uapi/linux/if_arcnet.h

 ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
 M:	Arnd Bergmann <arnd@arndb.de>
-M:	Olof Johansson <olof@lixom.net>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	soc@lists.linux.dev
 S:	Maintained
@@ -3615,6 +3614,7 @@ F:	drivers/phy/qualcomm/phy-ath79-usb.c

 ATHEROS ATH GENERIC UTILITIES
 M:	Kalle Valo <kvalo@kernel.org>
+M:	Jeff Johnson <jjohnson@kernel.org>
 L:	linux-wireless@vger.kernel.org
 S:	Supported
 F:	drivers/net/wireless/ath/*
@@ -7355,7 +7355,7 @@ F:	drivers/gpu/drm/panel/panel-novatek-nt36672a.c
 DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
 M:	Karol Herbst <kherbst@redhat.com>
 M:	Lyude Paul <lyude@redhat.com>
-M:	Danilo Krummrich <dakr@redhat.com>
+M:	Danilo Krummrich <dakr@kernel.org>
 L:	dri-devel@lists.freedesktop.org
 L:	nouveau@lists.freedesktop.org
 S:	Supported
@@ -8932,7 +8932,7 @@ F:	include/linux/arm_ffa.h
 FIRMWARE LOADER (request_firmware)
 M:	Luis Chamberlain <mcgrof@kernel.org>
 M:	Russ Weight <russ.weight@linux.dev>
-M:	Danilo Krummrich <dakr@redhat.com>
+M:	Danilo Krummrich <dakr@kernel.org>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	Documentation/firmware_class/
2	Makefile

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*
@@ -6,6 +6,7 @@
 config ARC
 	def_bool y
 	select ARC_TIMERS
+	select ARCH_HAS_CPU_CACHE_ALIASING
 	select ARCH_HAS_CACHE_LINE_SIZE
 	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select ARCH_HAS_DMA_PREP_COHERENT

8	arch/arc/include/asm/cachetype.h	Normal file

@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARC_CACHETYPE_H
+#define __ASM_ARC_CACHETYPE_H
+
+#define cpu_dcache_is_aliasing()	false
+#define cpu_icache_is_aliasing()	true
+
+#endif
@@ -6,6 +6,7 @@ menuconfig ARCH_MXC
 	select CLKSRC_IMX_GPT
 	select GENERIC_IRQ_CHIP
 	select GPIOLIB
+	select PINCTRL
 	select PM_OPP if PM
 	select SOC_BUS
 	select SRAM
@@ -67,7 +67,7 @@
 		l2_cache_l0: l2-cache-l0 {
 			compatible = "cache";
 			cache-size = <0x80000>;
-			cache-line-size = <128>;
+			cache-line-size = <64>;
 			cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
 			cache-level = <2>;
 			cache-unified;
@@ -91,7 +91,7 @@
 		l2_cache_l1: l2-cache-l1 {
 			compatible = "cache";
 			cache-size = <0x80000>;
-			cache-line-size = <128>;
+			cache-line-size = <64>;
 			cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
 			cache-level = <2>;
 			cache-unified;
@@ -115,7 +115,7 @@
 		l2_cache_l2: l2-cache-l2 {
 			compatible = "cache";
 			cache-size = <0x80000>;
-			cache-line-size = <128>;
+			cache-line-size = <64>;
 			cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
 			cache-level = <2>;
 			cache-unified;
@@ -139,7 +139,7 @@
 		l2_cache_l3: l2-cache-l3 {
 			compatible = "cache";
 			cache-size = <0x80000>;
-			cache-line-size = <128>;
+			cache-line-size = <64>;
 			cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
 			cache-level = <2>;
 			cache-unified;
@@ -36,15 +36,8 @@
 #include <asm/traps.h>
 #include <asm/vdso.h>

-#ifdef CONFIG_ARM64_GCS
-
 #define GCS_SIGNAL_CAP(addr) (((unsigned long)addr) & GCS_CAP_ADDR_MASK)

-static bool gcs_signal_cap_valid(u64 addr, u64 val)
-{
-	return val == GCS_SIGNAL_CAP(addr);
-}
-#endif
-
 /*
  * Do a signal return; undo the signal stack. These are aligned to 128-bit.
  */
@@ -1062,8 +1055,7 @@ static int restore_sigframe(struct pt_regs *regs,
 #ifdef CONFIG_ARM64_GCS
 static int gcs_restore_signal(void)
 {
-	unsigned long __user *gcspr_el0;
-	u64 cap;
+	u64 gcspr_el0, cap;
 	int ret;

 	if (!system_supports_gcs())
@@ -1072,7 +1064,7 @@ static int gcs_restore_signal(void)
 	if (!(current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE))
 		return 0;

-	gcspr_el0 = (unsigned long __user *)read_sysreg_s(SYS_GCSPR_EL0);
+	gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);

 	/*
 	 * Ensure that any changes to the GCS done via GCS operations
@@ -1087,22 +1079,23 @@ static int gcs_restore_signal(void)
 	 * then faults will be generated on GCS operations - the main
 	 * concern is to protect GCS pages.
 	 */
-	ret = copy_from_user(&cap, gcspr_el0, sizeof(cap));
+	ret = copy_from_user(&cap, (unsigned long __user *)gcspr_el0,
+			     sizeof(cap));
 	if (ret)
 		return -EFAULT;

 	/*
 	 * Check that the cap is the actual GCS before replacing it.
 	 */
-	if (!gcs_signal_cap_valid((u64)gcspr_el0, cap))
+	if (cap != GCS_SIGNAL_CAP(gcspr_el0))
 		return -EINVAL;

 	/* Invalidate the token to prevent reuse */
-	put_user_gcs(0, (__user void*)gcspr_el0, &ret);
+	put_user_gcs(0, (unsigned long __user *)gcspr_el0, &ret);
 	if (ret != 0)
 		return -EFAULT;

-	write_sysreg_s(gcspr_el0 + 1, SYS_GCSPR_EL0);
+	write_sysreg_s(gcspr_el0 + 8, SYS_GCSPR_EL0);

 	return 0;
 }
@@ -1421,7 +1414,7 @@ static int get_sigframe(struct rt_sigframe_user_layout *user,

 static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
 {
-	unsigned long __user *gcspr_el0;
+	u64 gcspr_el0;
 	int ret = 0;

 	if (!system_supports_gcs())
@@ -1434,18 +1427,20 @@ static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
 	 * We are entering a signal handler, current register state is
 	 * active.
 	 */
-	gcspr_el0 = (unsigned long __user *)read_sysreg_s(SYS_GCSPR_EL0);
+	gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);

 	/*
 	 * Push a cap and the GCS entry for the trampoline onto the GCS.
 	 */
-	put_user_gcs((unsigned long)sigtramp, gcspr_el0 - 2, &ret);
-	put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 1), gcspr_el0 - 1, &ret);
+	put_user_gcs((unsigned long)sigtramp,
+		     (unsigned long __user *)(gcspr_el0 - 16), &ret);
+	put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 8),
+		     (unsigned long __user *)(gcspr_el0 - 8), &ret);
 	if (ret != 0)
 		return ret;

-	gcspr_el0 -= 2;
-	write_sysreg_s((unsigned long)gcspr_el0, SYS_GCSPR_EL0);
+	gcspr_el0 -= 16;
+	write_sysreg_s(gcspr_el0, SYS_GCSPR_EL0);

 	return 0;
 }
@@ -143,11 +143,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		   " DIV:\t\t%s\n"
 		   " BMX:\t\t%s\n"
 		   " CDX:\t\t%s\n",
-		   cpuinfo.has_mul ? "yes" : "no",
-		   cpuinfo.has_mulx ? "yes" : "no",
-		   cpuinfo.has_div ? "yes" : "no",
-		   cpuinfo.has_bmx ? "yes" : "no",
-		   cpuinfo.has_cdx ? "yes" : "no");
+		   str_yes_no(cpuinfo.has_mul),
+		   str_yes_no(cpuinfo.has_mulx),
+		   str_yes_no(cpuinfo.has_div),
+		   str_yes_no(cpuinfo.has_bmx),
+		   str_yes_no(cpuinfo.has_cdx));

 	seq_printf(m,
 		   "Icache:\t\t%ukB, line length: %u\n",
@@ -208,6 +208,7 @@ CONFIG_FB_ATY=y
 CONFIG_FB_ATY_CT=y
 CONFIG_FB_ATY_GX=y
 CONFIG_FB_3DFX=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
@@ -716,6 +716,7 @@ CONFIG_FB_TRIDENT=m
 CONFIG_FB_SM501=m
 CONFIG_FB_IBM_GXT4500=y
 CONFIG_LCD_PLATFORM=m
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_LOGO=y
|
@ -464,7 +464,43 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
|
|||||||
return VM_FAULT_SIGBUS;
|
return VM_FAULT_SIGBUS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* During mmap() paste address, mapping VMA is saved in VAS window
|
||||||
|
* struct which is used to unmap during migration if the window is
|
||||||
|
* still open. But the user space can remove this mapping with
|
||||||
|
* munmap() before closing the window and the VMA address will
|
||||||
|
* be invalid. Set VAS window VMA to NULL in this function which
|
||||||
|
* is called before VMA free.
|
||||||
|
*/
|
||||||
|
static void vas_mmap_close(struct vm_area_struct *vma)
|
||||||
|
{
|
||||||
|
struct file *fp = vma->vm_file;
|
||||||
|
struct coproc_instance *cp_inst = fp->private_data;
|
||||||
|
struct vas_window *txwin;
|
||||||
|
|
||||||
|
/* Should not happen */
|
||||||
|
if (!cp_inst || !cp_inst->txwin) {
|
||||||
|
pr_err("No attached VAS window for the paste address mmap\n");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
txwin = cp_inst->txwin;
|
||||||
|
/*
|
||||||
|
* task_ref.vma is set in coproc_mmap() during mmap paste
|
||||||
|
* address. So it has to be the same VMA that is getting freed.
|
||||||
|
*/
|
||||||
|
if (WARN_ON(txwin->task_ref.vma != vma)) {
|
||||||
|
pr_err("Invalid paste address mmaping\n");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
mutex_lock(&txwin->task_ref.mmap_mutex);
|
||||||
|
txwin->task_ref.vma = NULL;
|
||||||
|
mutex_unlock(&txwin->task_ref.mmap_mutex);
|
||||||
|
}
|
||||||
|
|
||||||
static const struct vm_operations_struct vas_vm_ops = {
|
static const struct vm_operations_struct vas_vm_ops = {
|
||||||
|
.close = vas_mmap_close,
|
||||||
.fault = vas_mmap_fault,
|
.fault = vas_mmap_fault,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -429,6 +429,16 @@ static struct event_constraint intel_lnc_event_constraints[] = {
|
|||||||
EVENT_CONSTRAINT_END
|
EVENT_CONSTRAINT_END
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static struct extra_reg intel_lnc_extra_regs[] __read_mostly = {
|
||||||
|
INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0),
|
||||||
|
INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1),
|
||||||
|
INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
|
||||||
|
INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
|
||||||
|
INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
|
||||||
|
INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
|
||||||
|
INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
|
||||||
|
EVENT_EXTRA_END
|
||||||
|
};
|
||||||
|
|
||||||
EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
|
EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
|
||||||
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
|
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
|
||||||
@ -6422,7 +6432,7 @@ static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
|
|||||||
intel_pmu_init_glc(pmu);
|
intel_pmu_init_glc(pmu);
|
||||||
hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
|
hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
|
||||||
hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
|
hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
|
||||||
hybrid(pmu, extra_regs) = intel_rwc_extra_regs;
|
hybrid(pmu, extra_regs) = intel_lnc_extra_regs;
|
||||||
}
|
}
|
||||||
|
|
||||||
static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
|
static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
|
||||||
|
@ -2517,6 +2517,7 @@ void __init intel_ds_init(void)
|
|||||||
x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
|
x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
|
||||||
break;
|
break;
|
||||||
|
|
||||||
|
case 6:
|
||||||
case 5:
|
case 5:
|
||||||
x86_pmu.pebs_ept = 1;
|
x86_pmu.pebs_ept = 1;
|
||||||
fallthrough;
|
fallthrough;
|
||||||
|
@ -1910,6 +1910,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
|
|||||||
X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &adl_uncore_init),
|
X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &adl_uncore_init),
|
||||||
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_uncore_init),
|
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_uncore_init),
|
||||||
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_uncore_init),
|
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_uncore_init),
|
||||||
|
X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &gnr_uncore_init),
|
||||||
{},
|
{},
|
||||||
};
|
};
|
||||||
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
|
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
|
||||||
|
@ -452,6 +452,7 @@
|
|||||||
#define X86_FEATURE_SME_COHERENT (19*32+10) /* AMD hardware-enforced cache coherency */
|
#define X86_FEATURE_SME_COHERENT (19*32+10) /* AMD hardware-enforced cache coherency */
|
||||||
#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" AMD SEV-ES full debug state swap support */
|
#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" AMD SEV-ES full debug state swap support */
|
||||||
#define X86_FEATURE_SVSM (19*32+28) /* "svsm" SVSM present */
|
#define X86_FEATURE_SVSM (19*32+28) /* "svsm" SVSM present */
|
||||||
|
#define X86_FEATURE_HV_INUSE_WR_ALLOWED (19*32+30) /* Allow Write to in-use hypervisor-owned pages */
|
||||||
|
|
||||||
/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
|
/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
|
||||||
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */
|
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */
|
||||||
|
@ -81,6 +81,34 @@ static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code)
|
|||||||
|
|
||||||
static __ro_after_init bool ibt_fatal = true;
|
static __ro_after_init bool ibt_fatal = true;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* By definition, all missing-ENDBRANCH #CPs are a result of WFE && !ENDBR.
|
||||||
|
*
|
||||||
|
* For the kernel IBT no ENDBR selftest where #CPs are deliberately triggered,
|
||||||
|
* the WFE state of the interrupted context needs to be cleared to let execution
|
||||||
|
* continue. Otherwise when the CPU resumes from the instruction that just
|
||||||
|
* caused the previous #CP, another missing-ENDBRANCH #CP is raised and the CPU
|
||||||
|
* enters a dead loop.
|
||||||
|
*
|
||||||
|
* This is not a problem with IDT because it doesn't preserve WFE and IRET doesn't
|
||||||
|
* set WFE. But FRED provides space on the entry stack (in an expanded CS area)
|
||||||
|
* to save and restore the WFE state, thus the WFE state is no longer clobbered,
|
||||||
|
* so software must clear it.
|
||||||
|
*/
|
||||||
|
static void ibt_clear_fred_wfe(struct pt_regs *regs)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* No need to do any FRED checks.
|
||||||
|
*
|
||||||
|
* For IDT event delivery, the high-order 48 bits of CS are pushed
|
||||||
|
* as 0s into the stack, and later IRET ignores these bits.
|
||||||
|
*
|
||||||
|
* For FRED, a test to check if fred_cs.wfe is set would be dropped
|
||||||
|
* by compilers.
|
||||||
|
*/
|
||||||
|
regs->fred_cs.wfe = 0;
|
||||||
|
}
|
||||||
|
|
||||||
static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
|
static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
|
||||||
{
|
{
|
||||||
if ((error_code & CP_EC) != CP_ENDBR) {
|
if ((error_code & CP_EC) != CP_ENDBR) {
|
||||||
@ -90,6 +118,7 @@ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
|
|||||||
|
|
||||||
if (unlikely(regs->ip == (unsigned long)&ibt_selftest_noendbr)) {
|
if (unlikely(regs->ip == (unsigned long)&ibt_selftest_noendbr)) {
|
||||||
regs->ax = 0;
|
regs->ax = 0;
|
||||||
|
ibt_clear_fred_wfe(regs);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -97,6 +126,7 @@ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
|
|||||||
if (!ibt_fatal) {
|
if (!ibt_fatal) {
|
||||||
printk(KERN_DEFAULT CUT_HERE);
|
printk(KERN_DEFAULT CUT_HERE);
|
||||||
__warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
|
__warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
|
||||||
|
ibt_clear_fred_wfe(regs);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
BUG();
|
BUG();
|
||||||
|
@ -3364,18 +3364,6 @@ static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
|
|
||||||
{
|
|
||||||
if (fault->exec)
|
|
||||||
return is_executable_pte(spte);
|
|
||||||
|
|
||||||
if (fault->write)
|
|
||||||
return is_writable_pte(spte);
|
|
||||||
|
|
||||||
/* Fault was on Read access */
|
|
||||||
return spte & PT_PRESENT_MASK;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Returns the last level spte pointer of the shadow page walk for the given
|
* Returns the last level spte pointer of the shadow page walk for the given
|
||||||
* gpa, and sets *spte to the spte value. This spte may be non-preset. If no
|
* gpa, and sets *spte to the spte value. This spte may be non-preset. If no
|
||||||
|
@ -461,6 +461,23 @@ static inline bool is_mmu_writable_spte(u64 spte)
|
|||||||
return spte & shadow_mmu_writable_mask;
|
return spte & shadow_mmu_writable_mask;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Returns true if the access indicated by @fault is allowed by the existing
|
||||||
|
* SPTE protections. Note, the caller is responsible for checking that the
|
||||||
|
* SPTE is a shadow-present, leaf SPTE (either before or after).
|
||||||
|
*/
|
||||||
|
static inline bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
|
||||||
|
{
|
||||||
|
if (fault->exec)
|
||||||
|
return is_executable_pte(spte);
|
||||||
|
|
||||||
|
if (fault->write)
|
||||||
|
return is_writable_pte(spte);
|
||||||
|
|
||||||
|
/* Fault was on Read access */
|
||||||
|
return spte & PT_PRESENT_MASK;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If the MMU-writable flag is cleared, i.e. the SPTE is write-protected for
|
* If the MMU-writable flag is cleared, i.e. the SPTE is write-protected for
|
||||||
* write-tracking, remote TLBs must be flushed, even if the SPTE was read-only,
|
* write-tracking, remote TLBs must be flushed, even if the SPTE was read-only,
|
||||||
|
@ -985,6 +985,11 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
|
|||||||
if (fault->prefetch && is_shadow_present_pte(iter->old_spte))
|
if (fault->prefetch && is_shadow_present_pte(iter->old_spte))
|
||||||
return RET_PF_SPURIOUS;
|
return RET_PF_SPURIOUS;
|
||||||
|
|
||||||
|
if (is_shadow_present_pte(iter->old_spte) &&
|
||||||
|
is_access_allowed(fault, iter->old_spte) &&
|
||||||
|
is_last_spte(iter->old_spte, iter->level))
|
||||||
|
return RET_PF_SPURIOUS;
|
||||||
|
|
||||||
if (unlikely(!fault->slot))
|
if (unlikely(!fault->slot))
|
||||||
new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
|
new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
|
||||||
else
|
else
|
||||||
|
@ -1199,6 +1199,12 @@ bool avic_hardware_setup(void)
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (cc_platform_has(CC_ATTR_HOST_SEV_SNP) &&
|
||||||
|
!boot_cpu_has(X86_FEATURE_HV_INUSE_WR_ALLOWED)) {
|
||||||
|
pr_warn("AVIC disabled: missing HvInUseWrAllowed on SNP-enabled system\n");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
if (boot_cpu_has(X86_FEATURE_AVIC)) {
|
if (boot_cpu_has(X86_FEATURE_AVIC)) {
|
||||||
pr_info("AVIC enabled\n");
|
pr_info("AVIC enabled\n");
|
||||||
} else if (force_avic) {
|
} else if (force_avic) {
|
||||||
|
@ -3201,15 +3201,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
|
|||||||
if (data & ~supported_de_cfg)
|
if (data & ~supported_de_cfg)
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
/*
|
|
||||||
* Don't let the guest change the host-programmed value. The
|
|
||||||
* MSR is very model specific, i.e. contains multiple bits that
|
|
||||||
* are completely unknown to KVM, and the one bit known to KVM
|
|
||||||
* is simply a reflection of hardware capabilities.
|
|
||||||
*/
|
|
||||||
if (!msr->host_initiated && data != svm->msr_decfg)
|
|
||||||
return 1;
|
|
||||||
|
|
||||||
svm->msr_decfg = data;
|
svm->msr_decfg = data;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -2,7 +2,7 @@
|
|||||||
#ifndef __KVM_X86_VMX_POSTED_INTR_H
|
#ifndef __KVM_X86_VMX_POSTED_INTR_H
|
||||||
#define __KVM_X86_VMX_POSTED_INTR_H
|
#define __KVM_X86_VMX_POSTED_INTR_H
|
||||||
|
|
||||||
#include <linux/find.h>
|
#include <linux/bitmap.h>
|
||||||
#include <asm/posted_intr.h>
|
#include <asm/posted_intr.h>
|
||||||
|
|
||||||
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu);
|
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu);
|
||||||
|
@ -9976,7 +9976,7 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
|
|||||||
{
|
{
|
||||||
u64 ret = vcpu->run->hypercall.ret;
|
u64 ret = vcpu->run->hypercall.ret;
|
||||||
|
|
||||||
if (!is_64_bit_mode(vcpu))
|
if (!is_64_bit_hypercall(vcpu))
|
||||||
ret = (u32)ret;
|
ret = (u32)ret;
|
||||||
kvm_rax_write(vcpu, ret);
|
kvm_rax_write(vcpu, ret);
|
||||||
++vcpu->stat.hypercalls;
|
++vcpu->stat.hypercalls;
|
||||||
@ -12724,6 +12724,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
|
|||||||
kvm_hv_init_vm(kvm);
|
kvm_hv_init_vm(kvm);
|
||||||
kvm_xen_init_vm(kvm);
|
kvm_xen_init_vm(kvm);
|
||||||
|
|
||||||
|
if (ignore_msrs && !report_ignored_msrs) {
|
||||||
|
pr_warn_once("Running KVM with ignore_msrs=1 and report_ignored_msrs=0 is not a\n"
|
||||||
|
"a supported configuration. Lying to the guest about the existence of MSRs\n"
|
||||||
|
"may cause the guest operating system to hang or produce errors. If a guest\n"
|
||||||
|
"does not run without ignore_msrs=1, please report it to kvm@vger.kernel.org.\n");
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
out_uninit_mmu:
|
out_uninit_mmu:
|
||||||
|
@ -155,8 +155,7 @@ int set_blocksize(struct file *file, int size)
|
|||||||
struct inode *inode = file->f_mapping->host;
|
struct inode *inode = file->f_mapping->host;
|
||||||
struct block_device *bdev = I_BDEV(inode);
|
struct block_device *bdev = I_BDEV(inode);
|
||||||
|
|
||||||
/* Size must be a power of two, and between 512 and PAGE_SIZE */
|
if (blk_validate_block_size(size))
|
||||||
if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
/* Size cannot be smaller than the size supported by the device */
|
/* Size cannot be smaller than the size supported by the device */
|
||||||
|
@ -275,13 +275,15 @@ void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
|
|||||||
struct blk_mq_hw_ctx *hctx;
|
struct blk_mq_hw_ctx *hctx;
|
||||||
unsigned long i;
|
unsigned long i;
|
||||||
|
|
||||||
lockdep_assert_held(&q->sysfs_dir_lock);
|
mutex_lock(&q->sysfs_dir_lock);
|
||||||
|
|
||||||
if (!q->mq_sysfs_init_done)
|
if (!q->mq_sysfs_init_done)
|
||||||
return;
|
goto unlock;
|
||||||
|
|
||||||
queue_for_each_hw_ctx(q, hctx, i)
|
queue_for_each_hw_ctx(q, hctx, i)
|
||||||
blk_mq_unregister_hctx(hctx);
|
blk_mq_unregister_hctx(hctx);
|
||||||
|
|
||||||
|
unlock:
|
||||||
|
mutex_unlock(&q->sysfs_dir_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
|
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
|
||||||
@ -290,10 +292,9 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
|
|||||||
unsigned long i;
|
unsigned long i;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
lockdep_assert_held(&q->sysfs_dir_lock);
|
mutex_lock(&q->sysfs_dir_lock);
|
||||||
|
|
||||||
if (!q->mq_sysfs_init_done)
|
if (!q->mq_sysfs_init_done)
|
||||||
return ret;
|
goto unlock;
|
||||||
|
|
||||||
queue_for_each_hw_ctx(q, hctx, i) {
|
queue_for_each_hw_ctx(q, hctx, i) {
|
||||||
ret = blk_mq_register_hctx(hctx);
|
ret = blk_mq_register_hctx(hctx);
|
||||||
@ -301,5 +302,8 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
unlock:
|
||||||
|
mutex_unlock(&q->sysfs_dir_lock);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@ -4412,6 +4412,15 @@ struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
|
EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Only hctx removed from cpuhp list can be reused
|
||||||
|
*/
|
||||||
|
static bool blk_mq_hctx_is_reusable(struct blk_mq_hw_ctx *hctx)
|
||||||
|
{
|
||||||
|
return hlist_unhashed(&hctx->cpuhp_online) &&
|
||||||
|
hlist_unhashed(&hctx->cpuhp_dead);
|
||||||
|
}
|
||||||
|
|
||||||
static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
|
static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
|
||||||
struct blk_mq_tag_set *set, struct request_queue *q,
|
struct blk_mq_tag_set *set, struct request_queue *q,
|
||||||
int hctx_idx, int node)
|
int hctx_idx, int node)
|
||||||
@ -4421,7 +4430,7 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
|
|||||||
/* reuse dead hctx first */
|
/* reuse dead hctx first */
|
||||||
spin_lock(&q->unused_hctx_lock);
|
spin_lock(&q->unused_hctx_lock);
|
||||||
list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
|
list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
|
||||||
if (tmp->numa_node == node) {
|
if (tmp->numa_node == node && blk_mq_hctx_is_reusable(tmp)) {
|
||||||
hctx = tmp;
|
hctx = tmp;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -4453,8 +4462,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
|
|||||||
unsigned long i, j;
|
unsigned long i, j;
|
||||||
|
|
||||||
/* protect against switching io scheduler */
|
/* protect against switching io scheduler */
|
||||||
lockdep_assert_held(&q->sysfs_lock);
|
mutex_lock(&q->sysfs_lock);
|
||||||
|
|
||||||
for (i = 0; i < set->nr_hw_queues; i++) {
|
for (i = 0; i < set->nr_hw_queues; i++) {
|
||||||
int old_node;
|
int old_node;
|
||||||
int node = blk_mq_get_hctx_node(set, i);
|
int node = blk_mq_get_hctx_node(set, i);
|
||||||
@ -4487,6 +4495,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
|
|||||||
|
|
||||||
xa_for_each_start(&q->hctx_table, j, hctx, j)
|
xa_for_each_start(&q->hctx_table, j, hctx, j)
|
||||||
blk_mq_exit_hctx(q, set, hctx, j);
|
blk_mq_exit_hctx(q, set, hctx, j);
|
||||||
|
mutex_unlock(&q->sysfs_lock);
|
||||||
|
|
||||||
/* unregister cpuhp callbacks for exited hctxs */
|
/* unregister cpuhp callbacks for exited hctxs */
|
||||||
blk_mq_remove_hw_queues_cpuhp(q);
|
blk_mq_remove_hw_queues_cpuhp(q);
|
||||||
@ -4518,14 +4527,10 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
|
|||||||
|
|
||||||
xa_init(&q->hctx_table);
|
xa_init(&q->hctx_table);
|
||||||
|
|
||||||
mutex_lock(&q->sysfs_lock);
|
|
||||||
|
|
||||||
blk_mq_realloc_hw_ctxs(set, q);
|
blk_mq_realloc_hw_ctxs(set, q);
|
||||||
if (!q->nr_hw_queues)
|
if (!q->nr_hw_queues)
|
||||||
goto err_hctxs;
|
goto err_hctxs;
|
||||||
|
|
||||||
mutex_unlock(&q->sysfs_lock);
|
|
||||||
|
|
||||||
INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
|
INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
|
||||||
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
|
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
|
||||||
|
|
||||||
@ -4544,7 +4549,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
|
|||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
err_hctxs:
|
err_hctxs:
|
||||||
mutex_unlock(&q->sysfs_lock);
|
|
||||||
blk_mq_release(q);
|
blk_mq_release(q);
|
||||||
err_exit:
|
err_exit:
|
||||||
q->mq_ops = NULL;
|
q->mq_ops = NULL;
|
||||||
@ -4925,12 +4929,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
|
|||||||
return false;
|
return false;
|
||||||
|
|
||||||
/* q->elevator needs protection from ->sysfs_lock */
|
/* q->elevator needs protection from ->sysfs_lock */
|
||||||
lockdep_assert_held(&q->sysfs_lock);
|
mutex_lock(&q->sysfs_lock);
|
||||||
|
|
||||||
/* the check has to be done with holding sysfs_lock */
|
/* the check has to be done with holding sysfs_lock */
|
||||||
if (!q->elevator) {
|
if (!q->elevator) {
|
||||||
kfree(qe);
|
kfree(qe);
|
||||||
goto out;
|
goto unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
INIT_LIST_HEAD(&qe->node);
|
INIT_LIST_HEAD(&qe->node);
|
||||||
@ -4940,7 +4944,9 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
|
|||||||
__elevator_get(qe->type);
|
__elevator_get(qe->type);
|
||||||
list_add(&qe->node, head);
|
list_add(&qe->node, head);
|
||||||
elevator_disable(q);
|
elevator_disable(q);
|
||||||
out:
|
unlock:
|
||||||
|
mutex_unlock(&q->sysfs_lock);
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -4969,9 +4975,11 @@ static void blk_mq_elv_switch_back(struct list_head *head,
|
|||||||
list_del(&qe->node);
|
list_del(&qe->node);
|
||||||
kfree(qe);
|
kfree(qe);
|
||||||
|
|
||||||
|
mutex_lock(&q->sysfs_lock);
|
||||||
elevator_switch(q, t);
|
elevator_switch(q, t);
|
||||||
/* drop the reference acquired in blk_mq_elv_switch_none */
|
/* drop the reference acquired in blk_mq_elv_switch_none */
|
||||||
elevator_put(t);
|
elevator_put(t);
|
||||||
|
mutex_unlock(&q->sysfs_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
|
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
|
||||||
@ -4991,11 +4999,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
|
|||||||
if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
|
if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
list_for_each_entry(q, &set->tag_list, tag_set_list) {
|
list_for_each_entry(q, &set->tag_list, tag_set_list)
|
||||||
mutex_lock(&q->sysfs_dir_lock);
|
|
||||||
mutex_lock(&q->sysfs_lock);
|
|
||||||
blk_mq_freeze_queue(q);
|
blk_mq_freeze_queue(q);
|
||||||
}
|
|
||||||
/*
|
/*
|
||||||
* Switch IO scheduler to 'none', cleaning up the data associated
|
* Switch IO scheduler to 'none', cleaning up the data associated
|
||||||
* with the previous scheduler. We will switch back once we are done
|
* with the previous scheduler. We will switch back once we are done
|
||||||
@ -5051,11 +5056,8 @@ switch_back:
|
|||||||
list_for_each_entry(q, &set->tag_list, tag_set_list)
|
list_for_each_entry(q, &set->tag_list, tag_set_list)
|
||||||
blk_mq_elv_switch_back(&head, q);
|
blk_mq_elv_switch_back(&head, q);
|
||||||
|
|
||||||
list_for_each_entry(q, &set->tag_list, tag_set_list) {
|
list_for_each_entry(q, &set->tag_list, tag_set_list)
|
||||||
blk_mq_unfreeze_queue(q);
|
blk_mq_unfreeze_queue(q);
|
||||||
mutex_unlock(&q->sysfs_lock);
|
|
||||||
mutex_unlock(&q->sysfs_dir_lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Free the excess tags when nr_hw_queues shrink. */
|
/* Free the excess tags when nr_hw_queues shrink. */
|
||||||
for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
|
for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
|
||||||
|
@ -706,11 +706,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
|
|||||||
if (entry->load_module)
|
if (entry->load_module)
|
||||||
entry->load_module(disk, page, length);
|
entry->load_module(disk, page, length);
|
||||||
|
|
||||||
mutex_lock(&q->sysfs_lock);
|
|
||||||
blk_mq_freeze_queue(q);
|
blk_mq_freeze_queue(q);
|
||||||
|
mutex_lock(&q->sysfs_lock);
|
||||||
res = entry->store(disk, page, length);
|
res = entry->store(disk, page, length);
|
||||||
blk_mq_unfreeze_queue(q);
|
|
||||||
mutex_unlock(&q->sysfs_lock);
|
mutex_unlock(&q->sysfs_lock);
|
||||||
|
blk_mq_unfreeze_queue(q);
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -409,7 +409,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
|
|||||||
mutex_lock(&bo->lock);
|
mutex_lock(&bo->lock);
|
||||||
|
|
||||||
drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
|
drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
|
||||||
bo, bo->ctx->id, bo->vpu_addr, bo->base.base.size,
|
bo, bo->ctx ? bo->ctx->id : 0, bo->vpu_addr, bo->base.base.size,
|
||||||
bo->flags, kref_read(&bo->base.base.refcount));
|
bo->flags, kref_read(&bo->base.base.refcount));
|
||||||
|
|
||||||
if (bo->base.pages)
|
if (bo->base.pages)
|
||||||
|
@ -612,18 +612,22 @@ int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
|
|||||||
if (!ivpu_mmu_ensure_pgd(vdev, &vdev->rctx.pgtable)) {
|
if (!ivpu_mmu_ensure_pgd(vdev, &vdev->rctx.pgtable)) {
|
||||||
ivpu_err(vdev, "Failed to allocate root page table for reserved context\n");
|
ivpu_err(vdev, "Failed to allocate root page table for reserved context\n");
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
goto unlock;
|
goto err_ctx_fini;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable);
|
ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
ivpu_err(vdev, "Failed to set context descriptor for reserved context\n");
|
ivpu_err(vdev, "Failed to set context descriptor for reserved context\n");
|
||||||
goto unlock;
|
goto err_ctx_fini;
|
||||||
}
|
}
|
||||||
|
|
||||||
unlock:
|
|
||||||
mutex_unlock(&vdev->rctx.lock);
|
mutex_unlock(&vdev->rctx.lock);
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
err_ctx_fini:
|
||||||
|
mutex_unlock(&vdev->rctx.lock);
|
||||||
|
ivpu_mmu_context_fini(vdev, &vdev->rctx);
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
|
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
|
||||||
|
@ -378,6 +378,7 @@ void ivpu_pm_init(struct ivpu_device *vdev)
|
|||||||
|
|
||||||
pm_runtime_use_autosuspend(dev);
|
pm_runtime_use_autosuspend(dev);
|
||||||
pm_runtime_set_autosuspend_delay(dev, delay);
|
pm_runtime_set_autosuspend_delay(dev, delay);
|
||||||
|
pm_runtime_set_active(dev);
|
||||||
|
|
||||||
ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
|
ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
|
||||||
}
|
}
|
||||||
@ -392,7 +393,6 @@ void ivpu_pm_enable(struct ivpu_device *vdev)
|
|||||||
{
|
{
|
||||||
struct device *dev = vdev->drm.dev;
|
struct device *dev = vdev->drm.dev;
|
||||||
|
|
||||||
pm_runtime_set_active(dev);
|
|
||||||
pm_runtime_allow(dev);
|
pm_runtime_allow(dev);
|
||||||
pm_runtime_mark_last_busy(dev);
|
pm_runtime_mark_last_busy(dev);
|
||||||
pm_runtime_put_autosuspend(dev);
|
pm_runtime_put_autosuspend(dev);
|
||||||
|
@ -135,10 +135,10 @@ config ACPI_REV_OVERRIDE_POSSIBLE
|
|||||||
config ACPI_EC
|
config ACPI_EC
|
||||||
bool "Embedded Controller"
|
bool "Embedded Controller"
|
||||||
depends on HAS_IOPORT
|
depends on HAS_IOPORT
|
||||||
default X86
|
default X86 || LOONGARCH
|
||||||
help
|
help
|
||||||
This driver handles communication with the microcontroller
|
This driver handles communication with the microcontroller
|
||||||
on many x86 laptops and other machines.
|
on many x86/LoongArch laptops and other machines.
|
||||||
|
|
||||||
config ACPI_EC_DEBUGFS
|
config ACPI_EC_DEBUGFS
|
||||||
tristate "EC read/write access through /sys/kernel/debug/ec"
|
tristate "EC read/write access through /sys/kernel/debug/ec"
|
||||||
|
@ -489,7 +489,7 @@ config IMG_ASCII_LCD
|
|||||||
|
|
||||||
config HT16K33
|
config HT16K33
|
||||||
tristate "Holtek Ht16K33 LED controller with keyscan"
|
tristate "Holtek Ht16K33 LED controller with keyscan"
|
||||||
depends on FB && I2C && INPUT
|
depends on FB && I2C && INPUT && BACKLIGHT_CLASS_DEVICE
|
||||||
select FB_SYSMEM_HELPERS
|
select FB_SYSMEM_HELPERS
|
||||||
select INPUT_MATRIXKMAP
|
select INPUT_MATRIXKMAP
|
||||||
select FB_BACKLIGHT
|
select FB_BACKLIGHT
|
||||||
|
@ -1618,6 +1618,21 @@ static void ublk_unquiesce_dev(struct ublk_device *ub)
|
|||||||
blk_mq_kick_requeue_list(ub->ub_disk->queue);
|
blk_mq_kick_requeue_list(ub->ub_disk->queue);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static struct gendisk *ublk_detach_disk(struct ublk_device *ub)
|
||||||
|
{
|
||||||
|
struct gendisk *disk;
|
||||||
|
|
||||||
|
/* Sync with ublk_abort_queue() by holding the lock */
|
||||||
|
spin_lock(&ub->lock);
|
||||||
|
disk = ub->ub_disk;
|
||||||
|
ub->dev_info.state = UBLK_S_DEV_DEAD;
|
||||||
|
ub->dev_info.ublksrv_pid = -1;
|
||||||
|
ub->ub_disk = NULL;
|
||||||
|
spin_unlock(&ub->lock);
|
||||||
|
|
||||||
|
return disk;
|
||||||
|
}
|
||||||
|
|
||||||
static void ublk_stop_dev(struct ublk_device *ub)
|
static void ublk_stop_dev(struct ublk_device *ub)
|
||||||
{
|
{
|
||||||
struct gendisk *disk;
|
struct gendisk *disk;
|
||||||
@ -1631,14 +1646,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
|
|||||||
ublk_unquiesce_dev(ub);
|
ublk_unquiesce_dev(ub);
|
||||||
}
|
}
|
||||||
del_gendisk(ub->ub_disk);
|
del_gendisk(ub->ub_disk);
|
||||||
|
disk = ublk_detach_disk(ub);
|
||||||
/* Sync with ublk_abort_queue() by holding the lock */
|
|
||||||
spin_lock(&ub->lock);
|
|
||||||
disk = ub->ub_disk;
|
|
||||||
ub->dev_info.state = UBLK_S_DEV_DEAD;
|
|
||||||
ub->dev_info.ublksrv_pid = -1;
|
|
||||||
ub->ub_disk = NULL;
|
|
||||||
spin_unlock(&ub->lock);
|
|
||||||
put_disk(disk);
|
put_disk(disk);
|
||||||
unlock:
|
unlock:
|
||||||
mutex_unlock(&ub->mutex);
|
mutex_unlock(&ub->mutex);
|
||||||
@ -2336,7 +2344,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
|
|||||||
|
|
||||||
out_put_cdev:
|
out_put_cdev:
|
||||||
if (ret) {
|
if (ret) {
|
||||||
ub->dev_info.state = UBLK_S_DEV_DEAD;
|
ublk_detach_disk(ub);
|
||||||
ublk_put_device(ub);
|
ublk_put_device(ub);
|
||||||
}
|
}
|
||||||
if (ret)
|
if (ret)
|
||||||
|
@ -614,6 +614,12 @@ static ssize_t backing_dev_store(struct device *dev,
|
|||||||
}
|
}
|
||||||
|
|
||||||
nr_pages = i_size_read(inode) >> PAGE_SHIFT;
|
nr_pages = i_size_read(inode) >> PAGE_SHIFT;
|
||||||
|
/* Refuse to use zero sized device (also prevents self reference) */
|
||||||
|
if (!nr_pages) {
|
||||||
|
err = -EINVAL;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
|
bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
|
||||||
bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
|
bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
|
||||||
if (!bitmap) {
|
if (!bitmap) {
|
||||||
@ -1438,12 +1444,16 @@ static void zram_meta_free(struct zram *zram, u64 disksize)
|
|||||||
size_t num_pages = disksize >> PAGE_SHIFT;
|
size_t num_pages = disksize >> PAGE_SHIFT;
|
||||||
size_t index;
|
size_t index;
|
||||||
|
|
||||||
|
if (!zram->table)
|
||||||
|
return;
|
||||||
|
|
||||||
/* Free all pages that are still in this zram device */
|
/* Free all pages that are still in this zram device */
|
||||||
for (index = 0; index < num_pages; index++)
|
for (index = 0; index < num_pages; index++)
|
||||||
zram_free_page(zram, index);
|
zram_free_page(zram, index);
|
||||||
|
|
||||||
zs_destroy_pool(zram->mem_pool);
|
zs_destroy_pool(zram->mem_pool);
|
||||||
vfree(zram->table);
|
vfree(zram->table);
|
||||||
|
zram->table = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool zram_meta_alloc(struct zram *zram, u64 disksize)
|
static bool zram_meta_alloc(struct zram *zram, u64 disksize)
|
||||||
@ -2320,11 +2330,6 @@ static void zram_reset_device(struct zram *zram)
|
|||||||
|
|
||||||
zram->limit_pages = 0;
|
zram->limit_pages = 0;
|
||||||
|
|
||||||
if (!init_done(zram)) {
|
|
||||||
up_write(&zram->init_lock);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
set_capacity_and_notify(zram->disk, 0);
|
set_capacity_and_notify(zram->disk, 0);
|
||||||
part_stat_set_all(zram->disk->part0, 0);
|
part_stat_set_all(zram->disk->part0, 0);
|
||||||
|
|
||||||
|
@ -374,15 +374,19 @@ static inline int amd_pstate_cppc_enable(bool enable)
|
|||||||
|
|
||||||
static int msr_init_perf(struct amd_cpudata *cpudata)
|
static int msr_init_perf(struct amd_cpudata *cpudata)
|
||||||
{
|
{
|
||||||
u64 cap1;
|
u64 cap1, numerator;
|
||||||
|
|
||||||
int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
|
int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
|
||||||
&cap1);
|
&cap1);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
WRITE_ONCE(cpudata->highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
|
ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
|
||||||
WRITE_ONCE(cpudata->max_limit_perf, AMD_CPPC_HIGHEST_PERF(cap1));
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
WRITE_ONCE(cpudata->highest_perf, numerator);
|
||||||
|
WRITE_ONCE(cpudata->max_limit_perf, numerator);
|
||||||
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
|
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
|
||||||
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
|
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
|
||||||
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
|
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
|
||||||
@ -394,13 +398,18 @@ static int msr_init_perf(struct amd_cpudata *cpudata)
|
|||||||
static int shmem_init_perf(struct amd_cpudata *cpudata)
|
static int shmem_init_perf(struct amd_cpudata *cpudata)
|
||||||
{
|
{
|
||||||
struct cppc_perf_caps cppc_perf;
|
struct cppc_perf_caps cppc_perf;
|
||||||
|
u64 numerator;
|
||||||
|
|
||||||
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
|
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
WRITE_ONCE(cpudata->highest_perf, cppc_perf.highest_perf);
|
ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
|
||||||
WRITE_ONCE(cpudata->max_limit_perf, cppc_perf.highest_perf);
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
WRITE_ONCE(cpudata->highest_perf, numerator);
|
||||||
|
WRITE_ONCE(cpudata->max_limit_perf, numerator);
|
||||||
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
|
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
|
||||||
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
|
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
|
||||||
cppc_perf.lowest_nonlinear_perf);
|
cppc_perf.lowest_nonlinear_perf);
|
||||||
@ -561,16 +570,13 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
|
|||||||
|
|
||||||
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
|
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
u32 max_limit_perf, min_limit_perf, lowest_perf, max_perf;
|
u32 max_limit_perf, min_limit_perf, lowest_perf, max_perf, max_freq;
|
||||||
struct amd_cpudata *cpudata = policy->driver_data;
|
struct amd_cpudata *cpudata = policy->driver_data;
|
||||||
|
|
||||||
if (cpudata->boost_supported && !policy->boost_enabled)
|
max_perf = READ_ONCE(cpudata->highest_perf);
|
||||||
max_perf = READ_ONCE(cpudata->nominal_perf);
|
max_freq = READ_ONCE(cpudata->max_freq);
|
||||||
else
|
max_limit_perf = div_u64(policy->max * max_perf, max_freq);
|
||||||
max_perf = READ_ONCE(cpudata->highest_perf);
|
min_limit_perf = div_u64(policy->min * max_perf, max_freq);
|
||||||
|
|
||||||
max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq);
|
|
||||||
min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq);
|
|
||||||
|
|
||||||
lowest_perf = READ_ONCE(cpudata->lowest_perf);
|
lowest_perf = READ_ONCE(cpudata->lowest_perf);
|
||||||
if (min_limit_perf < lowest_perf)
|
if (min_limit_perf < lowest_perf)
|
||||||
@ -889,7 +895,6 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
|
|||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
u32 min_freq, max_freq;
|
u32 min_freq, max_freq;
|
||||||
u64 numerator;
|
|
||||||
u32 nominal_perf, nominal_freq;
|
u32 nominal_perf, nominal_freq;
|
||||||
u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
|
u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
|
||||||
u32 boost_ratio, lowest_nonlinear_ratio;
|
u32 boost_ratio, lowest_nonlinear_ratio;
|
||||||
@ -911,10 +916,7 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
|
|||||||
|
|
||||||
nominal_perf = READ_ONCE(cpudata->nominal_perf);
|
nominal_perf = READ_ONCE(cpudata->nominal_perf);
|
||||||
|
|
||||||
ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
|
boost_ratio = div_u64(cpudata->highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
|
||||||
if (ret)
|
|
||||||
return ret;
|
|
||||||
boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf);
|
|
||||||
max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
|
max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
|
||||||
|
|
||||||
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
|
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
|
||||||
@ -1869,18 +1871,18 @@ static int __init amd_pstate_init(void)
|
|||||||
static_call_update(amd_pstate_update_perf, shmem_update_perf);
|
static_call_update(amd_pstate_update_perf, shmem_update_perf);
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = amd_pstate_register_driver(cppc_state);
|
|
||||||
if (ret) {
|
|
||||||
pr_err("failed to register with return %d\n", ret);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (amd_pstate_prefcore) {
|
if (amd_pstate_prefcore) {
|
||||||
ret = amd_detect_prefcore(&amd_pstate_prefcore);
|
ret = amd_detect_prefcore(&amd_pstate_prefcore);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ret = amd_pstate_register_driver(cppc_state);
|
||||||
|
if (ret) {
|
||||||
|
pr_err("failed to register with return %d\n", ret);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
dev_root = bus_get_dev_root(&cpu_subsys);
|
dev_root = bus_get_dev_root(&cpu_subsys);
|
||||||
if (dev_root) {
|
if (dev_root) {
|
||||||
ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group);
|
ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group);
|
||||||
|
@ -60,7 +60,7 @@ static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
|
|||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __dma_buf_debugfs_list_del(struct file *file)
|
static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
@@ -297,7 +297,7 @@ static const struct dma_buf_ops udmabuf_ops = {
 };
 
 #define SEALS_WANTED (F_SEAL_SHRINK)
-#define SEALS_DENIED (F_SEAL_WRITE)
+#define SEALS_DENIED (F_SEAL_WRITE|F_SEAL_FUTURE_WRITE)
 
 static int check_memfd_seals(struct file *memfd)
 {
@@ -317,12 +317,10 @@ static int check_memfd_seals(struct file *memfd)
 	return 0;
 }
 
-static int export_udmabuf(struct udmabuf *ubuf,
-			  struct miscdevice *device,
-			  u32 flags)
+static struct dma_buf *export_udmabuf(struct udmabuf *ubuf,
+				      struct miscdevice *device)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-	struct dma_buf *buf;
 
 	ubuf->device = device;
 	exp_info.ops = &udmabuf_ops;
@@ -330,11 +328,7 @@ static int export_udmabuf(struct udmabuf *ubuf,
 	exp_info.priv = ubuf;
 	exp_info.flags = O_RDWR;
 
-	buf = dma_buf_export(&exp_info);
-	if (IS_ERR(buf))
-		return PTR_ERR(buf);
-
-	return dma_buf_fd(buf, flags);
+	return dma_buf_export(&exp_info);
 }
 
 static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
@@ -391,6 +385,7 @@ static long udmabuf_create(struct miscdevice *device,
 	struct folio **folios = NULL;
 	pgoff_t pgcnt = 0, pglimit;
 	struct udmabuf *ubuf;
+	struct dma_buf *dmabuf;
 	long ret = -EINVAL;
 	u32 i, flags;
 
@@ -436,23 +431,39 @@ static long udmabuf_create(struct miscdevice *device,
 			goto err;
 		}
 
+		/*
+		 * Take the inode lock to protect against concurrent
+		 * memfd_add_seals(), which takes this lock in write mode.
+		 */
+		inode_lock_shared(file_inode(memfd));
 		ret = check_memfd_seals(memfd);
-		if (ret < 0) {
-			fput(memfd);
-			goto err;
-		}
+		if (ret)
+			goto out_unlock;
 
 		ret = udmabuf_pin_folios(ubuf, memfd, list[i].offset,
 					 list[i].size, folios);
+out_unlock:
+		inode_unlock_shared(file_inode(memfd));
 		fput(memfd);
 		if (ret)
 			goto err;
 	}
 
 	flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
-	ret = export_udmabuf(ubuf, device, flags);
-	if (ret < 0)
+	dmabuf = export_udmabuf(ubuf, device);
+	if (IS_ERR(dmabuf)) {
+		ret = PTR_ERR(dmabuf);
 		goto err;
+	}
+	/*
+	 * Ownership of ubuf is held by the dmabuf from here.
+	 * If the following dma_buf_fd() fails, dma_buf_put() cleans up both the
+	 * dmabuf and the ubuf (through udmabuf_ops.release).
+	 */
+
+	ret = dma_buf_fd(dmabuf, flags);
+	if (ret < 0)
+		dma_buf_put(dmabuf);
 
 	kvfree(folios);
 	return ret;
@@ -7,9 +9,9 @@
 #include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
-#include <linux/dma-map-ops.h>
 #include <linux/platform_device.h>
 #include <linux/platform_data/amd_qdma.h>
 #include <linux/regmap.h>
@@ -492,18 +492,9 @@ static int qdma_device_verify(struct qdma_device *qdev)
 
 static int qdma_device_setup(struct qdma_device *qdev)
 {
-	struct device *dev = &qdev->pdev->dev;
 	u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
 	int ret = 0;
 
-	while (dev && get_dma_ops(dev))
-		dev = dev->parent;
-	if (!dev) {
-		qdma_err(qdev, "dma device not found");
-		return -EINVAL;
-	}
-	set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev));
-
 	ret = qdma_setup_fmap_context(qdev);
 	if (ret) {
 		qdma_err(qdev, "Failed setup fmap context");
@@ -548,11 +539,12 @@ static void qdma_free_queue_resources(struct dma_chan *chan)
 {
 	struct qdma_queue *queue = to_qdma_queue(chan);
 	struct qdma_device *qdev = queue->qdev;
-	struct device *dev = qdev->dma_dev.dev;
+	struct qdma_platdata *pdata;
 
 	qdma_clear_queue_context(queue);
 	vchan_free_chan_resources(&queue->vchan);
-	dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE,
+	pdata = dev_get_platdata(&qdev->pdev->dev);
+	dma_free_coherent(pdata->dma_dev, queue->ring_size * QDMA_MM_DESC_SIZE,
 			  queue->desc_base, queue->dma_desc_base);
 }
 
@@ -565,6 +557,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
 	struct qdma_queue *queue = to_qdma_queue(chan);
 	struct qdma_device *qdev = queue->qdev;
 	struct qdma_ctxt_sw_desc desc;
+	struct qdma_platdata *pdata;
 	size_t size;
 	int ret;
 
@@ -572,8 +565,9 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
 	if (ret)
 		return ret;
 
+	pdata = dev_get_platdata(&qdev->pdev->dev);
 	size = queue->ring_size * QDMA_MM_DESC_SIZE;
-	queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size,
+	queue->desc_base = dma_alloc_coherent(pdata->dma_dev, size,
 					      &queue->dma_desc_base,
 					      GFP_KERNEL);
 	if (!queue->desc_base) {
@@ -588,7 +582,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
 	if (ret) {
 		qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
 			 chan->name);
-		dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base,
+		dma_free_coherent(pdata->dma_dev, size, queue->desc_base,
 				  queue->dma_desc_base);
 		return ret;
 	}
@@ -948,8 +942,9 @@ static int qdma_init_error_irq(struct qdma_device *qdev)
 
 static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
 {
-	u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+	struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
 	struct device *dev = &qdev->pdev->dev;
+	u32 ctxt[QDMA_CTXT_REGMAP_LEN];
 	struct qdma_intr_ring *ring;
 	struct qdma_ctxt_intr intr_ctxt;
 	u32 vector;
@@ -969,7 +964,8 @@ static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
 		ring->msix_id = qdev->err_irq_idx + i + 1;
 		ring->ridx = i;
 		ring->color = 1;
-		ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE,
+		ring->base = dmam_alloc_coherent(pdata->dma_dev,
+						 QDMA_INTR_RING_SIZE,
 						 &ring->dev_base, GFP_KERNEL);
 		if (!ring->base) {
 			qdma_err(qdev, "Failed to alloc intr ring %d", i);
@@ -153,6 +153,8 @@ static int admac_alloc_sram_carveout(struct admac_data *ad,
 {
 	struct admac_sram *sram;
 	int i, ret = 0, nblocks;
+	ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
+	ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
 
 	if (dir == DMA_MEM_TO_DEV)
 		sram = &ad->txcache;
@@ -912,12 +914,7 @@ static int admac_probe(struct platform_device *pdev)
 		goto free_irq;
 	}
 
-	ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
-	ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
-
 	dev_info(&pdev->dev, "Audio DMA Controller\n");
-	dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n",
-		 readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size);
 
 	return 0;
 
@@ -1363,6 +1363,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
 		return NULL;
 
 	desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
+	if (!desc)
+		return NULL;
 	list_add_tail(&desc->desc_node, &desc->descs_list);
 
 	desc->tx_dma_desc.cookie = -EBUSY;
@@ -8,13 +8,15 @@
 
 static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
 {
+	struct dw_dma *dw = to_dw_dma(chan->device);
+	struct dw_dma_chip_pdata *data = dev_get_drvdata(dw->dma.dev);
 	struct acpi_dma_spec *dma_spec = param;
 	struct dw_dma_slave slave = {
 		.dma_dev = dma_spec->dev,
 		.src_id = dma_spec->slave_id,
 		.dst_id = dma_spec->slave_id,
-		.m_master = 0,
-		.p_master = 1,
+		.m_master = data->m_master,
+		.p_master = data->p_master,
 	};
 
 	return dw_dma_filter(chan, &slave);
@@ -51,11 +51,15 @@ struct dw_dma_chip_pdata {
 	int (*probe)(struct dw_dma_chip *chip);
 	int (*remove)(struct dw_dma_chip *chip);
 	struct dw_dma_chip *chip;
+	u8 m_master;
+	u8 p_master;
 };
 
 static __maybe_unused const struct dw_dma_chip_pdata dw_dma_chip_pdata = {
 	.probe = dw_dma_probe,
 	.remove = dw_dma_remove,
+	.m_master = 0,
+	.p_master = 1,
 };
 
 static const struct dw_dma_platform_data idma32_pdata = {
@@ -72,6 +76,8 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
 	.pdata = &idma32_pdata,
 	.probe = idma32_dma_probe,
 	.remove = idma32_dma_remove,
+	.m_master = 0,
+	.p_master = 0,
 };
 
 static const struct dw_dma_platform_data xbar_pdata = {
@@ -88,6 +94,8 @@ static __maybe_unused const struct dw_dma_chip_pdata xbar_chip_pdata = {
 	.pdata = &xbar_pdata,
 	.probe = idma32_dma_probe,
 	.remove = idma32_dma_remove,
+	.m_master = 0,
+	.p_master = 0,
 };
 
 #endif /* _DMA_DW_INTERNAL_H */
@@ -56,10 +56,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 	if (ret)
 		return ret;
 
-	dw_dma_acpi_controller_register(chip->dw);
-
 	pci_set_drvdata(pdev, data);
 
+	dw_dma_acpi_controller_register(chip->dw);
+
 	return 0;
 }
 
@@ -166,6 +166,7 @@ struct fsl_edma_chan {
 	struct work_struct issue_worker;
 	struct platform_device *pdev;
 	struct device *pd_dev;
+	struct device_link *pd_dev_link;
 	u32 srcid;
 	struct clk *clk;
 	int priority;
@@ -417,10 +417,33 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
 
+static void fsl_edma3_detach_pd(struct fsl_edma_engine *fsl_edma)
+{
+	struct fsl_edma_chan *fsl_chan;
+	int i;
+
+	for (i = 0; i < fsl_edma->n_chans; i++) {
+		if (fsl_edma->chan_masked & BIT(i))
+			continue;
+		fsl_chan = &fsl_edma->chans[i];
+		if (fsl_chan->pd_dev_link)
+			device_link_del(fsl_chan->pd_dev_link);
+		if (fsl_chan->pd_dev) {
+			dev_pm_domain_detach(fsl_chan->pd_dev, false);
+			pm_runtime_dont_use_autosuspend(fsl_chan->pd_dev);
+			pm_runtime_set_suspended(fsl_chan->pd_dev);
+		}
+	}
+}
+
+static void devm_fsl_edma3_detach_pd(void *data)
+{
+	fsl_edma3_detach_pd(data);
+}
+
 static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
 {
 	struct fsl_edma_chan *fsl_chan;
-	struct device_link *link;
 	struct device *pd_chan;
 	struct device *dev;
 	int i;
@@ -436,15 +459,16 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
 		pd_chan = dev_pm_domain_attach_by_id(dev, i);
 		if (IS_ERR_OR_NULL(pd_chan)) {
 			dev_err(dev, "Failed attach pd %d\n", i);
-			return -EINVAL;
+			goto detach;
 		}
 
-		link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
+		fsl_chan->pd_dev_link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
 					     DL_FLAG_PM_RUNTIME |
 					     DL_FLAG_RPM_ACTIVE);
-		if (!link) {
+		if (!fsl_chan->pd_dev_link) {
 			dev_err(dev, "Failed to add device_link to %d\n", i);
-			return -EINVAL;
+			dev_pm_domain_detach(pd_chan, false);
+			goto detach;
 		}
 
 		fsl_chan->pd_dev = pd_chan;
@@ -455,6 +479,10 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
 	}
 
 	return 0;
+
+detach:
+	fsl_edma3_detach_pd(fsl_edma);
+	return -EINVAL;
 }
 
 static int fsl_edma_probe(struct platform_device *pdev)
@@ -544,6 +572,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
 		ret = fsl_edma3_attach_pd(pdev, fsl_edma);
 		if (ret)
 			return ret;
+		ret = devm_add_action_or_reset(&pdev->dev, devm_fsl_edma3_detach_pd, fsl_edma);
+		if (ret)
+			return ret;
 	}
 
 	if (drvdata->flags & FSL_EDMA_DRV_TCD64)
@@ -31,7 +31,7 @@
 #define LDMA_ASK_VALID		BIT(2)
 #define LDMA_START		BIT(3) /* DMA start operation */
 #define LDMA_STOP		BIT(4) /* DMA stop operation */
-#define LDMA_CONFIG_MASK	GENMASK(4, 0) /* DMA controller config bits mask */
+#define LDMA_CONFIG_MASK	GENMASK_ULL(4, 0) /* DMA controller config bits mask */
 
 /* Bitfields in ndesc_addr field of HW descriptor */
 #define LDMA_DESC_EN		BIT(0) /*1: The next descriptor is valid */
@@ -1388,6 +1388,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 			irq = irq_of_parse_and_map(np, 0);
 			if (!irq) {
 				ret = -ENODEV;
+				of_node_put(np);
 				goto err_channel_add;
 			}
 
@@ -1396,6 +1397,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 			if (IS_ERR(chan)) {
 				ret = PTR_ERR(chan);
 				irq_dispose_mapping(irq);
+				of_node_put(np);
 				goto err_channel_add;
 			}
 
@@ -231,6 +231,7 @@ struct tegra_dma_channel {
 	bool config_init;
 	char name[30];
 	enum dma_transfer_direction sid_dir;
+	enum dma_status status;
 	int id;
 	int irq;
 	int slave_id;
@@ -393,6 +394,8 @@ static int tegra_dma_pause(struct tegra_dma_channel *tdc)
 		tegra_dma_dump_chan_regs(tdc);
 	}
 
+	tdc->status = DMA_PAUSED;
+
 	return ret;
 }
 
@@ -419,6 +422,8 @@ static void tegra_dma_resume(struct tegra_dma_channel *tdc)
 	val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
 	val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
 	tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
+
+	tdc->status = DMA_IN_PROGRESS;
 }
 
 static int tegra_dma_device_resume(struct dma_chan *dc)
@@ -544,6 +549,7 @@ static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
 
 	tegra_dma_sid_free(tdc);
 	tdc->dma_desc = NULL;
+	tdc->status = DMA_COMPLETE;
 }
 
 static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
@@ -716,6 +722,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
 		tdc->dma_desc = NULL;
 	}
 
+	tdc->status = DMA_COMPLETE;
 	tegra_dma_sid_free(tdc);
 	vchan_get_all_descriptors(&tdc->vc, &head);
 	spin_unlock_irqrestore(&tdc->vc.lock, flags);
@@ -769,6 +776,9 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
 	if (ret == DMA_COMPLETE)
 		return ret;
 
+	if (tdc->status == DMA_PAUSED)
+		ret = DMA_PAUSED;
+
 	spin_lock_irqsave(&tdc->vc.lock, flags);
 	vd = vchan_find_desc(&tdc->vc, cookie);
 	if (vd) {
@@ -402,10 +402,10 @@ static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv)
 		return -EIO;
 
 	/*
-	 * Bit 5 of byte 1 is "UL_Auto Update" & if it is set, Auto Update is
+	 * Bit 5 of byte 1 is "UL_IAP" & if it is set, Auto Update is
 	 * not possible.
 	 */
-	if (response_msg[1] & AUTO_UPDATE_FEATURE_ENABLED)
+	if ((((u8 *)response_msg)[1] & AUTO_UPDATE_FEATURE_ENABLED))
 		return -EPERM;
 
 	return 0;
@@ -99,6 +99,7 @@ config DRM_KUNIT_TEST
 config DRM_KMS_HELPER
 	tristate
 	depends on DRM
+	select FB_CORE if DRM_FBDEV_EMULATION
 	help
 	  CRTC helpers for KMS drivers.
 
@@ -358,6 +359,7 @@ config DRM_TTM_HELPER
 	tristate
 	depends on DRM
 	select DRM_TTM
+	select FB_CORE if DRM_FBDEV_EMULATION
 	select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
 	help
 	  Helpers for ttm-based gem objects
@@ -365,6 +367,7 @@ config DRM_TTM_HELPER
 config DRM_GEM_DMA_HELPER
 	tristate
 	depends on DRM
+	select FB_CORE if DRM_FBDEV_EMULATION
 	select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
 	help
 	  Choose this if you need the GEM DMA helper functions
@@ -372,6 +375,7 @@ config DRM_GEM_DMA_HELPER
 config DRM_GEM_SHMEM_HELPER
 	tristate
 	depends on DRM && MMU
+	select FB_CORE if DRM_FBDEV_EMULATION
 	select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
 	help
 	  Choose this if you need the GEM shmem helper functions
@@ -343,11 +343,10 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
 	coredump->skip_vram_check = skip_vram_check;
 	coredump->reset_vram_lost = vram_lost;
 
-	if (job && job->vm) {
-		struct amdgpu_vm *vm = job->vm;
+	if (job && job->pasid) {
 		struct amdgpu_task_info *ti;
 
-		ti = amdgpu_vm_get_task_info_vm(vm);
+		ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
 		if (ti) {
 			coredump->reset_task_info = *ti;
 			amdgpu_vm_put_task_info(ti);
@@ -417,6 +417,9 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
 {
 	struct amdgpu_device *adev = drm_to_adev(dev);
 
+	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+		return false;
+
 	if (adev->has_pr3 ||
 	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
 		return true;
@@ -255,7 +255,6 @@ void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
 
 void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
-	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
 	struct dma_fence *f;
 	unsigned i;
 
@@ -268,7 +267,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
 		f = NULL;
 
 	for (i = 0; i < job->num_ibs; ++i)
-		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
+		amdgpu_ib_free(NULL, &job->ibs[i], f);
 }
 
 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
@@ -1266,10 +1266,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	 * next command submission.
 	 */
 	if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
-		uint32_t mem_type = bo->tbo.resource->mem_type;
-
-		if (!(bo->preferred_domains &
-		      amdgpu_mem_type_to_domain(mem_type)))
+		if (bo->tbo.resource &&
+		    !(bo->preferred_domains &
+		      amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
 			amdgpu_vm_bo_evicted(&bo_va->base);
 		else
 			amdgpu_vm_bo_idle(&bo_va->base);
|
|||||||
if (amdgpu_sriov_vf(adev))
|
if (amdgpu_sriov_vf(adev))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
switch (adev->ip_versions[GC_HWIP][0]) {
|
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
|
||||||
case IP_VERSION(12, 0, 0):
|
case IP_VERSION(12, 0, 0):
|
||||||
case IP_VERSION(12, 0, 1):
|
case IP_VERSION(12, 0, 1):
|
||||||
gfx_v12_0_update_gfx_clock_gating(adev,
|
gfx_v12_0_update_gfx_clock_gating(adev,
|
||||||
|
@@ -108,7 +108,7 @@ mmhub_v4_1_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
 	dev_err(adev->dev,
 		"MMVM_L2_PROTECTION_FAULT_STATUS_LO32:0x%08X\n",
 		status);
-	switch (adev->ip_versions[MMHUB_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
 	case IP_VERSION(4, 1, 0):
 		mmhub_cid = mmhub_client_ids_v4_1_0[cid][rw];
 		break;
@@ -271,8 +271,19 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
 	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
 };
 
+#define regRCC_DEV0_EPF6_STRAP4		0xd304
+#define regRCC_DEV0_EPF6_STRAP4_BASE_IDX	5
+
 static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
 {
+	uint32_t data;
+
+	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
+	case IP_VERSION(2, 5, 0):
+		data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF6_STRAP4) & ~BIT(23);
+		WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF6_STRAP4, data);
+		break;
+	}
 }
 
 #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
@@ -275,7 +275,7 @@ static void nbio_v7_11_init_registers(struct amdgpu_device *adev)
 	if (def != data)
 		WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
 
-	switch (adev->ip_versions[NBIO_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
 	case IP_VERSION(7, 11, 0):
 	case IP_VERSION(7, 11, 1):
 	case IP_VERSION(7, 11, 2):
@@ -247,7 +247,7 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
 	if (def != data)
 		WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data);
 
-	switch (adev->ip_versions[NBIO_HWIP][0]) {
+	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
 	case IP_VERSION(7, 7, 0):
 		data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23);
 		WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data);
@@ -2096,7 +2096,7 @@ static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
 
-	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(14, 0, 2))
+	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2))
 		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
 						       FEATURE_PWR_GFX, NULL);
 	else
@@ -153,7 +153,16 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
 			   ADV7511_AUDIO_CFG3_LEN_MASK, len);
 	regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
 			   ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
-	regmap_write(adv7511->regmap, 0x73, 0x1);
+
+	/* send current Audio infoframe values while updating */
+	regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+			   BIT(5), BIT(5));
+
+	regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1);
+
+	/* use Audio infoframe updated info */
+	regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+			   BIT(5), 0);
 
 	return 0;
 }
@@ -184,8 +193,9 @@ static int audio_startup(struct device *dev, void *data)
 	regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
 				BIT(7) | BIT(6), BIT(7));
 	/* use Audio infoframe updated info */
-	regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
+	regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
 				BIT(5), 0);
+
 	/* enable SPDIF receiver */
 	if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
 		regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
@@ -1241,8 +1241,10 @@ static int adv7511_probe(struct i2c_client *i2c)
 		return ret;
 
 	ret = adv7511_init_regulators(adv7511);
-	if (ret)
-		return dev_err_probe(dev, ret, "failed to init regulators\n");
+	if (ret) {
+		dev_err_probe(dev, ret, "failed to init regulators\n");
+		goto err_of_node_put;
+	}
 
 	/*
 	 * The power down GPIO is optional. If present, toggle it from active to
@@ -1363,6 +1365,8 @@ err_i2c_unregister_edid:
 	i2c_unregister_device(adv7511->i2c_edid);
 uninit_regulators:
 	adv7511_uninit_regulators(adv7511);
+err_of_node_put:
+	of_node_put(adv7511->host_node);
 
 	return ret;
 }
@@ -1371,6 +1375,8 @@ static void adv7511_remove(struct i2c_client *i2c)
 {
 	struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
 
+	of_node_put(adv7511->host_node);
+
 	adv7511_uninit_regulators(adv7511);
 
 	drm_bridge_remove(&adv7511->bridge);
@@ -172,7 +172,7 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
 
 	of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
 
-	if (num_lanes < 1 || num_lanes > 4)
+	if (num_lanes < 2 || num_lanes > 4)
 		return -EINVAL;
 
 	adv->num_dsi_lanes = num_lanes;
@@ -181,8 +181,6 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
 	if (!adv->host_node)
 		return -ENODEV;
 
-	of_node_put(adv->host_node);
-
 	adv->use_timing_gen = !of_property_read_bool(np,
 				"adi,disable-timing-generator");
 
@@ -1896,8 +1896,8 @@ static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
  *
  * Creates a DP tunnel manager for @dev.
  *
- * Returns a pointer to the tunnel manager if created successfully or NULL in
- * case of an error.
+ * Returns a pointer to the tunnel manager if created successfully or error
+ * pointer in case of failure.
  */
 struct drm_dp_tunnel_mgr *
 drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
@@ -1907,7 +1907,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
 
 	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
 	if (!mgr)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	mgr->dev = dev;
 	init_waitqueue_head(&mgr->bw_req_queue);
@@ -1916,7 +1916,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
 	if (!mgr->groups) {
 		kfree(mgr);
 
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 #ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
@@ -1927,7 +1927,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
 		if (!init_group(mgr, &mgr->groups[i])) {
 			destroy_mgr(mgr);
 
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 		}
 
 		mgr->group_count++;
@@ -1287,14 +1287,11 @@ EXPORT_SYMBOL(drm_mode_set_name);
  */
 int drm_mode_vrefresh(const struct drm_display_mode *mode)
 {
-	unsigned int num, den;
+	unsigned int num = 1, den = 1;
 
 	if (mode->htotal == 0 || mode->vtotal == 0)
 		return 0;
 
-	num = mode->clock;
-	den = mode->htotal * mode->vtotal;
-
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 		num *= 2;
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1302,6 +1299,12 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
 	if (mode->vscan > 1)
 		den *= mode->vscan;
 
+	if (check_mul_overflow(mode->clock, num, &num))
+		return 0;
+
+	if (check_mul_overflow(mode->htotal * mode->vtotal, den, &den))
+		return 0;
+
 	return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(num, 1000), den);
 }
 EXPORT_SYMBOL(drm_mode_vrefresh);
@@ -2115,14 +2115,6 @@ static void intel_c10_pll_program(struct intel_display *display,
 		      0, C10_VDR_CTRL_MSGBUS_ACCESS,
 		      MB_WRITE_COMMITTED);
 
-	/* Custom width needs to be programmed to 0 for both the phy lanes */
-	intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
-		      C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
-		      MB_WRITE_COMMITTED);
-	intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
-		      0, C10_VDR_CTRL_UPDATE_CFG,
-		      MB_WRITE_COMMITTED);
-
 	/* Program the pll values only for the master lane */
 	for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
 		intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
@@ -2132,6 +2124,10 @@ static void intel_c10_pll_program(struct intel_display *display,
 	intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
 	intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
 
+	/* Custom width needs to be programmed to 0 for both the phy lanes */
+	intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
+		      C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
+		      MB_WRITE_COMMITTED);
 	intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
 		      0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
 		      MB_WRITE_COMMITTED);
@@ -343,6 +343,11 @@ struct intel_engine_guc_stats {
 	 * @start_gt_clk: GT clock time of last idle to active transition.
 	 */
 	u64 start_gt_clk;
+
+	/**
+	 * @total: The last value of total returned
+	 */
+	u64 total;
 };
 
 union intel_engine_tlb_inv_reg {
@@ -133,7 +133,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
 		GEN9_MEDIA_PG_ENABLE |
 		GEN11_MEDIA_SAMPLER_PG_ENABLE;
 
-	if (GRAPHICS_VER(gt->i915) >= 12) {
+	if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
 		for (i = 0; i < I915_MAX_VCS; i++)
 			if (HAS_ENGINE(gt, _VCS(i)))
 				pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
@@ -1243,6 +1243,21 @@ static void __get_engine_usage_record(struct intel_engine_cs *engine,
 	} while (++i < 6);
 }
 
+static void __set_engine_usage_record(struct intel_engine_cs *engine,
+				      u32 last_in, u32 id, u32 total)
+{
+	struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine);
+
+#define record_write(map_, field_, val_) \
+	iosys_map_wr_field(map_, 0, struct guc_engine_usage_record, field_, val_)
+
+	record_write(&rec_map, last_switch_in_stamp, last_in);
+	record_write(&rec_map, current_context_index, id);
+	record_write(&rec_map, total_runtime, total);
+
+#undef record_write
+}
+
 static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
 {
 	struct intel_engine_guc_stats *stats = &engine->stats.guc;
@@ -1363,9 +1378,12 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
 		total += intel_gt_clock_interval_to_ns(gt, clk);
 	}
 
+	if (total > stats->total)
+		stats->total = total;
+
 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
 
-	return ns_to_ktime(total);
+	return ns_to_ktime(stats->total);
 }
 
 static void guc_enable_busyness_worker(struct intel_guc *guc)
@@ -1431,8 +1449,21 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
 
 	guc_update_pm_timestamp(guc, &unused);
 	for_each_engine(engine, gt, id) {
+		struct intel_engine_guc_stats *stats = &engine->stats.guc;
+
 		guc_update_engine_gt_clks(engine);
-		engine->stats.guc.prev_total = 0;
+
+		/*
+		 * If resetting a running context, accumulate the active
+		 * time as well since there will be no context switch.
+		 */
+		if (stats->running) {
+			u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
+
+			stats->total_gt_clks += clk;
+		}
+		stats->prev_total = 0;
+		stats->running = 0;
 	}
 
 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
@@ -1543,6 +1574,9 @@ err_trylock:
 
 static int guc_action_enable_usage_stats(struct intel_guc *guc)
 {
+	struct intel_gt *gt = guc_to_gt(guc);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	u32 offset = intel_guc_engine_usage_offset(guc);
 	u32 action[] = {
 		INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
@@ -1550,6 +1584,9 @@ static int guc_action_enable_usage_stats(struct intel_guc *guc)
 		0,
 	};
 
+	for_each_engine(engine, gt, id)
+		__set_engine_usage_record(engine, 0, 0xffffffff, 0);
+
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
@@ -565,6 +565,8 @@ static int hx83102_get_modes(struct drm_panel *panel,
 	struct drm_display_mode *mode;
 
 	mode = drm_mode_duplicate(connector->dev, m);
+	if (!mode)
+		return -ENOMEM;
 
 	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
 	drm_mode_set_name(mode);
|
|||||||
return dev_err_probe(dev, -EPROBE_DEFER, "Cannot get secondary DSI host\n");
|
return dev_err_probe(dev, -EPROBE_DEFER, "Cannot get secondary DSI host\n");
|
||||||
|
|
||||||
nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info);
|
nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info);
|
||||||
if (!nt->dsi[1]) {
|
if (IS_ERR(nt->dsi[1])) {
|
||||||
dev_err(dev, "Cannot get secondary DSI node\n");
|
dev_err(dev, "Cannot get secondary DSI node\n");
|
||||||
return -ENODEV;
|
return PTR_ERR(nt->dsi[1]);
|
||||||
}
|
}
|
||||||
num_dsis++;
|
num_dsis++;
|
||||||
}
|
}
|
||||||
|
@@ -1177,6 +1177,7 @@ static int st7701_probe(struct device *dev, int connector_type)
 		return dev_err_probe(dev, ret, "Failed to get orientation\n");
 
 	drm_panel_init(&st7701->panel, dev, &st7701_funcs, connector_type);
+	st7701->panel.prepare_prev_first = true;
 
 	/**
 	 * Once sleep out has been issued, ST7701 IC required to wait 120ms
@@ -325,7 +325,7 @@ static void r63353_panel_shutdown(struct mipi_dsi_device *dsi)
 {
 	struct r63353_panel *rpanel = mipi_dsi_get_drvdata(dsi);
 
-	r63353_panel_unprepare(&rpanel->base);
+	drm_panel_unprepare(&rpanel->base);
 }
 
 static const struct r63353_desc sharp_ls068b3sx02_data = {
@@ -1355,7 +1355,8 @@ EXPORT_SYMBOL(drm_sched_init);
 * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job()
 * will not be called for all jobs still in drm_gpu_scheduler.pending_list.
 * There is no solution for this currently. Thus, it is up to the driver to make
- * sure that
+ * sure that:
+ *
 * a) drm_sched_fini() is only called after for all submitted jobs
 *    drm_sched_backend_ops.free_job() has been called or that
 * b) the jobs for which drm_sched_backend_ops.free_job() has not been called
@@ -724,7 +724,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	    new_mem->mem_type == XE_PL_SYSTEM) {
 		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
 						     DMA_RESV_USAGE_BOOKKEEP,
-						     true,
+						     false,
 						     MAX_SCHEDULE_TIMEOUT);
 		if (timeout < 0) {
 			ret = timeout;
@@ -848,8 +848,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 
 out:
 	if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
-	    ttm_bo->ttm)
+	    ttm_bo->ttm) {
+		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
+						     DMA_RESV_USAGE_KERNEL,
+						     false,
+						     MAX_SCHEDULE_TIMEOUT);
+		if (timeout < 0)
+			ret = timeout;
+
 		xe_tt_unmap_sg(ttm_bo->ttm);
+	}
 
 	return ret;
 }
@@ -109,7 +109,11 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
 	drm_puts(&p, "\n**** GuC CT ****\n");
 	xe_guc_ct_snapshot_print(ss->guc.ct, &p);
 
-	drm_puts(&p, "\n**** Contexts ****\n");
+	/*
+	 * Don't add a new section header here because the mesa debug decoder
+	 * tool expects the context information to be in the 'GuC CT' section.
+	 */
+	/* drm_puts(&p, "\n**** Contexts ****\n"); */
 	xe_guc_exec_queue_snapshot_print(ss->ge, &p);
 
 	drm_puts(&p, "\n**** Job ****\n");
@@ -363,6 +367,15 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
 	char buff[ASCII85_BUFSZ], *line_buff;
 	size_t line_pos = 0;
 
+	/*
+	 * Splitting blobs across multiple lines is not compatible with the mesa
+	 * debug decoder tool. Note that even dropping the explicit '\n' below
+	 * doesn't help because the GuC log is so big some underlying implementation
+	 * still splits the lines at 512K characters. So just bail completely for
+	 * the moment.
+	 */
+	return;
+
 #define DMESG_MAX_LINE_LEN	800
 #define MIN_SPACE		(ASCII85_BUFSZ + 2)	/* 85 + "\n\0" */
 
@@ -8,6 +8,7 @@
 #include <linux/nospec.h>
 
 #include <drm/drm_device.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <uapi/drm/xe_drm.h>
 
@@ -762,9 +763,11 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
  */
 void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
 {
+	struct xe_device *xe = gt_to_xe(q->gt);
 	struct xe_file *xef;
 	struct xe_lrc *lrc;
 	u32 old_ts, new_ts;
+	int idx;
 
 	/*
 	 * Jobs that are run during driver load may use an exec_queue, but are
@@ -774,6 +777,10 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
 	if (!q->vm || !q->vm->xef)
 		return;
 
+	/* Synchronize with unbind while holding the xe file open */
+	if (!drm_dev_enter(&xe->drm, &idx))
+		return;
+
 	xef = q->vm->xef;
 
 	/*
@@ -787,6 +794,8 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
 	lrc = q->lrc[0];
 	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
 	xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
+
+	drm_dev_exit(idx);
 }
 
 /**
@@ -2046,7 +2046,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
 	valid_any = valid_any || (valid_ggtt && is_primary);
 
 	if (IS_DGFX(xe)) {
-		bool valid_lmem = pf_get_vf_config_ggtt(primary_gt, vfid);
+		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
 
 		valid_any = valid_any || (valid_lmem && is_primary);
 		valid_all = valid_all && valid_lmem;
@@ -74,12 +74,6 @@ struct xe_oa_config {
 	struct rcu_head rcu;
 };
 
-struct flex {
-	struct xe_reg reg;
-	u32 offset;
-	u32 value;
-};
-
 struct xe_oa_open_param {
 	struct xe_file *xef;
 	u32 oa_unit_id;
@@ -596,19 +590,38 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait)
 	return ret;
 }
 
+static void xe_oa_lock_vma(struct xe_exec_queue *q)
+{
+	if (q->vm) {
+		down_read(&q->vm->lock);
+		xe_vm_lock(q->vm, false);
+	}
+}
+
+static void xe_oa_unlock_vma(struct xe_exec_queue *q)
+{
+	if (q->vm) {
+		xe_vm_unlock(q->vm);
+		up_read(&q->vm->lock);
+	}
+}
+
 static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa_submit_deps deps,
 					 struct xe_bb *bb)
 {
+	struct xe_exec_queue *q = stream->exec_q ?: stream->k_exec_q;
 	struct xe_sched_job *job;
 	struct dma_fence *fence;
 	int err = 0;
 
-	/* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */
-	job = xe_bb_create_job(stream->k_exec_q, bb);
+	xe_oa_lock_vma(q);
+
+	job = xe_bb_create_job(q, bb);
 	if (IS_ERR(job)) {
 		err = PTR_ERR(job);
 		goto exit;
 	}
+	job->ggtt = true;
 
 	if (deps == XE_OA_SUBMIT_ADD_DEPS) {
 		for (int i = 0; i < stream->num_syncs && !err; i++)
@@ -623,10 +636,13 @@ static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa
 	fence = dma_fence_get(&job->drm.s_fence->finished);
 	xe_sched_job_push(job);
 
+	xe_oa_unlock_vma(q);
+
 	return fence;
 err_put_job:
 	xe_sched_job_put(job);
 exit:
+	xe_oa_unlock_vma(q);
 	return ERR_PTR(err);
 }
 
@@ -675,63 +691,19 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream)
 	dma_fence_put(stream->last_fence);
 }
 
-static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
-			     struct xe_bb *bb, const struct flex *flex, u32 count)
-{
-	u32 offset = xe_bo_ggtt_addr(lrc->bo);
-
-	do {
-		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
-		bb->cs[bb->len++] = offset + flex->offset * sizeof(u32);
-		bb->cs[bb->len++] = 0;
-		bb->cs[bb->len++] = flex->value;
-
-	} while (flex++, --count);
-}
-
-static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc,
-				  const struct flex *flex, u32 count)
+static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri, u32 count)
 {
 	struct dma_fence *fence;
 	struct xe_bb *bb;
 	int err;
 
-	bb = xe_bb_new(stream->gt, 4 * count, false);
+	bb = xe_bb_new(stream->gt, 2 * count + 1, false);
 	if (IS_ERR(bb)) {
 		err = PTR_ERR(bb);
 		goto exit;
 	}
 
-	xe_oa_store_flex(stream, lrc, bb, flex, count);
-
-	fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
-	if (IS_ERR(fence)) {
-		err = PTR_ERR(fence);
-		goto free_bb;
-	}
-	xe_bb_free(bb, fence);
-	dma_fence_put(fence);
-
-	return 0;
-free_bb:
-	xe_bb_free(bb, NULL);
-exit:
-	return err;
-}
-
-static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri)
-{
-	struct dma_fence *fence;
-	struct xe_bb *bb;
-	int err;
-
-	bb = xe_bb_new(stream->gt, 3, false);
-	if (IS_ERR(bb)) {
-		err = PTR_ERR(bb);
-		goto exit;
-	}
-
-	write_cs_mi_lri(bb, reg_lri, 1);
+	write_cs_mi_lri(bb, reg_lri, count);
 
 	fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
 	if (IS_ERR(fence)) {
@@ -751,71 +723,55 @@ exit:
 static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
 {
 	const struct xe_oa_format *format = stream->oa_buffer.format;
-	struct xe_lrc *lrc = stream->exec_q->lrc[0];
-	u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
 	u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
 		(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
-
-	struct flex regs_context[] = {
+	struct xe_oa_reg reg_lri[] = {
 		{
 			OACTXCONTROL(stream->hwe->mmio_base),
-			stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
 			enable ? OA_COUNTER_RESUME : 0,
 		},
+		{
+			OAR_OACONTROL,
+			oacontrol,
+		},
 		{
 			RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
-			regs_offset + CTX_CONTEXT_CONTROL,
-			_MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE),
+			_MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+				      enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
 		},
 	};
-	struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol };
-	int err;
-
-	/* Modify stream hwe context image with regs_context */
-	err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
-				     regs_context, ARRAY_SIZE(regs_context));
-	if (err)
-		return err;
 
-	/* Apply reg_lri using LRI */
-	return xe_oa_load_with_lri(stream, &reg_lri);
+	return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
 }
 
 static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
 {
 	const struct xe_oa_format *format = stream->oa_buffer.format;
-	struct xe_lrc *lrc = stream->exec_q->lrc[0];
-	u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
 	u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
 		(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
-	struct flex regs_context[] = {
+	struct xe_oa_reg reg_lri[] = {
 		{
 			OACTXCONTROL(stream->hwe->mmio_base),
|
OACTXCONTROL(stream->hwe->mmio_base),
|
||||||
stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
|
|
||||||
enable ? OA_COUNTER_RESUME : 0,
|
enable ? OA_COUNTER_RESUME : 0,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
OAC_OACONTROL,
|
||||||
|
oacontrol
|
||||||
|
},
|
||||||
{
|
{
|
||||||
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
|
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
|
||||||
regs_offset + CTX_CONTEXT_CONTROL,
|
_MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
|
||||||
_MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) |
|
enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
|
||||||
_MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
|
_MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol };
|
|
||||||
int err;
|
|
||||||
|
|
||||||
/* Set ccs select to enable programming of OAC_OACONTROL */
|
/* Set ccs select to enable programming of OAC_OACONTROL */
|
||||||
xe_mmio_write32(&stream->gt->mmio, __oa_regs(stream)->oa_ctrl,
|
xe_mmio_write32(&stream->gt->mmio, __oa_regs(stream)->oa_ctrl,
|
||||||
__oa_ccs_select(stream));
|
__oa_ccs_select(stream));
|
||||||
|
|
||||||
/* Modify stream hwe context image with regs_context */
|
return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
|
||||||
err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
|
|
||||||
regs_context, ARRAY_SIZE(regs_context));
|
|
||||||
if (err)
|
|
||||||
return err;
|
|
||||||
|
|
||||||
/* Apply reg_lri using LRI */
|
|
||||||
return xe_oa_load_with_lri(stream, ®_lri);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable)
|
static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable)
|
||||||
@ -2066,8 +2022,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
|
|||||||
if (XE_IOCTL_DBG(oa->xe, !param.exec_q))
|
if (XE_IOCTL_DBG(oa->xe, !param.exec_q))
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
|
||||||
if (param.exec_q->width > 1)
|
if (XE_IOCTL_DBG(oa->xe, param.exec_q->width > 1))
|
||||||
drm_dbg(&oa->xe->drm, "exec_q->width > 1, programming only exec_q->lrc[0]\n");
|
return -EOPNOTSUPP;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -221,7 +221,10 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
|
|||||||
|
|
||||||
static u32 get_ppgtt_flag(struct xe_sched_job *job)
|
static u32 get_ppgtt_flag(struct xe_sched_job *job)
|
||||||
{
|
{
|
||||||
return job->q->vm ? BIT(8) : 0;
|
if (job->q->vm && !job->ggtt)
|
||||||
|
return BIT(8);
|
||||||
|
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
|
static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
|
||||||
|
@ -56,6 +56,8 @@ struct xe_sched_job {
|
|||||||
u32 migrate_flush_flags;
|
u32 migrate_flush_flags;
|
||||||
/** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
|
/** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
|
||||||
bool ring_ops_flush_tlb;
|
bool ring_ops_flush_tlb;
|
||||||
|
/** @ggtt: mapped in ggtt. */
|
||||||
|
bool ggtt;
|
||||||
/** @ptrs: per instance pointers. */
|
/** @ptrs: per instance pointers. */
|
||||||
struct xe_job_ptrs ptrs[];
|
struct xe_job_ptrs ptrs[];
|
||||||
};
|
};
|
||||||
|
@ -182,7 +182,7 @@ struct tmp51x_data {
|
|||||||
struct regmap *regmap;
|
struct regmap *regmap;
|
||||||
};
|
};
|
||||||
|
|
||||||
// Set the shift based on the gain 8=4, 4=3, 2=2, 1=1
|
// Set the shift based on the gain: 8 -> 1, 4 -> 2, 2 -> 3, 1 -> 4
|
||||||
static inline u8 tmp51x_get_pga_shift(struct tmp51x_data *data)
|
static inline u8 tmp51x_get_pga_shift(struct tmp51x_data *data)
|
||||||
{
|
{
|
||||||
return 5 - ffs(data->pga_gain);
|
return 5 - ffs(data->pga_gain);
|
||||||
@ -204,7 +204,9 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
|
|||||||
* 2's complement number shifted by one to four depending
|
* 2's complement number shifted by one to four depending
|
||||||
* on the pga gain setting. 1lsb = 10uV
|
* on the pga gain setting. 1lsb = 10uV
|
||||||
*/
|
*/
|
||||||
*val = sign_extend32(regval, 17 - tmp51x_get_pga_shift(data));
|
*val = sign_extend32(regval,
|
||||||
|
reg == TMP51X_SHUNT_CURRENT_RESULT ?
|
||||||
|
16 - tmp51x_get_pga_shift(data) : 15);
|
||||||
*val = DIV_ROUND_CLOSEST(*val * 10 * MILLI, data->shunt_uohms);
|
*val = DIV_ROUND_CLOSEST(*val * 10 * MILLI, data->shunt_uohms);
|
||||||
break;
|
break;
|
||||||
case TMP51X_BUS_VOLTAGE_RESULT:
|
case TMP51X_BUS_VOLTAGE_RESULT:
|
||||||
@ -220,7 +222,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
|
|||||||
break;
|
break;
|
||||||
case TMP51X_BUS_CURRENT_RESULT:
|
case TMP51X_BUS_CURRENT_RESULT:
|
||||||
// Current = (ShuntVoltage * CalibrationRegister) / 4096
|
// Current = (ShuntVoltage * CalibrationRegister) / 4096
|
||||||
*val = sign_extend32(regval, 16) * data->curr_lsb_ua;
|
*val = sign_extend32(regval, 15) * (long)data->curr_lsb_ua;
|
||||||
*val = DIV_ROUND_CLOSEST(*val, MILLI);
|
*val = DIV_ROUND_CLOSEST(*val, MILLI);
|
||||||
break;
|
break;
|
||||||
case TMP51X_LOCAL_TEMP_RESULT:
|
case TMP51X_LOCAL_TEMP_RESULT:
|
||||||
@ -232,7 +234,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
|
|||||||
case TMP51X_REMOTE_TEMP_LIMIT_2:
|
case TMP51X_REMOTE_TEMP_LIMIT_2:
|
||||||
case TMP513_REMOTE_TEMP_LIMIT_3:
|
case TMP513_REMOTE_TEMP_LIMIT_3:
|
||||||
// 1lsb = 0.0625 degrees centigrade
|
// 1lsb = 0.0625 degrees centigrade
|
||||||
*val = sign_extend32(regval, 16) >> TMP51X_TEMP_SHIFT;
|
*val = sign_extend32(regval, 15) >> TMP51X_TEMP_SHIFT;
|
||||||
*val = DIV_ROUND_CLOSEST(*val * 625, 10);
|
*val = DIV_ROUND_CLOSEST(*val * 625, 10);
|
||||||
break;
|
break;
|
||||||
case TMP51X_N_FACTOR_AND_HYST_1:
|
case TMP51X_N_FACTOR_AND_HYST_1:
|
||||||
|
@ -335,6 +335,7 @@ static const struct of_device_id i2c_imx_dt_ids[] = {
|
|||||||
{ .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, },
|
{ .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, },
|
||||||
{ .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, },
|
{ .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, },
|
||||||
{ .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, },
|
{ .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, },
|
||||||
|
{ .compatible = "fsl,imx7d-i2c", .data = &imx6_i2c_hwdata, },
|
||||||
{ .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, },
|
{ .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, },
|
||||||
{ .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, },
|
{ .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, },
|
||||||
{ .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, },
|
{ .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, },
|
||||||
@ -532,22 +533,20 @@ static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx)
|
|||||||
|
|
||||||
static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic)
|
static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic)
|
||||||
{
|
{
|
||||||
|
bool multi_master = i2c_imx->multi_master;
|
||||||
unsigned long orig_jiffies = jiffies;
|
unsigned long orig_jiffies = jiffies;
|
||||||
unsigned int temp;
|
unsigned int temp;
|
||||||
|
|
||||||
if (!i2c_imx->multi_master)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
while (1) {
|
while (1) {
|
||||||
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
|
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
|
||||||
|
|
||||||
/* check for arbitration lost */
|
/* check for arbitration lost */
|
||||||
if (temp & I2SR_IAL) {
|
if (multi_master && (temp & I2SR_IAL)) {
|
||||||
i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
|
i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
|
||||||
return -EAGAIN;
|
return -EAGAIN;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (for_busy && (temp & I2SR_IBB)) {
|
if (for_busy && (!multi_master || (temp & I2SR_IBB))) {
|
||||||
i2c_imx->stopped = 0;
|
i2c_imx->stopped = 0;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -93,27 +93,35 @@
|
|||||||
* @base: pointer to register struct
|
* @base: pointer to register struct
|
||||||
* @dev: device reference
|
* @dev: device reference
|
||||||
* @i2c_clk: clock reference for i2c input clock
|
* @i2c_clk: clock reference for i2c input clock
|
||||||
|
* @msg_queue: pointer to the messages requiring sending
|
||||||
* @buf: pointer to msg buffer for easier use
|
* @buf: pointer to msg buffer for easier use
|
||||||
* @msg_complete: xfer completion object
|
* @msg_complete: xfer completion object
|
||||||
* @adapter: core i2c abstraction
|
* @adapter: core i2c abstraction
|
||||||
* @msg_err: error code for completed message
|
* @msg_err: error code for completed message
|
||||||
* @bus_clk_rate: current i2c bus clock rate
|
* @bus_clk_rate: current i2c bus clock rate
|
||||||
* @isr_status: cached copy of local ISR status
|
* @isr_status: cached copy of local ISR status
|
||||||
|
* @total_num: total number of messages to be sent/received
|
||||||
|
* @current_num: index of the current message being sent/received
|
||||||
* @msg_len: number of bytes transferred in msg
|
* @msg_len: number of bytes transferred in msg
|
||||||
* @addr: address of the current slave
|
* @addr: address of the current slave
|
||||||
|
* @restart_needed: whether or not a repeated start is required after current message
|
||||||
*/
|
*/
|
||||||
struct mchp_corei2c_dev {
|
struct mchp_corei2c_dev {
|
||||||
void __iomem *base;
|
void __iomem *base;
|
||||||
struct device *dev;
|
struct device *dev;
|
||||||
struct clk *i2c_clk;
|
struct clk *i2c_clk;
|
||||||
|
struct i2c_msg *msg_queue;
|
||||||
u8 *buf;
|
u8 *buf;
|
||||||
struct completion msg_complete;
|
struct completion msg_complete;
|
||||||
struct i2c_adapter adapter;
|
struct i2c_adapter adapter;
|
||||||
int msg_err;
|
int msg_err;
|
||||||
|
int total_num;
|
||||||
|
int current_num;
|
||||||
u32 bus_clk_rate;
|
u32 bus_clk_rate;
|
||||||
u32 isr_status;
|
u32 isr_status;
|
||||||
u16 msg_len;
|
u16 msg_len;
|
||||||
u8 addr;
|
u8 addr;
|
||||||
|
bool restart_needed;
|
||||||
};
|
};
|
||||||
|
|
||||||
static void mchp_corei2c_core_disable(struct mchp_corei2c_dev *idev)
|
static void mchp_corei2c_core_disable(struct mchp_corei2c_dev *idev)
|
||||||
@ -222,6 +230,47 @@ static int mchp_corei2c_fill_tx(struct mchp_corei2c_dev *idev)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void mchp_corei2c_next_msg(struct mchp_corei2c_dev *idev)
|
||||||
|
{
|
||||||
|
struct i2c_msg *this_msg;
|
||||||
|
u8 ctrl;
|
||||||
|
|
||||||
|
if (idev->current_num >= idev->total_num) {
|
||||||
|
complete(&idev->msg_complete);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If there's been an error, the isr needs to return control
|
||||||
|
* to the "main" part of the driver, so as not to keep sending
|
||||||
|
* messages once it completes and clears the SI bit.
|
||||||
|
*/
|
||||||
|
if (idev->msg_err) {
|
||||||
|
complete(&idev->msg_complete);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
this_msg = idev->msg_queue++;
|
||||||
|
|
||||||
|
if (idev->current_num < (idev->total_num - 1)) {
|
||||||
|
struct i2c_msg *next_msg = idev->msg_queue;
|
||||||
|
|
||||||
|
idev->restart_needed = next_msg->flags & I2C_M_RD;
|
||||||
|
} else {
|
||||||
|
idev->restart_needed = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
idev->addr = i2c_8bit_addr_from_msg(this_msg);
|
||||||
|
idev->msg_len = this_msg->len;
|
||||||
|
idev->buf = this_msg->buf;
|
||||||
|
|
||||||
|
ctrl = readb(idev->base + CORE_I2C_CTRL);
|
||||||
|
ctrl |= CTRL_STA;
|
||||||
|
writeb(ctrl, idev->base + CORE_I2C_CTRL);
|
||||||
|
|
||||||
|
idev->current_num++;
|
||||||
|
}
|
||||||
|
|
||||||
static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
|
static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
|
||||||
{
|
{
|
||||||
u32 status = idev->isr_status;
|
u32 status = idev->isr_status;
|
||||||
@ -238,8 +287,6 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
|
|||||||
ctrl &= ~CTRL_STA;
|
ctrl &= ~CTRL_STA;
|
||||||
writeb(idev->addr, idev->base + CORE_I2C_DATA);
|
writeb(idev->addr, idev->base + CORE_I2C_DATA);
|
||||||
writeb(ctrl, idev->base + CORE_I2C_CTRL);
|
writeb(ctrl, idev->base + CORE_I2C_CTRL);
|
||||||
if (idev->msg_len == 0)
|
|
||||||
finished = true;
|
|
||||||
break;
|
break;
|
||||||
case STATUS_M_ARB_LOST:
|
case STATUS_M_ARB_LOST:
|
||||||
idev->msg_err = -EAGAIN;
|
idev->msg_err = -EAGAIN;
|
||||||
@ -247,10 +294,14 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
|
|||||||
break;
|
break;
|
||||||
case STATUS_M_SLAW_ACK:
|
case STATUS_M_SLAW_ACK:
|
||||||
case STATUS_M_TX_DATA_ACK:
|
case STATUS_M_TX_DATA_ACK:
|
||||||
if (idev->msg_len > 0)
|
if (idev->msg_len > 0) {
|
||||||
mchp_corei2c_fill_tx(idev);
|
mchp_corei2c_fill_tx(idev);
|
||||||
else
|
} else {
|
||||||
last_byte = true;
|
if (idev->restart_needed)
|
||||||
|
finished = true;
|
||||||
|
else
|
||||||
|
last_byte = true;
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
case STATUS_M_TX_DATA_NACK:
|
case STATUS_M_TX_DATA_NACK:
|
||||||
case STATUS_M_SLAR_NACK:
|
case STATUS_M_SLAR_NACK:
|
||||||
@ -287,7 +338,7 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
|
|||||||
mchp_corei2c_stop(idev);
|
mchp_corei2c_stop(idev);
|
||||||
|
|
||||||
if (last_byte || finished)
|
if (last_byte || finished)
|
||||||
complete(&idev->msg_complete);
|
mchp_corei2c_next_msg(idev);
|
||||||
|
|
||||||
return IRQ_HANDLED;
|
return IRQ_HANDLED;
|
||||||
}
|
}
|
||||||
@ -311,21 +362,48 @@ static irqreturn_t mchp_corei2c_isr(int irq, void *_dev)
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev,
|
static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
|
||||||
struct i2c_msg *msg)
|
int num)
|
||||||
{
|
{
|
||||||
u8 ctrl;
|
struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
|
||||||
|
struct i2c_msg *this_msg = msgs;
|
||||||
unsigned long time_left;
|
unsigned long time_left;
|
||||||
|
u8 ctrl;
|
||||||
idev->addr = i2c_8bit_addr_from_msg(msg);
|
|
||||||
idev->msg_len = msg->len;
|
|
||||||
idev->buf = msg->buf;
|
|
||||||
idev->msg_err = 0;
|
|
||||||
|
|
||||||
reinit_completion(&idev->msg_complete);
|
|
||||||
|
|
||||||
mchp_corei2c_core_enable(idev);
|
mchp_corei2c_core_enable(idev);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The isr controls the flow of a transfer, this info needs to be saved
|
||||||
|
* to a location that it can access the queue information from.
|
||||||
|
*/
|
||||||
|
idev->restart_needed = false;
|
||||||
|
idev->msg_queue = msgs;
|
||||||
|
idev->total_num = num;
|
||||||
|
idev->current_num = 0;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* But the first entry to the isr is triggered by the start in this
|
||||||
|
* function, so the first message needs to be "dequeued".
|
||||||
|
*/
|
||||||
|
idev->addr = i2c_8bit_addr_from_msg(this_msg);
|
||||||
|
idev->msg_len = this_msg->len;
|
||||||
|
idev->buf = this_msg->buf;
|
||||||
|
idev->msg_err = 0;
|
||||||
|
|
||||||
|
if (idev->total_num > 1) {
|
||||||
|
struct i2c_msg *next_msg = msgs + 1;
|
||||||
|
|
||||||
|
idev->restart_needed = next_msg->flags & I2C_M_RD;
|
||||||
|
}
|
||||||
|
|
||||||
|
idev->current_num++;
|
||||||
|
idev->msg_queue++;
|
||||||
|
|
||||||
|
reinit_completion(&idev->msg_complete);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Send the first start to pass control to the isr
|
||||||
|
*/
|
||||||
ctrl = readb(idev->base + CORE_I2C_CTRL);
|
ctrl = readb(idev->base + CORE_I2C_CTRL);
|
||||||
ctrl |= CTRL_STA;
|
ctrl |= CTRL_STA;
|
||||||
writeb(ctrl, idev->base + CORE_I2C_CTRL);
|
writeb(ctrl, idev->base + CORE_I2C_CTRL);
|
||||||
@ -335,20 +413,8 @@ static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev,
|
|||||||
if (!time_left)
|
if (!time_left)
|
||||||
return -ETIMEDOUT;
|
return -ETIMEDOUT;
|
||||||
|
|
||||||
return idev->msg_err;
|
if (idev->msg_err)
|
||||||
}
|
return idev->msg_err;
|
||||||
|
|
||||||
static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
|
|
||||||
int num)
|
|
||||||
{
|
|
||||||
struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
|
|
||||||
int i, ret;
|
|
||||||
|
|
||||||
for (i = 0; i < num; i++) {
|
|
||||||
ret = mchp_corei2c_xfer_msg(idev, msgs++);
|
|
||||||
if (ret)
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
return num;
|
return num;
|
||||||
}
|
}
|
||||||
|
@ -690,6 +690,7 @@ cma_validate_port(struct ib_device *device, u32 port,
|
|||||||
int bound_if_index = dev_addr->bound_dev_if;
|
int bound_if_index = dev_addr->bound_dev_if;
|
||||||
int dev_type = dev_addr->dev_type;
|
int dev_type = dev_addr->dev_type;
|
||||||
struct net_device *ndev = NULL;
|
struct net_device *ndev = NULL;
|
||||||
|
struct net_device *pdev = NULL;
|
||||||
|
|
||||||
if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
|
if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
|
||||||
goto out;
|
goto out;
|
||||||
@ -714,6 +715,21 @@ cma_validate_port(struct ib_device *device, u32 port,
|
|||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
ndev = rcu_dereference(sgid_attr->ndev);
|
ndev = rcu_dereference(sgid_attr->ndev);
|
||||||
|
if (ndev->ifindex != bound_if_index) {
|
||||||
|
pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index);
|
||||||
|
if (pdev) {
|
||||||
|
if (is_vlan_dev(pdev)) {
|
||||||
|
pdev = vlan_dev_real_dev(pdev);
|
||||||
|
if (ndev->ifindex == pdev->ifindex)
|
||||||
|
bound_if_index = pdev->ifindex;
|
||||||
|
}
|
||||||
|
if (is_vlan_dev(ndev)) {
|
||||||
|
pdev = vlan_dev_real_dev(ndev);
|
||||||
|
if (bound_if_index == pdev->ifindex)
|
||||||
|
bound_if_index = ndev->ifindex;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
if (!net_eq(dev_net(ndev), dev_addr->net) ||
|
if (!net_eq(dev_net(ndev), dev_addr->net) ||
|
||||||
ndev->ifindex != bound_if_index) {
|
ndev->ifindex != bound_if_index) {
|
||||||
rdma_put_gid_attr(sgid_attr);
|
rdma_put_gid_attr(sgid_attr);
|
||||||
|
@ -2833,8 +2833,8 @@ int rdma_nl_notify_event(struct ib_device *device, u32 port_num,
|
|||||||
enum rdma_nl_notify_event_type type)
|
enum rdma_nl_notify_event_type type)
|
||||||
{
|
{
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
int ret = -EMSGSIZE;
|
||||||
struct net *net;
|
struct net *net;
|
||||||
int ret = 0;
|
|
||||||
void *nlh;
|
void *nlh;
|
||||||
|
|
||||||
net = read_pnet(&device->coredev.rdma_net);
|
net = read_pnet(&device->coredev.rdma_net);
|
||||||
|
@ -161,7 +161,7 @@ static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
|
|||||||
{
|
{
|
||||||
const void __user *res = iter->cur;
|
const void __user *res = iter->cur;
|
||||||
|
|
||||||
if (iter->cur + len > iter->end)
|
if (len > iter->end - iter->cur)
|
||||||
return (void __force __user *)ERR_PTR(-ENOSPC);
|
return (void __force __user *)ERR_PTR(-ENOSPC);
|
||||||
iter->cur += len;
|
iter->cur += len;
|
||||||
return res;
|
return res;
|
||||||
@ -2008,11 +2008,13 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
|
|||||||
ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
|
ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
|
wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
|
||||||
|
cmd.wr_count));
|
||||||
if (IS_ERR(wqes))
|
if (IS_ERR(wqes))
|
||||||
return PTR_ERR(wqes);
|
return PTR_ERR(wqes);
|
||||||
sgls = uverbs_request_next_ptr(
|
sgls = uverbs_request_next_ptr(&iter,
|
||||||
&iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
|
size_mul(cmd.sge_count,
|
||||||
|
sizeof(struct ib_uverbs_sge)));
|
||||||
if (IS_ERR(sgls))
|
if (IS_ERR(sgls))
|
||||||
return PTR_ERR(sgls);
|
return PTR_ERR(sgls);
|
||||||
ret = uverbs_request_finish(&iter);
|
ret = uverbs_request_finish(&iter);
|
||||||
@ -2198,11 +2200,11 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
|
|||||||
if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
|
if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
|
||||||
return ERR_PTR(-EINVAL);
|
return ERR_PTR(-EINVAL);
|
||||||
|
|
||||||
wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
|
wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
|
||||||
if (IS_ERR(wqes))
|
if (IS_ERR(wqes))
|
||||||
return ERR_CAST(wqes);
|
return ERR_CAST(wqes);
|
||||||
sgls = uverbs_request_next_ptr(
|
sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
|
||||||
iter, sge_count * sizeof(struct ib_uverbs_sge));
|
sizeof(struct ib_uverbs_sge)));
|
||||||
if (IS_ERR(sgls))
|
if (IS_ERR(sgls))
|
||||||
return ERR_CAST(sgls);
|
return ERR_CAST(sgls);
|
||||||
ret = uverbs_request_finish(iter);
|
ret = uverbs_request_finish(iter);
|
||||||
|
@ -199,7 +199,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
|
|||||||
|
|
||||||
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
|
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
|
||||||
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
|
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
|
||||||
ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
|
ib_attr->hw_ver = rdev->en_dev->pdev->revision;
|
||||||
ib_attr->max_qp = dev_attr->max_qp;
|
ib_attr->max_qp = dev_attr->max_qp;
|
||||||
ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
|
ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
|
||||||
ib_attr->device_cap_flags =
|
ib_attr->device_cap_flags =
|
||||||
@ -967,13 +967,13 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
|
|||||||
unsigned int flags;
|
unsigned int flags;
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
|
bnxt_re_debug_rem_qpinfo(rdev, qp);
|
||||||
|
|
||||||
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
|
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
|
||||||
|
|
||||||
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
|
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
|
||||||
if (rc) {
|
if (rc)
|
||||||
ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
|
ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (rdma_is_kernel_res(&qp->ib_qp.res)) {
|
if (rdma_is_kernel_res(&qp->ib_qp.res)) {
|
||||||
flags = bnxt_re_lock_cqs(qp);
|
flags = bnxt_re_lock_cqs(qp);
|
||||||
@ -983,11 +983,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
|
|||||||
|
|
||||||
bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
|
bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
|
||||||
|
|
||||||
if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
|
if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
|
||||||
rc = bnxt_re_destroy_gsi_sqp(qp);
|
bnxt_re_destroy_gsi_sqp(qp);
|
||||||
if (rc)
|
|
||||||
return rc;
|
|
||||||
}
|
|
||||||
|
|
||||||
mutex_lock(&rdev->qp_lock);
|
mutex_lock(&rdev->qp_lock);
|
||||||
list_del(&qp->list);
|
list_del(&qp->list);
|
||||||
@ -998,8 +995,6 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
|
|||||||
else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
|
else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
|
||||||
atomic_dec(&rdev->stats.res.ud_qp_count);
|
atomic_dec(&rdev->stats.res.ud_qp_count);
|
||||||
|
|
||||||
bnxt_re_debug_rem_qpinfo(rdev, qp);
|
|
||||||
|
|
||||||
ib_umem_release(qp->rumem);
|
ib_umem_release(qp->rumem);
|
||||||
ib_umem_release(qp->sumem);
|
ib_umem_release(qp->sumem);
|
||||||
|
|
||||||
@ -2167,18 +2162,20 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (qp_attr_mask & IB_QP_PATH_MTU) {
|
if (qp_attr->qp_state == IB_QPS_RTR) {
|
||||||
qp->qplib_qp.modify_flags |=
|
enum ib_mtu qpmtu;
|
||||||
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
|
|
||||||
qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
|
qpmtu = iboe_get_mtu(rdev->netdev->mtu);
|
||||||
qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
|
if (qp_attr_mask & IB_QP_PATH_MTU) {
|
||||||
} else if (qp_attr->qp_state == IB_QPS_RTR) {
|
if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
|
||||||
qp->qplib_qp.modify_flags |=
|
ib_mtu_enum_to_int(qpmtu))
|
||||||
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
|
return -EINVAL;
|
||||||
qp->qplib_qp.path_mtu =
|
qpmtu = qp_attr->path_mtu;
|
||||||
__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
|
}
|
||||||
qp->qplib_qp.mtu =
|
|
||||||
ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
|
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
|
||||||
|
qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
|
||||||
|
qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (qp_attr_mask & IB_QP_TIMEOUT) {
|
if (qp_attr_mask & IB_QP_TIMEOUT) {
|
||||||
@ -2328,6 +2325,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
|
|||||||
qp_attr->retry_cnt = qplib_qp->retry_cnt;
|
qp_attr->retry_cnt = qplib_qp->retry_cnt;
|
||||||
qp_attr->rnr_retry = qplib_qp->rnr_retry;
|
qp_attr->rnr_retry = qplib_qp->rnr_retry;
|
||||||
qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
|
qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
|
||||||
|
qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id);
|
||||||
qp_attr->rq_psn = qplib_qp->rq.psn;
|
qp_attr->rq_psn = qplib_qp->rq.psn;
|
||||||
qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
|
qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
|
||||||
qp_attr->sq_psn = qplib_qp->sq.psn;
|
qp_attr->sq_psn = qplib_qp->sq.psn;
|
||||||
@ -2824,7 +2822,8 @@ bad:
|
|||||||
wr = wr->next;
|
wr = wr->next;
|
||||||
}
|
}
|
||||||
bnxt_qplib_post_send_db(&qp->qplib_qp);
|
bnxt_qplib_post_send_db(&qp->qplib_qp);
|
||||||
bnxt_ud_qp_hw_stall_workaround(qp);
|
if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
|
||||||
|
bnxt_ud_qp_hw_stall_workaround(qp);
|
||||||
spin_unlock_irqrestore(&qp->sq_lock, flags);
|
spin_unlock_irqrestore(&qp->sq_lock, flags);
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
@ -2936,7 +2935,8 @@ bad:
|
|||||||
wr = wr->next;
|
wr = wr->next;
|
||||||
}
|
}
|
||||||
bnxt_qplib_post_send_db(&qp->qplib_qp);
|
bnxt_qplib_post_send_db(&qp->qplib_qp);
|
||||||
bnxt_ud_qp_hw_stall_workaround(qp);
|
if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
|
||||||
|
bnxt_ud_qp_hw_stall_workaround(qp);
|
||||||
spin_unlock_irqrestore(&qp->sq_lock, flags);
|
spin_unlock_irqrestore(&qp->sq_lock, flags);
|
||||||
|
|
||||||
return rc;
|
return rc;
|
||||||
|
@ -268,6 +268,10 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
|
|||||||
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
|
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
|
||||||
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
|
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
|
||||||
|
|
||||||
|
static inline u32 __to_ib_port_num(u16 port_id)
|
||||||
|
{
|
||||||
|
return (u32)port_id + 1;
|
||||||
|
}
|
||||||
|
|
||||||
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
|
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
|
||||||
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
|
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
|
||||||
|
@ -1715,11 +1715,8 @@ static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
|
|||||||
|
|
||||||
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
|
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
|
||||||
{
|
{
|
||||||
int mask = IB_QP_STATE;
|
|
||||||
struct ib_qp_attr qp_attr;
|
|
||||||
struct bnxt_re_qp *qp;
|
struct bnxt_re_qp *qp;
|
||||||
|
|
||||||
qp_attr.qp_state = IB_QPS_ERR;
|
|
||||||
mutex_lock(&rdev->qp_lock);
|
mutex_lock(&rdev->qp_lock);
|
||||||
list_for_each_entry(qp, &rdev->qp_list, list) {
|
list_for_each_entry(qp, &rdev->qp_list, list) {
|
||||||
/* Modify the state of all QPs except QP1/Shadow QP */
|
/* Modify the state of all QPs except QP1/Shadow QP */
|
||||||
@ -1727,12 +1724,9 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
|
|||||||
if (qp->qplib_qp.state !=
|
if (qp->qplib_qp.state !=
|
||||||
CMDQ_MODIFY_QP_NEW_STATE_RESET &&
|
CMDQ_MODIFY_QP_NEW_STATE_RESET &&
|
||||||
qp->qplib_qp.state !=
|
qp->qplib_qp.state !=
|
||||||
CMDQ_MODIFY_QP_NEW_STATE_ERR) {
|
CMDQ_MODIFY_QP_NEW_STATE_ERR)
|
||||||
bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
|
bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
|
||||||
1, IB_EVENT_QP_FATAL);
|
1, IB_EVENT_QP_FATAL);
|
||||||
bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
|
|
||||||
NULL);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
mutex_unlock(&rdev->qp_lock);
|
mutex_unlock(&rdev->qp_lock);
|
||||||
|
Some files were not shown because too many files have changed in this diff.