Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2024-12-28 16:53:49 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR (net-6.12-rc6).

Conflicts:

drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
  cbe84e9ad5 ("wifi: iwlwifi: mvm: really send iwl_txpower_constraints_cmd")
  188a1bf894 ("wifi: mac80211: re-order assigning channel in activate links")
https://lore.kernel.org/all/20241028123621.7bbb131b@canb.auug.org.au/

net/mac80211/cfg.c
  c4382d5ca1 ("wifi: mac80211: update the right link for tx power")
  8dd0498983 ("wifi: mac80211: Fix setting txpower with emulate_chanctx")

drivers/net/ethernet/intel/ice/ice_ptp_hw.h
  6e58c33106 ("ice: fix crash on probe for DPLL enabled E810 LOM")
  e4291b64e1 ("ice: Align E810T GPIO to other products")
  ebb2693f8f ("ice: Read SDP section from NVM for pin definitions")
  ac532f4f42 ("ice: Cleanup unused declarations")
https://lore.kernel.org/all/20241030120524.1ee1af18@canb.auug.org.au/

No adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 5b1c965956
@@ -425,8 +425,8 @@ This governor exposes only one tunable:

``rate_limit_us``
    Minimum time (in microseconds) that has to pass between two consecutive
    runs of governor computations (default: 1000 times the scaling driver's
    transition latency).
    runs of governor computations (default: 1.5 times the scaling driver's
    transition latency or the maximum 2ms).

    The purpose of this tunable is to reduce the scheduler context overhead
    of the governor which might be excessive without it.
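A minimal shell sketch of inspecting and raising this limit (the per-policy
``policy0/schedutil`` sysfs path is an assumption about the running system;
the directory exists only while this governor is attached to that policy)::

    # cat /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
    # echo 5000 > /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us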
@@ -474,17 +474,17 @@ This governor exposes the following tunables:

    This is how often the governor's worker routine should run, in
    microseconds.

    Typically, it is set to values of the order of 10000 (10 ms). Its
    default value is equal to the value of ``cpuinfo_transition_latency``
    for each policy this governor is attached to (but since the unit here
    is greater by 1000, this means that the time represented by
    ``sampling_rate`` is 1000 times greater than the transition latency by
    default).
    Typically, it is set to values of the order of 2000 (2 ms). Its
    default value is to add a 50% breathing room
    to ``cpuinfo_transition_latency`` on each policy this governor is
    attached to. The minimum is typically the length of two scheduler
    ticks.

    If this tunable is per-policy, the following shell command sets the time
    represented by it to be 750 times as high as the transition latency::
    represented by it to be 1.5 times as high as the transition latency
    (the default)::

    # echo $(($(cat cpuinfo_transition_latency) * 750 / 1000)) > ondemand/sampling_rate
    # echo $(($(cat cpuinfo_transition_latency) * 3 / 2)) > ondemand/sampling_rate

``up_threshold``
    If the estimated CPU load is above this value (in percent), the governor
@@ -102,21 +102,21 @@ properties:
default: 2

interrupts:
oneOf:
- minItems: 1
items:
- description: TX interrupt
- description: RX interrupt
- items:
- description: common/combined interrupt
minItems: 1
maxItems: 2

interrupt-names:
oneOf:
- minItems: 1
- description: TX interrupt
const: tx
- description: RX interrupt
const: rx
- description: TX and RX interrupts
items:
- const: tx
- const: rx
- const: common
- description: Common/combined interrupt
const: common

fck_parent:
$ref: /schemas/types.yaml#/definitions/string

@@ -48,6 +48,10 @@ properties:
- const: mclk_rx
- const: hclk

port:
$ref: audio-graph-port.yaml#
unevaluatedProperties: false

resets:
maxItems: 1
@@ -16,7 +16,7 @@ ii) transmit network traffic, or any other that needs raw

Howto can be found at:

    https://sites.google.com/site/packetmmap/
    https://web.archive.org/web/20220404160947/https://sites.google.com/site/packetmmap/

Please send your comments to
    - Ulisses Alonso Camaró <uaca@i.hate.spam.alumni.uv.es>

@@ -166,7 +166,8 @@ As capture, each frame contains two parts::

    /* bind socket to eth0 */
    bind(this->socket, (struct sockaddr *)&my_addr, sizeof(struct sockaddr_ll));

A complete tutorial is available at: https://sites.google.com/site/packetmmap/
A complete tutorial is available at:
https://web.archive.org/web/20220404160947/https://sites.google.com/site/packetmmap/

By default, the user should put data at::
@@ -23,177 +23,166 @@ applications can additionally seal security critical data at runtime.
A similar feature already exists in the XNU kernel with the
VM_FLAGS_PERMANENT flag [1] and on OpenBSD with the mimmutable syscall [2].

User API
========
mseal()
-----------
The mseal() syscall has the following signature:
SYSCALL
=======
mseal syscall signature
-----------------------
``int mseal(void \* addr, size_t len, unsigned long flags)``

``int mseal(void \*addr, size_t len, unsigned long flags)``

**addr**/**len**: virtual memory address range.
The address range set by **addr**/**len** must meet:
- The start address must be in an allocated VMA.
- The start address must be page aligned.
- The end address (**addr** + **len**) must be in an allocated VMA.
- no gap (unallocated memory) between start and end address.

**addr/len**: virtual memory address range.
The ``len`` will be paged aligned implicitly by the kernel.

The address range set by ``addr``/``len`` must meet:
- The start address must be in an allocated VMA.
- The start address must be page aligned.
- The end address (``addr`` + ``len``) must be in an allocated VMA.
- no gap (unallocated memory) between start and end address.
**flags**: reserved for future use.

The ``len`` will be paged aligned implicitly by the kernel.
**Return values**:
- **0**: Success.
- **-EINVAL**:
  * Invalid input ``flags``.
  * The start address (``addr``) is not page aligned.
  * Address range (``addr`` + ``len``) overflow.
- **-ENOMEM**:
  * The start address (``addr``) is not allocated.
  * The end address (``addr`` + ``len``) is not allocated.
  * A gap (unallocated memory) between start and end address.
- **-EPERM**:
  * sealing is supported only on 64-bit CPUs, 32-bit is not supported.

**flags**: reserved for future use.
**Note about error return**:
- For above error cases, users can expect the given memory range is
  unmodified, i.e. no partial update.
- There might be other internal errors/cases not listed here, e.g.
  error during merging/splitting VMAs, or the process reaching the maximum
  number of supported VMAs. In those cases, partial updates to the given
  memory range could happen. However, those cases should be rare.

**return values**:
**Architecture support**:
mseal only works on 64-bit CPUs, not 32-bit CPUs.

- ``0``: Success.
**Idempotent**:
users can call mseal multiple times. mseal on an already sealed memory
is a no-action (not error).
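A minimal sketch of the call and of this idempotent behaviour (assuming a libc
without an ``mseal()`` wrapper, so the raw ``__NR_mseal`` syscall number from
recent kernel headers is used, and assuming a 4096-byte page size; error
handling is reduced to asserts)::

    #include <assert.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = 4096;
            /* page-aligned, fully mapped range, as mseal() requires */
            void *ptr = mmap(NULL, len, PROT_READ,
                             MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
            assert(ptr != MAP_FAILED);

            /* flags is reserved for future use and must be 0 */
            assert(syscall(__NR_mseal, ptr, len, 0) == 0);
            /* sealing an already sealed range is a no-op, not an error */
            assert(syscall(__NR_mseal, ptr, len, 0) == 0);
            return 0;
    }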

- ``-EINVAL``:
  - Invalid input ``flags``.
  - The start address (``addr``) is not page aligned.
  - Address range (``addr`` + ``len``) overflow.
**no munseal**
Once mapping is sealed, it can't be unsealed. The kernel should never
have munseal, this is consistent with other sealing feature, e.g.
F_SEAL_SEAL for file.

- ``-ENOMEM``:
  - The start address (``addr``) is not allocated.
  - The end address (``addr`` + ``len``) is not allocated.
  - A gap (unallocated memory) between start and end address.
Blocked mm syscall for sealed mapping
-------------------------------------
It might be important to note: **once the mapping is sealed, it will
stay in the process's memory until the process terminates**.

- ``-EPERM``:
  - sealing is supported only on 64-bit CPUs, 32-bit is not supported.
Example::

- For above error cases, users can expect the given memory range is
  unmodified, i.e. no partial update.
    ptr = mmap(0, 4096, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
    rc = mseal(ptr, 4096, 0);
    /* munmap will fail */
    rc = munmap(ptr, 4096);
    assert(rc < 0);

- There might be other internal errors/cases not listed here, e.g.
  error during merging/splitting VMAs, or the process reaching the max
  number of supported VMAs. In those cases, partial updates to the given
  memory range could happen. However, those cases should be rare.
Blocked mm syscall:
- munmap
- mmap
- mremap
- mprotect and pkey_mprotect
- some destructive madvise behaviors: MADV_DONTNEED, MADV_FREE,
  MADV_DONTNEED_LOCKED, MADV_FREE, MADV_DONTFORK, MADV_WIPEONFORK

**Blocked operations after sealing**:
Unmapping, moving to another location, and shrinking the size,
via munmap() and mremap(), can leave an empty space, therefore
can be replaced with a VMA with a new set of attributes.
The first set of syscalls to block is munmap, mremap, mmap. They can
either leave an empty space in the address space, therefore allowing
replacement with a new mapping with new set of attributes, or can
overwrite the existing mapping with another mapping.

Moving or expanding a different VMA into the current location,
via mremap().
mprotect and pkey_mprotect are blocked because they changes the
protection bits (RWX) of the mapping.

Modifying a VMA via mmap(MAP_FIXED).
Certain destructive madvise behaviors, specifically MADV_DONTNEED,
MADV_FREE, MADV_DONTNEED_LOCKED, and MADV_WIPEONFORK, can introduce
risks when applied to anonymous memory by threads lacking write
permissions. Consequently, these operations are prohibited under such
conditions. The aforementioned behaviors have the potential to modify
region contents by discarding pages, effectively performing a memset(0)
operation on the anonymous memory.

Size expansion, via mremap(), does not appear to pose any
specific risks to sealed VMAs. It is included anyway because
the use case is unclear. In any case, users can rely on
merging to expand a sealed VMA.
Kernel will return -EPERM for blocked syscalls.

mprotect() and pkey_mprotect().
When blocked syscall return -EPERM due to sealing, the memory regions may
or may not be changed, depends on the syscall being blocked:

Some destructive madvice() behaviors (e.g. MADV_DONTNEED)
for anonymous memory, when users don't have write permission to the
memory. Those behaviors can alter region contents by discarding pages,
effectively a memset(0) for anonymous memory.
- munmap: munmap is atomic. If one of VMAs in the given range is
  sealed, none of VMAs are updated.
- mprotect, pkey_mprotect, madvise: partial update might happen, e.g.
  when mprotect over multiple VMAs, mprotect might update the beginning
  VMAs before reaching the sealed VMA and return -EPERM.
- mmap and mremap: undefined behavior.

Kernel will return -EPERM for blocked operations.

For blocked operations, one can expect the given address is unmodified,
i.e. no partial update. Note, this is different from existing mm
system call behaviors, where partial updates are made till an error is
found and returned to userspace. To give an example:

Assume following code sequence:

- ptr = mmap(null, 8192, PROT_NONE);
- munmap(ptr + 4096, 4096);
- ret1 = mprotect(ptr, 8192, PROT_READ);
- mseal(ptr, 4096);
- ret2 = mprotect(ptr, 8192, PROT_NONE);

ret1 will be -ENOMEM, the page from ptr is updated to PROT_READ.

ret2 will be -EPERM, the page remains to be PROT_READ.
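The same sequence as a compilable sketch (again assuming the raw
``__NR_mseal`` syscall number and a 4096-byte page size; the asserts encode
the expected outcomes described above)::

    #include <assert.h>
    #include <errno.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            void *ptr = mmap(NULL, 8192, PROT_NONE,
                             MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
            assert(ptr != MAP_FAILED);

            /* create a gap: the second page is now unallocated */
            assert(munmap((char *)ptr + 4096, 4096) == 0);

            /* fails with ENOMEM because of the gap, but the first page
               has already been changed to PROT_READ (partial update) */
            int ret1 = mprotect(ptr, 8192, PROT_READ);
            assert(ret1 == -1 && errno == ENOMEM);

            /* seal the first page */
            assert(syscall(__NR_mseal, ptr, 4096, 0) == 0);

            /* fails with EPERM; the sealed page stays PROT_READ,
               i.e. no partial update for the blocked operation */
            int ret2 = mprotect(ptr, 8192, PROT_NONE);
            assert(ret2 == -1 && errno == EPERM);
            return 0;
    }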

**Note**:

- mseal() only works on 64-bit CPUs, not 32-bit CPU.

- users can call mseal() multiple times, mseal() on an already sealed memory
  is a no-action (not error).

- munseal() is not supported.

Use cases:
==========
Use cases
=========
- glibc:
  The dynamic linker, during loading ELF executables, can apply sealing to
  non-writable memory segments.
  mapping segments.

- Chrome browser: protect some security sensitive data-structures.
- Chrome browser: protect some security sensitive data structures.

Notes on which memory to seal:
==============================

It might be important to note that sealing changes the lifetime of a mapping,
i.e. the sealed mapping won’t be unmapped till the process terminates or the
exec system call is invoked. Applications can apply sealing to any virtual
memory region from userspace, but it is crucial to thoroughly analyze the
mapping's lifetime prior to apply the sealing.
When not to use mseal
=====================
Applications can apply sealing to any virtual memory region from userspace,
but it is *crucial to thoroughly analyze the mapping's lifetime* prior to
apply the sealing. This is because the sealed mapping *won’t be unmapped*
until the process terminates or the exec system call is invoked.

For example:
- aio/shm
  aio/shm can call mmap and munmap on behalf of userspace, e.g.
  ksys_shmdt() in shm.c. The lifetimes of those mapping are not tied to
  the lifetime of the process. If those memories are sealed from userspace,
  then munmap will fail, causing leaks in VMA address space during the
  lifetime of the process.

- aio/shm
- ptr allocated by malloc (heap)
  Don't use mseal on the memory ptr return from malloc().
  malloc() is implemented by allocator, e.g. by glibc. Heap manager might
  allocate a ptr from brk or mapping created by mmap.
  If an app calls mseal on a ptr returned from malloc(), this can affect
  the heap manager's ability to manage the mappings; the outcome is
  non-deterministic.

aio/shm can call mmap()/munmap() on behalf of userspace, e.g. ksys_shmdt() in
shm.c. The lifetime of those mapping are not tied to the lifetime of the
process. If those memories are sealed from userspace, then munmap() will fail,
causing leaks in VMA address space during the lifetime of the process.
Example::

- Brk (heap)
    ptr = malloc(size);
    /* don't call mseal on ptr return from malloc. */
    mseal(ptr, size);
    /* free will success, allocator can't shrink heap lower than ptr */
    free(ptr);

Currently, userspace applications can seal parts of the heap by calling
malloc() and mseal().
let's assume following calls from user space:
mseal doesn't block
===================
In a nutshell, mseal blocks certain mm syscall from modifying some of VMA's
attributes, such as protection bits (RWX). Sealed mappings doesn't mean the
memory is immutable.

- ptr = malloc(size);
- mprotect(ptr, size, RO);
- mseal(ptr, size);
- free(ptr);

Technically, before mseal() is added, the user can change the protection of
the heap by calling mprotect(RO). As long as the user changes the protection
back to RW before free(), the memory range can be reused.

Adding mseal() into the picture, however, the heap is then sealed partially,
the user can still free it, but the memory remains to be RO. If the address
is re-used by the heap manager for another malloc, the process might crash
soon after. Therefore, it is important not to apply sealing to any memory
that might get recycled.

Furthermore, even if the application never calls the free() for the ptr,
the heap manager may invoke the brk system call to shrink the size of the
heap. In the kernel, the brk-shrink will call munmap(). Consequently,
depending on the location of the ptr, the outcome of brk-shrink is
nondeterministic.


Additional notes:
=================
As Jann Horn pointed out in [3], there are still a few ways to write
to RO memory, which is, in a way, by design. Those cases are not covered
by mseal(). If applications want to block such cases, sandbox tools (such as
seccomp, LSM, etc) might be considered.
to RO memory, which is, in a way, by design. And those could be blocked
by different security measures.

Those cases are:

- Write to read-only memory through /proc/self/mem interface.
- Write to read-only memory through ptrace (such as PTRACE_POKETEXT).
- userfaultfd.
- Write to read-only memory through /proc/self/mem interface (FOLL_FORCE).
- Write to read-only memory through ptrace (such as PTRACE_POKETEXT).
- userfaultfd.

The idea that inspired this patch comes from Stephen Röttger’s work in V8
CFI [4]. Chrome browser in ChromeOS will be the first user of this API.

Reference:
==========
[1] https://github.com/apple-oss-distributions/xnu/blob/1031c584a5e37aff177559b9f69dbd3c8c3fd30a/osfmk/mach/vm_statistics.h#L274

[2] https://man.openbsd.org/mimmutable.2

[3] https://lore.kernel.org/lkml/CAG48ez3ShUYey+ZAFsU2i1RpQn0a5eOs2hzQ426FkcgnfUGLvA@mail.gmail.com

[4] https://docs.google.com/document/d/1O2jwK4dxI3nRcOJuPYkonhTkNQfbmwdvxQMyXgeaRHo/edit#heading=h.bvaojj9fu6hc
Reference
=========
- [1] https://github.com/apple-oss-distributions/xnu/blob/1031c584a5e37aff177559b9f69dbd3c8c3fd30a/osfmk/mach/vm_statistics.h#L274
- [2] https://man.openbsd.org/mimmutable.2
- [3] https://lore.kernel.org/lkml/CAG48ez3ShUYey+ZAFsU2i1RpQn0a5eOs2hzQ426FkcgnfUGLvA@mail.gmail.com
- [4] https://docs.google.com/document/d/1O2jwK4dxI3nRcOJuPYkonhTkNQfbmwdvxQMyXgeaRHo/edit#heading=h.bvaojj9fu6hc
@@ -9723,6 +9723,7 @@ F: include/dt-bindings/gpio/
F: include/linux/gpio.h
F: include/linux/gpio/
F: include/linux/of_gpio.h
K: (devm_)?gpio_(request|free|direction|get|set)

GPIO UAPI
M: Bartosz Golaszewski <brgl@bgdev.pl>

@@ -14992,6 +14993,7 @@ F: drivers/spi/spi-at91-usart.c

MICROCHIP AUDIO ASOC DRIVERS
M: Claudiu Beznea <claudiu.beznea@tuxon.dev>
M: Andrei Simion <andrei.simion@microchip.com>
L: linux-sound@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/sound/atmel*

@@ -15107,6 +15109,7 @@ F: include/video/atmel_lcdc.h

MICROCHIP MCP16502 PMIC DRIVER
M: Claudiu Beznea <claudiu.beznea@tuxon.dev>
M: Andrei Simion <andrei.simion@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: Documentation/devicetree/bindings/regulator/microchip,mcp16502.yaml

@@ -15237,6 +15240,7 @@ F: drivers/spi/spi-atmel.*

MICROCHIP SSC DRIVER
M: Claudiu Beznea <claudiu.beznea@tuxon.dev>
M: Andrei Simion <andrei.simion@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: Documentation/devicetree/bindings/misc/atmel-ssc.txt

@@ -23167,7 +23171,7 @@ F: Documentation/devicetree/bindings/iio/adc/ti,lmp92064.yaml
F: drivers/iio/adc/ti-lmp92064.c

TI PCM3060 ASoC CODEC DRIVER
M: Kirill Marinushkin <kmarinushkin@birdec.com>
M: Kirill Marinushkin <k.marinushkin@gmail.com>
L: linux-sound@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/sound/pcm3060.txt
Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 12
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Baby Opossum Posse

# *DOCUMENTATION*
@ -2220,7 +2220,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
|
||||
emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx);
|
||||
|
||||
if (flags & BPF_TRAMP_F_CALL_ORIG) {
|
||||
emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
|
||||
/* for the first pass, assume the worst case */
|
||||
if (!ctx->image)
|
||||
ctx->idx += 4;
|
||||
else
|
||||
emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
|
||||
emit_call((const u64)__bpf_tramp_enter, ctx);
|
||||
}
|
||||
|
||||
@ -2264,7 +2268,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
|
||||
|
||||
if (flags & BPF_TRAMP_F_CALL_ORIG) {
|
||||
im->ip_epilogue = ctx->ro_image + ctx->idx;
|
||||
emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
|
||||
/* for the first pass, assume the worst case */
|
||||
if (!ctx->image)
|
||||
ctx->idx += 4;
|
||||
else
|
||||
emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
|
||||
emit_call((const u64)__bpf_tramp_exit, ctx);
|
||||
}
|
||||
|
||||
|
@ -2257,6 +2257,7 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
|
||||
config ADDRESS_MASKING
|
||||
bool "Linear Address Masking support"
|
||||
depends on X86_64
|
||||
depends on COMPILE_TEST || !CPU_MITIGATIONS # wait for LASS
|
||||
help
|
||||
Linear Address Masking (LAM) modifies the checking that is applied
|
||||
to 64-bit linear addresses, allowing software to use of the
|
||||
|
@ -6,7 +6,7 @@
|
||||
typeof(sym) __ret; \
|
||||
asm_inline("mov %1,%0\n1:\n" \
|
||||
".pushsection runtime_ptr_" #sym ",\"a\"\n\t" \
|
||||
".long 1b - %c2 - .\n\t" \
|
||||
".long 1b - %c2 - .\n" \
|
||||
".popsection" \
|
||||
:"=r" (__ret) \
|
||||
:"i" ((unsigned long)0x0123456789abcdefull), \
|
||||
@ -20,7 +20,7 @@
|
||||
typeof(0u+(val)) __ret = (val); \
|
||||
asm_inline("shrl $12,%k0\n1:\n" \
|
||||
".pushsection runtime_shift_" #sym ",\"a\"\n\t" \
|
||||
".long 1b - 1 - .\n\t" \
|
||||
".long 1b - 1 - .\n" \
|
||||
".popsection" \
|
||||
:"+r" (__ret)); \
|
||||
__ret; })
|
||||
|
@ -12,6 +12,13 @@
|
||||
#include <asm/cpufeatures.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/percpu.h>
|
||||
#include <asm/runtime-const.h>
|
||||
|
||||
/*
|
||||
* Virtual variable: there's no actual backing store for this,
|
||||
* it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)'
|
||||
*/
|
||||
extern unsigned long USER_PTR_MAX;
|
||||
|
||||
#ifdef CONFIG_ADDRESS_MASKING
|
||||
/*
|
||||
@ -46,19 +53,24 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The virtual address space space is logically divided into a kernel
|
||||
* half and a user half. When cast to a signed type, user pointers
|
||||
* are positive and kernel pointers are negative.
|
||||
*/
|
||||
#define valid_user_address(x) ((__force long)(x) >= 0)
|
||||
#define valid_user_address(x) \
|
||||
((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX))
|
||||
|
||||
/*
|
||||
* Masking the user address is an alternative to a conditional
|
||||
* user_access_begin that can avoid the fencing. This only works
|
||||
* for dense accesses starting at the address.
|
||||
*/
|
||||
#define mask_user_address(x) ((typeof(x))((long)(x)|((long)(x)>>63)))
|
||||
static inline void __user *mask_user_address(const void __user *ptr)
|
||||
{
|
||||
unsigned long mask;
|
||||
asm("cmp %1,%0\n\t"
|
||||
"sbb %0,%0"
|
||||
:"=r" (mask)
|
||||
:"r" (ptr),
|
||||
"0" (runtime_const_ptr(USER_PTR_MAX)));
|
||||
return (__force void __user *)(mask | (__force unsigned long)ptr);
|
||||
}
|
||||
#define masked_user_access_begin(x) ({ \
|
||||
__auto_type __masked_ptr = (x); \
|
||||
__masked_ptr = mask_user_address(__masked_ptr); \
|
||||
@ -69,23 +81,16 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
|
||||
* arbitrary values in those bits rather then masking them off.
|
||||
*
|
||||
* Enforce two rules:
|
||||
* 1. 'ptr' must be in the user half of the address space
|
||||
* 1. 'ptr' must be in the user part of the address space
|
||||
* 2. 'ptr+size' must not overflow into kernel addresses
|
||||
*
|
||||
* Note that addresses around the sign change are not valid addresses,
|
||||
* and will GP-fault even with LAM enabled if the sign bit is set (see
|
||||
* "CR3.LAM_SUP" that can narrow the canonicality check if we ever
|
||||
* enable it, but not remove it entirely).
|
||||
*
|
||||
* So the "overflow into kernel addresses" does not imply some sudden
|
||||
* exact boundary at the sign bit, and we can allow a lot of slop on the
|
||||
* size check.
|
||||
* Note that we always have at least one guard page between the
|
||||
* max user address and the non-canonical gap, allowing us to
|
||||
* ignore small sizes entirely.
|
||||
*
|
||||
* In fact, we could probably remove the size check entirely, since
|
||||
* any kernel accesses will be in increasing address order starting
|
||||
* at 'ptr', and even if the end might be in kernel space, we'll
|
||||
* hit the GP faults for non-canonical accesses before we ever get
|
||||
* there.
|
||||
* at 'ptr'.
|
||||
*
|
||||
* That's a separate optimization, for now just handle the small
|
||||
* constant case.
|
||||
|
@ -69,6 +69,7 @@
|
||||
#include <asm/sev.h>
|
||||
#include <asm/tdx.h>
|
||||
#include <asm/posted_intr.h>
|
||||
#include <asm/runtime-const.h>
|
||||
|
||||
#include "cpu.h"
|
||||
|
||||
@ -2389,6 +2390,15 @@ void __init arch_cpu_finalize_init(void)
|
||||
alternative_instructions();
|
||||
|
||||
if (IS_ENABLED(CONFIG_X86_64)) {
|
||||
unsigned long USER_PTR_MAX = TASK_SIZE_MAX-1;
|
||||
|
||||
/*
|
||||
* Enable this when LAM is gated on LASS support
|
||||
if (cpu_feature_enabled(X86_FEATURE_LAM))
|
||||
USER_PTR_MAX = (1ul << 63) - PAGE_SIZE - 1;
|
||||
*/
|
||||
runtime_const_init(ptr, USER_PTR_MAX);
|
||||
|
||||
/*
|
||||
* Make sure the first 2MB area is not mapped by huge pages
|
||||
* There are typically fixed size MTRRs in there and overlapping
|
||||
|
@ -584,7 +584,7 @@ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_
|
||||
native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
|
||||
}
|
||||
|
||||
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
|
||||
static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size);
|
||||
|
||||
static int __init save_microcode_in_initrd(void)
|
||||
{
|
||||
@ -605,7 +605,7 @@ static int __init save_microcode_in_initrd(void)
|
||||
if (!desc.mc)
|
||||
return -EINVAL;
|
||||
|
||||
ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
|
||||
ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
|
||||
if (ret > UCODE_UPDATED)
|
||||
return -EINVAL;
|
||||
|
||||
@ -613,16 +613,19 @@ static int __init save_microcode_in_initrd(void)
|
||||
}
|
||||
early_initcall(save_microcode_in_initrd);
|
||||
|
||||
static inline bool patch_cpus_equivalent(struct ucode_patch *p, struct ucode_patch *n)
|
||||
static inline bool patch_cpus_equivalent(struct ucode_patch *p,
|
||||
struct ucode_patch *n,
|
||||
bool ignore_stepping)
|
||||
{
|
||||
/* Zen and newer hardcode the f/m/s in the patch ID */
|
||||
if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
|
||||
union cpuid_1_eax p_cid = ucode_rev_to_cpuid(p->patch_id);
|
||||
union cpuid_1_eax n_cid = ucode_rev_to_cpuid(n->patch_id);
|
||||
|
||||
/* Zap stepping */
|
||||
p_cid.stepping = 0;
|
||||
n_cid.stepping = 0;
|
||||
if (ignore_stepping) {
|
||||
p_cid.stepping = 0;
|
||||
n_cid.stepping = 0;
|
||||
}
|
||||
|
||||
return p_cid.full == n_cid.full;
|
||||
} else {
|
||||
@ -644,13 +647,13 @@ static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equi
|
||||
WARN_ON_ONCE(!n.patch_id);
|
||||
|
||||
list_for_each_entry(p, &microcode_cache, plist)
|
||||
if (patch_cpus_equivalent(p, &n))
|
||||
if (patch_cpus_equivalent(p, &n, false))
|
||||
return p;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline bool patch_newer(struct ucode_patch *p, struct ucode_patch *n)
|
||||
static inline int patch_newer(struct ucode_patch *p, struct ucode_patch *n)
|
||||
{
|
||||
/* Zen and newer hardcode the f/m/s in the patch ID */
|
||||
if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
|
||||
@ -659,6 +662,9 @@ static inline bool patch_newer(struct ucode_patch *p, struct ucode_patch *n)
|
||||
zp.ucode_rev = p->patch_id;
|
||||
zn.ucode_rev = n->patch_id;
|
||||
|
||||
if (zn.stepping != zp.stepping)
|
||||
return -1;
|
||||
|
||||
return zn.rev > zp.rev;
|
||||
} else {
|
||||
return n->patch_id > p->patch_id;
|
||||
@ -668,10 +674,14 @@ static inline bool patch_newer(struct ucode_patch *p, struct ucode_patch *n)
|
||||
static void update_cache(struct ucode_patch *new_patch)
|
||||
{
|
||||
struct ucode_patch *p;
|
||||
int ret;
|
||||
|
||||
list_for_each_entry(p, &microcode_cache, plist) {
|
||||
if (patch_cpus_equivalent(p, new_patch)) {
|
||||
if (!patch_newer(p, new_patch)) {
|
||||
if (patch_cpus_equivalent(p, new_patch, true)) {
|
||||
ret = patch_newer(p, new_patch);
|
||||
if (ret < 0)
|
||||
continue;
|
||||
else if (!ret) {
|
||||
/* we already have the latest patch */
|
||||
kfree(new_patch->data);
|
||||
kfree(new_patch);
|
||||
@ -944,6 +954,20 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
|
||||
return UCODE_OK;
|
||||
}
|
||||
|
||||
static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size)
|
||||
{
|
||||
enum ucode_state ret;
|
||||
|
||||
/* free old equiv table */
|
||||
free_equiv_cpu_table();
|
||||
|
||||
ret = __load_microcode_amd(family, data, size);
|
||||
if (ret != UCODE_OK)
|
||||
cleanup();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
|
||||
{
|
||||
struct cpuinfo_x86 *c;
|
||||
@ -951,14 +975,9 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
|
||||
struct ucode_patch *p;
|
||||
enum ucode_state ret;
|
||||
|
||||
/* free old equiv table */
|
||||
free_equiv_cpu_table();
|
||||
|
||||
ret = __load_microcode_amd(family, data, size);
|
||||
if (ret != UCODE_OK) {
|
||||
cleanup();
|
||||
ret = _load_microcode_amd(family, data, size);
|
||||
if (ret != UCODE_OK)
|
||||
return ret;
|
||||
}
|
||||
|
||||
for_each_node(nid) {
|
||||
cpu = cpumask_first(cpumask_of_node(nid));
|
||||
|
@ -261,12 +261,6 @@ static noinstr bool handle_bug(struct pt_regs *regs)
|
||||
int ud_type;
|
||||
u32 imm;
|
||||
|
||||
/*
|
||||
* Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
|
||||
* is a rare case that uses @regs without passing them to
|
||||
* irqentry_enter().
|
||||
*/
|
||||
kmsan_unpoison_entry_regs(regs);
|
||||
ud_type = decode_bug(regs->ip, &imm);
|
||||
if (ud_type == BUG_NONE)
|
||||
return handled;
|
||||
@ -275,6 +269,12 @@ static noinstr bool handle_bug(struct pt_regs *regs)
|
||||
* All lies, just get the WARN/BUG out.
|
||||
*/
|
||||
instrumentation_begin();
|
||||
/*
|
||||
* Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
|
||||
* is a rare case that uses @regs without passing them to
|
||||
* irqentry_enter().
|
||||
*/
|
||||
kmsan_unpoison_entry_regs(regs);
|
||||
/*
|
||||
* Since we're emulating a CALL with exceptions, restore the interrupt
|
||||
* state to what it was at the exception site.
|
||||
|
@ -358,6 +358,7 @@ SECTIONS
|
||||
#endif
|
||||
|
||||
RUNTIME_CONST_VARIABLES
|
||||
RUNTIME_CONST(ptr, USER_PTR_MAX)
|
||||
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
|
||||
|
@ -39,8 +39,13 @@
|
||||
|
||||
.macro check_range size:req
|
||||
.if IS_ENABLED(CONFIG_X86_64)
|
||||
mov %rax, %rdx
|
||||
sar $63, %rdx
|
||||
movq $0x0123456789abcdef,%rdx
|
||||
1:
|
||||
.pushsection runtime_ptr_USER_PTR_MAX,"a"
|
||||
.long 1b - 8 - .
|
||||
.popsection
|
||||
cmp %rax, %rdx
|
||||
sbb %rdx, %rdx
|
||||
or %rdx, %rax
|
||||
.else
|
||||
cmp $TASK_SIZE_MAX-\size+1, %eax
|
||||
|
@ -173,6 +173,8 @@ static void __init __snp_fixup_e820_tables(u64 pa)
|
||||
e820__range_update(pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
|
||||
e820__range_update_table(e820_table_kexec, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
|
||||
e820__range_update_table(e820_table_firmware, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
|
||||
if (!memblock_is_region_reserved(pa, PMD_SIZE))
|
||||
memblock_reserve(pa, PMD_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -600,9 +600,7 @@ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
|
||||
if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
|
||||
goto put_bio;
|
||||
if (bytes + bv->bv_len > nr_iter)
|
||||
goto put_bio;
|
||||
if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
|
||||
goto put_bio;
|
||||
break;
|
||||
|
||||
nsegs++;
|
||||
bytes += bv->bv_len;
|
||||
|
@ -130,6 +130,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
|
||||
},
|
||||
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
|
||||
},
|
||||
{
|
||||
/*
|
||||
* Samsung galaxybook2, initial _LID device notification returns
|
||||
* lid closed.
|
||||
*/
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "750XED"),
|
||||
},
|
||||
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
|
@ -1916,9 +1916,15 @@ unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
|
||||
u64 mul, div;
|
||||
|
||||
if (caps->lowest_freq && caps->nominal_freq) {
|
||||
mul = caps->nominal_freq - caps->lowest_freq;
|
||||
/* Avoid special case when nominal_freq is equal to lowest_freq */
|
||||
if (caps->lowest_freq == caps->nominal_freq) {
|
||||
mul = caps->nominal_freq;
|
||||
div = caps->nominal_perf;
|
||||
} else {
|
||||
mul = caps->nominal_freq - caps->lowest_freq;
|
||||
div = caps->nominal_perf - caps->lowest_perf;
|
||||
}
|
||||
mul *= KHZ_PER_MHZ;
|
||||
div = caps->nominal_perf - caps->lowest_perf;
|
||||
offset = caps->nominal_freq * KHZ_PER_MHZ -
|
||||
div64_u64(caps->nominal_perf * mul, div);
|
||||
} else {
|
||||
@ -1939,11 +1945,17 @@ unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
|
||||
{
|
||||
s64 retval, offset = 0;
|
||||
static u64 max_khz;
|
||||
u64 mul, div;
|
||||
u64 mul, div;
|
||||
|
||||
if (caps->lowest_freq && caps->nominal_freq) {
|
||||
mul = caps->nominal_perf - caps->lowest_perf;
|
||||
div = caps->nominal_freq - caps->lowest_freq;
|
||||
/* Avoid special case when nominal_freq is equal to lowest_freq */
|
||||
if (caps->lowest_freq == caps->nominal_freq) {
|
||||
mul = caps->nominal_perf;
|
||||
div = caps->nominal_freq;
|
||||
} else {
|
||||
mul = caps->nominal_perf - caps->lowest_perf;
|
||||
div = caps->nominal_freq - caps->lowest_freq;
|
||||
}
|
||||
/*
|
||||
* We don't need to convert to kHz for computing offset and can
|
||||
* directly use nominal_freq and lowest_freq as the div64_u64
|
||||
|
@ -52,7 +52,7 @@ struct prm_context_buffer {
|
||||
static LIST_HEAD(prm_module_list);
|
||||
|
||||
struct prm_handler_info {
|
||||
guid_t guid;
|
||||
efi_guid_t guid;
|
||||
efi_status_t (__efiapi *handler_addr)(u64, void *);
|
||||
u64 static_data_buffer_addr;
|
||||
u64 acpi_param_buffer_addr;
|
||||
@ -72,17 +72,21 @@ struct prm_module_info {
|
||||
struct prm_handler_info handlers[] __counted_by(handler_count);
|
||||
};
|
||||
|
||||
static u64 efi_pa_va_lookup(u64 pa)
|
||||
static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa)
|
||||
{
|
||||
efi_memory_desc_t *md;
|
||||
u64 pa_offset = pa & ~PAGE_MASK;
|
||||
u64 page = pa & PAGE_MASK;
|
||||
|
||||
for_each_efi_memory_desc(md) {
|
||||
if (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)
|
||||
if ((md->attribute & EFI_MEMORY_RUNTIME) &&
|
||||
(md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)) {
|
||||
return pa_offset + md->virt_addr + page - md->phys_addr;
|
||||
}
|
||||
}
|
||||
|
||||
pr_warn("Failed to find VA for GUID: %pUL, PA: 0x%llx", guid, pa);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -148,9 +152,15 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
|
||||
th = &tm->handlers[cur_handler];
|
||||
|
||||
guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
|
||||
th->handler_addr = (void *)efi_pa_va_lookup(handler_info->handler_address);
|
||||
th->static_data_buffer_addr = efi_pa_va_lookup(handler_info->static_data_buffer_address);
|
||||
th->acpi_param_buffer_addr = efi_pa_va_lookup(handler_info->acpi_param_buffer_address);
|
||||
th->handler_addr =
|
||||
(void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address);
|
||||
|
||||
th->static_data_buffer_addr =
|
||||
efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address);
|
||||
|
||||
th->acpi_param_buffer_addr =
|
||||
efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address);
|
||||
|
||||
} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
|
||||
|
||||
return 0;
|
||||
@ -277,6 +287,13 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
|
||||
if (!handler || !module)
|
||||
goto invalid_guid;
|
||||
|
||||
if (!handler->handler_addr ||
|
||||
!handler->static_data_buffer_addr ||
|
||||
!handler->acpi_param_buffer_addr) {
|
||||
buffer->prm_status = PRM_HANDLER_ERROR;
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
ACPI_COPY_NAMESEG(context.signature, "PRMC");
|
||||
context.revision = 0x0;
|
||||
context.reserved = 0x0;
|
||||
|
@ -503,6 +503,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
|
||||
DMI_MATCH(DMI_BOARD_NAME, "17U70P"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* LG Electronics 16T90SP */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "16T90SP"),
|
||||
},
|
||||
},
|
||||
{ }
|
||||
};
|
||||
|
||||
|
@ -651,6 +651,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
|
||||
/* the scmd has an associated qc */
|
||||
if (!(qc->flags & ATA_QCFLAG_EH)) {
|
||||
/* which hasn't failed yet, timeout */
|
||||
set_host_byte(scmd, DID_TIME_OUT);
|
||||
qc->err_mask |= AC_ERR_TIMEOUT;
|
||||
qc->flags |= ATA_QCFLAG_EH;
|
||||
nr_timedout++;
|
||||
|
@ -674,6 +674,16 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
|
||||
*/
|
||||
void tpm_chip_unregister(struct tpm_chip *chip)
|
||||
{
|
||||
#ifdef CONFIG_TCG_TPM2_HMAC
|
||||
int rc;
|
||||
|
||||
rc = tpm_try_get_ops(chip);
|
||||
if (!rc) {
|
||||
tpm2_end_auth_session(chip);
|
||||
tpm_put_ops(chip);
|
||||
}
|
||||
#endif
|
||||
|
||||
tpm_del_legacy_sysfs(chip);
|
||||
if (tpm_is_hwrng_enabled(chip))
|
||||
hwrng_unregister(&chip->hwrng);
|
||||
|
@ -27,6 +27,9 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
|
||||
struct tpm_header *header = (void *)buf;
|
||||
ssize_t ret, len;
|
||||
|
||||
if (chip->flags & TPM_CHIP_FLAG_TPM2)
|
||||
tpm2_end_auth_session(chip);
|
||||
|
||||
ret = tpm2_prepare_space(chip, space, buf, bufsiz);
|
||||
/* If the command is not implemented by the TPM, synthesize a
|
||||
* response with a TPM2_RC_COMMAND_CODE return for user-space.
|
||||
|
@ -379,10 +379,12 @@ int tpm_pm_suspend(struct device *dev)
|
||||
|
||||
rc = tpm_try_get_ops(chip);
|
||||
if (!rc) {
|
||||
if (chip->flags & TPM_CHIP_FLAG_TPM2)
|
||||
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
|
||||
tpm2_end_auth_session(chip);
|
||||
tpm2_shutdown(chip, TPM2_SU_STATE);
|
||||
else
|
||||
} else {
|
||||
rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
|
||||
}
|
||||
|
||||
tpm_put_ops(chip);
|
||||
}
|
||||
|
@ -333,6 +333,9 @@ void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_TCG_TPM2_HMAC
|
||||
/* The first write to /dev/tpm{rm0} will flush the session. */
|
||||
attributes |= TPM2_SA_CONTINUE_SESSION;
|
||||
|
||||
/*
|
||||
* The Architecture Guide requires us to strip trailing zeros
|
||||
* before computing the HMAC
|
||||
@ -484,7 +487,8 @@ static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v,
|
||||
sha256_final(&sctx, out);
|
||||
}
|
||||
|
||||
static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
|
||||
static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip,
|
||||
struct tpm2_auth *auth)
|
||||
{
|
||||
struct crypto_kpp *kpp;
|
||||
struct kpp_request *req;
|
||||
@ -543,7 +547,7 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
|
||||
sg_set_buf(&s[0], chip->null_ec_key_x, EC_PT_SZ);
|
||||
sg_set_buf(&s[1], chip->null_ec_key_y, EC_PT_SZ);
|
||||
kpp_request_set_input(req, s, EC_PT_SZ*2);
|
||||
sg_init_one(d, chip->auth->salt, EC_PT_SZ);
|
||||
sg_init_one(d, auth->salt, EC_PT_SZ);
|
||||
kpp_request_set_output(req, d, EC_PT_SZ);
|
||||
crypto_kpp_compute_shared_secret(req);
|
||||
kpp_request_free(req);
|
||||
@ -554,8 +558,7 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
|
||||
* This works because KDFe fully consumes the secret before it
|
||||
* writes the salt
|
||||
*/
|
||||
tpm2_KDFe(chip->auth->salt, "SECRET", x, chip->null_ec_key_x,
|
||||
chip->auth->salt);
|
||||
tpm2_KDFe(auth->salt, "SECRET", x, chip->null_ec_key_x, auth->salt);
|
||||
|
||||
out:
|
||||
crypto_free_kpp(kpp);
|
||||
@ -853,7 +856,9 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
|
||||
if (rc)
|
||||
/* manually close the session if it wasn't consumed */
|
||||
tpm2_flush_context(chip, auth->handle);
|
||||
memzero_explicit(auth, sizeof(*auth));
|
||||
|
||||
kfree_sensitive(auth);
|
||||
chip->auth = NULL;
|
||||
} else {
|
||||
/* reset for next use */
|
||||
auth->session = TPM_HEADER_SIZE;
|
||||
@ -881,7 +886,8 @@ void tpm2_end_auth_session(struct tpm_chip *chip)
|
||||
return;
|
||||
|
||||
tpm2_flush_context(chip, auth->handle);
|
||||
memzero_explicit(auth, sizeof(*auth));
|
||||
kfree_sensitive(auth);
|
||||
chip->auth = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(tpm2_end_auth_session);
|
||||
|
||||
@ -915,33 +921,37 @@ static int tpm2_parse_start_auth_session(struct tpm2_auth *auth,
|
||||
|
||||
static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
|
||||
{
|
||||
int rc;
|
||||
unsigned int offset = 0; /* dummy offset for null seed context */
|
||||
u8 name[SHA256_DIGEST_SIZE + 2];
|
||||
u32 tmp_null_key;
|
||||
int rc;
|
||||
|
||||
rc = tpm2_load_context(chip, chip->null_key_context, &offset,
|
||||
null_key);
|
||||
if (rc != -EINVAL)
|
||||
return rc;
|
||||
&tmp_null_key);
|
||||
if (rc != -EINVAL) {
|
||||
if (!rc)
|
||||
*null_key = tmp_null_key;
|
||||
goto err;
|
||||
}
|
||||
|
||||
/* an integrity failure may mean the TPM has been reset */
|
||||
dev_err(&chip->dev, "NULL key integrity failure!\n");
|
||||
/* check the null name against what we know */
|
||||
tpm2_create_primary(chip, TPM2_RH_NULL, NULL, name);
|
||||
if (memcmp(name, chip->null_key_name, sizeof(name)) == 0)
|
||||
/* name unchanged, assume transient integrity failure */
|
||||
return rc;
|
||||
/*
|
||||
* Fatal TPM failure: the NULL seed has actually changed, so
|
||||
* the TPM must have been illegally reset. All in-kernel TPM
|
||||
* operations will fail because the NULL primary can't be
|
||||
* loaded to salt the sessions, but disable the TPM anyway so
|
||||
* userspace programmes can't be compromised by it.
|
||||
*/
|
||||
dev_err(&chip->dev, "NULL name has changed, disabling TPM due to interference\n");
|
||||
/* Try to re-create null key, given the integrity failure: */
|
||||
rc = tpm2_create_primary(chip, TPM2_RH_NULL, &tmp_null_key, name);
|
||||
if (rc)
|
||||
goto err;
|
||||
|
||||
/* Return null key if the name has not been changed: */
|
||||
if (!memcmp(name, chip->null_key_name, sizeof(name))) {
|
||||
*null_key = tmp_null_key;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Deduce from the name change TPM interference: */
|
||||
dev_err(&chip->dev, "null key integrity check failed\n");
|
||||
tpm2_flush_context(chip, tmp_null_key);
|
||||
chip->flags |= TPM_CHIP_FLAG_DISABLE;
|
||||
|
||||
return rc;
|
||||
err:
|
||||
return rc ? -ENODEV : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -958,16 +968,20 @@ static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
|
||||
*/
|
||||
int tpm2_start_auth_session(struct tpm_chip *chip)
|
||||
{
|
||||
struct tpm2_auth *auth;
|
||||
struct tpm_buf buf;
|
||||
struct tpm2_auth *auth = chip->auth;
|
||||
int rc;
|
||||
u32 null_key;
|
||||
int rc;
|
||||
|
||||
if (!auth) {
|
||||
dev_warn_once(&chip->dev, "auth session is not active\n");
|
||||
if (chip->auth) {
|
||||
dev_warn_once(&chip->dev, "auth session is active\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
auth = kzalloc(sizeof(*auth), GFP_KERNEL);
|
||||
if (!auth)
|
||||
return -ENOMEM;
|
||||
|
||||
rc = tpm2_load_null(chip, &null_key);
|
||||
if (rc)
|
||||
goto out;
|
||||
@ -988,7 +1002,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
|
||||
tpm_buf_append(&buf, auth->our_nonce, sizeof(auth->our_nonce));
|
||||
|
||||
/* append encrypted salt and squirrel away unencrypted in auth */
|
||||
tpm_buf_append_salt(&buf, chip);
|
||||
tpm_buf_append_salt(&buf, chip, auth);
|
||||
/* session type (HMAC, audit or policy) */
|
||||
tpm_buf_append_u8(&buf, TPM2_SE_HMAC);
|
||||
|
||||
@ -1010,10 +1024,13 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
|
||||
|
||||
tpm_buf_destroy(&buf);
|
||||
|
||||
if (rc)
|
||||
goto out;
|
||||
if (rc == TPM2_RC_SUCCESS) {
|
||||
chip->auth = auth;
|
||||
return 0;
|
||||
}
|
||||
|
||||
out:
|
||||
out:
|
||||
kfree_sensitive(auth);
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL(tpm2_start_auth_session);
|
||||
@ -1347,18 +1364,21 @@ static int tpm2_create_null_primary(struct tpm_chip *chip)
|
||||
*
|
||||
* Derive and context save the null primary and allocate memory in the
|
||||
* struct tpm_chip for the authorizations.
|
||||
*
|
||||
* Return:
|
||||
* * 0 - OK
|
||||
* * -errno - A system error
|
||||
* * TPM_RC - A TPM error
|
||||
*/
|
||||
int tpm2_sessions_init(struct tpm_chip *chip)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = tpm2_create_null_primary(chip);
|
||||
if (rc)
|
||||
dev_err(&chip->dev, "TPM: security failed (NULL seed derivation): %d\n", rc);
|
||||
|
||||
chip->auth = kmalloc(sizeof(*chip->auth), GFP_KERNEL);
|
||||
if (!chip->auth)
|
||||
return -ENOMEM;
|
||||
if (rc) {
|
||||
dev_err(&chip->dev, "null key creation failed with %d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -204,7 +204,7 @@ static struct fw_node *build_tree(struct fw_card *card, const u32 *sid, int self
|
||||
// the node->ports array where the parent node should be. Later,
|
||||
// when we handle the parent node, we fix up the reference.
|
||||
++parent_count;
|
||||
node->color = i;
|
||||
node->color = port_index;
|
||||
break;
|
||||
|
||||
case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD:
|
||||
|
@ -147,6 +147,7 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
|
||||
struct acpi_buffer *params)
|
||||
{
|
||||
acpi_status status;
|
||||
union acpi_object *obj;
|
||||
union acpi_object atif_arg_elements[2];
|
||||
struct acpi_object_list atif_arg;
|
||||
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
|
||||
@ -169,16 +170,24 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
|
||||
|
||||
status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
|
||||
&buffer);
|
||||
obj = (union acpi_object *)buffer.pointer;
|
||||
|
||||
/* Fail only if calling the method fails and ATIF is supported */
|
||||
/* Fail if calling the method fails and ATIF is supported */
|
||||
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
|
||||
DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
|
||||
acpi_format_exception(status));
|
||||
kfree(buffer.pointer);
|
||||
kfree(obj);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return buffer.pointer;
|
||||
if (obj->type != ACPI_TYPE_BUFFER) {
|
||||
DRM_DEBUG_DRIVER("bad object returned from ATIF: %d\n",
|
||||
obj->type);
|
||||
kfree(obj);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -51,6 +51,12 @@ MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
|
||||
#define SDMA0_HYP_DEC_REG_END 0x589a
|
||||
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
|
||||
|
||||
/*define for compression field for sdma7*/
|
||||
#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_offset 0
|
||||
#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask 0x00000001
|
||||
#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift 16
|
||||
#define SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift)
|
||||
|
||||
static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = {
|
||||
SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG),
|
||||
SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG),
|
||||
@ -1724,7 +1730,8 @@ static void sdma_v7_0_emit_fill_buffer(struct amdgpu_ib *ib,
|
||||
uint64_t dst_offset,
|
||||
uint32_t byte_count)
|
||||
{
|
||||
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL);
|
||||
ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL) |
|
||||
SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(1);
|
||||
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
|
||||
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
|
||||
ib->ptr[ib->length_dw++] = src_data;
|
||||
|
@ -8374,7 +8374,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
|
||||
if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
|
||||
IP_VERSION(3, 5, 0) ||
|
||||
acrtc_state->stream->link->psr_settings.psr_version <
|
||||
DC_PSR_VERSION_UNSUPPORTED) {
|
||||
DC_PSR_VERSION_UNSUPPORTED ||
|
||||
!(adev->flags & AMD_IS_APU)) {
|
||||
timing = &acrtc_state->stream->timing;
|
||||
|
||||
/* at least 2 frames */
|
||||
|
@ -44,6 +44,7 @@
|
||||
|
||||
#include "dm_helpers.h"
|
||||
#include "ddc_service_types.h"
|
||||
#include "clk_mgr.h"
|
||||
|
||||
static u32 edid_extract_panel_id(struct edid *edid)
|
||||
{
|
||||
@ -1121,6 +1122,8 @@ bool dm_helpers_dp_handle_test_pattern_request(
|
||||
struct pipe_ctx *pipe_ctx = NULL;
|
||||
struct amdgpu_dm_connector *aconnector = link->priv;
|
||||
struct drm_device *dev = aconnector->base.dev;
|
||||
struct dc_state *dc_state = ctx->dc->current_state;
|
||||
struct clk_mgr *clk_mgr = ctx->dc->clk_mgr;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
@ -1221,6 +1224,16 @@ bool dm_helpers_dp_handle_test_pattern_request(
|
||||
pipe_ctx->stream->test_pattern.type = test_pattern;
|
||||
pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space;
|
||||
|
||||
/* Temp W/A for compliance test failure */
|
||||
dc_state->bw_ctx.bw.dcn.clk.p_state_change_support = false;
|
||||
dc_state->bw_ctx.bw.dcn.clk.dramclk_khz = clk_mgr->dc_mode_softmax_enabled ?
|
||||
clk_mgr->bw_params->dc_mode_softmax_memclk : clk_mgr->bw_params->max_memclk_mhz;
|
||||
dc_state->bw_ctx.bw.dcn.clk.idle_dramclk_khz = dc_state->bw_ctx.bw.dcn.clk.dramclk_khz;
|
||||
ctx->dc->clk_mgr->funcs->update_clocks(
|
||||
ctx->dc->clk_mgr,
|
||||
dc_state,
|
||||
false);
|
||||
|
||||
dc_link_dp_set_test_pattern(
|
||||
(struct dc_link *) link,
|
||||
test_pattern,
|
||||
|
@ -841,6 +841,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
|
||||
isPSRSUSupported = false;
|
||||
else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
|
||||
isPSRSUSupported = false;
|
||||
else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x01)
|
||||
isPSRSUSupported = false;
|
||||
else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
|
||||
isPSRSUSupported = true;
|
||||
}
|
||||
|
@ -1234,6 +1234,14 @@ static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
|
||||
}
|
||||
}
|
||||
|
||||
static bool smu_is_workload_profile_available(struct smu_context *smu,
|
||||
u32 profile)
|
||||
{
|
||||
if (profile >= PP_SMC_POWER_PROFILE_COUNT)
|
||||
return false;
|
||||
return smu->workload_map && smu->workload_map[profile].valid_mapping;
|
||||
}
|
||||
|
||||
static int smu_sw_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
@ -1265,7 +1273,8 @@ static int smu_sw_init(void *handle)
|
||||
smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
|
||||
smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
|
||||
|
||||
if (smu->is_apu)
|
||||
if (smu->is_apu ||
|
||||
!smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
|
||||
smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
|
||||
else
|
||||
smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
|
||||
|
@@ -25,7 +25,7 @@
#define SMU14_DRIVER_IF_V14_0_H

//Increment this version if SkuTable_t or BoardTable_t change
#define PPTABLE_VERSION 0x18
#define PPTABLE_VERSION 0x1B

#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -145,7 +145,7 @@ typedef enum {
} FEATURE_BTC_e;

// Debug Overrides Bitmask
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001
#define DEBUG_OVERRIDE_NOT_USE 0x00000001
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008
@@ -161,6 +161,7 @@ typedef enum {
#define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000
#define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000
#define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000
#define DEBUG_OVERRIDE_DFLL_BTC_FCW_LOG 0x00010000

// VR Mapping Bit Defines
#define VR_MAPPING_VR_SELECT_MASK 0x01
@@ -391,6 +392,21 @@ typedef struct {
EccInfo_t EccInfo[24];
} EccInfoTable_t;

#define EPCS_HIGH_POWER 600
#define EPCS_NORMAL_POWER 450
#define EPCS_LOW_POWER 300
#define EPCS_SHORTED_POWER 150
#define EPCS_NO_BOOTUP 0

typedef enum{
EPCS_SHORTED_LIMIT,
EPCS_LOW_POWER_LIMIT,
EPCS_NORMAL_POWER_LIMIT,
EPCS_HIGH_POWER_LIMIT,
EPCS_NOT_CONFIGURED,
EPCS_STATUS_COUNT,
} EPCS_STATUS_e;

//D3HOT sequences
typedef enum {
BACO_SEQUENCE,
@@ -662,7 +678,7 @@ typedef enum {
} PP_GRTAVFS_FW_SEP_FUSE_e;

#define PP_NUM_RTAVFS_PWL_ZONES 5

#define PP_NUM_PSM_DIDT_PWL_ZONES 3

// VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3
// Slope Q1.7, Offset Q1.2
@@ -746,10 +762,10 @@ typedef struct {
uint16_t Padding;

//Frequency changes
int16_t GfxclkFmin; // MHz
int16_t GfxclkFmax; // MHz
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
int16_t GfxclkFoffset;
uint16_t Padding1;
uint16_t UclkFmin;
uint16_t UclkFmax;
uint16_t FclkFmin;
uint16_t FclkFmax;

@@ -770,19 +786,23 @@ typedef struct {
uint8_t MaxOpTemp;

uint8_t AdvancedOdModeEnabled;
uint8_t Padding1[3];
uint8_t Padding2[3];

uint16_t GfxVoltageFullCtrlMode;
uint16_t SocVoltageFullCtrlMode;
uint16_t GfxclkFullCtrlMode;
uint16_t UclkFullCtrlMode;
uint16_t FclkFullCtrlMode;
uint16_t Padding2;
uint16_t Padding3;

int16_t GfxEdc;
int16_t GfxPccLimitControl;

uint32_t Spare[10];
uint16_t GfxclkFmaxVmax;
uint8_t GfxclkFmaxVmaxTemperature;
uint8_t Padding4[1];

uint32_t Spare[9];
uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
} OverDriveTable_t;

@@ -802,8 +822,8 @@ typedef struct {
uint16_t VddSocVmax;

//gfxclk
int16_t GfxclkFmin; // MHz
int16_t GfxclkFmax; // MHz
int16_t GfxclkFoffset;
uint16_t Padding;
//uclk
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
@@ -828,7 +848,7 @@ typedef struct {
uint8_t FanZeroRpmEnable;
//temperature
uint8_t MaxOpTemp;
uint8_t Padding[2];
uint8_t Padding1[2];

//Full Ctrl
uint16_t GfxVoltageFullCtrlMode;
@@ -839,7 +859,7 @@ typedef struct {
//EDC
int16_t GfxEdc;
int16_t GfxPccLimitControl;
int16_t Padding1;
int16_t Padding2;

uint32_t Spare[5];
} OverDriveLimits_t;
@@ -987,8 +1007,9 @@ typedef struct {
uint16_t BaseClockDc;
uint16_t GameClockDc;
uint16_t BoostClockDc;

uint32_t Reserved[4];
uint16_t MaxReportedClock;
uint16_t Padding;
uint32_t Reserved[3];
} DriverReportedClocks_t;

typedef struct {
@@ -1132,7 +1153,7 @@ typedef struct {
uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz

uint16_t GfxclkAibFmax;
uint16_t GfxclkFreqCap;
uint16_t GfxDpmPadding;

//GFX Idle Power Settings
uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in Mhz
@@ -1172,8 +1193,7 @@ typedef struct {
uint32_t DvoFmaxLowScaler; //Unitless float

// GFX DCS
uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase
uint16_t PaddingDcs;
uint32_t PaddingDcs;

uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase
uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch.
@@ -1205,8 +1225,7 @@ typedef struct {
uint16_t DalDcModeMaxUclkFreq;
uint8_t PaddingsMem[2];
//FCLK Section
uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state. Set to 0 use FW calculated value
uint16_t PaddingFclk;
uint32_t PaddingFclk;

// Link DPM Settings
uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5
@@ -1215,12 +1234,19 @@ typedef struct {

// SECTION: VDD_GFX AVFS
uint8_t OverrideGfxAvfsFuses;
uint8_t GfxAvfsPadding[3];
uint8_t GfxAvfsPadding[1];
uint16_t DroopGBStDev;

uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain
uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding
//uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];

uint16_t PsmDidt_Vcross[PP_NUM_PSM_DIDT_PWL_ZONES-1];
uint32_t PsmDidt_StaticDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
uint32_t PsmDidt_StaticDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
uint32_t PsmDidt_DynDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
uint32_t PsmDidt_DynDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
uint32_t spare_HwRtAvfsFuses[19];

uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
@@ -1246,11 +1272,7 @@ typedef struct {
uint32_t dGbV_dT_vmin;
uint32_t dGbV_dT_vmax;

//Unused: PMFW-9370
uint32_t V2F_vmin_range_low;
uint32_t V2F_vmin_range_high;
uint32_t V2F_vmax_range_low;
uint32_t V2F_vmax_range_high;
uint32_t PaddingV2F[4];

AvfsDcBtcParams_t DcBtcGfxParams;
QuadraticInt_t SSCurve_GFX;
@@ -1327,18 +1349,18 @@ typedef struct {
uint16_t PsmDidtReleaseTimer;
uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog
// CAC EDC
uint32_t Leakage_C0; // in IEEE float
uint32_t Leakage_C1; // in IEEE float
uint32_t Leakage_C2; // in IEEE float
uint32_t Leakage_C3; // in IEEE float
uint32_t Leakage_C4; // in IEEE float
uint32_t Leakage_C5; // in IEEE float
uint32_t GFX_CLK_SCALAR; // in IEEE float
uint32_t GFX_CLK_INTERCEPT; // in IEEE float
uint32_t GFX_CAC_M; // in IEEE float
uint32_t GFX_CAC_B; // in IEEE float
uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float
uint32_t DynToTotalCacScalar; // in IEEE
uint32_t CacEdcCacLeakageC0;
uint32_t CacEdcCacLeakageC1;
uint32_t CacEdcCacLeakageC2;
uint32_t CacEdcCacLeakageC3;
uint32_t CacEdcCacLeakageC4;
uint32_t CacEdcCacLeakageC5;
uint32_t CacEdcGfxClkScalar;
uint32_t CacEdcGfxClkIntercept;
uint32_t CacEdcCac_m;
uint32_t CacEdcCac_b;
uint32_t CacEdcCurrLimitGuardband;
uint32_t CacEdcDynToTotalCacRatio;
// GFX EDC XVMIN
uint32_t XVmin_Gfx_EdcThreshScalar;
uint32_t XVmin_Gfx_EdcEnableFreq;
@@ -1467,7 +1489,7 @@ typedef struct {
uint8_t VddqOffEnabled;
uint8_t PaddingUmcFlags[2];

uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
uint32_t Paddign1;
uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS

uint8_t FuseWritePowerMuxPresent;
@@ -1530,7 +1552,7 @@ typedef struct {
int16_t FuzzyFan_ErrorSetDelta;
int16_t FuzzyFan_ErrorRateSetDelta;
int16_t FuzzyFan_PwmSetDelta;
uint16_t FuzzyFan_Reserved;
uint16_t FanPadding2;

uint16_t FwCtfLimit[TEMP_COUNT];

@@ -1547,9 +1569,10 @@ typedef struct {
uint16_t FanSpare[1];
uint8_t FanIntakeSensorSupport;
uint8_t FanIntakePadding;
uint32_t FanAmbientPerfBoostThreshold;
uint32_t FanSpare2[12];

uint32_t ODFeatureCtrlMask;

uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix
uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron
uint16_t TemperatureFwCtfLimit_Hynix;
@@ -1637,7 +1660,7 @@ typedef struct {
uint16_t AverageDclk0Frequency ;
uint16_t AverageVclk1Frequency ;
uint16_t AverageDclk1Frequency ;
uint16_t PCIeBusy ;
uint16_t AveragePCIeBusy ;
uint16_t dGPU_W_MAX ;
uint16_t padding ;

@@ -1665,12 +1688,12 @@ typedef struct {

uint16_t AverageGfxActivity ;
uint16_t AverageUclkActivity ;
uint16_t Vcn0ActivityPercentage ;
uint16_t AverageVcn0ActivityPercentage;
uint16_t Vcn1ActivityPercentage ;

uint32_t EnergyAccumulator;
uint16_t AverageSocketPower;
uint16_t MovingAverageTotalBoardPower;
uint16_t AverageTotalBoardPower;

uint16_t AvgTemperature[TEMP_COUNT];
uint16_t AvgTemperatureFanIntake;
@@ -1684,7 +1707,8 @@ typedef struct {

uint8_t ThrottlingPercentage[THROTTLER_COUNT];
uint8_t padding1[3];
uint8_t VmaxThrottlingPercentage;
uint8_t padding1[2];

//metrics for D3hot entry/exit and driver ARM msgs
uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
@@ -1693,7 +1717,7 @@ typedef struct {

uint16_t ApuSTAPMSmartShiftLimit;
uint16_t ApuSTAPMLimit;
uint16_t MovingAvgApuSocketPower;
uint16_t AvgApuSocketPower;

uint16_t AverageUclkActivity_MAX;

@@ -1823,6 +1847,17 @@ typedef struct {
#define TABLE_TRANSFER_FAILED 0xFF
#define TABLE_TRANSFER_PENDING 0xAB

#define TABLE_PPT_FAILED 0x100
#define TABLE_TDC_FAILED 0x200
#define TABLE_TEMP_FAILED 0x400
#define TABLE_FAN_TARGET_TEMP_FAILED 0x800
#define TABLE_FAN_STOP_TEMP_FAILED 0x1000
#define TABLE_FAN_START_TEMP_FAILED 0x2000
#define TABLE_FAN_PWM_MIN_FAILED 0x4000
#define TABLE_ACOUSTIC_TARGET_RPM_FAILED 0x8000
#define TABLE_ACOUSTIC_LIMIT_RPM_FAILED 0x10000
#define TABLE_MGPU_ACOUSTIC_TARGET_RPM_FAILED 0x20000

// Table types
#define TABLE_PPTABLE 0
#define TABLE_COMBO_PPTABLE 1
@@ -1849,5 +1884,6 @@ typedef struct {
#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
#define IH_INTERRUPT_CONTEXT_ID_DYNAMIC_TABLE 0xA

#endif
@@ -28,7 +28,7 @@
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x26
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E

#define FEATURE_MASK(feature) (1ULL << feature)

@@ -1077,12 +1077,9 @@ static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,

switch (od_feature_bit) {
case PP_OD_FEATURE_GFXCLK_FMIN:
od_min_setting = overdrive_lowerlimits->GfxclkFmin;
od_max_setting = overdrive_upperlimits->GfxclkFmin;
break;
case PP_OD_FEATURE_GFXCLK_FMAX:
od_min_setting = overdrive_lowerlimits->GfxclkFmax;
od_max_setting = overdrive_upperlimits->GfxclkFmax;
od_min_setting = overdrive_lowerlimits->GfxclkFoffset;
od_max_setting = overdrive_upperlimits->GfxclkFoffset;
break;
case PP_OD_FEATURE_UCLK_FMIN:
od_min_setting = overdrive_lowerlimits->UclkFmin;
@@ -1269,10 +1266,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_BIT))
break;

size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
od_table->OverDriveTable.GfxclkFmin,
od_table->OverDriveTable.GfxclkFmax);
PPTable_t *pptable = smu->smu_table.driver_pptable;
const OverDriveLimits_t * const overdrive_upperlimits =
&pptable->SkuTable.OverDriveLimitsBasicMax;
const OverDriveLimits_t * const overdrive_lowerlimits =
&pptable->SkuTable.OverDriveLimitsBasicMin;

size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n",
overdrive_lowerlimits->GfxclkFoffset,
overdrive_upperlimits->GfxclkFoffset);
break;

case SMU_OD_MCLK:
@@ -1414,7 +1417,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_FMAX,
NULL,
&max_value);
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
min_value, max_value);
}

@@ -1796,7 +1799,7 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;

uint32_t current_profile_mode = smu->power_profile_mode;
smu->power_profile_mode = input[size];

if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
@@ -1854,6 +1857,11 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
}
}

if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
smu_v14_0_deep_sleep_control(smu, false);
else if (current_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
smu_v14_0_deep_sleep_control(smu, true);

/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
@@ -2158,7 +2166,7 @@ static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,

gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);

gpu_metrics->average_socket_power = metrics->AverageSocketPower;
@@ -2217,8 +2225,7 @@ static void smu_v14_0_2_dump_od_table(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;

dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin,
od_table->OverDriveTable.GfxclkFmax);
dev_dbg(adev->dev, "OD: Gfxclk offset: (%d)\n", od_table->OverDriveTable.GfxclkFoffset);
dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
od_table->OverDriveTable.UclkFmax);
}
@@ -2309,10 +2316,8 @@ static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
memcpy(user_od_table,
boot_od_table,
sizeof(OverDriveTableExternal_t));
user_od_table->OverDriveTable.GfxclkFmin =
user_od_table_bak.OverDriveTable.GfxclkFmin;
user_od_table->OverDriveTable.GfxclkFmax =
user_od_table_bak.OverDriveTable.GfxclkFmax;
user_od_table->OverDriveTable.GfxclkFoffset =
user_od_table_bak.OverDriveTable.GfxclkFoffset;
user_od_table->OverDriveTable.UclkFmin =
user_od_table_bak.OverDriveTable.UclkFmin;
user_od_table->OverDriveTable.UclkFmax =
@@ -2441,22 +2446,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
}

switch (input[i]) {
case 0:
smu_v14_0_2_get_od_setting_limits(smu,
PP_OD_FEATURE_GFXCLK_FMIN,
&minimum,
&maximum);
if (input[i + 1] < minimum ||
input[i + 1] > maximum) {
dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n",
input[i + 1], minimum, maximum);
return -EINVAL;
}

od_table->OverDriveTable.GfxclkFmin = input[i + 1];
od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
break;

case 1:
smu_v14_0_2_get_od_setting_limits(smu,
PP_OD_FEATURE_GFXCLK_FMAX,
@@ -2469,7 +2458,7 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
return -EINVAL;
}

od_table->OverDriveTable.GfxclkFmax = input[i + 1];
od_table->OverDriveTable.GfxclkFoffset = input[i + 1];
od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
break;

@@ -2480,13 +2469,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
}
}

if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) {
dev_err(adev->dev,
"Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n",
(uint32_t)od_table->OverDriveTable.GfxclkFmin,
(uint32_t)od_table->OverDriveTable.GfxclkFmax);
return -EINVAL;
}
break;

case PP_OD_EDIT_MCLK_VDDC_TABLE:
@@ -58,9 +58,10 @@ int drm_aux_bridge_register(struct device *parent)
adev->id = ret;
adev->name = "aux_bridge";
adev->dev.parent = parent;
adev->dev.of_node = of_node_get(parent->of_node);
adev->dev.release = drm_aux_bridge_release;

device_set_of_node_from_dev(&adev->dev, parent);

ret = auxiliary_device_init(adev);
if (ret) {
ida_free(&drm_aux_bridge_ida, adev->id);

@@ -2391,6 +2391,7 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
if (tc->pre_emphasis[0] < 0 || tc->pre_emphasis[0] > 2 ||
tc->pre_emphasis[1] < 0 || tc->pre_emphasis[1] > 2) {
dev_err(dev, "Incorrect Pre-Emphasis setting, use either 0=0dB 1=3.5dB 2=6dB\n");
of_node_put(node);
return -EINVAL;
}
}

@@ -123,9 +123,8 @@ config DRM_I915_USERPTR
config DRM_I915_GVT_KVMGT
tristate "Enable KVM host support Intel GVT-g graphics virtualization"
depends on DRM_I915
depends on X86
depends on KVM_X86
depends on 64BIT
depends on KVM
depends on VFIO
select DRM_I915_GVT
select KVM_EXTERNAL_WRITE_TRACKING

@@ -890,7 +890,7 @@ void xe_device_l2_flush(struct xe_device *xe)
spin_lock(&gt->global_invl_lock);
xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);

if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
xe_gt_err_once(gt, "Global invalidation timeout\n");
spin_unlock(&gt->global_invl_lock);
@@ -115,9 +115,15 @@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain,
XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
&value, true);
if (ret)
xe_gt_notice(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
domain->id, str_wake_sleep(wake), ERR_PTR(ret),
domain->reg_ack.addr, value);
xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
domain->id, str_wake_sleep(wake), ERR_PTR(ret),
domain->reg_ack.addr, value);
if (value == ~0) {
xe_gt_err(gt,
"Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
domain->id, str_wake_sleep(wake));
ret = -EIO;
}

return ret;
}

@@ -897,6 +897,24 @@ static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,

ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);

/*
* Occasionally it is seen that the G2H worker starts running after a delay of more than
* a second even after being queued and activated by the Linux workqueue subsystem. This
* leads to G2H timeout error. The root cause of issue lies with scheduling latency of
* Lunarlake Hybrid CPU. Issue dissappears if we disable Lunarlake atom cores from BIOS
* and this is beyond xe kmd.
*
* TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
*/
if (!ret) {
flush_work(&ct->g2h_worker);
if (g2h_fence.done) {
xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
g2h_fence.seqno, action[0]);
ret = 1;
}
}

/*
* Ensure we serialize with completion side to prevent UAF with fence going out of scope on
* the stack, since we have no clue if it will fire after the timeout before we can erase

@@ -1726,8 +1726,13 @@ void xe_guc_submit_stop(struct xe_guc *guc)

mutex_lock(&guc->submission_state.lock);

xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
/* Prevent redundant attempts to stop parallel queues */
if (q->guc->id != index)
continue;

guc_exec_queue_stop(guc, q);
}

mutex_unlock(&guc->submission_state.lock);

@@ -1765,8 +1770,13 @@ int xe_guc_submit_start(struct xe_guc *guc)

mutex_lock(&guc->submission_state.lock);
atomic_dec(&guc->submission_state.stopped);
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
/* Prevent redundant attempts to start parallel queues */
if (q->guc->id != index)
continue;

guc_exec_queue_start(q);
}
mutex_unlock(&guc->submission_state.lock);

wake_up_all(&guc->ct.wq);

@@ -54,8 +54,9 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
{
struct xe_user_fence *ufence;
u64 __user *ptr = u64_to_user_ptr(addr);
u64 __maybe_unused prefetch_val;

if (!access_ok(ptr, sizeof(*ptr)))
if (get_user(prefetch_val, ptr))
return ERR_PTR(-EFAULT);

ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
@@ -546,6 +546,26 @@ static int mddev_set_closing_and_sync_blockdev(struct mddev *mddev, int opener_n
return 0;
}

/*
* The only difference from bio_chain_endio() is that the current
* bi_status of bio does not affect the bi_status of parent.
*/
static void md_end_flush(struct bio *bio)
{
struct bio *parent = bio->bi_private;

/*
* If any flush io error before the power failure,
* disk data may be lost.
*/
if (bio->bi_status)
pr_err("md: %pg flush io error %d\n", bio->bi_bdev,
blk_status_to_errno(bio->bi_status));

bio_put(bio);
bio_endio(parent);
}

bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
struct md_rdev *rdev;
@@ -565,7 +585,9 @@ bool md_flush_request(struct mddev *mddev, struct bio *bio)
new = bio_alloc_bioset(rdev->bdev, 0,
REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO,
&mddev->bio_set);
bio_chain(new, bio);
new->bi_private = bio;
new->bi_end_io = md_end_flush;
bio_inc_remaining(bio);
submit_bio(new);
}

@@ -4061,9 +4061,12 @@ static int raid10_run(struct mddev *mddev)
}

if (!mddev_is_dm(conf->mddev)) {
ret = raid10_set_queue_limits(mddev);
if (ret)
int err = raid10_set_queue_limits(mddev);

if (err) {
ret = err;
goto out_free_conf;
}
}

/* need to check that every block has at least one working mirror */

@@ -1293,8 +1293,10 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,

/* save the buffer addr until the last read operation */
*save_buf = read_buf;
}

/* get data ready for the first time to read */
/* get data ready for the first time to read */
if (!*ppos) {
ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
read_buf, hns3_dbg_cmd[index].buf_len);
if (ret)
@@ -11,6 +11,7 @@
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
@@ -380,6 +381,24 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
#define HNS3_INVALID_PTYPE \
ARRAY_SIZE(hns3_rx_ptype_tbl)

static void hns3_dma_map_sync(struct device *dev, unsigned long iova)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_iotlb_gather iotlb_gather;
size_t granule;

if (!domain || !iommu_is_dma_domain(domain))
return;

granule = 1 << __ffs(domain->pgsize_bitmap);
iova = ALIGN_DOWN(iova, granule);
iotlb_gather.start = iova;
iotlb_gather.end = iova + granule - 1;
iotlb_gather.pgsize = granule;

iommu_iotlb_sync(domain, &iotlb_gather);
}

static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
struct hns3_enet_tqp_vector *tqp_vector = vector;
@@ -1032,6 +1051,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
struct net_device *netdev = ring_to_netdev(ring);
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_tx_spare *tx_spare;
struct page *page;
dma_addr_t dma;
@@ -1073,6 +1094,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
tx_spare->buf = page_address(page);
tx_spare->len = PAGE_SIZE << order;
ring->tx_spare = tx_spare;
ring->tx_copybreak = priv->tx_copybreak;
return;

dma_mapping_error:
@@ -1724,7 +1746,9 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
unsigned int type)
{
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hnae3_handle *handle = ring->tqp->handle;
struct device *dev = ring_to_dev(ring);
struct hnae3_ae_dev *ae_dev;
unsigned int size;
dma_addr_t dma;

@@ -1756,6 +1780,13 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
return -ENOMEM;
}

/* Add a SYNC command to sync io-pgtale to avoid errors in pgtable
* prefetch
*/
ae_dev = hns3_get_ae_dev(handle);
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
hns3_dma_map_sync(dev, dma);

desc_cb->priv = priv;
desc_cb->length = size;
desc_cb->dma = dma;
@@ -2452,7 +2483,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
return ret;
}

netdev->features = features;
return 0;
}

@@ -4868,6 +4898,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
devm_kfree(&pdev->dev, priv->tqp_vector);
}

static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
{
#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
#define HNS3_MAX_PACKET_SIZE (64 * 1024)

struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hnae3_handle *handle = priv->ae_handle;

if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
return;

if (!(domain && iommu_is_dma_domain(domain)))
return;

priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;

if (priv->tx_copybreak < priv->min_tx_copybreak)
priv->tx_copybreak = priv->min_tx_copybreak;
if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
}

static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
unsigned int ring_type)
{
@@ -5101,6 +5155,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
int i, j;
int ret;

hns3_update_tx_spare_buf_config(priv);
for (i = 0; i < ring_num; i++) {
ret = hns3_alloc_ring_memory(&priv->ring[i]);
if (ret) {
@@ -5305,6 +5360,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->ae_handle = handle;
priv->tx_timeout_count = 0;
priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
priv->min_tx_copybreak = 0;
priv->min_tx_spare_buf_size = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);

handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);

@@ -596,6 +596,8 @@ struct hns3_nic_priv {
struct hns3_enet_coalesce rx_coal;
u32 tx_copybreak;
u32 rx_copybreak;
u32 min_tx_copybreak;
u32 min_tx_spare_buf_size;
};

union l3_hdr_info {

@@ -1933,6 +1933,31 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
return ret;
}

static int hns3_check_tx_copybreak(struct net_device *netdev, u32 copybreak)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);

if (copybreak < priv->min_tx_copybreak) {
netdev_err(netdev, "tx copybreak %u should be no less than %u!\n",
copybreak, priv->min_tx_copybreak);
return -EINVAL;
}
return 0;
}

static int hns3_check_tx_spare_buf_size(struct net_device *netdev, u32 buf_size)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);

if (buf_size < priv->min_tx_spare_buf_size) {
netdev_err(netdev,
"tx spare buf size %u should be no less than %u!\n",
buf_size, priv->min_tx_spare_buf_size);
return -EINVAL;
}
return 0;
}

static int hns3_set_tunable(struct net_device *netdev,
const struct ethtool_tunable *tuna,
const void *data)
@@ -1949,6 +1974,10 @@ static int hns3_set_tunable(struct net_device *netdev,

switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
ret = hns3_check_tx_copybreak(netdev, *(u32 *)data);
if (ret)
return ret;

priv->tx_copybreak = *(u32 *)data;

for (i = 0; i < h->kinfo.num_tqps; i++)
@@ -1963,6 +1992,10 @@ static int hns3_set_tunable(struct net_device *netdev,

break;
case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
ret = hns3_check_tx_spare_buf_size(netdev, *(u32 *)data);
if (ret)
return ret;

old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
new_tx_spare_buf_size = *(u32 *)data;
netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
@@ -6,6 +6,7 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -3584,6 +3585,17 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
return ret;
}

static void hclge_set_reset_pending(struct hclge_dev *hdev,
enum hnae3_reset_type reset_type)
{
/* When an incorrect reset type is executed, the get_reset_level
* function generates the HNAE3_NONE_RESET flag. As a result, this
* type do not need to pending.
*/
if (reset_type != HNAE3_NONE_RESET)
set_bit(reset_type, &hdev->reset_pending);
}

static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
@@ -3604,7 +3616,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
hclge_set_reset_pending(hdev, HNAE3_IMP_RESET);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
@@ -3614,7 +3626,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
@@ -3769,7 +3781,7 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
HCLGE_NAME, pci_name(hdev->pdev));
ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
0, hdev->misc_vector.name, hdev);
IRQ_NOAUTOEN, hdev->misc_vector.name, hdev);
if (ret) {
hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -4062,7 +4074,7 @@ static void hclge_do_reset(struct hclge_dev *hdev)
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF reset requested\n");
/* schedule again to check later */
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET);
hclge_reset_task_schedule(hdev);
break;
default:
@@ -4096,6 +4108,8 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
clear_bit(HNAE3_FLR_RESET, addr);
}

clear_bit(HNAE3_NONE_RESET, addr);

if (hdev->reset_type != HNAE3_NONE_RESET &&
rst_level < hdev->reset_type)
return HNAE3_NONE_RESET;
@@ -4237,7 +4251,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
return false;
} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
hdev->rst_stats.reset_fail_cnt++;
set_bit(hdev->reset_type, &hdev->reset_pending);
hclge_set_reset_pending(hdev, hdev->reset_type);
dev_info(&hdev->pdev->dev,
"re-schedule reset task(%u)\n",
hdev->rst_stats.reset_fail_cnt);
@@ -4480,8 +4494,20 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
#define HCLGE_SUPPORT_RESET_TYPE \
(BIT(HNAE3_FLR_RESET) | BIT(HNAE3_FUNC_RESET) | \
BIT(HNAE3_GLOBAL_RESET) | BIT(HNAE3_IMP_RESET))

struct hclge_dev *hdev = ae_dev->priv;

if (!(BIT(rst_type) & HCLGE_SUPPORT_RESET_TYPE)) {
/* To prevent reset triggered by hclge_reset_event */
set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n",
rst_type);
return;
}

set_bit(rst_type, &hdev->default_reset_request);
}

@@ -11891,9 +11917,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)

hclge_init_rxd_adv_layout(hdev);

/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);

ret = hclge_init_wol(hdev);
if (ret)
dev_warn(&pdev->dev,
@@ -11906,6 +11929,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;

/* Enable MISC vector(vector0) */
enable_irq(hdev->misc_vector.vector_irq);
hclge_enable_vector(&hdev->misc_vector, true);

dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
HCLGE_DRIVER_NAME);

@@ -12311,7 +12338,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)

/* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
disable_irq(hdev->misc_vector.vector_irq);

/* Disable all hw interrupts */
hclge_config_mac_tnl_int(hdev, false);

@@ -58,6 +58,9 @@ bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
struct hclge_dev *hdev = vport->back;
struct hclge_ptp *ptp = hdev->ptp;

if (!ptp)
return false;

if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
ptp->tx_skipped++;
@@ -510,9 +510,9 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET 0x200
#define HCLGE_RING_INT_REG_OFFSET 0x4

struct hnae3_queue *tqp;
int i, j, reg_num;
int data_num_sum;
u32 *reg = data;
@@ -533,10 +533,11 @@ static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
reg_num = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < kinfo->num_tqps; j++) {
reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
tqp = kinfo->tqp[j];
for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw,
ring_reg_addr_list[i] +
HCLGE_RING_REG_OFFSET * j);
*reg++ = readl_relaxed(tqp->io_base -
HCLGE_TQP_REG_OFFSET +
ring_reg_addr_list[i]);
}
data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;

@@ -1395,6 +1395,17 @@ static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
return ret;
}

static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev,
enum hnae3_reset_type reset_type)
{
/* When an incorrect reset type is executed, the get_reset_level
* function generates the HNAE3_NONE_RESET flag. As a result, this
* type do not need to pending.
*/
if (reset_type != HNAE3_NONE_RESET)
set_bit(reset_type, &hdev->reset_pending);
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US 20000
@@ -1544,7 +1555,7 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
hdev->rst_stats.rst_fail_cnt);

if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
set_bit(hdev->reset_type, &hdev->reset_pending);
hclgevf_set_reset_pending(hdev, hdev->reset_type);

if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
@@ -1664,6 +1675,8 @@ static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr)
clear_bit(HNAE3_FLR_RESET, addr);
}

clear_bit(HNAE3_NONE_RESET, addr);

return rst_level;
}

@@ -1673,14 +1686,15 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
struct hclgevf_dev *hdev = ae_dev->priv;

dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

if (hdev->default_reset_request)
hdev->reset_level =
hclgevf_get_reset_level(&hdev->default_reset_request);
else
hdev->reset_level = HNAE3_VF_FUNC_RESET;

dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n",
hdev->reset_level);

/* reset of this VF requested */
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
@@ -1691,8 +1705,20 @@ static void hclgevf_reset_event(struct pci_dev *pdev,
static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_type)
{
#define HCLGEVF_SUPPORT_RESET_TYPE \
(BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \
BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \
BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET))

struct hclgevf_dev *hdev = ae_dev->priv;

if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) {
/* To prevent reset triggered by hclge_reset_event */
set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request);
dev_info(&hdev->pdev->dev, "unsupported reset type %d\n",
rst_type);
return;
}
set_bit(rst_type, &hdev->default_reset_request);
}

@@ -1849,14 +1875,14 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
*/
if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
/* prepare for full reset of stack + pcie interface */
set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET);

/* "defer" schedule the reset task again */
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
} else {
hdev->reset_attempts++;

set_bit(hdev->reset_level, &hdev->reset_pending);
hclgevf_set_reset_pending(hdev, hdev->reset_level);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
}
hclgevf_reset_task_schedule(hdev);
@@ -1979,7 +2005,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
@@ -2289,6 +2315,7 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);

INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);

mutex_init(&hdev->mbx_resp.mbx_mutex);
sema_init(&hdev->reset_sem, 1);
@@ -2988,7 +3015,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
HCLGEVF_DRIVER_NAME);

hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);

return 0;

@@ -123,10 +123,10 @@ int hclgevf_get_regs_len(struct hnae3_handle *handle)
void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
void *data)
{
#define HCLGEVF_RING_REG_OFFSET 0x200
#define HCLGEVF_RING_INT_REG_OFFSET 0x4

struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hnae3_queue *tqp;
int i, j, reg_um;
u32 *reg = data;

@@ -147,10 +147,11 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
reg_um = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
tqp = &hdev->htqp[j].q;
for (i = 0; i < reg_um; i++)
*reg++ = hclgevf_read_dev(&hdev->hw,
ring_reg_addr_list[i] +
HCLGEVF_RING_REG_OFFSET * j);
*reg++ = readl_relaxed(tqp->io_base -
HCLGEVF_TQP_REG_OFFSET +
ring_reg_addr_list[i]);
}

reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
@@ -989,5 +989,11 @@ ice_devlink_port_new(struct devlink *devlink,
if (err)
return err;

if (!ice_is_eswitch_mode_switchdev(pf)) {
NL_SET_ERR_MSG_MOD(extack,
"SF ports are only supported in eswitch switchdev mode");
return -EOPNOTSUPP;
}

return ice_alloc_dynamic_port(pf, new_attr, extack, devlink_port);
}

@@ -10,6 +10,7 @@
#define ICE_DPLL_PIN_IDX_INVALID 0xff
#define ICE_DPLL_RCLK_NUM_PER_PF 1
#define ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT 25
#define ICE_DPLL_PIN_GEN_RCLK_FREQ 1953125

/**
* enum ice_dpll_pin_type - enumerate ice pin types:
@@ -2063,6 +2064,73 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
return 0;
}

/**
* ice_dpll_init_info_pins_generic - initializes generic pins info
* @pf: board private structure
* @input: if input pins initialized
*
* Init information for generic pins, cache them in PF's pins structures.
*
* Return:
* * 0 - success
* * negative - init failure reason
*/
static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input)
{
struct ice_dpll *de = &pf->dplls.eec, *dp = &pf->dplls.pps;
static const char labels[][sizeof("99")] = {
"0", "1", "2", "3", "4", "5", "6", "7", "8",
"9", "10", "11", "12", "13", "14", "15" };
u32 cap = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
enum ice_dpll_pin_type pin_type;
int i, pin_num, ret = -EINVAL;
struct ice_dpll_pin *pins;
u32 phase_adj_max;

if (input) {
pin_num = pf->dplls.num_inputs;
pins = pf->dplls.inputs;
phase_adj_max = pf->dplls.input_phase_adj_max;
pin_type = ICE_DPLL_PIN_TYPE_INPUT;
cap |= DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE;
} else {
pin_num = pf->dplls.num_outputs;
pins = pf->dplls.outputs;
phase_adj_max = pf->dplls.output_phase_adj_max;
pin_type = ICE_DPLL_PIN_TYPE_OUTPUT;
}
if (pin_num > ARRAY_SIZE(labels))
return ret;

for (i = 0; i < pin_num; i++) {
pins[i].idx = i;
pins[i].prop.board_label = labels[i];
pins[i].prop.phase_range.min = phase_adj_max;
pins[i].prop.phase_range.max = -phase_adj_max;
pins[i].prop.capabilities = cap;
pins[i].pf = pf;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
if (ret)
break;
if (input && pins[i].freq == ICE_DPLL_PIN_GEN_RCLK_FREQ)
pins[i].prop.type = DPLL_PIN_TYPE_MUX;
else
pins[i].prop.type = DPLL_PIN_TYPE_EXT;
if (!input)
continue;
ret = ice_aq_get_cgu_ref_prio(&pf->hw, de->dpll_idx, i,
&de->input_prio[i]);
if (ret)
break;
ret = ice_aq_get_cgu_ref_prio(&pf->hw, dp->dpll_idx, i,
&dp->input_prio[i]);
if (ret)
break;
}

return ret;
}

/**
* ice_dpll_init_info_direct_pins - initializes direct pins info
* @pf: board private structure
@@ -2101,6 +2169,8 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
default:
return -EINVAL;
}
if (num_pins != ice_cgu_get_num_pins(hw, input))
return ice_dpll_init_info_pins_generic(pf, input);

for (i = 0; i < num_pins; i++) {
caps = 0;

@@ -34,7 +34,6 @@ static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = {
ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
{ "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
{ "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0, },
};

static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = {
@@ -52,7 +51,6 @@ static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = {
ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
{ "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
{ "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, },
};

static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = {
@@ -6047,6 +6045,25 @@ ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size)
return t;
}

/**
* ice_cgu_get_num_pins - get pin description array size
* @hw: pointer to the hw struct
* @input: if request is done against input or output pins
*
* Return: size of pin description array for given hw.
*/
int ice_cgu_get_num_pins(struct ice_hw *hw, bool input)
{
const struct ice_cgu_pin_desc *t;
int size;

t = ice_cgu_get_pin_desc(hw, input, &size);
if (t)
return size;

return 0;
}

/**
* ice_cgu_get_pin_type - get pin's type
* @hw: pointer to the hw struct

@@ -406,6 +406,7 @@ int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl(struct ice_hw *hw, u8 data);
int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries);
int ice_cgu_get_num_pins(struct ice_hw *hw, bool input);
enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
struct dpll_pin_frequency *
ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num);
@@ -907,7 +907,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
int i, err = 0, vector = 0, free_vector = 0;

err = request_irq(adapter->msix_entries[vector].vector,
igb_msix_other, 0, netdev->name, adapter);
igb_msix_other, IRQF_NO_THREAD, netdev->name, adapter);
if (err)
goto err_out;

@@ -91,8 +91,8 @@ enum mtk_wed_dummy_cr_idx {
#define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin"
#define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin"
#define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin"
#define MT7988_FIRMWARE_WO0 "mediatek/mt7988_wo_0.bin"
#define MT7988_FIRMWARE_WO1 "mediatek/mt7988_wo_1.bin"
#define MT7988_FIRMWARE_WO0 "mediatek/mt7988/mt7988_wo_0.bin"
#define MT7988_FIRMWARE_WO1 "mediatek/mt7988/mt7988_wo_1.bin"

#define MTK_WO_MCU_CFG_LS_BASE 0
#define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000)

@@ -389,15 +389,27 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}

static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
static struct sk_buff *mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
struct page *pages[],
u16 byte_count)
{
struct mlxsw_pci_queue *cq = q->u.rdq.cq;
unsigned int linear_data_size;
struct page_pool *page_pool;
struct sk_buff *skb;
int page_index = 0;
bool linear_only;
void *data;

linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
linear_data_size = linear_only ? byte_count :
PAGE_SIZE -
MLXSW_PCI_RX_BUF_SW_OVERHEAD;

page_pool = cq->u.cq.page_pool;
page_pool_dma_sync_for_cpu(page_pool, pages[page_index],
MLXSW_PCI_SKB_HEADROOM, linear_data_size);

data = page_address(pages[page_index]);
net_prefetch(data);

@@ -405,11 +417,6 @@ static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
if (unlikely(!skb))
return ERR_PTR(-ENOMEM);

linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
linear_data_size = linear_only ? byte_count :
PAGE_SIZE -
MLXSW_PCI_RX_BUF_SW_OVERHEAD;

skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
skb_put(skb, linear_data_size);

@@ -425,6 +432,7 @@ static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],

page = pages[page_index];
frag_size = min(byte_count, PAGE_SIZE);
page_pool_dma_sync_for_cpu(page_pool, page, 0, frag_size);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, 0, frag_size, PAGE_SIZE);
byte_count -= frag_size;
@@ -760,7 +768,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
if (err)
goto out;

skb = mlxsw_pci_rdq_build_skb(pages, byte_count);
skb = mlxsw_pci_rdq_build_skb(q, pages, byte_count);
if (IS_ERR(skb)) {
dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n");
mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries);
@@ -988,12 +996,13 @@ static int mlxsw_pci_cq_page_pool_init(struct mlxsw_pci_queue *q,
if (cq_type != MLXSW_PCI_CQ_RDQ)
return 0;

pp_params.flags = PP_FLAG_DMA_MAP;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = MLXSW_PCI_WQE_COUNT * mlxsw_pci->num_sg_entries;
pp_params.nid = dev_to_node(&mlxsw_pci->pdev->dev);
pp_params.dev = &mlxsw_pci->pdev->dev;
pp_params.napi = &q->u.cq.napi;
pp_params.dma_dir = DMA_FROM_DEVICE;
pp_params.max_len = PAGE_SIZE;

page_pool = page_pool_create(&pp_params);
if (IS_ERR(page_pool))
@@ -481,11 +481,33 @@ mlxsw_sp_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
struct netlink_ext_ack *extack)
{
u32 new_kvdl_index, old_kvdl_index = ipip_entry->dip_kvdl_index;
struct in6_addr old_addr6 = ipip_entry->parms.daddr.addr6;
struct mlxsw_sp_ipip_parms new_parms;
int err;

new_parms = mlxsw_sp_ipip_netdev_parms_init_gre6(ipip_entry->ol_dev);
return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
&new_parms, extack);

err = mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp,
&new_parms.daddr.addr6,
&new_kvdl_index);
if (err)
return err;
ipip_entry->dip_kvdl_index = new_kvdl_index;

err = mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
&new_parms, extack);
if (err)
goto err_change_gre;

mlxsw_sp_ipv6_addr_put(mlxsw_sp, &old_addr6);

return 0;

err_change_gre:
ipip_entry->dip_kvdl_index = old_kvdl_index;
mlxsw_sp_ipv6_addr_put(mlxsw_sp, &new_parms.daddr.addr6);
return err;
}

static int

@@ -16,6 +16,7 @@
#include "spectrum.h"
#include "spectrum_ptp.h"
#include "core.h"
#include "txheader.h"

#define MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT 29
#define MLXSW_SP1_PTP_CLOCK_FREQ_KHZ 156257 /* 6.4nSec */
@@ -1684,6 +1685,12 @@ int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info)
{
if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
dev_kfree_skb_any(skb);
return -ENOMEM;
}

mlxsw_sp_txhdr_construct(skb, tx_info);
return 0;
}

@@ -203,8 +203,12 @@ static void _dwmac4_dump_dma_regs(struct stmmac_priv *priv,
readl(ioaddr + DMA_CHAN_TX_CONTROL(dwmac4_addrs, channel));
reg_space[DMA_CHAN_RX_CONTROL(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_RX_CONTROL(dwmac4_addrs, channel));
reg_space[DMA_CHAN_TX_BASE_ADDR_HI(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_TX_BASE_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(dwmac4_addrs, channel));
reg_space[DMA_CHAN_RX_BASE_ADDR_HI(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_RX_BASE_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(dwmac4_addrs, channel));
reg_space[DMA_CHAN_TX_END_ADDR(default_addrs, channel) / 4] =
@@ -225,8 +229,12 @@ static void _dwmac4_dump_dma_regs(struct stmmac_priv *priv,
readl(ioaddr + DMA_CHAN_CUR_TX_DESC(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_RX_DESC(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_RX_DESC(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_TX_BUF_ADDR_HI(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_RX_BUF_ADDR_HI(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR_HI(dwmac4_addrs, channel));
reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(default_addrs, channel) / 4] =
readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(dwmac4_addrs, channel));
reg_space[DMA_CHAN_STATUS(default_addrs, channel) / 4] =

@@ -127,7 +127,9 @@ static inline u32 dma_chanx_base_addr(const struct dwmac4_addrs *addrs,
#define DMA_CHAN_SLOT_CTRL_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x3c)
#define DMA_CHAN_CUR_TX_DESC(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x44)
#define DMA_CHAN_CUR_RX_DESC(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x4c)
#define DMA_CHAN_CUR_TX_BUF_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x50)
#define DMA_CHAN_CUR_TX_BUF_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x54)
#define DMA_CHAN_CUR_RX_BUF_ADDR_HI(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x58)
#define DMA_CHAN_CUR_RX_BUF_ADDR(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x5c)
#define DMA_CHAN_STATUS(addrs, x) (dma_chanx_base_addr(addrs, x) + 0x60)
@ -4304,11 +4304,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, des))
goto dma_map_err;

tx_q->tx_skbuff_dma[first_entry].buf = des;
tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;

if (priv->dma_cap.addr64 <= 32) {
first->des0 = cpu_to_le32(des);

@ -4327,6 +4322,23 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);

/* In case two or more DMA transmit descriptors are allocated for this
* non-paged SKB data, the DMA buffer address should be saved to
* tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
* and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
* that stmmac_tx_clean() does not unmap the entire DMA buffer too early
* since the tail areas of the DMA buffer can be accessed by DMA engine
* sooner or later.
* By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
* corresponding to the last descriptor, stmmac_tx_clean() will unmap
* this DMA buffer right after the DMA engine completely finishes the
* full buffer transmission.
*/
tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;

/* Prepare fragments */
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@ -1702,20 +1702,24 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
return -EINVAL;

if (data[IFLA_GTP_FD0]) {
u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
int fd0 = nla_get_u32(data[IFLA_GTP_FD0]);

sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
if (IS_ERR(sk0))
return PTR_ERR(sk0);
if (fd0 >= 0) {
sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
if (IS_ERR(sk0))
return PTR_ERR(sk0);
}
}

if (data[IFLA_GTP_FD1]) {
u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
int fd1 = nla_get_u32(data[IFLA_GTP_FD1]);

sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
if (IS_ERR(sk1u)) {
gtp_encap_disable_sock(sk0);
return PTR_ERR(sk1u);
if (fd1 >= 0) {
sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
if (IS_ERR(sk1u)) {
gtp_encap_disable_sock(sk0);
return PTR_ERR(sk1u);
}
}
}

@ -3798,8 +3798,7 @@ static void macsec_free_netdev(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);

if (macsec->secy.tx_sc.md_dst)
metadata_dst_free(macsec->secy.tx_sc.md_dst);
dst_release(&macsec->secy.tx_sc.md_dst->dst);
free_percpu(macsec->stats);
free_percpu(macsec->secy.tx_sc.stats);

@ -588,6 +588,9 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
if (len > MCTP_I2C_MAXMTU)
return -EMSGSIZE;

if (!daddr || !saddr)
return -EINVAL;

lldst = *((u8 *)daddr);
llsrc = *((u8 *)saddr);
@ -1377,10 +1377,12 @@ static ssize_t nsim_nexthop_bucket_activity_write(struct file *file,
if (pos != 0)
return -EINVAL;
if (size > sizeof(buf))
if (size > sizeof(buf) - 1)
return -EINVAL;
if (copy_from_user(buf, user_buf, size))
return -EFAULT;
buf[size] = 0;

if (sscanf(buf, "%u %hu", &nhid, &bucket_index) != 2)
return -EINVAL;

@ -1076,6 +1076,7 @@ static const struct usb_device_id products[] = {
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0122)}, /* Quectel RG650V */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */

@ -10069,6 +10069,7 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3069) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3082) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3098) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x7205) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) },
@ -3043,9 +3043,14 @@ ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar,
struct sk_buff *msdu)
{
struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
struct ath10k_wmi *wmi = &ar->wmi;

idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
spin_lock_bh(&ar->data_lock);
pkt_addr = idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
spin_unlock_bh(&ar->data_lock);

kfree(pkt_addr);

return 0;
}

@ -2441,6 +2441,7 @@ wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
dma_unmap_single(ar->dev, pkt_addr->paddr,
msdu->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
kfree(pkt_addr);

if (param->status) {
info->flags &= ~IEEE80211_TX_STAT_ACK;

@ -9612,6 +9613,7 @@ static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
dma_unmap_single(ar->dev, pkt_addr->paddr,
msdu->len, DMA_TO_DEVICE);
ieee80211_free_txskb(ar->hw, msdu);
kfree(pkt_addr);

return 0;
}

@ -5291,8 +5291,11 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
hal_status == HAL_TLV_STATUS_PPDU_DONE) {
rx_mon_stats->status_ppdu_done++;
pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
if (!ab->hw_params.full_monitor_mode) {
ath11k_dp_rx_mon_dest_process(ar, mac_id,
budget, napi);
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
}
}

if (ppdu_info->peer_id == HAL_INVALID_PEERID ||

@ -306,7 +306,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
struct sk_buff *skb)
{
struct wil6210_rtap {
struct ieee80211_radiotap_header rthdr;
struct ieee80211_radiotap_header_fixed rthdr;
/* fields should be in the order of bits in rthdr.it_present */
/* flags */
u8 flags;
@ -27,6 +27,7 @@ source "drivers/net/wireless/broadcom/brcm80211/brcmfmac/Kconfig"
config BRCM_TRACING
bool "Broadcom device tracing"
depends on BRCMSMAC || BRCMFMAC
depends on TRACING
help
If you say Y here, the Broadcom wireless drivers will register
with ftrace to dump event information into the trace ringbuffer.

@ -2515,7 +2515,7 @@ static void isr_rx_monitor(struct ipw2100_priv *priv, int i,
* to build this manually element by element, we can write it much
* more efficiently than we can parse it. ORDER MATTERS HERE */
struct ipw_rt_hdr {
struct ieee80211_radiotap_header rt_hdr;
struct ieee80211_radiotap_header_fixed rt_hdr;
s8 rt_dbmsignal; /* signal in dbM, kluged to signed */
} *ipw_rt;

@ -1141,7 +1141,7 @@ struct ipw_prom_priv {
* structure is provided regardless of any bits unset.
*/
struct ipw_rt_hdr {
struct ieee80211_radiotap_header rt_hdr;
struct ieee80211_radiotap_header_fixed rt_hdr;
u64 rt_tsf; /* TSF */ /* XXX */
u8 rt_flags; /* radiotap packet flags */
u8 rt_rate; /* rate in 500kb/s */
@ -3122,6 +3122,7 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
struct il_cmd_meta *out_meta;
dma_addr_t phys_addr;
unsigned long flags;
u8 *out_payload;
u32 idx;
u16 fix_size;

@ -3157,6 +3158,16 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
out_cmd = txq->cmd[idx];
out_meta = &txq->meta[idx];

/* The payload is in the same place in regular and huge
* command buffers, but we need to let the compiler know when
* we're using a larger payload buffer to avoid "field-
* spanning write" warnings at run-time for huge commands.
*/
if (cmd->flags & CMD_SIZE_HUGE)
out_payload = ((struct il_device_cmd_huge *)out_cmd)->cmd.payload;
else
out_payload = out_cmd->cmd.payload;

if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
spin_unlock_irqrestore(&il->hcmd_lock, flags);
return -ENOSPC;

@ -3170,7 +3181,7 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
out_meta->callback = cmd->callback;

out_cmd->hdr.cmd = cmd->id;
memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
memcpy(out_payload, cmd->data, cmd->len);

/* At this point, the out_cmd now has all of the incoming cmd
* information */

@ -4962,6 +4973,8 @@ il_pci_resume(struct device *device)
*/
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

_il_wr(il, CSR_INT, 0xffffffff);
_il_wr(il, CSR_FH_INT_STATUS, 0xffffffff);
il_enable_interrupts(il);

if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))

@ -560,6 +560,18 @@ struct il_device_cmd {
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd))

/**
* struct il_device_cmd_huge
*
* For use when sending huge commands.
*/
struct il_device_cmd_huge {
struct il_cmd_header hdr; /* uCode API */
union {
u8 payload[IL_MAX_CMD_SIZE - sizeof(struct il_cmd_header)];
} __packed cmd;
} __packed;

struct il_host_cmd {
const void *data;
unsigned long reply_page;
@ -429,38 +429,28 @@ int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
return ret;
}

static int iwl_acpi_sar_set_profile(union acpi_object *table,
struct iwl_sar_profile *profile,
bool enabled, u8 num_chains,
u8 num_sub_bands)
static int
iwl_acpi_parse_chains_table(union acpi_object *table,
struct iwl_sar_profile_chain *chains,
u8 num_chains, u8 num_sub_bands)
{
int i, j, idx = 0;

/*
* The table from ACPI is flat, but we store it in a
* structured array.
*/
for (i = 0; i < BIOS_SAR_MAX_CHAINS_PER_PROFILE; i++) {
for (j = 0; j < BIOS_SAR_MAX_SUB_BANDS_NUM; j++) {
for (u8 chain = 0; chain < num_chains; chain++) {
for (u8 subband = 0; subband < BIOS_SAR_MAX_SUB_BANDS_NUM;
subband++) {
/* if we don't have the values, use the default */
if (i >= num_chains || j >= num_sub_bands) {
profile->chains[i].subbands[j] = 0;
if (subband >= num_sub_bands) {
chains[chain].subbands[subband] = 0;
} else if (table->type != ACPI_TYPE_INTEGER ||
table->integer.value > U8_MAX) {
return -EINVAL;
} else {
if (table[idx].type != ACPI_TYPE_INTEGER ||
table[idx].integer.value > U8_MAX)
return -EINVAL;

profile->chains[i].subbands[j] =
table[idx].integer.value;

idx++;
chains[chain].subbands[subband] =
table->integer.value;
table++;
}
}
}

/* Only if all values were valid can the profile be enabled */
profile->enabled = enabled;

return 0;
}

@ -543,9 +533,11 @@ int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt)
/* The profile from WRDS is officially profile 1, but goes
* into sar_profiles[0] (because we don't have a profile 0).
*/
ret = iwl_acpi_sar_set_profile(table, &fwrt->sar_profiles[0],
flags & IWL_SAR_ENABLE_MSK,
num_chains, num_sub_bands);
ret = iwl_acpi_parse_chains_table(table, fwrt->sar_profiles[0].chains,
num_chains, num_sub_bands);
if (!ret && flags & IWL_SAR_ENABLE_MSK)
fwrt->sar_profiles[0].enabled = true;

out_free:
kfree(data);
return ret;

@ -557,7 +549,7 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
bool enabled;
int i, n_profiles, tbl_rev, pos;
int ret = 0;
u8 num_chains, num_sub_bands;
u8 num_sub_bands;

data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
if (IS_ERR(data))

@ -573,7 +565,6 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
goto out_free;
}

num_chains = ACPI_SAR_NUM_CHAINS_REV2;
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2;

goto read_table;

@ -589,7 +580,6 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
goto out_free;
}

num_chains = ACPI_SAR_NUM_CHAINS_REV1;
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1;

goto read_table;

@ -605,7 +595,6 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
goto out_free;
}

num_chains = ACPI_SAR_NUM_CHAINS_REV0;
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0;

goto read_table;

@ -637,23 +626,54 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
/* the tables start at element 3 */
pos = 3;

BUILD_BUG_ON(ACPI_SAR_NUM_CHAINS_REV0 != ACPI_SAR_NUM_CHAINS_REV1);
BUILD_BUG_ON(ACPI_SAR_NUM_CHAINS_REV2 != 2 * ACPI_SAR_NUM_CHAINS_REV0);

/* parse non-cdb chains for all profiles */
for (i = 0; i < n_profiles; i++) {
union acpi_object *table = &wifi_pkg->package.elements[pos];

/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
* have profile 0). So in the array we start from 1.
*/
ret = iwl_acpi_sar_set_profile(table,
&fwrt->sar_profiles[i + 1],
enabled, num_chains,
num_sub_bands);
ret = iwl_acpi_parse_chains_table(table,
fwrt->sar_profiles[i + 1].chains,
ACPI_SAR_NUM_CHAINS_REV0,
num_sub_bands);
if (ret < 0)
break;
goto out_free;

/* go to the next table */
pos += num_chains * num_sub_bands;
pos += ACPI_SAR_NUM_CHAINS_REV0 * num_sub_bands;
}

/* non-cdb table revisions */
if (tbl_rev < 2)
goto set_enabled;

/* parse cdb chains for all profiles */
for (i = 0; i < n_profiles; i++) {
struct iwl_sar_profile_chain *chains;
union acpi_object *table;

table = &wifi_pkg->package.elements[pos];
chains = &fwrt->sar_profiles[i + 1].chains[ACPI_SAR_NUM_CHAINS_REV0];
ret = iwl_acpi_parse_chains_table(table,
chains,
ACPI_SAR_NUM_CHAINS_REV0,
num_sub_bands);
if (ret < 0)
goto out_free;

/* go to the next table */
pos += ACPI_SAR_NUM_CHAINS_REV0 * num_sub_bands;
}

set_enabled:
for (i = 0; i < n_profiles; i++)
fwrt->sar_profiles[i + 1].enabled = enabled;

out_free:
kfree(data);
return ret;
@ -39,10 +39,12 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);

/* Assumes the appropriate lock is held by the caller */
void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt)
{
iwl_fw_suspend_timestamp(fwrt);
iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START, NULL);
iwl_dbg_tlv_time_point_sync(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START,
NULL);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend);

@ -1413,26 +1413,36 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
const struct iwl_op_mode_ops *ops = op->ops;
struct dentry *dbgfs_dir = NULL;
struct iwl_op_mode *op_mode = NULL;
int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;

/* also protects start/stop from racing against each other */
lockdep_assert_held(&iwlwifi_opmode_table_mtx);

#ifdef CONFIG_IWLWIFI_DEBUGFS
drv->dbgfs_op_mode = debugfs_create_dir(op->name,
drv->dbgfs_drv);
dbgfs_dir = drv->dbgfs_op_mode;
#endif

op_mode = ops->start(drv->trans, drv->trans->cfg,
&drv->fw, dbgfs_dir);
if (op_mode)
return op_mode;
for (retry = 0; retry <= max_retry; retry++) {

#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove_recursive(drv->dbgfs_op_mode);
drv->dbgfs_op_mode = NULL;
drv->dbgfs_op_mode = debugfs_create_dir(op->name,
drv->dbgfs_drv);
dbgfs_dir = drv->dbgfs_op_mode;
#endif

op_mode = ops->start(drv->trans, drv->trans->cfg,
&drv->fw, dbgfs_dir);

if (op_mode)
return op_mode;

if (test_bit(STATUS_TRANS_DEAD, &drv->trans->status))
break;

IWL_ERR(drv, "retry init count %d\n", retry);

#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove_recursive(drv->dbgfs_op_mode);
drv->dbgfs_op_mode = NULL;
#endif
}

return NULL;
}

@ -98,6 +98,9 @@ void iwl_drv_stop(struct iwl_drv *drv);
#define VISIBLE_IF_IWLWIFI_KUNIT static
#endif

/* max retry for init flow */
#define IWL_MAX_INIT_RETRY 2

#define FW_NAME_PRE_BUFSIZE 64
struct iwl_trans;
const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf);
@ -1398,7 +1398,9 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
iwl_mvm_pause_tcm(mvm, true);

mutex_lock(&mvm->mutex);
iwl_fw_runtime_suspend(&mvm->fwrt);
mutex_unlock(&mvm->mutex);

return __iwl_mvm_suspend(hw, wowlan, false);
}

@ -1307,8 +1307,8 @@ static void iwl_mvm_disconnect_iterator(void *data, u8 *mac,
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
{
u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
u32 status = 0;
int ret;
u32 resp;

struct iwl_fw_error_recovery_cmd recovery_cmd = {
.flags = cpu_to_le32(flags),

@ -1316,7 +1316,6 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
};
struct iwl_host_cmd host_cmd = {
.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
.flags = CMD_WANT_SKB,
.data = {&recovery_cmd, },
.len = {sizeof(recovery_cmd), },
};

@ -1336,7 +1335,7 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
recovery_cmd.buf_size = cpu_to_le32(error_log_size);
}

ret = iwl_mvm_send_cmd(mvm, &host_cmd);
ret = iwl_mvm_send_cmd_status(mvm, &host_cmd, &status);
kfree(mvm->error_recovery_buf);
mvm->error_recovery_buf = NULL;

@ -1347,11 +1346,10 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
/* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */
if (flags & ERROR_RECOVERY_UPDATE_DB) {
resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
if (resp) {
if (status) {
IWL_ERR(mvm,
"Failed to send recovery cmd blob was invalid %d\n",
resp);
status);

ieee80211_iterate_interfaces(mvm->hw, 0,
iwl_mvm_disconnect_iterator,
@ -1293,12 +1293,14 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
int retry, max_retry = 0;

mutex_lock(&mvm->mutex);

/* we are starting the mac not in error flow, and restart is enabled */
if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
iwlwifi_mod_params.fw_restart) {
max_retry = IWL_MAX_INIT_RETRY;
/*
* This will prevent mac80211 recovery flows to trigger during
* init failures

@ -1306,7 +1308,13 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
}

ret = __iwl_mvm_mac_start(mvm);
for (retry = 0; retry <= max_retry; retry++) {
ret = __iwl_mvm_mac_start(mvm);
if (!ret)
break;

IWL_ERR(mvm, "mac start retry %d\n", retry);
}
clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);

mutex_unlock(&mvm->mutex);

@ -1992,7 +2000,6 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvm->p2p_device_vif = NULL;
}

iwl_mvm_unset_link_mapping(mvm, vif, &vif->bss_conf);
iwl_mvm_mac_ctxt_remove(mvm, vif);

RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);

@ -2001,6 +2008,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
mvm->monitor_on = false;

out:
iwl_mvm_unset_link_mapping(mvm, vif, &vif->bss_conf);
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC) {
iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.mcast_sta);
@ -41,8 +41,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
/* reset deflink MLO parameters */
mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
mvmvif->deflink.active = 0;
/* the first link always points to the default one */
mvmvif->link[0] = &mvmvif->deflink;

ret = iwl_mvm_mld_mac_ctxt_add(mvm, vif);
if (ret)

@ -60,9 +58,19 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}

ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
if (ret)
goto out_free_bf;
/* We want link[0] to point to the default link, unless we have MLO and
* in this case this will be modified later by .change_vif_links()
* If we are in the restart flow with an MLD connection, we will wait
* to .change_vif_links() to setup the links.
*/
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
!ieee80211_vif_is_mld(vif)) {
mvmvif->link[0] = &mvmvif->deflink;

ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
if (ret)
goto out_free_bf;
}

/* Save a pointer to p2p device vif, so it can later be used to
* update the p2p device MAC when a GO is started/stopped

@ -1186,7 +1194,11 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);

if (old_links == 0) {
/* If we're in RESTART flow, the default link wasn't added in
* drv_add_interface(), and link[0] doesn't point to it.
*/
if (old_links == 0 && !test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
&mvm->status)) {
err = iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
if (err)
goto out_err;
@ -1774,7 +1774,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
&cp->channel_config[ch_cnt];

u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
u8 j, k, n_s_ssids = 0, n_bssids = 0;
u8 k, n_s_ssids = 0, n_bssids = 0;
u8 max_s_ssids, max_bssids;
bool force_passive = false, found = false, allow_passive = true,
unsolicited_probe_on_chan = false, psc_no_listen = false;

@ -1799,7 +1799,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
cfg->v5.iter_count = 1;
cfg->v5.iter_interval = 0;

for (j = 0; j < params->n_6ghz_params; j++) {
for (u32 j = 0; j < params->n_6ghz_params; j++) {
s8 tmp_psd_20;

if (!(scan_6ghz_params[j].channel_idx == i))

@ -1873,7 +1873,7 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
* SSID.
* TODO: improve this logic
*/
for (j = 0; j < params->n_6ghz_params; j++) {
for (u32 j = 0; j < params->n_6ghz_params; j++) {
if (!(scan_6ghz_params[j].channel_idx == i))
continue;
@ -2,7 +2,7 @@
#include <net/ieee80211_radiotap.h>

struct tx_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
struct ieee80211_radiotap_header_fixed hdr;
u8 rate;
u8 txpower;
u8 rts_retries;

@ -31,7 +31,7 @@ struct tx_radiotap_hdr {
#define IEEE80211_FC_DSTODS 0x0300

struct rx_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
struct ieee80211_radiotap_header_fixed hdr;
u8 flags;
u8 rate;
u8 antsignal;

@ -84,13 +84,16 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
mutex_lock(&dev->mcu.mutex);

if (dev->mcu_ops->mcu_skb_prepare_msg) {
orig_skb = skb;
ret = dev->mcu_ops->mcu_skb_prepare_msg(dev, skb, cmd, &seq);
if (ret < 0)
goto out;
}

retry:
orig_skb = skb_get(skb);
/* orig skb might be needed for retry, mcu_skb_send_msg consumes it */
if (orig_skb)
skb_get(orig_skb);
ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, &seq);
if (ret < 0)
goto out;

@ -105,7 +108,7 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
do {
skb = mt76_mcu_get_response(dev, expires);
if (!skb && !test_bit(MT76_MCU_RESET, &dev->phy.state) &&
retry++ < dev->mcu_ops->max_retry) {
orig_skb && retry++ < dev->mcu_ops->max_retry) {
dev_err(dev->dev, "Retry message %08x (seq %d)\n",
cmd, seq);
skb = orig_skb;
@ -7,12 +7,12 @@
#include "cfg80211.h"

struct wilc_wfi_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
struct ieee80211_radiotap_header_fixed hdr;
u8 rate;
} __packed;

struct wilc_wfi_radiotap_cb_hdr {
struct ieee80211_radiotap_header hdr;
struct ieee80211_radiotap_header_fixed hdr;
u8 rate;
u8 dump;
u16 tx_flags;

@ -352,7 +352,6 @@ static const struct usb_device_id rtl8192d_usb_ids[] = {
{RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8194, rtl92du_hal_cfg)},
{RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8111, rtl92du_hal_cfg)},
{RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x0193, rtl92du_hal_cfg)},
{RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8171, rtl92du_hal_cfg)},
{RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0xe194, rtl92du_hal_cfg)},
{RTL_USB_DEVICE(0x2019, 0xab2c, rtl92du_hal_cfg)},
{RTL_USB_DEVICE(0x2019, 0xab2d, rtl92du_hal_cfg)},

@ -772,7 +772,6 @@ static void rtw_usb_dynamic_rx_agg_v1(struct rtw_dev *rtwdev, bool enable)
u8 size, timeout;
u16 val16;

rtw_write32_set(rtwdev, REG_RXDMA_AGG_PG_TH, BIT_EN_PRE_CALC);
rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);
rtw_write8_clr(rtwdev, REG_RXDMA_AGG_PG_TH + 3, BIT(7));

@ -6506,6 +6506,8 @@ static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid)
/* todo DBCC related event */
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC] wl_info phy_now=%d\n", phy_now);
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC] rlink cnt_2g=%d cnt_5g=%d\n", cnt_2g, cnt_5g);

if (wl_rinfo->dbcc_en != rtwdev->dbcc_en) {
wl_rinfo->dbcc_chg = 1;
@ -3050,23 +3050,53 @@ static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
pci_disable_device(pdev);
}

static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev)
static bool rtw89_pci_chip_is_manual_dac(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_chip_info *chip = rtwdev->chip;

if (!rtwpci->enable_dac)
return;

switch (chip->chip_id) {
case RTL8852A:
case RTL8852B:
case RTL8851B:
case RTL8852BT:
break;
return true;
default:
return;
return false;
}
}

static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct pci_dev *bridge = pci_upstream_bridge(rtwpci->pdev);

if (!rtw89_pci_chip_is_manual_dac(rtwdev))
return true;

if (!bridge)
return false;

switch (bridge->vendor) {
case PCI_VENDOR_ID_INTEL:
return true;
case PCI_VENDOR_ID_ASMEDIA:
if (bridge->device == 0x2806)
return true;
break;
}

return false;
}

static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

if (!rtwpci->enable_dac)
return;

if (!rtw89_pci_chip_is_manual_dac(rtwdev))
return;

rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS);
}

@ -3085,6 +3115,9 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
goto err;
}

if (!rtw89_pci_is_dac_compatible_bridge(rtwdev))
goto no_dac;

ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
if (!ret) {
rtwpci->enable_dac = true;

@ -3097,6 +3130,7 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
goto err_release_regions;
}
}
no_dac:

resource_len = pci_resource_len(pdev, bar_id);
rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
@ -763,7 +763,7 @@ static const struct rhashtable_params hwsim_rht_params = {
};

struct hwsim_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
struct ieee80211_radiotap_header_fixed hdr;
__le64 rt_tsft;
u8 rt_flags;
u8 rt_rate;

@ -772,7 +772,7 @@ struct hwsim_radiotap_hdr {
} __packed;

struct hwsim_radiotap_ack_hdr {
struct ieee80211_radiotap_header hdr;
struct ieee80211_radiotap_header_fixed hdr;
u8 rt_flags;
u8 pad;
__le16 rt_channel;
Some files were not shown because too many files have changed in this diff.