commit 8cbd01ba9c

ASoC: Fixes for v6.13

A mix of quirks and small fixes, nothing too major anywhere.

-----BEGIN PGP SIGNATURE-----

iQEzBAABCgAdFiEEreZoqmdXGLWf4p/qJNaLcl1Uh9AFAmdlZjYACgkQJNaLcl1U
h9ARQQf9HZmj8fhuaU/NyfooCd7/Dn/vUdsn1Ru8JZ59pzavGCqE7SRZY/4I0dKE
GtQbtRYIvk9moXNGaav1s7LSlOXKLoFapwqY2aphpncKRUWckPN0cznKc/FDCUOZ
D2UmWgnxiMZF3PwQYgKjwcxb3V5WXA/uWgdhFCBwjiA4uNgT3fjpa/0an5NlXhK8
5oYtrb7RHefdXl8X0CH0/EuWBddkhjzs9Y7TaBFmim/2HrrTNZKOAsg//bBQd3zl
vzMQbGwx01QimUmweEq9vYw0W18XNYAE7UMghIM0/Oz/bm0m2g5qjtZH3rgGOCdV
6FbSIMdVAKbXvlHwOtU1+gnPvdffcA==
=o1FP
-----END PGP SIGNATURE-----

Merge tag 'asoc-fix-v6.13-rc3' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus

ASoC: Fixes for v6.13

A mix of quirks and small fixes, nothing too major anywhere.
@@ -4822,6 +4822,11 @@
can be preempted anytime. Tasks will also yield
contended spinlocks (if the critical section isn't
explicitly preempt disabled beyond the lock itself).
lazy - Scheduler controlled. Similar to full but instead
of preempting the task immediately, the task gets
one HZ tick time to yield itself before the
preemption will be forced. One preemption is when the
task returns to user space.

print-fatal-signals=
[KNL] debug: print fatal signals
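On kernels built with CONFIG_PREEMPT_DYNAMIC, the preemption model documented in the hunk above can also be switched at run time, not only via the preempt= boot parameter. A minimal userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and that this kernel's dynamic-preemption knob accepts "lazy":

#include <stdio.h>

int main(void)
{
	/* Assumed path: the dynamic preemption debugfs knob. */
	FILE *f = fopen("/sys/kernel/debug/sched/preempt", "w");

	if (!f) {
		perror("sched/preempt");
		return 1;
	}
	fputs("lazy", f);	/* other values: none, voluntary, full */
	fclose(f);
	return 0;
}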
@@ -113,11 +113,8 @@ allOf:
maxItems: 1

- if:
properties:
compatible:
contains:
enum:
- fsl,imx95-usb-phy
required:
- orientation-switch
then:
$ref: /schemas/usb/usb-switch.yaml#

@@ -18,6 +18,7 @@ properties:
compatible:
enum:
- qcom,qca6390-pmu
- qcom,wcn6750-pmu
- qcom,wcn6855-pmu
- qcom,wcn7850-pmu

@@ -27,6 +28,9 @@ properties:
vddaon-supply:
description: VDD_AON supply regulator handle

vddasd-supply:
description: VDD_ASD supply regulator handle

vdddig-supply:
description: VDD_DIG supply regulator handle

@@ -42,6 +46,9 @@ properties:
vddio1p2-supply:
description: VDD_IO_1P2 supply regulator handle

vddrfa0p8-supply:
description: VDD_RFA_0P8 supply regulator handle

vddrfa0p95-supply:
description: VDD_RFA_0P95 supply regulator handle

@@ -51,12 +58,18 @@ properties:
vddrfa1p3-supply:
description: VDD_RFA_1P3 supply regulator handle

vddrfa1p7-supply:
description: VDD_RFA_1P7 supply regulator handle

vddrfa1p8-supply:
description: VDD_RFA_1P8 supply regulator handle

vddrfa1p9-supply:
description: VDD_RFA_1P9 supply regulator handle

vddrfa2p2-supply:
description: VDD_RFA_2P2 supply regulator handle

vddpcie1p3-supply:
description: VDD_PCIE_1P3 supply regulator handle

@@ -119,6 +132,20 @@ allOf:
- vddpcie1p3-supply
- vddpcie1p9-supply
- vddio-supply
- if:
properties:
compatible:
contains:
const: qcom,wcn6750-pmu
then:
required:
- vddaon-supply
- vddasd-supply
- vddpmu-supply
- vddrfa0p8-supply
- vddrfa1p2-supply
- vddrfa1p7-supply
- vddrfa2p2-supply
- if:
properties:
compatible:

@@ -51,7 +51,7 @@ properties:
description: Power supply for AVDD, providing 1.8V.

cpvdd-supply:
description: Power supply for CPVDD, providing 3.5V.
description: Power supply for CPVDD, providing 1.8V.

hp-detect-gpios:
description:

@@ -2170,6 +2170,12 @@ nexthop_compat_mode - BOOLEAN
understands the new API, this sysctl can be disabled to achieve full
performance benefits of the new API by disabling the nexthop expansion
and extraneous notifications.

Note that as a backward-compatible mode, dumping of modern features
might be incomplete or wrong. For example, resilient groups will not be
shown as such, but rather as just a list of next hops. Also weights that
do not fit into 8 bits will show incorrectly.

Default: true (backward compat mode)

fib_notify_on_flag_change - INTEGER
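The nexthop_compat_mode sysctl documented above lives under net.ipv4, so it maps to the usual procfs path. A minimal userspace sketch of turning the compat mode off (equivalent to sysctl -w net.ipv4.nexthop_compat_mode=0); the path is the standard sysctl-to-procfs mapping and the only assumption here:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/nexthop_compat_mode", "w");

	if (!f) {
		perror("nexthop_compat_mode");
		return 1;
	}
	fputs("0\n", f);	/* 0: rely on the new nexthop API only */
	fclose(f);
	return 0;
}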
@@ -347,7 +347,9 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:

`int pm_runtime_resume_and_get(struct device *dev);`
- run pm_runtime_resume(dev) and if successful, increment the device's
usage counter; return the result of pm_runtime_resume
usage counter; returns 0 on success (whether or not the device's
runtime PM status was already 'active') or the error code from
pm_runtime_resume() on failure.

`int pm_request_idle(struct device *dev);`
- submit a request to execute the subsystem-level idle callback for the
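The corrected description above implies a specific caller pattern for pm_runtime_resume_and_get(): the usage counter is raised only on success, so the matching put happens only on the success path. A minimal in-kernel sketch; my_driver_do_io is a hypothetical caller, the two pm_runtime calls are the real API:

#include <linux/pm_runtime.h>

static int my_driver_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* failure: usage counter was not incremented */

	/* ... access the now-active device ... */

	pm_runtime_put(dev);	/* drop the reference taken on success */
	return 0;
}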
@@ -3893,7 +3893,7 @@ W: http://www.baycom.org/~tom/ham/ham.html
F: drivers/net/hamradio/baycom*

BCACHE (BLOCK LAYER CACHE)
M: Coly Li <colyli@suse.de>
M: Coly Li <colyli@kernel.org>
M: Kent Overstreet <kent.overstreet@linux.dev>
L: linux-bcache@vger.kernel.org
S: Maintained
@@ -15345,7 +15345,7 @@ M: Daniel Machon <daniel.machon@microchip.com>
M: UNGLinuxDriver@microchip.com
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/microchip/lan969x/*
F: drivers/net/ethernet/microchip/sparx5/lan969x/*

MICROCHIP LCDFB DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
@@ -16337,6 +16337,7 @@ F: Documentation/networking/
F: Documentation/networking/net_cachelines/
F: Documentation/process/maintainer-netdev.rst
F: Documentation/userspace-api/netlink/
F: include/linux/ethtool.h
F: include/linux/framer/framer-provider.h
F: include/linux/framer/framer.h
F: include/linux/in.h
@@ -16351,6 +16352,7 @@ F: include/linux/rtnetlink.h
F: include/linux/seq_file_net.h
F: include/linux/skbuff*
F: include/net/
F: include/uapi/linux/ethtool.h
F: include/uapi/linux/genetlink.h
F: include/uapi/linux/hsr_netlink.h
F: include/uapi/linux/in.h

Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 13
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Baby Opossum Posse

# *DOCUMENTATION*

@@ -297,7 +297,6 @@ config ARC_PAGE_SIZE_16K
config ARC_PAGE_SIZE_4K
bool "4KB"
select HAVE_PAGE_SIZE_4KB
depends on ARC_MMU_V3 || ARC_MMU_V4

endchoice

@@ -474,7 +473,8 @@ config HIGHMEM

config ARC_HAS_PAE40
bool "Support for the 40-bit Physical Address Extension"
depends on ISA_ARCV2
depends on ARC_MMU_V4
depends on !ARC_PAGE_SIZE_4K
select HIGHMEM
select PHYS_ADDR_T_64BIT
help

@@ -6,7 +6,7 @@

KBUILD_DEFCONFIG := haps_hs_smp_defconfig

ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-)
CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux- arc-linux-gnu-)
endif

cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__

@@ -54,7 +54,7 @@ ictl_intc: gpio-controller@0 {
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
snps,nr-gpios = <30>;
ngpios = <30>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;

@@ -62,7 +62,7 @@ ictl_intc: gpio-controller@0 {
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
snps,nr-gpios = <30>;
ngpios = <30>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;

@@ -69,7 +69,7 @@ ictl_intc: gpio-controller@0 {
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
snps,nr-gpios = <30>;
ngpios = <30>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;

@@ -250,7 +250,7 @@ gpio0_banka: gpio-controller@0 {
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
snps,nr-gpios = <32>;
ngpios = <32>;
reg = <0>;
};

@@ -258,7 +258,7 @@ gpio0_bankb: gpio-controller@1 {
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
snps,nr-gpios = <8>;
ngpios = <8>;
reg = <1>;
};

@@ -266,7 +266,7 @@ gpio0_bankc: gpio-controller@2 {
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
snps,nr-gpios = <8>;
ngpios = <8>;
reg = <2>;
};
};
@@ -281,7 +281,7 @@ gpio1_banka: gpio-controller@0 {
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
snps,nr-gpios = <30>;
ngpios = <30>;
reg = <0>;
};

@@ -289,7 +289,7 @@ gpio1_bankb: gpio-controller@1 {
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
snps,nr-gpios = <10>;
ngpios = <10>;
reg = <1>;
};

@@ -297,7 +297,7 @@ gpio1_bankc: gpio-controller@2 {
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
snps,nr-gpios = <8>;
ngpios = <8>;
reg = <2>;
};
};

@@ -308,7 +308,7 @@ gpio_port_a: gpio-controller@0 {
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
snps,nr-gpios = <24>;
ngpios = <24>;
reg = <0>;
};
};

@@ -146,7 +146,7 @@

#ifndef __ASSEMBLY__

#include <soc/arc/aux.h>
#include <soc/arc/arc_aux.h>

/* Helpers */
#define TO_KB(bytes) ((bytes) >> 10)

@@ -48,7 +48,7 @@
\
switch(sizeof((_p_))) { \
case 1: \
_prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)_p_, (uintptr_t)_o_, (uintptr_t)_n_); \
_prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *__force)_p_, (uintptr_t)_o_, (uintptr_t)_n_); \
break; \
case 4: \
_prev_ = __cmpxchg(_p_, _o_, _n_); \

@@ -9,7 +9,7 @@
#ifndef _ASM_ARC_MMU_ARCV2_H
#define _ASM_ARC_MMU_ARCV2_H

#include <soc/arc/aux.h>
#include <soc/arc/arc_aux.h>

/*
* TLB Management regs

@@ -2916,7 +2916,7 @@ bool check_jmp_32(u32 curr_off, u32 targ_off, u8 cond)
addendum = (cond == ARC_CC_AL) ? 0 : INSN_len_normal;
disp = get_displacement(curr_off + addendum, targ_off);

if (ARC_CC_AL)
if (cond == ARC_CC_AL)
return is_valid_far_disp(disp);
else
return is_valid_near_disp(disp);

@@ -87,7 +87,7 @@
1 << PMSCR_EL2_PA_SHIFT)
msr_s SYS_PMSCR_EL2, x0 // addresses and physical counter
.Lskip_spe_el2_\@:
mov x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
mov x0, #MDCR_EL2_E2PB_MASK
orr x2, x2, x0 // If we don't have VHE, then
// use EL1&0 translation.

@@ -100,7 +100,7 @@
and x0, x0, TRBIDR_EL1_P
cbnz x0, .Lskip_trace_\@ // If TRBE is available at EL2

mov x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
mov x0, #MDCR_EL2_E2TB_MASK
orr x2, x2, x0 // allow the EL1&0 translation
// to own it.

@@ -114,8 +114,8 @@ SYM_CODE_START_LOCAL(__finalise_el2)

// Use EL2 translations for SPE & TRBE and disable access from EL1
mrs x0, mdcr_el2
bic x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
bic x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
bic x0, x0, #MDCR_EL2_E2PB_MASK
bic x0, x0, #MDCR_EL2_E2TB_MASK
msr mdcr_el2, x0

// Transfer the MM state from EL1 to EL2

@@ -1462,10 +1462,33 @@ static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
struct rt_sigframe_user_layout *user, int usig)
{
__sigrestore_t sigtramp;
int err;

if (ksig->ka.sa.sa_flags & SA_RESTORER)
sigtramp = ksig->ka.sa.sa_restorer;
else
sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

err = gcs_signal_entry(sigtramp, ksig);
if (err)
return err;

/*
* We must not fail from this point onwards. We are going to update
* registers, including SP, in order to invoke the signal handler. If
* we failed and attempted to deliver a nested SIGSEGV to a handler
* after that point, the subsequent sigreturn would end up restoring
* the (partial) state for the original signal handler.
*/

regs->regs[0] = usig;
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
regs->regs[1] = (unsigned long)&user->sigframe->info;
regs->regs[2] = (unsigned long)&user->sigframe->uc;
}
regs->sp = (unsigned long)user->sigframe;
regs->regs[29] = (unsigned long)&user->next_frame->fp;
regs->regs[30] = (unsigned long)sigtramp;
regs->pc = (unsigned long)ksig->ka.sa.sa_handler;

/*
@@ -1506,14 +1529,7 @@ static int setup_return(struct pt_regs *regs, struct ksignal *ksig,
sme_smstop();
}

if (ksig->ka.sa.sa_flags & SA_RESTORER)
sigtramp = ksig->ka.sa.sa_restorer;
else
sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

regs->regs[30] = (unsigned long)sigtramp;

return gcs_signal_entry(sigtramp, ksig);
return 0;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
@@ -1537,14 +1553,16 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,

err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
err |= setup_sigframe(&user, regs, set, &ua_state);
if (err == 0) {
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
err |= copy_siginfo_to_user(&frame->info, &ksig->info);

if (err == 0)
err = setup_return(regs, ksig, &user, usig);
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
regs->regs[1] = (unsigned long)&frame->info;
regs->regs[2] = (unsigned long)&frame->uc;
}
}

/*
* We must not fail if setup_return() succeeded - see comment at the
* beginning of setup_return().
*/

if (err == 0)
set_handler_user_access_state();

@@ -26,7 +26,6 @@ enum kunwind_source {
KUNWIND_SOURCE_CALLER,
KUNWIND_SOURCE_TASK,
KUNWIND_SOURCE_REGS_PC,
KUNWIND_SOURCE_REGS_LR,
};

union unwind_flags {
@@ -138,8 +137,10 @@ kunwind_recover_return_address(struct kunwind_state *state)
orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
state->common.pc,
(void *)state->common.fp);
if (WARN_ON_ONCE(state->common.pc == orig_pc))
if (state->common.pc == orig_pc) {
WARN_ON_ONCE(state->task == current);
return -EINVAL;
}
state->common.pc = orig_pc;
state->flags.fgraph = 1;
}
@@ -178,23 +179,8 @@ int kunwind_next_regs_pc(struct kunwind_state *state)
state->regs = regs;
state->common.pc = regs->pc;
state->common.fp = regs->regs[29];
state->source = KUNWIND_SOURCE_REGS_PC;
return 0;
}

static __always_inline int
kunwind_next_regs_lr(struct kunwind_state *state)
{
/*
* The stack for the regs was consumed by kunwind_next_regs_pc(), so we
* cannot consume that again here, but we know the regs are safe to
* access.
*/
state->common.pc = state->regs->regs[30];
state->common.fp = state->regs->regs[29];
state->regs = NULL;
state->source = KUNWIND_SOURCE_REGS_LR;

state->source = KUNWIND_SOURCE_REGS_PC;
return 0;
}

@@ -215,12 +201,12 @@ kunwind_next_frame_record_meta(struct kunwind_state *state)
case FRAME_META_TYPE_FINAL:
if (meta == &task_pt_regs(tsk)->stackframe)
return -ENOENT;
WARN_ON_ONCE(1);
WARN_ON_ONCE(tsk == current);
return -EINVAL;
case FRAME_META_TYPE_PT_REGS:
return kunwind_next_regs_pc(state);
default:
WARN_ON_ONCE(1);
WARN_ON_ONCE(tsk == current);
return -EINVAL;
}
}
@@ -274,11 +260,8 @@ kunwind_next(struct kunwind_state *state)
case KUNWIND_SOURCE_FRAME:
case KUNWIND_SOURCE_CALLER:
case KUNWIND_SOURCE_TASK:
case KUNWIND_SOURCE_REGS_LR:
err = kunwind_next_frame_record(state);
break;
case KUNWIND_SOURCE_REGS_PC:
err = kunwind_next_regs_lr(state);
err = kunwind_next_frame_record(state);
break;
default:
err = -EINVAL;
@@ -436,7 +419,6 @@ static const char *state_source_string(const struct kunwind_state *state)
case KUNWIND_SOURCE_CALLER: return "C";
case KUNWIND_SOURCE_TASK: return "T";
case KUNWIND_SOURCE_REGS_PC: return "P";
case KUNWIND_SOURCE_REGS_LR: return "L";
default: return "U";
}
}

@@ -739,8 +739,15 @@ static u64 compute_par_s12(struct kvm_vcpu *vcpu, u64 s1_par,
final_attr = s1_parattr;
break;
default:
/* MemAttr[2]=0, Device from S2 */
final_attr = s2_memattr & GENMASK(1,0) << 2;
/*
* MemAttr[2]=0, Device from S2.
*
* FWB does not influence the way that stage 1
* memory types and attributes are combined
* with stage 2 Device type and attributes.
*/
final_attr = min(s2_memattr_to_attr(s2_memattr),
s1_parattr);
}
} else {
/* Combination of R_HMNDG, R_TNHFM and R_GQFSF */

@@ -126,7 +126,7 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
/* Trap SPE */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
mdcr_set |= MDCR_EL2_TPMS;
mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
mdcr_clear |= MDCR_EL2_E2PB_MASK;
}

/* Trap Trace Filter */
@@ -143,7 +143,7 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)

/* Trap External Trace */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
mdcr_clear |= MDCR_EL2_E2TB_MASK;

vcpu->arch.mdcr_el2 |= mdcr_set;
vcpu->arch.mdcr_el2 &= ~mdcr_clear;

@@ -2618,7 +2618,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_WRITABLE(ID_AA64MMFR0_EL1, ~(ID_AA64MMFR0_EL1_RES0 |
ID_AA64MMFR0_EL1_TGRAN4_2 |
ID_AA64MMFR0_EL1_TGRAN64_2 |
ID_AA64MMFR0_EL1_TGRAN16_2)),
ID_AA64MMFR0_EL1_TGRAN16_2 |
ID_AA64MMFR0_EL1_ASIDBITS)),
ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
ID_AA64MMFR1_EL1_HCX |
ID_AA64MMFR1_EL1_TWED |

@@ -608,12 +608,22 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
lockdep_assert_held(&its->its_lock);
vgic_get_irq_kref(irq);

old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);

/*
* Put the reference taken on @irq if the store fails. Intentionally do
* not return the error as the translation cache is best effort.
*/
if (xa_is_err(old)) {
vgic_put_irq(kvm, irq);
return;
}

/*
* We could have raced with another CPU caching the same
* translation behind our back, ensure we don't leak a
* reference if that is the case.
*/
old = xa_store(&its->translation_cache, cache_key, irq, GFP_KERNEL_ACCOUNT);
if (old)
vgic_put_irq(kvm, old);
}
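The reordering in the hunk above leans on xa_store()'s return contract: the return value is either the previous entry or an encoded error, so it must be checked with xa_is_err() before being treated as an old entry to release. A hedged in-kernel sketch of that contract; cache_store() and release() are illustrative placeholders, the xa_* calls are the real API:

#include <linux/xarray.h>

static int cache_store(struct xarray *xa, unsigned long key, void *new_entry)
{
	void *old = xa_store(xa, key, new_entry, GFP_KERNEL);

	if (xa_is_err(old))
		return xa_err(old);	/* store failed: no old entry to drop */

	if (old)
		release(old);		/* hypothetical helper: drop the entry we displaced */
	return 0;
}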
@@ -239,6 +239,8 @@ handler: ;\

/* =====================================================[ exceptions] === */

__REF

/* ---[ 0x100: RESET exception ]----------------------------------------- */

EXCEPTION_ENTRY(_tng_kernel_start)

@@ -26,15 +26,15 @@
#include <asm/asm-offsets.h>
#include <linux/of_fdt.h>

#define tophys(rd,rs) \
l.movhi rd,hi(-KERNELBASE) ;\
#define tophys(rd,rs) \
l.movhi rd,hi(-KERNELBASE) ;\
l.add rd,rd,rs

#define CLEAR_GPR(gpr) \
#define CLEAR_GPR(gpr) \
l.movhi gpr,0x0

#define LOAD_SYMBOL_2_GPR(gpr,symbol) \
l.movhi gpr,hi(symbol) ;\
#define LOAD_SYMBOL_2_GPR(gpr,symbol) \
l.movhi gpr,hi(symbol) ;\
l.ori gpr,gpr,lo(symbol)

@@ -326,21 +326,21 @@
l.addi r1,r1,-(INT_FRAME_SIZE) ;\
/* r1 is KSP, r30 is __pa(KSP) */ ;\
tophys (r30,r1) ;\
l.sw PT_GPR12(r30),r12 ;\
l.sw PT_GPR12(r30),r12 ;\
l.mfspr r12,r0,SPR_EPCR_BASE ;\
l.sw PT_PC(r30),r12 ;\
l.mfspr r12,r0,SPR_ESR_BASE ;\
l.sw PT_SR(r30),r12 ;\
/* save r31 */ ;\
EXCEPTION_T_LOAD_GPR30(r12) ;\
l.sw PT_GPR30(r30),r12 ;\
l.sw PT_GPR30(r30),r12 ;\
/* save r10 as was prior to exception */ ;\
EXCEPTION_T_LOAD_GPR10(r12) ;\
l.sw PT_GPR10(r30),r12 ;\
/* save PT_SP as was prior to exception */ ;\
l.sw PT_GPR10(r30),r12 ;\
/* save PT_SP as was prior to exception */ ;\
EXCEPTION_T_LOAD_SP(r12) ;\
l.sw PT_SP(r30),r12 ;\
l.sw PT_GPR13(r30),r13 ;\
l.sw PT_GPR13(r30),r13 ;\
/* --> */ ;\
/* save exception r4, set r4 = EA */ ;\
l.sw PT_GPR4(r30),r4 ;\
@@ -357,6 +357,8 @@

/* =====================================================[ exceptions] === */

__HEAD

/* ---[ 0x100: RESET exception ]----------------------------------------- */
.org 0x100
/* Jump to .init code at _start which lives in the .head section
@@ -394,7 +396,7 @@ _dispatch_do_ipage_fault:
.org 0x500
EXCEPTION_HANDLE(_timer_handler)

/* ---[ 0x600: Alignment exception ]-------------------------------------- */
/* ---[ 0x600: Alignment exception ]------------------------------------- */
.org 0x600
EXCEPTION_HANDLE(_alignment_handler)

@@ -424,7 +426,7 @@ _dispatch_do_ipage_fault:
.org 0xc00
EXCEPTION_HANDLE(_sys_call_handler)

/* ---[ 0xd00: Floating point exception ]--------------------------------- */
/* ---[ 0xd00: Floating point exception ]-------------------------------- */
.org 0xd00
EXCEPTION_HANDLE(_fpe_trap_handler)

@@ -506,10 +508,10 @@ _dispatch_do_ipage_fault:

/* .text*/

/* This early stuff belongs in HEAD, but some of the functions below definitely
/* This early stuff belongs in the .init.text section, but some of the functions below definitely
* don't... */

__HEAD
__INIT
.global _start
_start:
/* Init r0 to zero as per spec */
@@ -816,7 +818,7 @@ secondary_start:

#endif

/* ========================================[ cache ]=== */
/* ==========================================================[ cache ]=== */

/* alignment here so we don't change memory offsets with
* memory controller defined

@@ -50,6 +50,7 @@ SECTIONS
.text : AT(ADDR(.text) - LOAD_OFFSET)
{
_stext = .;
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
@@ -83,8 +84,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;

HEAD_TEXT_SECTION

/* Page aligned */
INIT_TEXT_SECTION(PAGE_SIZE)

@@ -22,7 +22,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
else
set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));

flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
preempt_disable();
local_flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
preempt_enable();

return true;
}

@@ -36,9 +36,15 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
insn = RISCV_INSN_NOP;
}

mutex_lock(&text_mutex);
patch_insn_write(addr, &insn, sizeof(insn));
mutex_unlock(&text_mutex);
if (early_boot_irqs_disabled) {
riscv_patch_in_stop_machine = 1;
patch_insn_write(addr, &insn, sizeof(insn));
riscv_patch_in_stop_machine = 0;
} else {
mutex_lock(&text_mutex);
patch_insn_write(addr, &insn, sizeof(insn));
mutex_unlock(&text_mutex);
}

return true;
}

@@ -227,7 +227,7 @@ static void __init init_resources(void)
static void __init parse_dtb(void)
{
/* Early scan of device tree from init memory */
if (early_init_dt_scan(dtb_early_va, __pa(dtb_early_va))) {
if (early_init_dt_scan(dtb_early_va, dtb_early_pa)) {
const char *name = of_flat_dt_get_machine_name();

if (name) {

@@ -590,7 +590,7 @@ void kvm_riscv_aia_enable(void)
csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
/* Enable IRQ filtering for overflow interrupt only if sscofpmf is present */
if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
csr_write(CSR_HVIEN, BIT(IRQ_PMU_OVF));
csr_set(CSR_HVIEN, BIT(IRQ_PMU_OVF));
}

void kvm_riscv_aia_disable(void)

@@ -1566,7 +1566,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
pmd_clear(pmd);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud, bool is_vmemmap)
{
struct page *page = pud_page(*pud);
struct ptdesc *ptdesc = page_ptdesc(page);
@@ -1579,7 +1579,8 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
return;
}

pagetable_pmd_dtor(ptdesc);
if (!is_vmemmap)
pagetable_pmd_dtor(ptdesc);
if (PageReserved(page))
free_reserved_page(page);
else
@@ -1703,7 +1704,7 @@ static void __meminit remove_pud_mapping(pud_t *pud_base, unsigned long addr, un
remove_pmd_mapping(pmd_base, addr, next, is_vmemmap, altmap);

if (pgtable_l4_enabled)
free_pmd_table(pmd_base, pudp);
free_pmd_table(pmd_base, pudp, is_vmemmap);
}
}

@@ -7135,6 +7135,7 @@ __init int intel_pmu_init(void)

case INTEL_METEORLAKE:
case INTEL_METEORLAKE_L:
case INTEL_ARROWLAKE_U:
intel_pmu_init_hybrid(hybrid_big_small);

x86_pmu.pebs_latency_data = cmt_latency_data;

@@ -1489,7 +1489,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
* hence we need to drain when changing said
* size.
*/
intel_pmu_drain_large_pebs(cpuc);
intel_pmu_drain_pebs_buffer();
adaptive_pebs_record_size_update();
wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
cpuc->active_pebs_data_cfg = pebs_data_cfg;

@@ -13,6 +13,7 @@
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/asm-offsets.h>

/*
* Must be relocatable PIC code callable as a C function, in particular

@@ -36,6 +36,26 @@
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
EXPORT_SYMBOL_GPL(kvm_cpu_caps);

struct cpuid_xstate_sizes {
u32 eax;
u32 ebx;
u32 ecx;
};

static struct cpuid_xstate_sizes xstate_sizes[XFEATURE_MAX] __ro_after_init;

void __init kvm_init_xstate_sizes(void)
{
u32 ign;
int i;

for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes); i++) {
struct cpuid_xstate_sizes *xs = &xstate_sizes[i];

cpuid_count(0xD, i, &xs->eax, &xs->ebx, &xs->ecx, &ign);
}
}

u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
int feature_bit = 0;
@@ -44,14 +64,15 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted)
xstate_bv &= XFEATURE_MASK_EXTEND;
while (xstate_bv) {
if (xstate_bv & 0x1) {
u32 eax, ebx, ecx, edx, offset;
cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
struct cpuid_xstate_sizes *xs = &xstate_sizes[feature_bit];
u32 offset;

/* ECX[1]: 64B alignment in compacted form */
if (compacted)
offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret;
else
offset = ebx;
ret = max(ret, offset + eax);
offset = xs->ebx;
ret = max(ret, offset + xs->eax);
}

xstate_bv >>= 1;

@@ -31,6 +31,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
u32 *ecx, u32 *edx, bool exact_only);

void __init kvm_init_xstate_sizes(void);
u32 xstate_required_size(u64 xstate_bv, bool compacted);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

@@ -13997,6 +13997,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_rmp_fault);

static int __init kvm_x86_init(void)
{
kvm_init_xstate_sizes();

kvm_mmu_x86_module_init();
mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
return 0;

@@ -1171,7 +1171,7 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty)
}
EXPORT_SYMBOL_GPL(__bio_release_pages);

void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter)
{
WARN_ON_ONCE(bio->bi_max_vecs);

@@ -1324,10 +1324,14 @@ void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
struct blkcg *blkcg = css_to_blkcg(blkcg_css);

do {
struct blkcg *parent;

if (!refcount_dec_and_test(&blkcg->online_pin))
break;

parent = blkcg_parent(blkcg);
blkcg_destroy_blkgs(blkcg);
blkcg = blkcg_parent(blkcg);
blkcg = parent;
} while (blkcg);
}
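The two-line change above moves the parent lookup in front of blkcg_destroy_blkgs(), the classic shape of a use-after-free fix: anything needed after an object may stop being safe to dereference must be read first. The same ordering rule in a generic hedged sketch; struct node, put_node_chain() and node_free() are illustrative, not the blk-cgroup types:

#include <linux/refcount.h>

struct node {
	struct node *parent;
	refcount_t ref;
};

static void put_node_chain(struct node *n)
{
	while (n) {
		struct node *parent = n->parent; /* read before any free */

		if (!refcount_dec_and_test(&n->ref))
			break;
		node_free(n);	/* hypothetical destructor; n is now invalid */
		n = parent;	/* walk up using the saved pointer */
	}
}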
@@ -1098,7 +1098,14 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
iocg->child_active_sum);
} else {
inuse = clamp_t(u32, inuse, 1, active);
/*
* It may be tempting to turn this into a clamp expression with
* a lower limit of 1 but active may be 0, which cannot be used
* as an upper limit in that situation. This expression allows
* active to clamp inuse unless it is 0, in which case inuse
* becomes 1.
*/
inuse = min(inuse, active) ?: 1;
}

iocg->last_inuse = iocg->inuse;
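The replacement above and its new comment hinge on a corner case: clamp(inuse, 1, active) has its low bound above its high bound when active == 0, while min(inuse, active) ?: 1 simply degrades to 1. A small standalone illustration of that expression (plain C using the GNU ?: extension, matching the kernel idiom; propagate() is a made-up name):

#include <stdio.h>

static unsigned int propagate(unsigned int inuse, unsigned int active)
{
	unsigned int v = inuse < active ? inuse : active;	/* min(inuse, active) */

	return v ?: 1;	/* GNU "elvis" operator: v if non-zero, else 1 */
}

int main(void)
{
	printf("%u\n", propagate(5, 0));	/* 1: the active == 0 corner case */
	printf("%u\n", propagate(5, 3));	/* 3: clamped by active */
	printf("%u\n", propagate(2, 3));	/* 2: already within range */
	return 0;
}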
@ -574,7 +574,7 @@ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
|
||||
bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
|
||||
if (!bio)
|
||||
return -ENOMEM;
|
||||
bio_iov_bvec_set(bio, (struct iov_iter *)iter);
|
||||
bio_iov_bvec_set(bio, iter);
|
||||
|
||||
/* check that the data layout matches the hardware restrictions */
|
||||
ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
|
||||
|
@ -275,15 +275,13 @@ void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
unsigned long i;
|
||||
|
||||
mutex_lock(&q->sysfs_dir_lock);
|
||||
lockdep_assert_held(&q->sysfs_dir_lock);
|
||||
|
||||
if (!q->mq_sysfs_init_done)
|
||||
goto unlock;
|
||||
return;
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i)
|
||||
blk_mq_unregister_hctx(hctx);
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&q->sysfs_dir_lock);
|
||||
}
|
||||
|
||||
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
|
||||
@ -292,9 +290,10 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
|
||||
unsigned long i;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&q->sysfs_dir_lock);
|
||||
lockdep_assert_held(&q->sysfs_dir_lock);
|
||||
|
||||
if (!q->mq_sysfs_init_done)
|
||||
goto unlock;
|
||||
return ret;
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
ret = blk_mq_register_hctx(hctx);
|
||||
@ -302,8 +301,5 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
|
||||
break;
|
||||
}
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&q->sysfs_dir_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -1544,19 +1544,17 @@ static void blk_mq_requeue_work(struct work_struct *work)
|
||||
|
||||
while (!list_empty(&rq_list)) {
|
||||
rq = list_entry(rq_list.next, struct request, queuelist);
|
||||
list_del_init(&rq->queuelist);
|
||||
/*
|
||||
* If RQF_DONTPREP ist set, the request has been started by the
|
||||
* If RQF_DONTPREP is set, the request has been started by the
|
||||
* driver already and might have driver-specific data allocated
|
||||
* already. Insert it into the hctx dispatch list to avoid
|
||||
* block layer merges for the request.
|
||||
*/
|
||||
if (rq->rq_flags & RQF_DONTPREP) {
|
||||
list_del_init(&rq->queuelist);
|
||||
if (rq->rq_flags & RQF_DONTPREP)
|
||||
blk_mq_request_bypass_insert(rq, 0);
|
||||
} else {
|
||||
list_del_init(&rq->queuelist);
|
||||
else
|
||||
blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
|
||||
}
|
||||
}
|
||||
|
||||
while (!list_empty(&flush_list)) {
|
||||
@ -4455,7 +4453,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
|
||||
unsigned long i, j;
|
||||
|
||||
/* protect against switching io scheduler */
|
||||
mutex_lock(&q->sysfs_lock);
|
||||
lockdep_assert_held(&q->sysfs_lock);
|
||||
|
||||
for (i = 0; i < set->nr_hw_queues; i++) {
|
||||
int old_node;
|
||||
int node = blk_mq_get_hctx_node(set, i);
|
||||
@ -4488,7 +4487,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
|
||||
|
||||
xa_for_each_start(&q->hctx_table, j, hctx, j)
|
||||
blk_mq_exit_hctx(q, set, hctx, j);
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
|
||||
/* unregister cpuhp callbacks for exited hctxs */
|
||||
blk_mq_remove_hw_queues_cpuhp(q);
|
||||
@ -4520,10 +4518,14 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
|
||||
|
||||
xa_init(&q->hctx_table);
|
||||
|
||||
mutex_lock(&q->sysfs_lock);
|
||||
|
||||
blk_mq_realloc_hw_ctxs(set, q);
|
||||
if (!q->nr_hw_queues)
|
||||
goto err_hctxs;
|
||||
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
|
||||
INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
|
||||
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
|
||||
|
||||
@ -4542,6 +4544,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
|
||||
return 0;
|
||||
|
||||
err_hctxs:
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
blk_mq_release(q);
|
||||
err_exit:
|
||||
q->mq_ops = NULL;
|
||||
@ -4922,12 +4925,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
|
||||
return false;
|
||||
|
||||
/* q->elevator needs protection from ->sysfs_lock */
|
||||
mutex_lock(&q->sysfs_lock);
|
||||
lockdep_assert_held(&q->sysfs_lock);
|
||||
|
||||
/* the check has to be done with holding sysfs_lock */
|
||||
if (!q->elevator) {
|
||||
kfree(qe);
|
||||
goto unlock;
|
||||
goto out;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&qe->node);
|
||||
@ -4937,9 +4940,7 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
|
||||
__elevator_get(qe->type);
|
||||
list_add(&qe->node, head);
|
||||
elevator_disable(q);
|
||||
unlock:
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
|
||||
out:
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -4968,11 +4969,9 @@ static void blk_mq_elv_switch_back(struct list_head *head,
|
||||
list_del(&qe->node);
|
||||
kfree(qe);
|
||||
|
||||
mutex_lock(&q->sysfs_lock);
|
||||
elevator_switch(q, t);
|
||||
/* drop the reference acquired in blk_mq_elv_switch_none */
|
||||
elevator_put(t);
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
}
|
||||
|
||||
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
|
||||
@ -4992,8 +4991,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
|
||||
if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
|
||||
return;
|
||||
|
||||
list_for_each_entry(q, &set->tag_list, tag_set_list)
|
||||
list_for_each_entry(q, &set->tag_list, tag_set_list) {
|
||||
mutex_lock(&q->sysfs_dir_lock);
|
||||
mutex_lock(&q->sysfs_lock);
|
||||
blk_mq_freeze_queue(q);
|
||||
}
|
||||
/*
|
||||
* Switch IO scheduler to 'none', cleaning up the data associated
|
||||
* with the previous scheduler. We will switch back once we are done
|
||||
@ -5049,8 +5051,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
|
||||
list_for_each_entry(q, &set->tag_list, tag_set_list)
|
||||
blk_mq_elv_switch_back(&head, q);
|
||||
|
||||
list_for_each_entry(q, &set->tag_list, tag_set_list)
|
||||
list_for_each_entry(q, &set->tag_list, tag_set_list) {
|
||||
blk_mq_unfreeze_queue(q);
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
mutex_unlock(&q->sysfs_dir_lock);
|
||||
}
|
||||
|
||||
/* Free the excess tags when nr_hw_queues shrink. */
|
||||
for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
|
||||
|
@ -263,7 +263,7 @@ static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
|
||||
|
||||
static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
|
||||
{
|
||||
return queue_var_show(blk_queue_passthrough_stat(disk->queue), page);
|
||||
return queue_var_show(!!blk_queue_passthrough_stat(disk->queue), page);
|
||||
}
|
||||
|
||||
static ssize_t queue_iostats_passthrough_store(struct gendisk *disk,
|
||||
@ -706,11 +706,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
|
||||
if (entry->load_module)
|
||||
entry->load_module(disk, page, length);
|
||||
|
||||
blk_mq_freeze_queue(q);
|
||||
mutex_lock(&q->sysfs_lock);
|
||||
blk_mq_freeze_queue(q);
|
||||
res = entry->store(disk, page, length);
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
blk_mq_unfreeze_queue(q);
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -41,7 +41,6 @@ static const char *const zone_cond_name[] = {
|
||||
/*
|
||||
* Per-zone write plug.
|
||||
* @node: hlist_node structure for managing the plug using a hash table.
|
||||
* @link: To list the plug in the zone write plug error list of the disk.
|
||||
* @ref: Zone write plug reference counter. A zone write plug reference is
|
||||
* always at least 1 when the plug is hashed in the disk plug hash table.
|
||||
* The reference is incremented whenever a new BIO needing plugging is
|
||||
@ -63,7 +62,6 @@ static const char *const zone_cond_name[] = {
|
||||
*/
|
||||
struct blk_zone_wplug {
|
||||
struct hlist_node node;
|
||||
struct list_head link;
|
||||
refcount_t ref;
|
||||
spinlock_t lock;
|
||||
unsigned int flags;
|
||||
@ -80,8 +78,8 @@ struct blk_zone_wplug {
|
||||
* - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged,
|
||||
* that is, that write BIOs are being throttled due to a write BIO already
|
||||
* being executed or the zone write plug bio list is not empty.
|
||||
* - BLK_ZONE_WPLUG_ERROR: Indicates that a write error happened which will be
|
||||
* recovered with a report zone to update the zone write pointer offset.
|
||||
* - BLK_ZONE_WPLUG_NEED_WP_UPDATE: Indicates that we lost track of a zone
|
||||
* write pointer offset and need to update it.
|
||||
* - BLK_ZONE_WPLUG_UNHASHED: Indicates that the zone write plug was removed
|
||||
* from the disk hash table and that the initial reference to the zone
|
||||
* write plug set when the plug was first added to the hash table has been
|
||||
@ -91,11 +89,9 @@ struct blk_zone_wplug {
|
||||
* freed once all remaining references from BIOs or functions are dropped.
|
||||
*/
|
||||
#define BLK_ZONE_WPLUG_PLUGGED (1U << 0)
|
||||
#define BLK_ZONE_WPLUG_ERROR (1U << 1)
|
||||
#define BLK_ZONE_WPLUG_NEED_WP_UPDATE (1U << 1)
|
||||
#define BLK_ZONE_WPLUG_UNHASHED (1U << 2)
|
||||
|
||||
#define BLK_ZONE_WPLUG_BUSY (BLK_ZONE_WPLUG_PLUGGED | BLK_ZONE_WPLUG_ERROR)
|
||||
|
||||
/**
|
||||
* blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
|
||||
* @zone_cond: BLK_ZONE_COND_XXX.
|
||||
@ -115,6 +111,30 @@ const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blk_zone_cond_str);
|
||||
|
||||
struct disk_report_zones_cb_args {
|
||||
struct gendisk *disk;
|
||||
report_zones_cb user_cb;
|
||||
void *user_data;
|
||||
};
|
||||
|
||||
static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk,
|
||||
struct blk_zone *zone);
|
||||
|
||||
static int disk_report_zones_cb(struct blk_zone *zone, unsigned int idx,
|
||||
void *data)
|
||||
{
|
||||
struct disk_report_zones_cb_args *args = data;
|
||||
struct gendisk *disk = args->disk;
|
||||
|
||||
if (disk->zone_wplugs_hash)
|
||||
disk_zone_wplug_sync_wp_offset(disk, zone);
|
||||
|
||||
if (!args->user_cb)
|
||||
return 0;
|
||||
|
||||
return args->user_cb(zone, idx, args->user_data);
|
||||
}
|
||||
|
||||
/**
|
||||
* blkdev_report_zones - Get zones information
|
||||
* @bdev: Target block device
|
||||
@ -139,6 +159,11 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
|
||||
{
|
||||
struct gendisk *disk = bdev->bd_disk;
|
||||
sector_t capacity = get_capacity(disk);
|
||||
struct disk_report_zones_cb_args args = {
|
||||
.disk = disk,
|
||||
.user_cb = cb,
|
||||
.user_data = data,
|
||||
};
|
||||
|
||||
if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
|
||||
return -EOPNOTSUPP;
|
||||
@ -146,7 +171,8 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
|
||||
if (!nr_zones || sector >= capacity)
|
||||
return 0;
|
||||
|
||||
return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
|
||||
return disk->fops->report_zones(disk, sector, nr_zones,
|
||||
disk_report_zones_cb, &args);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blkdev_report_zones);
|
||||
|
||||
@ -427,7 +453,7 @@ static inline void disk_put_zone_wplug(struct blk_zone_wplug *zwplug)
|
||||
{
|
||||
if (refcount_dec_and_test(&zwplug->ref)) {
|
||||
WARN_ON_ONCE(!bio_list_empty(&zwplug->bio_list));
|
||||
WARN_ON_ONCE(!list_empty(&zwplug->link));
|
||||
WARN_ON_ONCE(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
|
||||
WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_UNHASHED));
|
||||
|
||||
call_rcu(&zwplug->rcu_head, disk_free_zone_wplug_rcu);
|
||||
@ -441,8 +467,8 @@ static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
|
||||
if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
|
||||
return false;
|
||||
|
||||
/* If the zone write plug is still busy, it cannot be removed. */
|
||||
if (zwplug->flags & BLK_ZONE_WPLUG_BUSY)
|
||||
/* If the zone write plug is still plugged, it cannot be removed. */
|
||||
if (zwplug->flags & BLK_ZONE_WPLUG_PLUGGED)
|
||||
return false;
|
||||
|
||||
/*
|
||||
@ -525,12 +551,11 @@ static struct blk_zone_wplug *disk_get_and_lock_zone_wplug(struct gendisk *disk,
|
||||
return NULL;
|
||||
|
||||
INIT_HLIST_NODE(&zwplug->node);
|
||||
INIT_LIST_HEAD(&zwplug->link);
|
||||
refcount_set(&zwplug->ref, 2);
|
||||
spin_lock_init(&zwplug->lock);
|
||||
zwplug->flags = 0;
|
||||
zwplug->zone_no = zno;
|
||||
zwplug->wp_offset = sector & (disk->queue->limits.chunk_sectors - 1);
|
||||
zwplug->wp_offset = bdev_offset_from_zone_start(disk->part0, sector);
|
||||
bio_list_init(&zwplug->bio_list);
|
||||
INIT_WORK(&zwplug->bio_work, blk_zone_wplug_bio_work);
|
||||
zwplug->disk = disk;
|
||||
@ -574,115 +599,22 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
|
||||
}
|
||||
|
||||
/*
|
||||
* Abort (fail) all plugged BIOs of a zone write plug that are not aligned
|
||||
* with the assumed write pointer location of the zone when the BIO will
|
||||
* be unplugged.
|
||||
*/
|
||||
static void disk_zone_wplug_abort_unaligned(struct gendisk *disk,
|
||||
struct blk_zone_wplug *zwplug)
|
||||
{
|
||||
unsigned int wp_offset = zwplug->wp_offset;
|
||||
struct bio_list bl = BIO_EMPTY_LIST;
|
||||
struct bio *bio;
|
||||
|
||||
while ((bio = bio_list_pop(&zwplug->bio_list))) {
|
||||
if (disk_zone_is_full(disk, zwplug->zone_no, wp_offset) ||
|
||||
(bio_op(bio) != REQ_OP_ZONE_APPEND &&
|
||||
bio_offset_from_zone_start(bio) != wp_offset)) {
|
||||
blk_zone_wplug_bio_io_error(zwplug, bio);
|
||||
continue;
|
||||
}
|
||||
|
||||
wp_offset += bio_sectors(bio);
|
||||
bio_list_add(&bl, bio);
|
||||
}
|
||||
|
||||
bio_list_merge(&zwplug->bio_list, &bl);
|
||||
}
|
||||
|
||||
static inline void disk_zone_wplug_set_error(struct gendisk *disk,
|
||||
struct blk_zone_wplug *zwplug)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (zwplug->flags & BLK_ZONE_WPLUG_ERROR)
|
||||
return;
|
||||
|
||||
/*
|
||||
* At this point, we already have a reference on the zone write plug.
|
||||
* However, since we are going to add the plug to the disk zone write
|
||||
* plugs work list, increase its reference count. This reference will
|
||||
* be dropped in disk_zone_wplugs_work() once the error state is
|
||||
* handled, or in disk_zone_wplug_clear_error() if the zone is reset or
|
||||
* finished.
|
||||
*/
|
||||
zwplug->flags |= BLK_ZONE_WPLUG_ERROR;
|
||||
refcount_inc(&zwplug->ref);
|
||||
|
||||
spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
|
||||
list_add_tail(&zwplug->link, &disk->zone_wplugs_err_list);
|
||||
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
|
||||
}
|
||||
|
||||
static inline void disk_zone_wplug_clear_error(struct gendisk *disk,
|
||||
struct blk_zone_wplug *zwplug)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (!(zwplug->flags & BLK_ZONE_WPLUG_ERROR))
|
||||
return;
|
||||
|
||||
/*
|
||||
* We are racing with the error handling work which drops the reference
|
||||
* on the zone write plug after handling the error state. So remove the
|
||||
* plug from the error list and drop its reference count only if the
|
||||
* error handling has not yet started, that is, if the zone write plug
|
||||
* is still listed.
|
||||
*/
|
||||
spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
|
||||
if (!list_empty(&zwplug->link)) {
|
||||
list_del_init(&zwplug->link);
|
||||
zwplug->flags &= ~BLK_ZONE_WPLUG_ERROR;
|
||||
disk_put_zone_wplug(zwplug);
|
||||
}
|
||||
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set a zone write plug write pointer offset to either 0 (zone reset case)
|
||||
* or to the zone size (zone finish case). This aborts all plugged BIOs, which
|
||||
* is fine to do as doing a zone reset or zone finish while writes are in-flight
|
||||
* is a mistake from the user which will most likely cause all plugged BIOs to
|
||||
* fail anyway.
|
||||
* Set a zone write plug write pointer offset to the specified value.
|
||||
* This aborts all plugged BIOs, which is fine as this function is called for
|
||||
* a zone reset operation, a zone finish operation or if the zone needs a wp
|
||||
* update from a report zone after a write error.
|
||||
*/
|
||||
static void disk_zone_wplug_set_wp_offset(struct gendisk *disk,
|
||||
struct blk_zone_wplug *zwplug,
|
||||
unsigned int wp_offset)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&zwplug->lock, flags);
|
||||
|
||||
/*
|
||||
* Make sure that a BIO completion or another zone reset or finish
|
||||
* operation has not already removed the plug from the hash table.
|
||||
*/
|
||||
if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) {
|
||||
spin_unlock_irqrestore(&zwplug->lock, flags);
|
||||
return;
|
||||
}
|
||||
lockdep_assert_held(&zwplug->lock);
|
||||
|
||||
/* Update the zone write pointer and abort all plugged BIOs. */
|
||||
zwplug->flags &= ~BLK_ZONE_WPLUG_NEED_WP_UPDATE;
|
||||
zwplug->wp_offset = wp_offset;
|
||||
disk_zone_wplug_abort(zwplug);
|
||||
|
||||
/*
|
||||
* Updating the write pointer offset puts back the zone
|
||||
* in a good state. So clear the error flag and decrement the
|
||||
* error count if we were in error state.
|
||||
*/
|
||||
disk_zone_wplug_clear_error(disk, zwplug);
|
||||
|
||||
/*
|
||||
* The zone write plug now has no BIO plugged: remove it from the
|
||||
* hash table so that it cannot be seen. The plug will be freed
|
||||
@ -690,8 +622,58 @@ static void disk_zone_wplug_set_wp_offset(struct gendisk *disk,
|
||||
*/
|
||||
if (disk_should_remove_zone_wplug(disk, zwplug))
|
||||
disk_remove_zone_wplug(disk, zwplug);
|
||||
}
|
||||
|
||||
static unsigned int blk_zone_wp_offset(struct blk_zone *zone)
|
||||
{
|
||||
switch (zone->cond) {
|
||||
case BLK_ZONE_COND_IMP_OPEN:
|
||||
case BLK_ZONE_COND_EXP_OPEN:
|
||||
case BLK_ZONE_COND_CLOSED:
|
||||
return zone->wp - zone->start;
|
||||
case BLK_ZONE_COND_FULL:
|
||||
return zone->len;
|
||||
case BLK_ZONE_COND_EMPTY:
|
||||
return 0;
|
||||
case BLK_ZONE_COND_NOT_WP:
|
||||
case BLK_ZONE_COND_OFFLINE:
|
||||
case BLK_ZONE_COND_READONLY:
|
||||
default:
|
||||
/*
|
||||
* Conventional, offline and read-only zones do not have a valid
|
||||
* write pointer.
|
||||
*/
|
||||
return UINT_MAX;
|
||||
}
|
||||
}
|
||||
|
||||
static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk,
|
||||
struct blk_zone *zone)
|
||||
{
|
||||
struct blk_zone_wplug *zwplug;
|
||||
unsigned long flags;
|
||||
|
||||
zwplug = disk_get_zone_wplug(disk, zone->start);
|
||||
if (!zwplug)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&zwplug->lock, flags);
|
||||
if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE)
|
||||
disk_zone_wplug_set_wp_offset(disk, zwplug,
|
||||
blk_zone_wp_offset(zone));
|
||||
spin_unlock_irqrestore(&zwplug->lock, flags);
|
||||
|
||||
disk_put_zone_wplug(zwplug);
|
||||
}
|
||||
|
||||
static int disk_zone_sync_wp_offset(struct gendisk *disk, sector_t sector)
|
||||
{
|
||||
struct disk_report_zones_cb_args args = {
|
||||
.disk = disk,
|
||||
};
|
||||
|
||||
return disk->fops->report_zones(disk, sector, 1,
|
||||
disk_report_zones_cb, &args);
|
||||
}
|
||||
|
||||
static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
|
||||
@ -700,6 +682,7 @@ static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
|
||||
struct gendisk *disk = bio->bi_bdev->bd_disk;
|
||||
sector_t sector = bio->bi_iter.bi_sector;
|
||||
struct blk_zone_wplug *zwplug;
|
||||
unsigned long flags;
|
||||
|
||||
/* Conventional zones cannot be reset nor finished. */
|
||||
if (!bdev_zone_is_seq(bio->bi_bdev, sector)) {
|
||||
@ -707,6 +690,15 @@ static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* No-wait reset or finish BIOs do not make much sense as the callers
|
||||
* issue these as blocking operations in most cases. To avoid issues
|
||||
* the BIO execution potentially failing with BLK_STS_AGAIN, warn about
|
||||
* REQ_NOWAIT being set and ignore that flag.
|
||||
*/
|
||||
if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT))
|
||||
bio->bi_opf &= ~REQ_NOWAIT;
|
||||
|
||||
/*
|
||||
* If we have a zone write plug, set its write pointer offset to 0
|
||||
* (reset case) or to the zone size (finish case). This will abort all
|
||||
@ -716,7 +708,9 @@ static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
|
||||
*/
|
||||
zwplug = disk_get_zone_wplug(disk, sector);
|
||||
if (zwplug) {
|
||||
spin_lock_irqsave(&zwplug->lock, flags);
|
||||
disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset);
|
||||
spin_unlock_irqrestore(&zwplug->lock, flags);
|
||||
disk_put_zone_wplug(zwplug);
|
||||
}
|
||||
|
||||
@ -727,6 +721,7 @@ static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
|
||||
{
|
||||
struct gendisk *disk = bio->bi_bdev->bd_disk;
|
||||
struct blk_zone_wplug *zwplug;
|
||||
unsigned long flags;
|
||||
sector_t sector;
|
||||
|
||||
/*
|
||||
@ -738,7 +733,9 @@ static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
|
||||
sector += disk->queue->limits.chunk_sectors) {
|
||||
zwplug = disk_get_zone_wplug(disk, sector);
|
||||
if (zwplug) {
|
||||
spin_lock_irqsave(&zwplug->lock, flags);
|
||||
disk_zone_wplug_set_wp_offset(disk, zwplug, 0);
|
||||
spin_unlock_irqrestore(&zwplug->lock, flags);
|
||||
disk_put_zone_wplug(zwplug);
|
||||
}
|
||||
}
|
||||
@ -746,9 +743,25 @@ static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug,
|
||||
struct bio *bio, unsigned int nr_segs)
|
||||
static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
|
||||
struct blk_zone_wplug *zwplug)
|
||||
{
|
||||
/*
|
||||
* Take a reference on the zone write plug and schedule the submission
|
||||
* of the next plugged BIO. blk_zone_wplug_bio_work() will release the
|
||||
* reference we take here.
|
||||
*/
|
||||
WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
|
||||
refcount_inc(&zwplug->ref);
|
||||
queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
|
||||
}
|
||||
|
||||
static inline void disk_zone_wplug_add_bio(struct gendisk *disk,
|
||||
struct blk_zone_wplug *zwplug,
|
||||
struct bio *bio, unsigned int nr_segs)
|
||||
{
|
||||
bool schedule_bio_work = false;
|
||||
|
||||
/*
|
||||
* Grab an extra reference on the BIO request queue usage counter.
|
||||
* This reference will be reused to submit a request for the BIO for
|
||||
@ -764,6 +777,16 @@ static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug,
|
||||
*/
|
||||
bio_clear_polled(bio);
|
||||
|
||||
/*
|
||||
* REQ_NOWAIT BIOs are always handled using the zone write plug BIO
|
||||
* work, which can block. So clear the REQ_NOWAIT flag and schedule the
|
||||
* work if this is the first BIO we are plugging.
|
||||
*/
|
||||
if (bio->bi_opf & REQ_NOWAIT) {
|
||||
schedule_bio_work = !(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
|
||||
bio->bi_opf &= ~REQ_NOWAIT;
|
||||
}
|
||||
|
||||
/*
|
||||
* Reuse the poll cookie field to store the number of segments when
|
||||
* split to the hardware limits.
|
||||
@ -777,6 +800,11 @@ static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug,
|
||||
* at the tail of the list to preserve the sequential write order.
|
||||
*/
|
||||
bio_list_add(&zwplug->bio_list, bio);
|
||||
|
||||
zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
|
||||
|
||||
if (schedule_bio_work)
|
||||
disk_zone_wplug_schedule_bio_work(disk, zwplug);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -889,13 +917,23 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
|
||||
{
|
||||
struct gendisk *disk = bio->bi_bdev->bd_disk;
|
||||
|
||||
/*
|
||||
* If we lost track of the zone write pointer due to a write error,
|
||||
* the user must either execute a report zones, reset the zone or finish
|
||||
* the to recover a reliable write pointer position. Fail BIOs if the
|
||||
* user did not do that as we cannot handle emulated zone append
|
||||
* otherwise.
|
||||
*/
|
||||
if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Check that the user is not attempting to write to a full zone.
|
||||
* We know such BIO will fail, and that would potentially overflow our
|
||||
* write pointer offset beyond the end of the zone.
|
||||
*/
|
||||
if (disk_zone_wplug_is_full(disk, zwplug))
|
||||
goto err;
|
||||
return false;
|
||||
|
||||
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
|
||||
/*
|
||||
@ -914,24 +952,18 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
|
||||
bio_set_flag(bio, BIO_EMULATES_ZONE_APPEND);
|
||||
} else {
|
||||
/*
|
||||
* Check for non-sequential writes early because we avoid a
|
||||
* whole lot of error handling trouble if we don't send it off
|
||||
* to the driver.
|
||||
* Check for non-sequential writes early as we know that BIOs
|
||||
* with a start sector not unaligned to the zone write pointer
|
||||
* will fail.
|
||||
*/
|
||||
if (bio_offset_from_zone_start(bio) != zwplug->wp_offset)
goto err;
return false;
}

/* Advance the zone write pointer offset. */
zwplug->wp_offset += bio_sectors(bio);

return true;

err:
/* We detected an invalid write BIO: schedule error recovery. */
disk_zone_wplug_set_error(disk, zwplug);
kblockd_schedule_work(&disk->zone_wplugs_work);
return false;
}

static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
@ -970,7 +1002,10 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)

zwplug = disk_get_and_lock_zone_wplug(disk, sector, gfp_mask, &flags);
if (!zwplug) {
bio_io_error(bio);
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
else
bio_io_error(bio);
return true;
}

@ -978,18 +1013,20 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);

/*
* If the zone is already plugged or has a pending error, add the BIO
* to the plug BIO list. Otherwise, plug and let the BIO execute.
* If the zone is already plugged, add the BIO to the plug BIO list.
* Do the same for REQ_NOWAIT BIOs to ensure that we will not see a
* BLK_STS_AGAIN failure if we let the BIO execute.
* Otherwise, plug and let the BIO execute.
*/
if (zwplug->flags & BLK_ZONE_WPLUG_BUSY)
if ((zwplug->flags & BLK_ZONE_WPLUG_PLUGGED) ||
(bio->bi_opf & REQ_NOWAIT))
goto plug;

/*
* If an error is detected when preparing the BIO, add it to the BIO
* list so that error recovery can deal with it.
*/
if (!blk_zone_wplug_prepare_bio(zwplug, bio))
goto plug;
if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
spin_unlock_irqrestore(&zwplug->lock, flags);
bio_io_error(bio);
return true;
}

zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;

@ -998,8 +1035,7 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
return false;

plug:
zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
blk_zone_wplug_add_bio(zwplug, bio, nr_segs);
disk_zone_wplug_add_bio(disk, zwplug, bio, nr_segs);

spin_unlock_irqrestore(&zwplug->lock, flags);

@ -1083,19 +1119,6 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
}
EXPORT_SYMBOL_GPL(blk_zone_plug_bio);

static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
/*
* Take a reference on the zone write plug and schedule the submission
* of the next plugged BIO. blk_zone_wplug_bio_work() will release the
* reference we take here.
*/
WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
refcount_inc(&zwplug->ref);
queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
}

static void disk_zone_wplug_unplug_bio(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
@ -1103,16 +1126,6 @@ static void disk_zone_wplug_unplug_bio(struct gendisk *disk,

spin_lock_irqsave(&zwplug->lock, flags);

/*
* If we had an error, schedule error recovery. The recovery work
* will restart submission of plugged BIOs.
*/
if (zwplug->flags & BLK_ZONE_WPLUG_ERROR) {
spin_unlock_irqrestore(&zwplug->lock, flags);
kblockd_schedule_work(&disk->zone_wplugs_work);
return;
}

/* Schedule submission of the next plugged BIO if we have one. */
if (!bio_list_empty(&zwplug->bio_list)) {
disk_zone_wplug_schedule_bio_work(disk, zwplug);
@ -1155,12 +1168,13 @@ void blk_zone_write_plug_bio_endio(struct bio *bio)
}

/*
* If the BIO failed, mark the plug as having an error to trigger
* recovery.
* If the BIO failed, abort all plugged BIOs and mark the plug as
* needing a write pointer update.
*/
if (bio->bi_status != BLK_STS_OK) {
spin_lock_irqsave(&zwplug->lock, flags);
disk_zone_wplug_set_error(disk, zwplug);
disk_zone_wplug_abort(zwplug);
zwplug->flags |= BLK_ZONE_WPLUG_NEED_WP_UPDATE;
spin_unlock_irqrestore(&zwplug->lock, flags);
}

@ -1216,6 +1230,7 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
*/
spin_lock_irqsave(&zwplug->lock, flags);

again:
bio = bio_list_pop(&zwplug->bio_list);
if (!bio) {
zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
@ -1224,10 +1239,8 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
}

if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
/* Error recovery will decide what to do with the BIO. */
bio_list_add_head(&zwplug->bio_list, bio);
spin_unlock_irqrestore(&zwplug->lock, flags);
goto put_zwplug;
blk_zone_wplug_bio_io_error(zwplug, bio);
goto again;
}

spin_unlock_irqrestore(&zwplug->lock, flags);
@ -1249,120 +1262,6 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
disk_put_zone_wplug(zwplug);
}

static unsigned int blk_zone_wp_offset(struct blk_zone *zone)
{
switch (zone->cond) {
case BLK_ZONE_COND_IMP_OPEN:
case BLK_ZONE_COND_EXP_OPEN:
case BLK_ZONE_COND_CLOSED:
return zone->wp - zone->start;
case BLK_ZONE_COND_FULL:
return zone->len;
case BLK_ZONE_COND_EMPTY:
return 0;
case BLK_ZONE_COND_NOT_WP:
case BLK_ZONE_COND_OFFLINE:
case BLK_ZONE_COND_READONLY:
default:
/*
* Conventional, offline and read-only zones do not have a valid
* write pointer.
*/
return UINT_MAX;
}
}

static int blk_zone_wplug_report_zone_cb(struct blk_zone *zone,
unsigned int idx, void *data)
{
struct blk_zone *zonep = data;

*zonep = *zone;
return 0;
}

static void disk_zone_wplug_handle_error(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
sector_t zone_start_sector =
bdev_zone_sectors(disk->part0) * zwplug->zone_no;
unsigned int noio_flag;
struct blk_zone zone;
unsigned long flags;
int ret;

/* Get the current zone information from the device. */
noio_flag = memalloc_noio_save();
ret = disk->fops->report_zones(disk, zone_start_sector, 1,
blk_zone_wplug_report_zone_cb, &zone);
memalloc_noio_restore(noio_flag);

spin_lock_irqsave(&zwplug->lock, flags);

/*
* A zone reset or finish may have cleared the error already. In such
* case, do nothing as the report zones may have seen the "old" write
* pointer value before the reset/finish operation completed.
*/
if (!(zwplug->flags & BLK_ZONE_WPLUG_ERROR))
goto unlock;

zwplug->flags &= ~BLK_ZONE_WPLUG_ERROR;

if (ret != 1) {
/*
* We failed to get the zone information, meaning that something
* is likely really wrong with the device. Abort all remaining
* plugged BIOs as otherwise we could end up waiting forever on
* plugged BIOs to complete if there is a queue freeze on-going.
*/
disk_zone_wplug_abort(zwplug);
goto unplug;
}

/* Update the zone write pointer offset. */
zwplug->wp_offset = blk_zone_wp_offset(&zone);
disk_zone_wplug_abort_unaligned(disk, zwplug);

/* Restart BIO submission if we still have any BIO left. */
if (!bio_list_empty(&zwplug->bio_list)) {
disk_zone_wplug_schedule_bio_work(disk, zwplug);
goto unlock;
}

unplug:
zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
if (disk_should_remove_zone_wplug(disk, zwplug))
disk_remove_zone_wplug(disk, zwplug);

unlock:
spin_unlock_irqrestore(&zwplug->lock, flags);
}

static void disk_zone_wplugs_work(struct work_struct *work)
{
struct gendisk *disk =
container_of(work, struct gendisk, zone_wplugs_work);
struct blk_zone_wplug *zwplug;
unsigned long flags;

spin_lock_irqsave(&disk->zone_wplugs_lock, flags);

while (!list_empty(&disk->zone_wplugs_err_list)) {
zwplug = list_first_entry(&disk->zone_wplugs_err_list,
struct blk_zone_wplug, link);
list_del_init(&zwplug->link);
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);

disk_zone_wplug_handle_error(disk, zwplug);
disk_put_zone_wplug(zwplug);

spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
}

spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
}

static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
{
return 1U << disk->zone_wplugs_hash_bits;
@ -1371,8 +1270,6 @@ static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
void disk_init_zone_resources(struct gendisk *disk)
{
spin_lock_init(&disk->zone_wplugs_lock);
INIT_LIST_HEAD(&disk->zone_wplugs_err_list);
INIT_WORK(&disk->zone_wplugs_work, disk_zone_wplugs_work);
}

/*
@ -1471,8 +1368,6 @@ void disk_free_zone_resources(struct gendisk *disk)
if (!disk->zone_wplugs_pool)
return;

cancel_work_sync(&disk->zone_wplugs_work);

if (disk->zone_wplugs_wq) {
destroy_workqueue(disk->zone_wplugs_wq);
disk->zone_wplugs_wq = NULL;
@ -1669,6 +1564,8 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
if (!disk->zone_wplugs_hash)
return 0;

disk_zone_wplug_sync_wp_offset(disk, zone);

wp_offset = blk_zone_wp_offset(zone);
if (!wp_offset || wp_offset >= zone->capacity)
return 0;
@ -1799,6 +1696,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
memalloc_noio_restore(noio_flag);
return ret;
}

ret = disk->fops->report_zones(disk, 0, UINT_MAX,
blk_revalidate_zone_cb, &args);
if (!ret) {
@ -1835,6 +1733,48 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);

/**
* blk_zone_issue_zeroout - zero-fill a block range in a zone
* @bdev: blockdev to write
* @sector: start sector
* @nr_sects: number of sectors to write
* @gfp_mask: memory allocation flags (for bio_alloc)
*
* Description:
* Zero-fill a block range in a zone (@sector must be equal to the zone write
* pointer), handling potential errors due to the (initially unknown) lack of
* hardware offload (See blkdev_issue_zeroout()).
*/
int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask)
{
int ret;

if (WARN_ON_ONCE(!bdev_is_zoned(bdev)))
return -EIO;

ret = blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
BLKDEV_ZERO_NOFALLBACK);
if (ret != -EOPNOTSUPP)
return ret;

/*
* The failed call to blkdev_issue_zeroout() advanced the zone write
* pointer. Undo this using a report zone to update the zone write
* pointer to the correct current value.
*/
ret = disk_zone_sync_wp_offset(bdev->bd_disk, sector);
if (ret != 1)
return ret < 0 ? ret : -EIO;

/*
* Retry without BLKDEV_ZERO_NOFALLBACK to force the fallback to a
* regular write with zero-pages.
*/
return blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, 0);
}
EXPORT_SYMBOL_GPL(blk_zone_issue_zeroout);

#ifdef CONFIG_BLK_DEBUG_FS

int queue_zone_wplugs_show(void *data, struct seq_file *m)

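The blk_zone_issue_zeroout() helper added above follows a try-the-offload-then-fall-back shape: attempt the fast path, resynchronize state after a failed attempt, then retry with the fallback. Below is a minimal userspace sketch of the same control flow; try_offload(), resync_state() and fallback_write() are hypothetical stand-ins for the kernel calls, not real APIs.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for blkdev_issue_zeroout() and friends. */
static int try_offload(void)    { return -EOPNOTSUPP; } /* device lacks the offload */
static int resync_state(void)   { return 1; }           /* 1 == one zone reported */
static int fallback_write(void) { return 0; }

static int issue_zeroout(void)
{
	int ret = try_offload();

	/* Any result other than "not supported" is final. */
	if (ret != -EOPNOTSUPP)
		return ret;

	/* The failed attempt moved state forward; undo that before retrying. */
	ret = resync_state();
	if (ret != 1)
		return ret < 0 ? ret : -EIO;

	/* Retry with the fallback path (plain zero-page writes). */
	return fallback_write();
}

int main(void)
{
	printf("issue_zeroout() -> %d\n", issue_zeroout());
	return 0;
}

The point mirrored from the kernel code is that the failed offload attempt leaves state behind (the advanced zone write pointer), so it must be resynchronized before the retry.
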
@ -698,8 +698,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
list_add(&rq->queuelist, &per_prio->dispatch);
rq->fifo_time = jiffies;
} else {
struct list_head *insert_before;

deadline_add_rq_rb(per_prio, rq);

if (rq_mergeable(rq)) {
@ -712,8 +710,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
* set expire time and add to fifo list
*/
rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
insert_before = &per_prio->fifo_list[data_dir];
list_add_tail(&rq->queuelist, insert_before);
list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
}
}

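The dd_insert_request() change drops the insert_before indirection but keeps the invariant that matters: every request gets fifo_time = now plus a fixed expire interval and is appended at the tail, so the FIFO stays sorted by deadline and expiry checks only ever look at the head. A small illustrative sketch, with a made-up FIFO_EXPIRE value:

#include <stdio.h>
#include <time.h>

#define FIFO_EXPIRE 5 /* seconds, hypothetical */

struct req { time_t fifo_time; };

int main(void)
{
	struct req fifo[4];
	time_t now = time(NULL);

	for (int i = 0; i < 4; i++) {
		fifo[i].fifo_time = now + FIFO_EXPIRE; /* set at insertion time */
		now += 1;                              /* later submissions */
	}

	/* Tail insertion kept the list ordered; the head expires first. */
	printf("head expires %ld, tail expires %ld\n",
	       (long)fifo[0].fifo_time, (long)fifo[3].fifo_time);
	return 0;
}
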
@ -163,10 +163,6 @@ static int rsassa_pkcs1_sign(struct crypto_sig *tfm,
struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst);
const struct hash_prefix *hash_prefix = ictx->hash_prefix;
struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
unsigned int child_reqsize = crypto_akcipher_reqsize(ctx->child);
struct akcipher_request *child_req __free(kfree_sensitive) = NULL;
struct scatterlist in_sg[3], out_sg;
struct crypto_wait cwait;
unsigned int pad_len;
unsigned int ps_end;
unsigned int len;
@ -187,37 +183,25 @@ static int rsassa_pkcs1_sign(struct crypto_sig *tfm,

pad_len = ctx->key_size - slen - hash_prefix->size - 1;

child_req = kmalloc(sizeof(*child_req) + child_reqsize + pad_len,
GFP_KERNEL);
if (!child_req)
return -ENOMEM;

/* RFC 8017 sec 8.2.1 step 1 - EMSA-PKCS1-v1_5 encoding generation */
in_buf = (u8 *)(child_req + 1) + child_reqsize;
in_buf = dst;
memmove(in_buf + pad_len + hash_prefix->size, src, slen);
memcpy(in_buf + pad_len, hash_prefix->data, hash_prefix->size);

ps_end = pad_len - 1;
in_buf[0] = 0x01;
memset(in_buf + 1, 0xff, ps_end - 1);
in_buf[ps_end] = 0x00;

/* RFC 8017 sec 8.2.1 step 2 - RSA signature */
crypto_init_wait(&cwait);
sg_init_table(in_sg, 3);
sg_set_buf(&in_sg[0], in_buf, pad_len);
sg_set_buf(&in_sg[1], hash_prefix->data, hash_prefix->size);
sg_set_buf(&in_sg[2], src, slen);
sg_init_one(&out_sg, dst, dlen);
akcipher_request_set_tfm(child_req, ctx->child);
akcipher_request_set_crypt(child_req, in_sg, &out_sg,
ctx->key_size - 1, dlen);
akcipher_request_set_callback(child_req, CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &cwait);

err = crypto_akcipher_decrypt(child_req);
err = crypto_wait_req(err, &cwait);
if (err)
/* RFC 8017 sec 8.2.1 step 2 - RSA signature */
err = crypto_akcipher_sync_decrypt(ctx->child, in_buf,
ctx->key_size - 1, in_buf,
ctx->key_size);
if (err < 0)
return err;

len = child_req->dst_len;
len = err;
pad_len = ctx->key_size - len;

/* Four billion to one */
@ -239,8 +223,8 @@ static int rsassa_pkcs1_verify(struct crypto_sig *tfm,
struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
unsigned int child_reqsize = crypto_akcipher_reqsize(ctx->child);
struct akcipher_request *child_req __free(kfree_sensitive) = NULL;
struct scatterlist in_sg, out_sg;
struct crypto_wait cwait;
struct scatterlist sg;
unsigned int dst_len;
unsigned int pos;
u8 *out_buf;
@ -259,13 +243,12 @@ static int rsassa_pkcs1_verify(struct crypto_sig *tfm,
return -ENOMEM;

out_buf = (u8 *)(child_req + 1) + child_reqsize;
memcpy(out_buf, src, slen);

crypto_init_wait(&cwait);
sg_init_one(&in_sg, src, slen);
sg_init_one(&out_sg, out_buf, ctx->key_size);
sg_init_one(&sg, out_buf, slen);
akcipher_request_set_tfm(child_req, ctx->child);
akcipher_request_set_crypt(child_req, &in_sg, &out_sg,
slen, ctx->key_size);
akcipher_request_set_crypt(child_req, &sg, &sg, slen, slen);
akcipher_request_set_callback(child_req, CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &cwait);

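The sign path above builds the EMSA-PKCS1-v1_5 encoding of RFC 8017 (EM = 0x00 || 0x01 || PS || 0x00 || DigestInfo || hash), now assembled piecewise via a scatterlist instead of one contiguous buffer. Here is a standalone sketch of the same padding layout; the key size, DigestInfo prefix and hash bytes are dummies, not real values, and the leading 0x00 octet is implicit because the RSA primitive is fed key_size - 1 bytes, exactly as in the code above.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char prefix[3] = { 0x30, 0x31, 0x30 }; /* truncated dummy DigestInfo */
	unsigned char hash[4]   = { 0xde, 0xad, 0xbe, 0xef }; /* dummy digest */
	size_t key_size = 32, slen = sizeof(hash);
	size_t pad_len = key_size - slen - sizeof(prefix) - 1;
	size_t ps_end = pad_len - 1;
	unsigned char em[31]; /* key_size - 1 bytes */

	/* 0x01, then PS = 0xFF..0xFF, then the 0x00 separator... */
	em[0] = 0x01;
	memset(em + 1, 0xff, ps_end - 1);
	em[ps_end] = 0x00;
	/* ...then DigestInfo prefix and the hash itself. */
	memcpy(em + pad_len, prefix, sizeof(prefix));
	memcpy(em + pad_len + sizeof(prefix), hash, slen);

	for (size_t i = 0; i < key_size - 1; i++)
		printf("%02x", em[i]);
	printf("\n");
	return 0;
}
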
@ -232,8 +232,6 @@ acpi_remove_address_space_handler(acpi_handle device,

/* Now we can delete the handler object */

acpi_os_release_mutex(handler_obj->address_space.
context_mutex);
acpi_ut_remove_reference(handler_obj);
goto unlock_and_exit;
}

@ -454,8 +454,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
if (cmd_rc)
*cmd_rc = -EINVAL;

if (cmd == ND_CMD_CALL)
if (cmd == ND_CMD_CALL) {
if (!buf || buf_len < sizeof(*call_pkg))
return -EINVAL;

call_pkg = buf;
}

func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
if (func < 0)
return func;

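The acpi_nfit_ctl() fix checks buf_len against sizeof(*call_pkg) before treating the caller's buffer as a struct. The same defensive pattern in a self-contained sketch; the struct layout and values here are made up for illustration:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct call_pkg { unsigned int family; unsigned int func; }; /* stand-in */

/* Never reinterpret a caller-supplied buffer as a struct without first
 * checking that the buffer is large enough to contain it. */
static int handle_call(const void *buf, size_t buf_len)
{
	struct call_pkg pkg;

	if (!buf || buf_len < sizeof(pkg))
		return -1; /* -EINVAL in the kernel */

	memcpy(&pkg, buf, sizeof(pkg)); /* copy also avoids alignment traps */
	printf("family=%u func=%u\n", pkg.family, pkg.func);
	return 0;
}

int main(void)
{
	unsigned int raw[2] = { 3, 7 };

	printf("ok:    %d\n", handle_call(raw, sizeof(raw)));
	printf("short: %d\n", handle_call(raw, 2)); /* rejected */
	return 0;
}
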
@ -250,6 +250,9 @@ static bool acpi_decode_space(struct resource_win *win,
switch (addr->resource_type) {
case ACPI_MEMORY_RANGE:
acpi_dev_memresource_flags(res, len, wp);

if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
res->flags |= IORESOURCE_PREFETCH;
break;
case ACPI_IO_RANGE:
acpi_dev_ioresource_flags(res, len, iodec,
@ -265,9 +268,6 @@ static bool acpi_decode_space(struct resource_win *win,
if (addr->producer_consumer == ACPI_PRODUCER)
res->flags |= IORESOURCE_WINDOW;

if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
res->flags |= IORESOURCE_PREFETCH;

return !(res->flags & IORESOURCE_DISABLED);
}

@ -348,6 +348,7 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
phy_nodes[phy] = phy_data.np;
cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
if (cphy_base[phy] == NULL) {
of_node_put(phy_data.np);
return 0;
}
phy_count += 1;

@ -395,6 +395,7 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btmtk_data *data = hci_get_priv(hdev);
int err;
bool complete = false;

if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) {
kfree_skb(skb);
@ -416,19 +417,22 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
fallthrough;
case HCI_DEVCOREDUMP_ACTIVE:
default:
/* Mediatek coredump data would be more than MTK_COREDUMP_NUM */
if (data->cd_info.cnt >= MTK_COREDUMP_NUM &&
skb->len > MTK_COREDUMP_END_LEN)
if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1))
complete = true;

err = hci_devcd_append(hdev, skb);
if (err < 0)
break;
data->cd_info.cnt++;

/* Mediatek coredump data would be more than MTK_COREDUMP_NUM */
if (data->cd_info.cnt > MTK_COREDUMP_NUM &&
skb->len > MTK_COREDUMP_END_LEN)
if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1)) {
bt_dev_info(hdev, "Mediatek coredump end");
hci_devcd_complete(hdev);
}
if (complete) {
bt_dev_info(hdev, "Mediatek coredump end");
hci_devcd_complete(hdev);
}

break;
}

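The btmtk change computes the end-of-coredump test once, before hci_devcd_append() consumes the skb, by memcmp()-ing a marker against the tail of the fragment. A minimal userspace sketch of that tail comparison; the marker string and lengths here are made up, standing in for MTK_COREDUMP_END and MTK_COREDUMP_END_LEN:

#include <stdio.h>
#include <string.h>

#define END_MARKER "coredump end"       /* hypothetical marker */
#define END_LEN    (sizeof(END_MARKER)) /* includes the NUL, as the driver's
                                         * MTK_COREDUMP_END_LEN does */

/* Detect a trailing end marker by comparing the last END_LEN - 1 marker
 * bytes at the END_LEN-byte tail of the fragment. */
static int is_last_fragment(const unsigned char *data, size_t len)
{
	if (len <= END_LEN)
		return 0;
	return !memcmp(data + len - END_LEN, END_MARKER, END_LEN - 1);
}

int main(void)
{
	unsigned char frag[64] = "....payload....coredump end";

	/* Include the terminating NUL so the marker fills the tail. */
	printf("%d\n", is_last_fragment(frag, strlen((char *)frag) + 1));
	return 0;
}
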
@ -87,6 +87,7 @@ static const u32 slic_base[] = { 100000000, 3125000 };
static const u32 npu_base[] = { 333000000, 400000000, 500000000 };
/* EN7581 */
static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 };
static const u32 bus7581_base[] = { 600000000, 540000000 };
static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 };
static const u32 crypto_base[] = { 540000000, 480000000 };

@ -222,8 +223,8 @@ static const struct en_clk_desc en7581_base_clks[] = {
.base_reg = REG_BUS_CLK_DIV_SEL,
.base_bits = 1,
.base_shift = 8,
.base_values = bus_base,
.n_base_values = ARRAY_SIZE(bus_base),
.base_values = bus7581_base,
.n_base_values = ARRAY_SIZE(bus7581_base),

.div_bits = 3,
.div_shift = 0,
@ -503,6 +504,8 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
u32 rate;
int i;

clk_data->num = EN7523_NUM_CLOCKS;

for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
const struct en_clk_desc *desc = &en7523_base_clks[i];
u32 reg = desc->div_reg ? desc->div_reg : desc->base_reg;
@ -524,8 +527,6 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat

hw = en7523_register_pcie_clk(dev, np_base);
clk_data->hws[EN7523_CLK_PCIE] = hw;

clk_data->num = EN7523_NUM_CLOCKS;
}

static int en7523_clk_hw_init(struct platform_device *pdev,

@ -2530,7 +2530,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
rate = clk_core_req_round_rate_nolock(core, req_rate);

/* bail early if nothing to do */
if (rate == clk_core_get_rate_recalc(core))
if (rate == clk_core_get_rate_nolock(core))
return 0;

/* fail on a direct rate set of a protected provider */

@ -106,7 +106,7 @@ config COMMON_CLK_AXG_AUDIO
select COMMON_CLK_MESON_SCLK_DIV
select COMMON_CLK_MESON_CLKC_UTILS
select REGMAP_MMIO
depends on RESET_MESON_AUX
select RESET_CONTROLLER
help
Support for the audio clock controller on AmLogic A113D devices,
aka axg, Say Y if you want audio subsystem to work.

@ -15,8 +15,6 @@
#include <linux/reset-controller.h>
#include <linux/slab.h>

#include <soc/amlogic/reset-meson-aux.h>

#include "meson-clkc-utils.h"
#include "axg-audio.h"
#include "clk-regmap.h"
@ -1680,6 +1678,84 @@ static struct clk_regmap *const sm1_clk_regmaps[] = {
&sm1_earcrx_dmac_clk,
};

struct axg_audio_reset_data {
struct reset_controller_dev rstc;
struct regmap *map;
unsigned int offset;
};

static void axg_audio_reset_reg_and_bit(struct axg_audio_reset_data *rst,
unsigned long id,
unsigned int *reg,
unsigned int *bit)
{
unsigned int stride = regmap_get_reg_stride(rst->map);

*reg = (id / (stride * BITS_PER_BYTE)) * stride;
*reg += rst->offset;
*bit = id % (stride * BITS_PER_BYTE);
}

static int axg_audio_reset_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct axg_audio_reset_data *rst =
container_of(rcdev, struct axg_audio_reset_data, rstc);
unsigned int offset, bit;

axg_audio_reset_reg_and_bit(rst, id, &offset, &bit);

regmap_update_bits(rst->map, offset, BIT(bit),
assert ? BIT(bit) : 0);

return 0;
}

static int axg_audio_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct axg_audio_reset_data *rst =
container_of(rcdev, struct axg_audio_reset_data, rstc);
unsigned int val, offset, bit;

axg_audio_reset_reg_and_bit(rst, id, &offset, &bit);

regmap_read(rst->map, offset, &val);

return !!(val & BIT(bit));
}

static int axg_audio_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return axg_audio_reset_update(rcdev, id, true);
}

static int axg_audio_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return axg_audio_reset_update(rcdev, id, false);
}

static int axg_audio_reset_toggle(struct reset_controller_dev *rcdev,
unsigned long id)
{
int ret;

ret = axg_audio_reset_assert(rcdev, id);
if (ret)
return ret;

return axg_audio_reset_deassert(rcdev, id);
}

static const struct reset_control_ops axg_audio_rstc_ops = {
.assert = axg_audio_reset_assert,
.deassert = axg_audio_reset_deassert,
.reset = axg_audio_reset_toggle,
.status = axg_audio_reset_status,
};

static struct regmap_config axg_audio_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
@ -1690,14 +1766,16 @@ struct audioclk_data {
struct clk_regmap *const *regmap_clks;
unsigned int regmap_clk_num;
struct meson_clk_hw_data hw_clks;
unsigned int reset_offset;
unsigned int reset_num;
unsigned int max_register;
const char *rst_drvname;
};

static int axg_audio_clkc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct audioclk_data *data;
struct axg_audio_reset_data *rst;
struct regmap *map;
void __iomem *regs;
struct clk_hw *hw;
@ -1756,11 +1834,22 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
if (ret)
return ret;

/* Register auxiliary reset driver when applicable */
if (data->rst_drvname)
ret = devm_meson_rst_aux_register(dev, map, data->rst_drvname);
/* Stop here if there is no reset */
if (!data->reset_num)
return 0;

return ret;
rst = devm_kzalloc(dev, sizeof(*rst), GFP_KERNEL);
if (!rst)
return -ENOMEM;

rst->map = map;
rst->offset = data->reset_offset;
rst->rstc.nr_resets = data->reset_num;
rst->rstc.ops = &axg_audio_rstc_ops;
rst->rstc.of_node = dev->of_node;
rst->rstc.owner = THIS_MODULE;

return devm_reset_controller_register(dev, &rst->rstc);
}

static const struct audioclk_data axg_audioclk_data = {
@ -1780,8 +1869,9 @@ static const struct audioclk_data g12a_audioclk_data = {
.hws = g12a_audio_hw_clks,
.num = ARRAY_SIZE(g12a_audio_hw_clks),
},
.reset_offset = AUDIO_SW_RESET,
.reset_num = 26,
.max_register = AUDIO_CLK_SPDIFOUT_B_CTRL,
.rst_drvname = "rst-g12a",
};

static const struct audioclk_data sm1_audioclk_data = {
@ -1791,8 +1881,9 @@ static const struct audioclk_data sm1_audioclk_data = {
.hws = sm1_audio_hw_clks,
.num = ARRAY_SIZE(sm1_audio_hw_clks),
},
.reset_offset = AUDIO_SM1_SW_RESET0,
.reset_num = 39,
.max_register = AUDIO_EARCRX_DMAC_CLK_CTRL,
.rst_drvname = "rst-sm1",
};

static const struct of_device_id clkc_match_table[] = {

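axg_audio_reset_reg_and_bit() above maps a flat reset id onto a (register, bit) pair using the regmap stride (register width in bytes). A tiny standalone example of the same arithmetic; the stride and bank offset values are illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned int stride = 4;    /* 32-bit registers */
	unsigned int offset = 0x28; /* hypothetical reset bank base */

	/* Each register holds stride * 8 reset bits, so a flat id splits
	 * into a stride-aligned register offset and a bit within it. */
	for (unsigned int id = 30; id <= 34; id++) {
		unsigned int reg = (id / (stride * 8)) * stride + offset;
		unsigned int bit = id % (stride * 8);
		printf("id %u -> reg 0x%02x bit %u\n", id, reg, bit);
	}
	return 0;
}

Running this shows ids 30 and 31 landing in the first register and id 32 rolling over to bit 0 of the next stride-aligned register.
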
@ -192,7 +192,7 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)

down_read(&qm->qps_lock);
if (qm->sqc) {
memcpy(&sqc, qm->sqc + qp_id * sizeof(struct qm_sqc), sizeof(struct qm_sqc));
memcpy(&sqc, qm->sqc + qp_id, sizeof(struct qm_sqc));
sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, &sqc, sizeof(struct qm_sqc), "SOFT SQC");
@ -229,7 +229,7 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)

down_read(&qm->qps_lock);
if (qm->cqc) {
memcpy(&cqc, qm->cqc + qp_id * sizeof(struct qm_cqc), sizeof(struct qm_cqc));
memcpy(&cqc, qm->cqc + qp_id, sizeof(struct qm_cqc));
cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, &cqc, sizeof(struct qm_cqc), "SOFT CQC");

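The debugfs fix above drops a sizeof() multiplication: qm->sqc is a typed pointer, and pointer arithmetic on a typed pointer already advances by whole elements, so the old expression scaled the offset twice. A small demonstration of the two byte offsets; the struct size is a stand-in for the real layout:

#include <stdio.h>

struct qm_sqc { char pad[32]; }; /* size stands in for the real layout */

int main(void)
{
	int qp_id = 2;

	/* base + qp_id on a struct qm_sqc * advances by whole elements,
	 * i.e. by qp_id * sizeof(struct qm_sqc) bytes... */
	size_t right = qp_id * sizeof(struct qm_sqc);

	/* ...so writing base + qp_id * sizeof(struct qm_sqc), as the old
	 * code did, scales by the element size twice. */
	size_t wrong = qp_id * sizeof(struct qm_sqc) * sizeof(struct qm_sqc);

	printf("intended byte offset: %zu\n", right); /* 64 */
	printf("buggy byte offset:    %zu\n", wrong); /* 2048 */
	return 0;
}
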
@ -3362,36 +3362,24 @@ static bool dct_ecc_enabled(struct amd64_pvt *pvt)

static bool umc_ecc_enabled(struct amd64_pvt *pvt)
{
u8 umc_en_mask = 0, ecc_en_mask = 0;
u16 nid = pvt->mc_node_id;
struct amd64_umc *umc;
u8 ecc_en = 0, i;
bool ecc_en = false;
int i;

/* Check whether at least one UMC is enabled: */
for_each_umc(i) {
umc = &pvt->umc[i];

/* Only check enabled UMCs. */
if (!(umc->sdp_ctrl & UMC_SDP_INIT))
continue;

umc_en_mask |= BIT(i);

if (umc->umc_cap_hi & UMC_ECC_ENABLED)
ecc_en_mask |= BIT(i);
if (umc->sdp_ctrl & UMC_SDP_INIT &&
umc->umc_cap_hi & UMC_ECC_ENABLED) {
ecc_en = true;
break;
}
}

/* Check whether at least one UMC is enabled: */
if (umc_en_mask)
ecc_en = umc_en_mask == ecc_en_mask;
else
edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
edac_dbg(3, "Node %d: DRAM ECC %s.\n", pvt->mc_node_id, (ecc_en ? "enabled" : "disabled"));

edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));

if (!ecc_en)
return false;
else
return true;
return ecc_en;
}

static inline void

@ -76,10 +76,6 @@ config EFI_ZBOOT
bool "Enable the generic EFI decompressor"
depends on EFI_GENERIC_STUB && !ARM
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
select HAVE_KERNEL_XZ
select HAVE_KERNEL_ZSTD
help
Create the bootable image as an EFI application that carries the

@ -75,8 +75,6 @@ static LIST_HEAD(entry_list);
struct esre_attribute {
struct attribute attr;
ssize_t (*show)(struct esre_entry *entry, char *buf);
ssize_t (*store)(struct esre_entry *entry,
const char *buf, size_t count);
};

static struct esre_entry *to_entry(struct kobject *kobj)

@ -12,22 +12,16 @@ quiet_cmd_copy_and_pad = PAD $@
$(obj)/vmlinux.bin: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
$(call if_changed,copy_and_pad)

comp-type-$(CONFIG_KERNEL_GZIP) := gzip
comp-type-$(CONFIG_KERNEL_LZ4) := lz4
comp-type-$(CONFIG_KERNEL_LZMA) := lzma
comp-type-$(CONFIG_KERNEL_LZO) := lzo
comp-type-$(CONFIG_KERNEL_XZ) := xzkern
comp-type-$(CONFIG_KERNEL_ZSTD) := zstd22

# in GZIP, the appended le32 carrying the uncompressed size is part of the
# format, but in other cases, we just append it at the end for convenience,
# causing the original tools to complain when checking image integrity.
# So disregard it when calculating the payload size in the zimage header.
zboot-method-y := $(comp-type-y)_with_size
zboot-size-len-y := 4
comp-type-y := gzip
zboot-method-y := gzip
zboot-size-len-y := 0

zboot-method-$(CONFIG_KERNEL_GZIP) := gzip
zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0
comp-type-$(CONFIG_KERNEL_ZSTD) := zstd
zboot-method-$(CONFIG_KERNEL_ZSTD) := zstd22_with_size
zboot-size-len-$(CONFIG_KERNEL_ZSTD) := 4

$(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE
$(call if_changed,$(zboot-method-y))

@ -482,8 +482,9 @@ config GPIO_MT7621
Say yes here to support the Mediatek MT7621 SoC GPIO device.

config GPIO_MVEBU
def_bool y
bool "Marvell Orion and EBU GPIO support" if COMPILE_TEST
depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
default PLAT_ORION || ARCH_MVEBU
select GENERIC_IRQ_CHIP
select REGMAP_MMIO

@ -32,12 +32,14 @@
#define GNR_PINS_PER_REG 32
#define GNR_NUM_REGS DIV_ROUND_UP(GNR_NUM_PINS, GNR_PINS_PER_REG)

#define GNR_CFG_BAR 0x00
#define GNR_CFG_PADBAR 0x00
#define GNR_CFG_LOCK_OFFSET 0x04
#define GNR_GPI_STATUS_OFFSET 0x20
#define GNR_GPI_STATUS_OFFSET 0x14
#define GNR_GPI_ENABLE_OFFSET 0x24

#define GNR_CFG_DW_RX_MASK GENMASK(25, 22)
#define GNR_CFG_DW_HOSTSW_MODE BIT(27)
#define GNR_CFG_DW_RX_MASK GENMASK(23, 22)
#define GNR_CFG_DW_INTSEL_MASK GENMASK(21, 14)
#define GNR_CFG_DW_RX_DISABLE FIELD_PREP(GNR_CFG_DW_RX_MASK, 2)
#define GNR_CFG_DW_RX_EDGE FIELD_PREP(GNR_CFG_DW_RX_MASK, 1)
#define GNR_CFG_DW_RX_LEVEL FIELD_PREP(GNR_CFG_DW_RX_MASK, 0)
@ -50,6 +52,7 @@
* struct gnr_gpio - Intel Granite Rapids-D vGPIO driver state
* @gc: GPIO controller interface
* @reg_base: base address of the GPIO registers
* @pad_base: base address of the vGPIO pad configuration registers
* @ro_bitmap: bitmap of read-only pins
* @lock: guard the registers
* @pad_backup: backup of the register state for suspend
@ -57,6 +60,7 @@
struct gnr_gpio {
struct gpio_chip gc;
void __iomem *reg_base;
void __iomem *pad_base;
DECLARE_BITMAP(ro_bitmap, GNR_NUM_PINS);
raw_spinlock_t lock;
u32 pad_backup[];
@ -65,7 +69,7 @@ struct gnr_gpio {
static void __iomem *gnr_gpio_get_padcfg_addr(const struct gnr_gpio *priv,
unsigned int gpio)
{
return priv->reg_base + gpio * sizeof(u32);
return priv->pad_base + gpio * sizeof(u32);
}

static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio,
@ -88,6 +92,20 @@ static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio,
return 0;
}

static int gnr_gpio_request(struct gpio_chip *gc, unsigned int gpio)
{
struct gnr_gpio *priv = gpiochip_get_data(gc);
u32 dw;

dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio));
if (!(dw & GNR_CFG_DW_HOSTSW_MODE)) {
dev_warn(gc->parent, "GPIO %u is not owned by host", gpio);
return -EBUSY;
}

return 0;
}

static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
const struct gnr_gpio *priv = gpiochip_get_data(gc);
@ -139,6 +157,7 @@ static int gnr_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio, in

static const struct gpio_chip gnr_gpio_chip = {
.owner = THIS_MODULE,
.request = gnr_gpio_request,
.get = gnr_gpio_get,
.set = gnr_gpio_set,
.get_direction = gnr_gpio_get_direction,
@ -166,7 +185,7 @@ static void gnr_gpio_irq_ack(struct irq_data *d)
guard(raw_spinlock_irqsave)(&priv->lock);

reg = readl(addr);
reg &= ~BIT(bit_idx);
reg |= BIT(bit_idx);
writel(reg, addr);
}

@ -209,10 +228,18 @@ static void gnr_gpio_irq_unmask(struct irq_data *d)
static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
irq_hw_number_t pin = irqd_to_hwirq(d);
u32 mask = GNR_CFG_DW_RX_MASK;
struct gnr_gpio *priv = gpiochip_get_data(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
u32 reg;
u32 set;

/* Allow interrupts only if Interrupt Select field is non-zero */
reg = readl(gnr_gpio_get_padcfg_addr(priv, hwirq));
if (!(reg & GNR_CFG_DW_INTSEL_MASK)) {
dev_dbg(gc->parent, "GPIO %lu cannot be used as IRQ", hwirq);
return -EPERM;
}

/* Falling edge and level low triggers not supported by the GPIO controller */
switch (type) {
case IRQ_TYPE_NONE:
@ -230,10 +257,11 @@ static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}

return gnr_gpio_configure_line(gc, pin, mask, set);
return gnr_gpio_configure_line(gc, hwirq, GNR_CFG_DW_RX_MASK, set);
}

static const struct irq_chip gnr_gpio_irq_chip = {
.name = "gpio-graniterapids",
.irq_ack = gnr_gpio_irq_ack,
.irq_mask = gnr_gpio_irq_mask,
.irq_unmask = gnr_gpio_irq_unmask,
@ -291,6 +319,7 @@ static int gnr_gpio_probe(struct platform_device *pdev)
struct gnr_gpio *priv;
void __iomem *regs;
int irq, ret;
u32 offset;

priv = devm_kzalloc(dev, struct_size(priv, pad_backup, num_backup_pins), GFP_KERNEL);
if (!priv)
@ -302,6 +331,10 @@ static int gnr_gpio_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);

priv->reg_base = regs;
offset = readl(priv->reg_base + GNR_CFG_PADBAR);
priv->pad_base = priv->reg_base + offset;

irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@ -311,8 +344,6 @@ static int gnr_gpio_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to request interrupt\n");

priv->reg_base = regs + readl(regs + GNR_CFG_BAR);

gnr_gpio_init_pin_ro_bits(dev, priv->reg_base + GNR_CFG_LOCK_OFFSET,
priv->ro_bitmap);

@ -324,7 +355,6 @@ static int gnr_gpio_probe(struct platform_device *pdev)

girq = &priv->gc.irq;
gpio_irq_chip_set_chip(girq, &gnr_gpio_irq_chip);
girq->chip->name = dev_name(dev);
girq->parent_handler = NULL;
girq->num_parents = 0;
girq->parents = NULL;

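Several of the register fixes above hinge on GENMASK()/FIELD_PREP() encoding, for example GNR_CFG_DW_RX_MASK shrinking to the two-bit field GENMASK(23, 22). A standalone sketch using simplified local stand-ins for the kernel macros (these are not the real definitions, and __builtin_ctz assumes GCC or Clang):

#include <stdio.h>

/* Simplified 32-bit stand-ins for the kernel's GENMASK()/FIELD_PREP(). */
#define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((v) << __builtin_ctz(m)) & (m))

#define CFG_DW_RX_MASK   GENMASK(23, 22) /* two-bit field, per the fix */

int main(void)
{
	unsigned int rx_disable = FIELD_PREP(CFG_DW_RX_MASK, 2);
	unsigned int rx_edge    = FIELD_PREP(CFG_DW_RX_MASK, 1);

	printf("mask       0x%08x\n", CFG_DW_RX_MASK); /* 0x00c00000 */
	printf("rx_disable 0x%08x\n", rx_disable);     /* 0x00800000 */
	printf("rx_edge    0x%08x\n", rx_edge);        /* 0x00400000 */
	return 0;
}

With the old GENMASK(25, 22) width, field value 2 would have set a bit inside what is actually the HOSTSW_MODE/neighbouring bits, which is the kind of corruption the narrowed mask prevents.
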
@ -3,6 +3,9 @@
* GPIO library for the ACCES IDIO-16 family
* Copyright (C) 2022 William Breathitt Gray
*/

#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16"

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
@ -14,8 +17,6 @@

#include "gpio-idio-16.h"

#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16"

#define IDIO_16_DAT_BASE 0x0
#define IDIO_16_OUT_BASE IDIO_16_DAT_BASE
#define IDIO_16_IN_BASE (IDIO_16_DAT_BASE + 1)

@ -82,9 +82,9 @@ static int ljca_gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id,
int ret;

mutex_lock(&ljca_gpio->trans_lock);
packet->num = 1;
packet->item[0].index = gpio_id;
packet->item[0].value = config | ljca_gpio->connect_mode[gpio_id];
packet->num = 1;

ret = ljca_transfer(ljca_gpio->ljca, LJCA_GPIO_CONFIG, (u8 *)packet,
struct_size(packet, item, packet->num), NULL, 0);

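The ljca fix moves the packet->num assignment ahead of the transfer, because struct_size(packet, item, packet->num) reads packet->num to size the flexible array. A self-contained sketch with a local STRUCT_SIZE() stand-in for the kernel helper:

#include <stdio.h>
#include <stdlib.h>

struct item { unsigned char index, value; };
struct packet {
	unsigned char num;
	struct item item[]; /* flexible array member */
};

/* Local equivalent of the kernel's struct_size(); the fix above makes
 * sure p->num is valid *before* this expression is evaluated. */
#define STRUCT_SIZE(p, n) (sizeof(*(p)) + (n) * sizeof((p)->item[0]))

int main(void)
{
	struct packet *p = malloc(STRUCT_SIZE(p, 1)); /* sizeof() only, p unevaluated */

	if (!p)
		return 1;
	p->num = 1;            /* must precede any use of p->num */
	p->item[0].index = 3;
	p->item[0].value = 0x40;

	printf("transfer length: %zu\n", STRUCT_SIZE(p, p->num));
	free(p);
	return 0;
}
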
@ -1801,13 +1801,18 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
return -EINVAL;

/* Make sure VRAM is allocated contiguously */
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
for (i = 0; i < (*bo)->placement.num_placement; i++)
(*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
if (r)
return r;
if ((*bo)->tbo.resource->mem_type == TTM_PL_VRAM &&
!((*bo)->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {

amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
for (i = 0; i < (*bo)->placement.num_placement; i++)
(*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
if (r)
return r;
}

return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}

@ -145,7 +145,7 @@ const char *amdgpu_asic_name[] = {
"LAST",
};

#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMDGPU_MAX_IP_NUM, 0)
#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
/*
* Default init level where all blocks are expected to be initialized. This is
* the level of initialization expected by default and also after a full reset

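The one-line mask fix above is the classic GENMASK off-by-one: a "select all" mask covering N blocks needs bits N-1 down to 0, and GENMASK(N, 0) sets one bit too many. Worked numerically, with n as an illustrative count:

#include <stdio.h>

int main(void)
{
	unsigned int n = 12; /* illustrative block count */
	unsigned long long too_wide = (1ULL << (n + 1)) - 1; /* GENMASK(n, 0)     */
	unsigned long long correct  = (1ULL << n) - 1;       /* GENMASK(n - 1, 0) */

	printf("GENMASK(n, 0)     = 0x%llx (%u bits)\n", too_wide, n + 1);
	printf("GENMASK(n - 1, 0) = 0x%llx (%u bits)\n", correct, n);
	return 0;
}
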
@ -551,6 +551,8 @@ static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
for (i = 0; i < abo->placement.num_placement; ++i) {
abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
if (abo->placements[i].mem_type == TTM_PL_VRAM)
abo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
}
}

@ -674,12 +674,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;

if (adev->gfx.enable_cleaner_shader &&
ring->funcs->emit_cleaner_shader &&
job->enforce_isolation)
ring->funcs->emit_cleaner_shader(ring);

if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
!(job->enforce_isolation && !job->vmid))
return 0;

amdgpu_ring_ib_begin(ring);
@ -690,6 +686,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
if (need_pipe_sync)
amdgpu_ring_emit_pipeline_sync(ring);

if (adev->gfx.enable_cleaner_shader &&
ring->funcs->emit_cleaner_shader &&
job->enforce_isolation)
ring->funcs->emit_cleaner_shader(ring);

if (vm_flush_needed) {
trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);

|
||||
MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
|
||||
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
|
||||
MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
|
||||
MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin");
|
||||
MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");
|
||||
|
||||
#define GFX9_MEC_HPD_SIZE 4096
|
||||
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
|
||||
@ -574,8 +576,12 @@ static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
|
||||
{
|
||||
int err;
|
||||
|
||||
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
|
||||
"amdgpu/%s_mec.bin", chip_name);
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
|
||||
"amdgpu/%s_sjt_mec.bin", chip_name);
|
||||
else
|
||||
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
|
||||
"amdgpu/%s_mec.bin", chip_name);
|
||||
if (err)
|
||||
goto out;
|
||||
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
|
||||
|
@ -1288,7 +1288,7 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
struct amdgpu_ring *ring = amdgpu_job_ring(job);
unsigned i;

/* No patching necessary for the first instance */

@ -1423,6 +1423,7 @@ int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,


static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
bool cache_line_size_missing,
struct kfd_gpu_cache_info *pcache_info)
{
struct amdgpu_device *adev = kdev->adev;
@ -1437,6 +1438,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_tcp_per_wpg / 2;
pcache_info[i].cache_line_size = adev->gfx.config.gc_tcp_cache_line_size;
if (cache_line_size_missing && !pcache_info[i].cache_line_size)
pcache_info[i].cache_line_size = 128;
i++;
}
/* Scalar L1 Instruction Cache per SQC */
@ -1449,6 +1452,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
pcache_info[i].cache_line_size = adev->gfx.config.gc_instruction_cache_line_size;
if (cache_line_size_missing && !pcache_info[i].cache_line_size)
pcache_info[i].cache_line_size = 128;
i++;
}
/* Scalar L1 Data Cache per SQC */
@ -1460,6 +1465,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
pcache_info[i].cache_line_size = adev->gfx.config.gc_scalar_data_cache_line_size;
if (cache_line_size_missing && !pcache_info[i].cache_line_size)
pcache_info[i].cache_line_size = 64;
i++;
}
/* GL1 Data Cache per SA */
@ -1472,7 +1479,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
pcache_info[i].cache_line_size = 0;
if (cache_line_size_missing)
pcache_info[i].cache_line_size = 128;
i++;
}
/* L2 Data Cache per GPU (Total Tex Cache) */
@ -1484,6 +1492,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
pcache_info[i].cache_line_size = adev->gfx.config.gc_tcc_cache_line_size;
if (cache_line_size_missing && !pcache_info[i].cache_line_size)
pcache_info[i].cache_line_size = 128;
i++;
}
/* L3 Data Cache per GPU */
@ -1494,7 +1504,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
pcache_info[i].cache_line_size = 0;
pcache_info[i].cache_line_size = 64;
i++;
}
return i;
@ -1569,6 +1579,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info)
{
int num_of_cache_types = 0;
bool cache_line_size_missing = false;

switch (kdev->adev->asic_type) {
case CHIP_KAVERI:
@ -1692,10 +1703,17 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
/* Cacheline size not available in IP discovery for gc11.
* kfd_fill_gpu_cache_info_from_gfx_config to hard code it
*/
cache_line_size_missing = true;
fallthrough;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
num_of_cache_types =
kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info);
kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd,
cache_line_size_missing,
*pcache_info);
break;
default:
*pcache_info = dummy_cache_info;

@ -207,6 +207,21 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
if (!down_read_trylock(&adev->reset_domain->sem))
return -EIO;

if (!pdd->proc_ctx_cpu_ptr) {
r = amdgpu_amdkfd_alloc_gtt_mem(adev,
AMDGPU_MES_PROC_CTX_SIZE,
&pdd->proc_ctx_bo,
&pdd->proc_ctx_gpu_addr,
&pdd->proc_ctx_cpu_ptr,
false);
if (r) {
dev_err(adev->dev,
"failed to allocate process context bo\n");
return r;
}
memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
}

memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
queue_input.process_id = qpd->pqm->process->pasid;
queue_input.page_table_base_addr = qpd->page_table_base;

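The add_queue_mes() hunk switches the per-process context buffer to lazy allocation: allocate on first use behind a NULL check, so processes that never reach this path pay nothing (and the matching free, shown further below, is guarded by the same pointer). The idiom in a minimal standalone form:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pdd { void *proc_ctx; };

static int ensure_proc_ctx(struct pdd *pdd, size_t size)
{
	if (pdd->proc_ctx)
		return 0;            /* already allocated by an earlier call */

	pdd->proc_ctx = malloc(size);
	if (!pdd->proc_ctx)
		return -1;           /* -ENOMEM in the kernel */
	memset(pdd->proc_ctx, 0, size);
	return 0;
}

int main(void)
{
	struct pdd pdd = { 0 };

	printf("first:  %d\n", ensure_proc_ctx(&pdd, 4096));
	printf("second: %d\n", ensure_proc_ctx(&pdd, 4096)); /* no-op */
	free(pdd.proc_ctx);
	return 0;
}
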
@ -306,7 +306,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
spage = migrate_pfn_to_page(migrate->src[i]);
if (spage && !is_zone_device_page(spage)) {
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
DMA_TO_DEVICE);
DMA_BIDIRECTIONAL);
r = dma_mapping_error(dev, src[i]);
if (r) {
dev_err(dev, "%s: fail %d dma_map_page\n",
@ -629,7 +629,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
goto out_oom;
}

dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
r = dma_mapping_error(dev, dst[i]);
if (r) {
dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);

@ -1076,7 +1076,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)

kfd_free_process_doorbells(pdd->dev->kfd, pdd);

if (pdd->dev->kfd->shared_resources.enable_mes)
if (pdd->dev->kfd->shared_resources.enable_mes &&
pdd->proc_ctx_cpu_ptr)
amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
&pdd->proc_ctx_bo);
/*
@ -1608,7 +1609,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
struct kfd_process *p)
{
struct kfd_process_device *pdd = NULL;
int retval = 0;

if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
return NULL;
@ -1632,21 +1632,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
pdd->user_gpu_id = dev->id;
atomic64_set(&pdd->evict_duration_counter, 0);

if (dev->kfd->shared_resources.enable_mes) {
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
AMDGPU_MES_PROC_CTX_SIZE,
&pdd->proc_ctx_bo,
&pdd->proc_ctx_gpu_addr,
&pdd->proc_ctx_cpu_ptr,
false);
if (retval) {
dev_err(dev->adev->dev,
"failed to allocate process context bo\n");
goto err_free_pdd;
}
memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
}

p->pdds[p->n_pdds++] = pdd;
if (kfd_dbg_is_per_vmid_supported(pdd->dev))
pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
@ -1658,10 +1643,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
idr_init(&pdd->alloc_idr);

return pdd;

err_free_pdd:
kfree(pdd);
return NULL;
}

/**

@ -212,13 +212,17 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
void pqm_uninit(struct process_queue_manager *pqm)
{
struct process_queue_node *pqn, *next;
struct kfd_process_device *pdd;

list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
if (pqn->q) {
pdd = kfd_get_process_device_data(pqn->q->device, pqm->process);
kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
kfd_queue_release_buffers(pdd, &pqn->q->properties);
struct kfd_process_device *pdd = kfd_get_process_device_data(pqn->q->device,
pqm->process);
if (pdd) {
kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
kfd_queue_release_buffers(pdd, &pqn->q->properties);
} else {
WARN_ON(!pdd);
}
pqm_clean_queue_resource(pqm, pqn);
}

@ -164,6 +164,7 @@ enum amd_pp_task {
};

enum PP_SMC_POWER_PROFILE {
PP_SMC_POWER_PROFILE_UNKNOWN = -1,
PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0,
PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1,
PP_SMC_POWER_PROFILE_POWERSAVING = 0x2,

@ -764,6 +764,7 @@ static int smu_early_init(struct amdgpu_ip_block *ip_block)
smu->smu_baco.platform_support = false;
smu->smu_baco.maco_support = false;
smu->user_dpm_profile.fan_mode = -1;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN;

mutex_init(&smu->message_lock);

@ -1248,6 +1249,21 @@ static bool smu_is_workload_profile_available(struct smu_context *smu,
return smu->workload_map && smu->workload_map[profile].valid_mapping;
}

static void smu_init_power_profile(struct smu_context *smu)
{
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) {
if (smu->is_apu ||
!smu_is_workload_profile_available(
smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
smu->power_profile_mode =
PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
else
smu->power_profile_mode =
PP_SMC_POWER_PROFILE_FULLSCREEN3D;
}
smu_power_profile_mode_get(smu, smu->power_profile_mode);
}

static int smu_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@ -1269,13 +1285,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);

if (smu->is_apu ||
!smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
else
smu->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
smu_power_profile_mode_get(smu, smu->power_profile_mode);

smu_init_power_profile(smu);
smu->display_config = &adev->pm.pm_display_cfg;

smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

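smu_init_power_profile() only picks a default when power_profile_mode still holds the -1 UNKNOWN sentinel set in early init, so a value chosen earlier (as smu_v13_0_7_set_ppt_funcs() now does, below) survives. A compact sketch of the sentinel-then-resolve idiom, with made-up names:

#include <stdio.h>

enum profile { PROFILE_UNKNOWN = -1, PROFILE_BOOTUP_DEFAULT, PROFILE_FULLSCREEN3D };

static enum profile mode = PROFILE_UNKNOWN;

static void init_profile(int fullscreen3d_available)
{
	if (mode != PROFILE_UNKNOWN)
		return; /* a per-chip hook already chose one; keep it */
	mode = fullscreen3d_available ? PROFILE_FULLSCREEN3D
				      : PROFILE_BOOTUP_DEFAULT;
}

int main(void)
{
	mode = PROFILE_BOOTUP_DEFAULT; /* set early, as the 13.0.7 code does */
	init_profile(1);
	printf("mode = %d\n", mode);   /* still 0: the early choice survives */
	return 0;
}
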
@ -2810,4 +2810,5 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
smu->workload_map = smu_v13_0_7_workload_map;
smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION;
smu_v13_0_set_smu_mailbox_registers(smu);
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
}

@ -929,7 +929,6 @@ fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
/// * `tmp` must be valid for reading and writing for `tmp_size` bytes.
///
/// They must remain valid for the duration of the function call.

#[no_mangle]
pub unsafe extern "C" fn drm_panic_qr_generate(
url: *const i8,

@ -1343,6 +1343,17 @@ static void ilk_lut_write(const struct intel_crtc_state *crtc_state,
|
||||
intel_de_write_fw(display, reg, val);
|
||||
}
|
||||
|
||||
static void ilk_lut_write_indexed(const struct intel_crtc_state *crtc_state,
|
||||
i915_reg_t reg, u32 val)
|
||||
{
|
||||
struct intel_display *display = to_intel_display(crtc_state);
|
||||
|
||||
if (crtc_state->dsb_color_vblank)
|
||||
intel_dsb_reg_write_indexed(crtc_state->dsb_color_vblank, reg, val);
|
||||
else
|
||||
intel_de_write_fw(display, reg, val);
|
||||
}
|
||||
|
||||
static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_property_blob *blob)
|
||||
{
|
||||
@ -1357,19 +1368,29 @@ static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
	lut = blob->data;

	/*
	 * DSB fails to correctly load the legacy LUT
	 * unless we either write each entry twice,
	 * or use non-posted writes
	 * DSB fails to correctly load the legacy LUT unless
	 * we either write each entry twice when using posted
	 * writes, or we use non-posted writes.
	 *
	 * If palette anti-collision is active during LUT
	 * register writes:
	 * - posted writes simply get dropped and thus the LUT
	 *   contents may not be correctly updated
	 * - non-posted writes are blocked and thus the LUT
	 *   contents are always correct, but simultaneous CPU
	 *   MMIO access will start to fail
	 *
	 * Choose the lesser of two evils and use posted writes.
	 * Using posted writes is also faster, even when having
	 * to write each register twice.
	 */
	if (crtc_state->dsb_color_vblank)
		intel_dsb_nonpost_start(crtc_state->dsb_color_vblank);

	for (i = 0; i < 256; i++)
	for (i = 0; i < 256; i++) {
		ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
			      i9xx_lut_8(&lut[i]));

	if (crtc_state->dsb_color_vblank)
		intel_dsb_nonpost_end(crtc_state->dsb_color_vblank);
		if (crtc_state->dsb_color_vblank)
			ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
				      i9xx_lut_8(&lut[i]));
	}
}

static void ilk_load_lut_10(const struct intel_crtc_state *crtc_state,
@ -1458,8 +1479,8 @@ static void bdw_load_lut_10(const struct intel_crtc_state *crtc_state,
		       prec_index);

	for (i = 0; i < lut_size; i++)
		ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
			      ilk_lut_10(&lut[i]));
		ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
				      ilk_lut_10(&lut[i]));

	/*
	 * Reset the index, otherwise it prevents the legacy palette to be
@ -1612,16 +1633,16 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
		 * ToDo: Extend to max 7.0. Enable 32 bit input value
		 * as compared to just 16 to achieve this.
		 */
		ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
			      DISPLAY_VER(display) >= 14 ?
			      mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i]));
		ilk_lut_write_indexed(crtc_state, PRE_CSC_GAMC_DATA(pipe),
				      DISPLAY_VER(display) >= 14 ?
				      mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i]));
	}

	/* Clamp values > 1.0. */
	while (i++ < glk_degamma_lut_size(display))
		ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
			      DISPLAY_VER(display) >= 14 ?
			      1 << 24 : 1 << 16);
		ilk_lut_write_indexed(crtc_state, PRE_CSC_GAMC_DATA(pipe),
				      DISPLAY_VER(display) >= 14 ?
				      1 << 24 : 1 << 16);

	ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0);
}
@ -1687,10 +1708,10 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
	for (i = 0; i < 9; i++) {
		const struct drm_color_lut *entry = &lut[i];

		ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
			      ilk_lut_12p4_ldw(entry));
		ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
			      ilk_lut_12p4_udw(entry));
		ilk_lut_write_indexed(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
				      ilk_lut_12p4_ldw(entry));
		ilk_lut_write_indexed(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
				      ilk_lut_12p4_udw(entry));
	}

	ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
@ -1726,10 +1747,10 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
	for (i = 1; i < 257; i++) {
		entry = &lut[i * 8];

		ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
			      ilk_lut_12p4_ldw(entry));
		ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
			      ilk_lut_12p4_udw(entry));
		ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
				      ilk_lut_12p4_ldw(entry));
		ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
				      ilk_lut_12p4_udw(entry));
	}

	/*
@ -1747,10 +1768,10 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
	for (i = 0; i < 256; i++) {
		entry = &lut[i * 8 * 128];

		ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
			      ilk_lut_12p4_ldw(entry));
		ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
			      ilk_lut_12p4_udw(entry));
		ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
				      ilk_lut_12p4_ldw(entry));
		ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
				      ilk_lut_12p4_udw(entry));
	}

	ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
@ -273,16 +273,20 @@ static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_
}

/**
 * intel_dsb_reg_write() - Emit register write to the DSB context
 * intel_dsb_reg_write_indexed() - Emit register write to the DSB context
 * @dsb: DSB context
 * @reg: register address.
 * @val: value.
 *
 * This function is used for writing a register-value pair into the
 * DSB command buffer.
 *
 * Note that indexed writes are slower than normal MMIO writes
 * for a small number (less than 5 or so) of writes to the same
 * register.
 */
void intel_dsb_reg_write(struct intel_dsb *dsb,
			 i915_reg_t reg, u32 val)
void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
				 i915_reg_t reg, u32 val)
{
	/*
	 * For example the buffer will look like below for 3 dwords for auto
@ -340,6 +344,15 @@ void intel_dsb_reg_write(struct intel_dsb *dsb,
	}
}

void intel_dsb_reg_write(struct intel_dsb *dsb,
			 i915_reg_t reg, u32 val)
{
	intel_dsb_emit(dsb, val,
		       (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
		       (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
		       i915_mmio_reg_offset(reg));
}

static u32 intel_dsb_mask_to_byte_en(u32 mask)
{
	return (!!(mask & 0xff000000) << 3 |
@ -34,6 +34,8 @@ void intel_dsb_finish(struct intel_dsb *dsb);
void intel_dsb_cleanup(struct intel_dsb *dsb);
void intel_dsb_reg_write(struct intel_dsb *dsb,
			 i915_reg_t reg, u32 val);
void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
				 i915_reg_t reg, u32 val);
void intel_dsb_reg_write_masked(struct intel_dsb *dsb,
				i915_reg_t reg, u32 mask, u32 val);
void intel_dsb_noop(struct intel_dsb *dsb, int count);
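Taken together, the color and DSB hunks split register emission into two paths. A hedged usage sketch, with function names taken from the hunks above; `dsb`, `pipe`, `lut`, and `lut_size` are assumed context, not part of the patch:

/* Long bursts to one data register amortize the indexed-write header: */
for (i = 0; i < lut_size; i++)
	intel_dsb_reg_write_indexed(dsb, PREC_PAL_DATA(pipe),
				    ilk_lut_10(&lut[i]));

/* A one-off write is cheaper as a plain MMIO-write opcode: */
intel_dsb_reg_write(dsb, PREC_PAL_INDEX(pipe), 0);

Per the doc comment above, the indexed form only pays off beyond roughly five consecutive writes to the same register, which is why the LUT loops switch over while single index resets stay on the plain write.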
@ -1643,9 +1643,21 @@ capture_engine(struct intel_engine_cs *engine,
		return NULL;

	intel_engine_get_hung_entity(engine, &ce, &rq);
	if (rq && !i915_request_started(rq))
		drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
			 engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
	if (rq && !i915_request_started(rq)) {
		/*
		 * We also want to report the guc_id of the context, but if
		 * we don't have the context reference, skip printing it.
		 */
		if (ce)
			drm_info(&engine->gt->i915->drm,
				 "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
				 engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
		else
			drm_info(&engine->gt->i915->drm,
				 "Got hung context on %s with active request %lld:%lld not yet started\n",
				 engine->name, rq->fence.context, rq->fence.seqno);
	}

	if (rq) {
		capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
@ -506,6 +506,6 @@ int __init i915_scheduler_module_init(void)
	return 0;

err_priorities:
	kmem_cache_destroy(slab_priorities);
	kmem_cache_destroy(slab_dependencies);
	return -ENOMEM;
}
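The one-line swap above matters because of creation order. A hedged sketch of the surrounding init, with structure inferred from the hunk rather than quoted verbatim (cache flags assumed):

/*
 * Hedged sketch (structure assumed, not verbatim): err_priorities is
 * reached only when slab_priorities creation fails, so the unwind must
 * free the cache that *did* get created, slab_dependencies.
 */
slab_dependencies = KMEM_CACHE(i915_dependency, 0);	/* flags assumed */
if (!slab_dependencies)
	return -ENOMEM;

slab_priorities = KMEM_CACHE(i915_priolist, 0);
if (!slab_priorities)
	goto err_priorities;	/* slab_priorities is NULL here */

return 0;

err_priorities:
	kmem_cache_destroy(slab_dependencies);	/* the old code freed the wrong cache and leaked this one */
	return -ENOMEM;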
@ -224,8 +224,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
				   XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				   XE_BO_FLAG_PINNED);
	if (IS_ERR(tiny)) {
		KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
			   PTR_ERR(pt));
		KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
			   PTR_ERR(tiny));
		goto free_pt;
	}
@ -65,6 +65,14 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe
	__invalidation_fence_signal(xe, fence);
}

void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
{
	if (WARN_ON_ONCE(!fence->gt))
		return;

	__invalidation_fence_signal(gt_to_xe(fence->gt), fence);
}

static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
	struct xe_gt *gt = container_of(work, struct xe_gt,
@ -28,6 +28,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
				       struct xe_gt_tlb_invalidation_fence *fence,
				       bool stack);
void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence);

static inline void
xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
@ -1333,8 +1333,7 @@ static void invalidation_fence_cb(struct dma_fence *fence,
		queue_work(system_wq, &ifence->work);
	} else {
		ifence->base.base.error = ifence->fence->error;
		dma_fence_signal(&ifence->base.base);
		dma_fence_put(&ifence->base.base);
		xe_gt_tlb_invalidation_fence_signal(&ifence->base);
	}
	dma_fence_put(ifence->fence);
}
@ -27,46 +27,27 @@
#include "xe_reg_whitelist.h"
#include "xe_rtp_types.h"

#define XE_REG_SR_GROW_STEP_DEFAULT	16

static void reg_sr_fini(struct drm_device *drm, void *arg)
{
	struct xe_reg_sr *sr = arg;
	struct xe_reg_sr_entry *entry;
	unsigned long reg;

	xa_for_each(&sr->xa, reg, entry)
		kfree(entry);

	xa_destroy(&sr->xa);
	kfree(sr->pool.arr);
	memset(&sr->pool, 0, sizeof(sr->pool));
}

int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe)
{
	xa_init(&sr->xa);
	memset(&sr->pool, 0, sizeof(sr->pool));
	sr->pool.grow_step = XE_REG_SR_GROW_STEP_DEFAULT;
	sr->name = name;

	return drmm_add_action_or_reset(&xe->drm, reg_sr_fini, sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_reg_sr_init);

static struct xe_reg_sr_entry *alloc_entry(struct xe_reg_sr *sr)
{
	if (sr->pool.used == sr->pool.allocated) {
		struct xe_reg_sr_entry *arr;

		arr = krealloc_array(sr->pool.arr,
				     ALIGN(sr->pool.allocated + 1, sr->pool.grow_step),
				     sizeof(*arr), GFP_KERNEL);
		if (!arr)
			return NULL;

		sr->pool.arr = arr;
		sr->pool.allocated += sr->pool.grow_step;
	}

	return &sr->pool.arr[sr->pool.used++];
}

static bool compatible_entries(const struct xe_reg_sr_entry *e1,
			       const struct xe_reg_sr_entry *e2)
{
@ -112,7 +93,7 @@ int xe_reg_sr_add(struct xe_reg_sr *sr,
		return 0;
	}

	pentry = alloc_entry(sr);
	pentry = kmalloc(sizeof(*pentry), GFP_KERNEL);
	if (!pentry) {
		ret = -ENOMEM;
		goto fail;
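Why drop the pool in favor of a per-entry kmalloc()? A hedged sketch of the hazard; this is our reading of the change, not something stated in the hunks:

/*
 * Hedged sketch (inferred rationale): entries handed out from pool.arr
 * were also stored in the xarray, but krealloc_array() may move the
 * whole array, leaving those stored pointers dangling. Per-entry
 * kmalloc() gives each entry a stable address for the xarray to keep.
 */
entry = &sr->pool.arr[sr->pool.used++];		/* pointer into the pool */
xa_store(&sr->xa, reg, entry, GFP_KERNEL);	/* xarray remembers it */
/* later, on growth: */
arr = krealloc_array(sr->pool.arr, n, sizeof(*arr), GFP_KERNEL);
/* if the allocation moved, the xarray still points at freed memory */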
@ -20,12 +20,6 @@ struct xe_reg_sr_entry {
};

struct xe_reg_sr {
	struct {
		struct xe_reg_sr_entry *arr;
		unsigned int used;
		unsigned int allocated;
		unsigned int grow_step;
	} pool;
	struct xarray xa;
	const char *name;
@ -1075,6 +1075,7 @@ static const struct of_device_id nmk_i2c_eyeq_match_table[] = {
		.compatible = "mobileye,eyeq6h-i2c",
		.data = (void *)NMK_I2C_EYEQ_FLAG_32B_BUS,
	},
	{ /* sentinel */ }
};

static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
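The single added line is the whole fix. A standalone illustration with hypothetical names of why the sentinel matters:

/*
 * Illustration (hypothetical names): of_device_id tables are scanned
 * until an all-zero entry, so a missing sentinel lets the match loop
 * read past the end of the table.
 */
static const struct of_device_id example_match_table[] = {
	{ .compatible = "vendor,example-device" },
	{ /* sentinel */ }	/* terminates the walk in of_match_node() */
};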
@ -95,7 +95,7 @@ enum {

static inline int wait_timeout(struct i2c_pnx_algo_data *data)
{
	long timeout = data->timeout;
	long timeout = jiffies_to_msecs(data->timeout);
	while (timeout > 0 &&
	       (ioread32(I2C_REG_STS(data)) & mstatus_active)) {
		mdelay(1);
@ -106,7 +106,7 @@ static inline int wait_timeout(struct i2c_pnx_algo_data *data)

static inline int wait_reset(struct i2c_pnx_algo_data *data)
{
	long timeout = data->timeout;
	long timeout = jiffies_to_msecs(data->timeout);
	while (timeout > 0 &&
	       (ioread32(I2C_REG_CTL(data)) & mcntrl_reset)) {
		mdelay(1);
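Both hunks fix the same unit bug: ->timeout is stored in jiffies, but the loop counts it down in 1 ms steps via mdelay(1), so the raw value must be converted first. A standalone sketch of the mismatch, assuming HZ = 100 and a simplified conversion macro for illustration:

#include <stdio.h>

#define HZ 100
#define jiffies_to_msecs(j) ((j) * 1000UL / HZ)	/* simplified model of the kernel helper */

int main(void)
{
	unsigned long timeout_jiffies = 100;	/* 1 second at HZ=100 */

	/* counting the raw jiffies value down in 1 ms steps cuts the wait 10x short */
	printf("counted as-is: %lu ms\n", timeout_jiffies);
	printf("converted:     %lu ms\n", jiffies_to_msecs(timeout_jiffies));	/* 1000 ms */
	return 0;
}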
@ -352,7 +352,7 @@ static int riic_init_hw(struct riic_dev *riic)
		if (brl <= (0x1F + 3))
			break;

		total_ticks /= 2;
		total_ticks = DIV_ROUND_UP(total_ticks, 2);
		rate /= 2;
	}
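A standalone sketch of why the riic hunk rounds up: plain integer division truncates, which under-counts the tick budget and can yield a bus clock faster than requested. The value below is made up for illustration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long total_ticks = 125;

	printf("truncated:  %lu\n", total_ticks / 2);		   /* 62: budget silently lost */
	printf("rounded up: %lu\n", DIV_ROUND_UP(total_ticks, 2)); /* 63: stays conservative */
	return 0;
}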
@ -1415,6 +1415,7 @@ static int domain_flush_pages_v2(struct protection_domain *pdom,
	struct iommu_cmd cmd;
	int ret = 0;

	lockdep_assert_held(&pdom->lock);
	list_for_each_entry(dev_data, &pdom->dev_list, list) {
		struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
		u16 domid = dev_data->gcr3_info.domid;
@ -1464,6 +1465,8 @@ static void __domain_flush_pages(struct protection_domain *domain,
	ioasid_t pasid = IOMMU_NO_PASID;
	bool gn = false;

	lockdep_assert_held(&domain->lock);

	if (pdom_is_v2_pgtbl_mode(domain)) {
		gn = true;
		ret = domain_flush_pages_v2(domain, address, size);
@ -1585,6 +1588,8 @@ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;

	lockdep_assert_held(&domain->lock);

	list_for_each_entry(dev_data, &domain->dev_list, list) {
		struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);

@ -2073,6 +2078,7 @@ static int attach_device(struct device *dev,
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
	struct pci_dev *pdev;
	unsigned long flags;
	int ret = 0;

	mutex_lock(&dev_data->mutex);
@ -2113,7 +2119,9 @@ static int attach_device(struct device *dev,

	/* Update data structures */
	dev_data->domain = domain;
	spin_lock_irqsave(&domain->lock, flags);
	list_add(&dev_data->list, &domain->dev_list);
	spin_unlock_irqrestore(&domain->lock, flags);

	/* Update device table */
	dev_update_dte(dev_data, true);
@ -2160,6 +2168,7 @@ static void detach_device(struct device *dev)
	/* Flush IOTLB and wait for the flushes to finish */
	spin_lock_irqsave(&domain->lock, flags);
	amd_iommu_domain_flush_all(domain);
	list_del(&dev_data->list);
	spin_unlock_irqrestore(&domain->lock, flags);

	/* Clear GCR3 table */
@ -2168,7 +2177,6 @@ static void detach_device(struct device *dev)

	/* Update data structures */
	dev_data->domain = NULL;
	list_del(&dev_data->list);

	/* decrease reference counters - needs to happen after the flushes */
	pdom_detach_iommu(iommu, domain);
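The recurring theme in these AMD IOMMU hunks is making the dev_list locking rule explicit: list_add()/list_del() move under domain->lock, and the walkers assert it. A hedged sketch of the pattern, using a hypothetical helper rather than code from the patch:

/*
 * Hedged sketch (hypothetical helper, not from the patch):
 * lockdep_assert_held() turns the "caller must hold pdom->lock" comment
 * into a check that splats under CONFIG_PROVE_LOCKING.
 */
static void pdom_for_each_dev(struct protection_domain *pdom)
{
	struct iommu_dev_data *dev_data;

	lockdep_assert_held(&pdom->lock);	/* dev_list is protected by pdom->lock */

	list_for_each_entry(dev_data, &pdom->dev_list, list)
		/* safe: no concurrent list_add()/list_del() can run */;
}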
@ -339,7 +339,7 @@ tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
	 * one CPU at a time can enter the process, while the others
	 * will be spinning at the same lock.
	 */
	lidx = smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
	lidx = raw_smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
	vcmdq = vintf->lvcmdqs[lidx];
	if (!vcmdq || !READ_ONCE(vcmdq->enabled))
		return NULL;
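A hedged note on the distinction the hunk relies on: smp_processor_id() warns under CONFIG_DEBUG_PREEMPT when called with preemption enabled, because the task may migrate right after the read. The raw_ variant skips that check, which is acceptable here because the CPU number is only a queue-spreading hint. A kernel-style sketch (hypothetical wrapper, not from the patch):

static unsigned int pick_lvcmdq(unsigned int num_lvcmdqs)
{
	/* a stale CPU number is still a valid queue index, so no pin needed */
	return raw_smp_processor_id() % num_lvcmdqs;
}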