This is the 6.12.6 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmdkVEUACgkQONu9yGCS
 aT7FYA/+OEDrmsHBVy0VShtpcrbC0O5VE/HInQ9A30sMLTbg3U3DvVi715jdSiYJ
 kAfusNMS+bcFJ8b3IeYgVfJWOPD7PplMVTgOo74dht89vI2/yYAHP2+Bpf57oqB9
 c1RzPSEmQSVsk8DTzAW7eg0lgmloA0W6F6et2Cz+qLm9eisagC+cl4LCLGMC0Zcw
 ILMj6tL+kIfvw1c87UF8PjXtF/UdgDeIaaX9zVl9SRi4c9Ha6Yy4zUrh2BteGeiy
 LXHIrommGyDanajvWbv2BluwCVvJ/cUzwrnLfrR0Cjwxrjlbbn0WNzt1wKQO61VX
 3YVRDF+rnGKmt0s7mLBWvwlqsRQgFdjngtviH33qdHy/+J9UbhHmOuvLyuX3wCw0
 vlX9cjVQPYDv3kSpTErCtQ1WOSCO/Eb+OlpYyNEnUdaOuySZlXdl4cPJPRspjNoY
 ZcqC9gREseYda3q+LfdvwN4vnPzY90LIfO5xerPf6PgRjyGRl6QUhGTc3c69neaf
 vwqVhWEhp5btuj9p+sBS6ozviH49oUV7E4E/ZtMuKMGUEwLaA07HS/BiUDmE306U
 5Sv0OMllJPiZTtCb7nR2HsyQEK0jK43eD0NMLD8K3l112L4i7BF41ER1GwcaTTUz
 maoJcswK422Dp1hTzODqAGvjZdFx5VGgO5m0rAINh1l2xEPtSN8=
 =pkpv
 -----END PGP SIGNATURE-----

Merge v6.12.6

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2024-12-19 18:13:24 +01:00
commit efe163851b
182 changed files with 2204 additions and 1298 deletions


@@ -2170,6 +2170,12 @@ nexthop_compat_mode - BOOLEAN
         understands the new API, this sysctl can be disabled to achieve full
         performance benefits of the new API by disabling the nexthop expansion
         and extraneous notifications.
+
+        Note that as a backward-compatible mode, dumping of modern features
+        might be incomplete or wrong. For example, resilient groups will not be
+        shown as such, but rather as just a list of next hops. Also weights that
+        do not fit into 8 bits will show incorrectly.
+
         Default: true (backward compat mode)
 
 fib_notify_on_flag_change - INTEGER


@@ -347,7 +347,9 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
 `int pm_runtime_resume_and_get(struct device *dev);`
     - run pm_runtime_resume(dev) and if successful, increment the device's
-      usage counter; return the result of pm_runtime_resume
+      usage counter; returns 0 on success (whether or not the device's
+      runtime PM status was already 'active') or the error code from
+      pm_runtime_resume() on failure.
 
 `int pm_request_idle(struct device *dev);`
     - submit a request to execute the subsystem-level idle callback for the
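A minimal usage sketch of the calling convention the reworded text describes (the function name example_start() is made up; pm_runtime_resume_and_get() and pm_runtime_put() are the documented APIs):

static int example_start(struct device *dev)
{
        int ret;

        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;     /* resume failed; no usage count was taken */

        /* The device is active here, even if it already was before the call. */

        pm_runtime_put(dev);    /* balance the usage counter taken above */
        return 0;
}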


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 12
-SUBLEVEL = 5
+SUBLEVEL = 6
 EXTRAVERSION =
 NAME = Baby Opossum Posse


@@ -1535,6 +1535,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_DF2);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
+               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
                break;
        case SYS_ID_AA64PFR2_EL1:
                /* We only expose FPMR */
@@ -1724,6 +1725,13 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
        val &= ~ID_AA64PFR0_EL1_AMU_MASK;
 
+       /*
+        * MPAM is disabled by default as KVM also needs a set of PARTID to
+        * program the MPAMVPMx_EL2 PARTID remapping registers with. But some
+        * older kernels let the guest see the ID bit.
+        */
+       val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
+
        return val;
 }
@@ -1834,6 +1842,42 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
        return set_id_reg(vcpu, rd, val);
 }
 
+static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
+                              const struct sys_reg_desc *rd, u64 user_val)
+{
+       u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+       u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
+
+       /*
+        * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
+        * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
+        * guests, but didn't add trap handling. KVM doesn't support MPAM and
+        * always returns an UNDEF for these registers. The guest must see 0
+        * for this field.
+        *
+        * But KVM must also accept values from user-space that were provided
+        * by KVM. On CPUs that support MPAM, permit user-space to write
+        * the sanitizied value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
+        */
+       if ((hw_val & mpam_mask) == (user_val & mpam_mask))
+               user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
+
+       return set_id_reg(vcpu, rd, user_val);
+}
+
+static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
+                              const struct sys_reg_desc *rd, u64 user_val)
+{
+       u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
+       u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
+
+       /* See set_id_aa64pfr0_el1 for comment about MPAM */
+       if ((hw_val & mpam_mask) == (user_val & mpam_mask))
+               user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
+
+       return set_id_reg(vcpu, rd, user_val);
+}
+
 /*
  * cpufeature ID register user accessors
  *
@@ -2377,7 +2421,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_ID_AA64PFR0_EL1),
          .access = access_id_reg,
          .get_user = get_id_reg,
-         .set_user = set_id_reg,
+         .set_user = set_id_aa64pfr0_el1,
          .reset = read_sanitised_id_aa64pfr0_el1,
          .val = ~(ID_AA64PFR0_EL1_AMU |
                   ID_AA64PFR0_EL1_MPAM |
@@ -2385,7 +2429,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
                   ID_AA64PFR0_EL1_RAS |
                   ID_AA64PFR0_EL1_AdvSIMD |
                   ID_AA64PFR0_EL1_FP), },
-       ID_WRITABLE(ID_AA64PFR1_EL1, ~(ID_AA64PFR1_EL1_PFAR |
+       { SYS_DESC(SYS_ID_AA64PFR1_EL1),
+         .access = access_id_reg,
+         .get_user = get_id_reg,
+         .set_user = set_id_aa64pfr1_el1,
+         .reset = kvm_read_sanitised_id_reg,
+         .val = ~(ID_AA64PFR1_EL1_PFAR |
                   ID_AA64PFR1_EL1_DF2 |
                   ID_AA64PFR1_EL1_MTEX |
                   ID_AA64PFR1_EL1_THE |
@@ -2397,7 +2446,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
                   ID_AA64PFR1_EL1_RES0 |
                   ID_AA64PFR1_EL1_MPAM_frac |
                   ID_AA64PFR1_EL1_RAS_frac |
-                  ID_AA64PFR1_EL1_MTE)),
+                  ID_AA64PFR1_EL1_MTE), },
        ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR),
        ID_UNALLOCATED(4,3),
        ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
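The two setters above implement one policy: a user-space write that merely echoes the sanitised hardware value of the MPAM (or MPAM_frac) field is accepted but ignored, so the guest keeps seeing 0. A standalone sketch of that check, with made-up names (not part of the patch):

static u64 ignore_field_if_matching_hw(u64 hw_val, u64 user_val, u64 mask)
{
        /* Tolerate a value saved from a host that exposed the field ... */
        if ((hw_val & mask) == (user_val & mask))
                user_val &= ~mask;      /* ... but keep reporting 0 to the guest */

        /* Any other non-zero value still fails the normal ID register checks. */
        return user_val;
}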


@@ -22,7 +22,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
        else
                set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));
 
-       flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+       preempt_disable();
+       local_flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+       preempt_enable();
 
        return true;
 }


@@ -227,7 +227,7 @@ static void __init init_resources(void)
 static void __init parse_dtb(void)
 {
        /* Early scan of device tree from init memory */
-       if (early_init_dt_scan(dtb_early_va, __pa(dtb_early_va))) {
+       if (early_init_dt_scan(dtb_early_va, dtb_early_pa)) {
                const char *name = of_flat_dt_get_machine_name();
 
                if (name) {


@@ -1566,7 +1566,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
        pmd_clear(pmd);
 }
 
-static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
+static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud, bool is_vmemmap)
 {
        struct page *page = pud_page(*pud);
        struct ptdesc *ptdesc = page_ptdesc(page);
@@ -1579,7 +1579,8 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
                        return;
        }
 
-       pagetable_pmd_dtor(ptdesc);
+       if (!is_vmemmap)
+               pagetable_pmd_dtor(ptdesc);
        if (PageReserved(page))
                free_reserved_page(page);
        else
@@ -1703,7 +1704,7 @@ static void __meminit remove_pud_mapping(pud_t *pud_base, unsigned long addr, un
                remove_pmd_mapping(pmd_base, addr, next, is_vmemmap, altmap);
 
                if (pgtable_l4_enabled)
-                       free_pmd_table(pmd_base, pudp);
+                       free_pmd_table(pmd_base, pudp, is_vmemmap);
        }
 }


@@ -1468,7 +1468,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
                         * hence we need to drain when changing said
                         * size.
                         */
-                       intel_pmu_drain_large_pebs(cpuc);
+                       intel_pmu_drain_pebs_buffer();
                        adaptive_pebs_record_size_update();
                        wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
                        cpuc->active_pebs_data_cfg = pebs_data_cfg;


@@ -212,6 +212,8 @@ static inline unsigned long long l1tf_pfn_limit(void)
        return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
 }
 
+void init_cpu_devs(void);
+void get_cpu_vendor(struct cpuinfo_x86 *c);
 extern void early_cpu_init(void);
 extern void identify_secondary_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);


@@ -65,4 +65,19 @@
 
 extern bool __static_call_fixup(void *tramp, u8 op, void *dest);
 
+extern void __static_call_update_early(void *tramp, void *func);
+
+#define static_call_update_early(name, _func)                          \
+({                                                                     \
+       typeof(&STATIC_CALL_TRAMP(name)) __F = (_func);                 \
+       if (static_call_initialized) {                                  \
+               __static_call_update(&STATIC_CALL_KEY(name),            \
+                                    STATIC_CALL_TRAMP_ADDR(name), __F);\
+       } else {                                                        \
+               WRITE_ONCE(STATIC_CALL_KEY(name).func, _func);          \
+               __static_call_update_early(STATIC_CALL_TRAMP_ADDR(name),\
+                                          __F);                        \
+       }                                                               \
+})
+
 #endif /* _ASM_STATIC_CALL_H */
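A hedged usage sketch of the new macro (the Xen changes further down are the real user; my_op, my_default_impl and my_boot_impl are invented names): before static call initialization has run, the update falls through to __static_call_update_early(), which patches the trampoline directly while the system is still booting with interrupts off.

static void my_default_impl(void) { }
static void my_boot_impl(void) { }

DEFINE_STATIC_CALL(my_op, my_default_impl);

void __init my_very_early_setup(void)
{
        /*
         * Only valid while system_state == SYSTEM_BOOTING and before static
         * call init has completed; later code must use static_call_update().
         */
        static_call_update_early(my_op, my_boot_impl);
}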


@@ -8,7 +8,7 @@
 #include <asm/special_insns.h>
 
 #ifdef CONFIG_X86_32
-static inline void iret_to_self(void)
+static __always_inline void iret_to_self(void)
 {
        asm volatile (
                "pushfl\n\t"
@@ -19,7 +19,7 @@ static inline void iret_to_self(void)
                : ASM_CALL_CONSTRAINT : : "memory");
 }
 #else
-static inline void iret_to_self(void)
+static __always_inline void iret_to_self(void)
 {
        unsigned int tmp;
 
@@ -55,7 +55,7 @@ static inline void iret_to_self(void)
  * Like all of Linux's memory ordering operations, this is a
  * compiler barrier as well.
  */
-static inline void sync_core(void)
+static __always_inline void sync_core(void)
 {
        /*
         * The SERIALIZE instruction is the most straightforward way to


@@ -39,9 +39,11 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/pgtable.h>
+#include <linux/instrumentation.h>
 
 #include <trace/events/xen.h>
 
+#include <asm/alternative.h>
 #include <asm/page.h>
 #include <asm/smap.h>
 #include <asm/nospec-branch.h>
@@ -86,11 +88,20 @@ struct xen_dm_op_buf;
  * there aren't more than 5 arguments...)
  */
-extern struct { char _entry[32]; } hypercall_page[];
+void xen_hypercall_func(void);
+DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func);
 
-#define __HYPERCALL            "call hypercall_page+%c[offset]"
-#define __HYPERCALL_ENTRY(x)                                           \
-       [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
+#ifdef MODULE
+#define __ADDRESSABLE_xen_hypercall
+#else
+#define __ADDRESSABLE_xen_hypercall    __ADDRESSABLE_ASM_STR(__SCK__xen_hypercall)
+#endif
+
+#define __HYPERCALL                                    \
+       __ADDRESSABLE_xen_hypercall                     \
+       "call __SCT__xen_hypercall"
+
+#define __HYPERCALL_ENTRY(x)   "a" (x)
 
 #ifdef CONFIG_X86_32
 #define __HYPERCALL_RETREG     "eax"
@@ -148,7 +159,7 @@ extern struct { char _entry[32]; } hypercall_page[];
        __HYPERCALL_0ARG();                                             \
        asm volatile (__HYPERCALL                                       \
                      : __HYPERCALL_0PARAM                              \
-                     : __HYPERCALL_ENTRY(name)                         \
+                     : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)        \
                      : __HYPERCALL_CLOBBER0);                          \
        (type)__res;                                                    \
 })
@@ -159,7 +170,7 @@ extern struct { char _entry[32]; } hypercall_page[];
        __HYPERCALL_1ARG(a1);                                           \
        asm volatile (__HYPERCALL                                       \
                      : __HYPERCALL_1PARAM                              \
-                     : __HYPERCALL_ENTRY(name)                         \
+                     : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)        \
                      : __HYPERCALL_CLOBBER1);                          \
        (type)__res;                                                    \
 })
@@ -170,7 +181,7 @@ extern struct { char _entry[32]; } hypercall_page[];
        __HYPERCALL_2ARG(a1, a2);                                       \
        asm volatile (__HYPERCALL                                       \
                      : __HYPERCALL_2PARAM                              \
-                     : __HYPERCALL_ENTRY(name)                         \
+                     : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)        \
                      : __HYPERCALL_CLOBBER2);                          \
        (type)__res;                                                    \
 })
@@ -181,7 +192,7 @@ extern struct { char _entry[32]; } hypercall_page[];
        __HYPERCALL_3ARG(a1, a2, a3);                                   \
        asm volatile (__HYPERCALL                                       \
                      : __HYPERCALL_3PARAM                              \
-                     : __HYPERCALL_ENTRY(name)                         \
+                     : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)        \
                      : __HYPERCALL_CLOBBER3);                          \
        (type)__res;                                                    \
 })
@@ -192,7 +203,7 @@ extern struct { char _entry[32]; } hypercall_page[];
        __HYPERCALL_4ARG(a1, a2, a3, a4);                               \
        asm volatile (__HYPERCALL                                       \
                      : __HYPERCALL_4PARAM                              \
-                     : __HYPERCALL_ENTRY(name)                         \
+                     : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)        \
                      : __HYPERCALL_CLOBBER4);                          \
        (type)__res;                                                    \
 })
@@ -206,12 +217,9 @@ xen_single_call(unsigned int call,
        __HYPERCALL_DECLS;
        __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
 
-       if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
-               return -EINVAL;
-
-       asm volatile(CALL_NOSPEC
+       asm volatile(__HYPERCALL
                     : __HYPERCALL_5PARAM
-                    : [thunk_target] "a" (&hypercall_page[call])
+                    : __HYPERCALL_ENTRY(call)
                     : __HYPERCALL_CLOBBER5);
 
        return (long)__res;


@@ -142,11 +142,6 @@ static bool skip_addr(void *dest)
        if (dest >= (void *)relocate_kernel &&
            dest < (void*)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
                return true;
-#endif
-#ifdef CONFIG_XEN
-       if (dest >= (void *)hypercall_page &&
-           dest < (void*)hypercall_page + PAGE_SIZE)
-               return true;
 #endif
        return false;
 }


@@ -868,7 +868,7 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c)
                tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
 }
 
-static void get_cpu_vendor(struct cpuinfo_x86 *c)
+void get_cpu_vendor(struct cpuinfo_x86 *c)
 {
        char *v = c->x86_vendor_id;
        int i;
@@ -1652,15 +1652,11 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
        detect_nopl();
 }
 
-void __init early_cpu_init(void)
+void __init init_cpu_devs(void)
 {
        const struct cpu_dev *const *cdev;
        int count = 0;
 
-#ifdef CONFIG_PROCESSOR_SELECT
-       pr_info("KERNEL supported cpus:\n");
-#endif
-
        for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
                const struct cpu_dev *cpudev = *cdev;
 
@@ -1668,20 +1664,30 @@ void __init early_cpu_init(void)
                        break;
                cpu_devs[count] = cpudev;
                count++;
+       }
+}
+
+void __init early_cpu_init(void)
+{
+#ifdef CONFIG_PROCESSOR_SELECT
+       unsigned int i, j;
+
+       pr_info("KERNEL supported cpus:\n");
+#endif
+
+       init_cpu_devs();
 
 #ifdef CONFIG_PROCESSOR_SELECT
-               {
-                       unsigned int j;
-
-                       for (j = 0; j < 2; j++) {
-                               if (!cpudev->c_ident[j])
-                                       continue;
-                               pr_info("  %s %s\n", cpudev->c_vendor,
-                                       cpudev->c_ident[j]);
-                       }
+       for (i = 0; i < X86_VENDOR_NUM && cpu_devs[i]; i++) {
+               for (j = 0; j < 2; j++) {
+                       if (!cpu_devs[i]->c_ident[j])
+                               continue;
+                       pr_info("  %s %s\n", cpu_devs[i]->c_vendor,
+                               cpu_devs[i]->c_ident[j]);
                }
+       }
 #endif
-       }
 
        early_identify_cpu(&boot_cpu_data);
 }


@@ -172,6 +172,15 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
 }
 EXPORT_SYMBOL_GPL(arch_static_call_transform);
 
+noinstr void __static_call_update_early(void *tramp, void *func)
+{
+       BUG_ON(system_state != SYSTEM_BOOTING);
+       BUG_ON(!early_boot_irqs_disabled);
+       BUG_ON(static_call_initialized);
+       __text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE);
+       sync_core();
+}
+
 #ifdef CONFIG_MITIGATION_RETHUNK
 /*
  * This is called by apply_returns() to fix up static call trampolines,


@@ -2,6 +2,7 @@
 
 #include <linux/console.h>
 #include <linux/cpu.h>
+#include <linux/instrumentation.h>
 #include <linux/kexec.h>
 #include <linux/memblock.h>
 #include <linux/slab.h>
@@ -21,7 +22,8 @@
 
 #include "xen-ops.h"
 
-EXPORT_SYMBOL_GPL(hypercall_page);
+DEFINE_STATIC_CALL(xen_hypercall, xen_hypercall_hvm);
+EXPORT_STATIC_CALL_TRAMP(xen_hypercall);
 
 /*
  * Pointer to the xen_vcpu_info structure or
@@ -68,6 +70,66 @@ EXPORT_SYMBOL(xen_start_flags);
  */
 struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
 
+static __ref void xen_get_vendor(void)
+{
+       init_cpu_devs();
+       cpu_detect(&boot_cpu_data);
+       get_cpu_vendor(&boot_cpu_data);
+}
+
+void xen_hypercall_setfunc(void)
+{
+       if (static_call_query(xen_hypercall) != xen_hypercall_hvm)
+               return;
+
+       if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
+               static_call_update(xen_hypercall, xen_hypercall_amd);
+       else
+               static_call_update(xen_hypercall, xen_hypercall_intel);
+}
+
+/*
+ * Evaluate processor vendor in order to select the correct hypercall
+ * function for HVM/PVH guests.
+ * Might be called very early in boot before vendor has been set by
+ * early_cpu_init().
+ */
+noinstr void *__xen_hypercall_setfunc(void)
+{
+       void (*func)(void);
+
+       /*
+        * Xen is supported only on CPUs with CPUID, so testing for
+        * X86_FEATURE_CPUID is a test for early_cpu_init() having been
+        * run.
+        *
+        * Note that __xen_hypercall_setfunc() is noinstr only due to a nasty
+        * dependency chain: it is being called via the xen_hypercall static
+        * call when running as a PVH or HVM guest. Hypercalls need to be
+        * noinstr due to PV guests using hypercalls in noinstr code. So we
+        * can safely tag the function body as "instrumentation ok", since
+        * the PV guest requirement is not of interest here (xen_get_vendor()
+        * calls noinstr functions, and static_call_update_early() might do
+        * so, too).
+        */
+       instrumentation_begin();
+
+       if (!boot_cpu_has(X86_FEATURE_CPUID))
+               xen_get_vendor();
+
+       if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
+               func = xen_hypercall_amd;
+       else
+               func = xen_hypercall_intel;
+
+       static_call_update_early(xen_hypercall, func);
+
+       instrumentation_end();
+
+       return func;
+}
+
 static int xen_cpu_up_online(unsigned int cpu)
 {
        xen_init_lock_cpu(cpu);


@@ -106,15 +106,8 @@ static void __init init_hvm_pv_info(void)
        /* PVH set up hypercall page in xen_prepare_pvh(). */
        if (xen_pvh_domain())
                pv_info.name = "Xen PVH";
-       else {
-               u64 pfn;
-               uint32_t msr;
-
+       else
                pv_info.name = "Xen HVM";
-               msr = cpuid_ebx(base + 2);
-               pfn = __pa(hypercall_page);
-               wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
-       }
 
        xen_setup_features();
@@ -300,6 +293,10 @@ static uint32_t __init xen_platform_hvm(void)
        if (xen_pv_domain())
                return 0;
 
+       /* Set correct hypercall function. */
+       if (xen_domain)
+               xen_hypercall_setfunc();
+
        if (xen_pvh_domain() && nopv) {
                /* Guest booting via the Xen-PVH boot entry goes here */
                pr_info("\"nopv\" parameter is ignored in PVH guest\n");


@@ -1341,6 +1341,9 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
        xen_domain_type = XEN_PV_DOMAIN;
        xen_start_flags = xen_start_info->flags;
 
+       /* Interrupts are guaranteed to be off initially. */
+       early_boot_irqs_disabled = true;
+       static_call_update_early(xen_hypercall, xen_hypercall_pv);
 
        xen_setup_features();
@@ -1431,7 +1434,6 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
        WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
 
        local_irq_disable();
-       early_boot_irqs_disabled = true;
 
        xen_raw_console_write("mapping kernel into physical memory\n");
        xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,


@@ -129,17 +129,10 @@ static void __init pvh_arch_setup(void)
 
 void __init xen_pvh_init(struct boot_params *boot_params)
 {
-       u32 msr;
-       u64 pfn;
-
        xen_pvh = 1;
        xen_domain_type = XEN_HVM_DOMAIN;
        xen_start_flags = pvh_start_info.flags;
 
-       msr = cpuid_ebx(xen_cpuid_base() + 2);
-       pfn = __pa(hypercall_page);
-       wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
-
        x86_init.oem.arch_setup = pvh_arch_setup;
        x86_init.oem.banner = xen_banner;


@@ -20,9 +20,32 @@
 
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <linux/objtool.h>
 #include <../entry/calling.h>
 
 .pushsection .noinstr.text, "ax"
+/*
+ * PV hypercall interface to the hypervisor.
+ *
+ * Called via inline asm(), so better preserve %rcx and %r11.
+ *
+ * Input:
+ *     %eax: hypercall number
+ *     %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
+ * Output: %rax
+ */
+SYM_FUNC_START(xen_hypercall_pv)
+       ANNOTATE_NOENDBR
+       push %rcx
+       push %r11
+       UNWIND_HINT_SAVE
+       syscall
+       UNWIND_HINT_RESTORE
+       pop %r11
+       pop %rcx
+       RET
+SYM_FUNC_END(xen_hypercall_pv)
+
 /*
  * Disabling events is simply a matter of making the event mask
  * non-zero.
@@ -176,7 +199,6 @@ SYM_CODE_START(xen_early_idt_handler_array)
 SYM_CODE_END(xen_early_idt_handler_array)
        __FINIT
 
-hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
 /*
  * Xen64 iret frame:
  *
@@ -186,17 +208,28 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
  *     cs
  *     rip <-- standard iret frame
  *
- *     flags
+ *     flags <-- xen_iret must push from here on
  *
- *     rcx             }
- *     r11             }<-- pushed by hypercall page
- *     rsp->rax        }
+ *     rcx
+ *     r11
+ *     rsp->rax
  */
+.macro xen_hypercall_iret
+       pushq $0        /* Flags */
+       push %rcx
+       push %r11
+       push %rax
+       mov $__HYPERVISOR_iret, %eax
+       syscall         /* Do the IRET. */
+#ifdef CONFIG_MITIGATION_SLS
+       int3
+#endif
+.endm
+
 SYM_CODE_START(xen_iret)
        UNWIND_HINT_UNDEFINED
        ANNOTATE_NOENDBR
-       pushq $0
-       jmp hypercall_iret
+       xen_hypercall_iret
 SYM_CODE_END(xen_iret)
 
 /*
@@ -301,8 +334,7 @@ SYM_CODE_START(xen_entry_SYSENTER_compat)
        ENDBR
        lea 16(%rsp), %rsp      /* strip %rcx, %r11 */
        mov $-ENOSYS, %rax
-       pushq $0
-       jmp hypercall_iret
+       xen_hypercall_iret
 SYM_CODE_END(xen_entry_SYSENTER_compat)
 SYM_CODE_END(xen_entry_SYSCALL_compat)


@@ -6,9 +6,11 @@
 
 #include <linux/elfnote.h>
 #include <linux/init.h>
+#include <linux/instrumentation.h>
 
 #include <asm/boot.h>
 #include <asm/asm.h>
+#include <asm/frame.h>
 #include <asm/msr.h>
 #include <asm/page_types.h>
 #include <asm/percpu.h>
@@ -20,28 +22,6 @@
 #include <xen/interface/xen-mca.h>
 #include <asm/xen/interface.h>
 
-.pushsection .noinstr.text, "ax"
-       .balign PAGE_SIZE
-SYM_CODE_START(hypercall_page)
-       .rept (PAGE_SIZE / 32)
-               UNWIND_HINT_FUNC
-               ANNOTATE_NOENDBR
-               ANNOTATE_UNRET_SAFE
-               ret
-               /*
-                * Xen will write the hypercall page, and sort out ENDBR.
-                */
-               .skip 31, 0xcc
-       .endr
-
-#define HYPERCALL(n) \
-       .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \
-       .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32
-#include <asm/xen-hypercalls.h>
-#undef HYPERCALL
-SYM_CODE_END(hypercall_page)
-.popsection
-
 #ifdef CONFIG_XEN_PV
        __INIT
 SYM_CODE_START(startup_xen)
@@ -87,6 +67,87 @@ SYM_CODE_END(xen_cpu_bringup_again)
 #endif
 #endif
 
+       .pushsection .noinstr.text, "ax"
+/*
+ * Xen hypercall interface to the hypervisor.
+ *
+ * Input:
+ *     %eax: hypercall number
+ *   32-bit:
+ *     %ebx, %ecx, %edx, %esi, %edi: args 1..5 for the hypercall
+ *   64-bit:
+ *     %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
+ * Output: %[er]ax
+ */
+SYM_FUNC_START(xen_hypercall_hvm)
+       ENDBR
+       FRAME_BEGIN
+       /* Save all relevant registers (caller save and arguments). */
+#ifdef CONFIG_X86_32
+       push %eax
+       push %ebx
+       push %ecx
+       push %edx
+       push %esi
+       push %edi
+#else
+       push %rax
+       push %rcx
+       push %rdx
+       push %rdi
+       push %rsi
+       push %r11
+       push %r10
+       push %r9
+       push %r8
+#ifdef CONFIG_FRAME_POINTER
+       pushq $0        /* Dummy push for stack alignment. */
+#endif
+#endif
+       /* Set the vendor specific function. */
+       call __xen_hypercall_setfunc
+       /* Set ZF = 1 if AMD, Restore saved registers. */
+#ifdef CONFIG_X86_32
+       lea xen_hypercall_amd, %ebx
+       cmp %eax, %ebx
+       pop %edi
+       pop %esi
+       pop %edx
+       pop %ecx
+       pop %ebx
+       pop %eax
+#else
+       lea xen_hypercall_amd(%rip), %rbx
+       cmp %rax, %rbx
+#ifdef CONFIG_FRAME_POINTER
+       pop %rax        /* Dummy pop. */
+#endif
+       pop %r8
+       pop %r9
+       pop %r10
+       pop %r11
+       pop %rsi
+       pop %rdi
+       pop %rdx
+       pop %rcx
+       pop %rax
+#endif
+       /* Use correct hypercall function. */
+       jz xen_hypercall_amd
+       jmp xen_hypercall_intel
+SYM_FUNC_END(xen_hypercall_hvm)
+
+SYM_FUNC_START(xen_hypercall_amd)
+       vmmcall
+       RET
+SYM_FUNC_END(xen_hypercall_amd)
+
+SYM_FUNC_START(xen_hypercall_intel)
+       vmcall
+       RET
+SYM_FUNC_END(xen_hypercall_intel)
+       .popsection
+
 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz "linux")
 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION,  .asciz "2.6")
 ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION,    .asciz "xen-3.0")
@@ -115,7 +176,6 @@ SYM_CODE_END(xen_cpu_bringup_again)
 #else
 # define FEATURES_DOM0 0
 #endif
-       ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
        ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES,
                .long FEATURES_PV | FEATURES_PVH | FEATURES_DOM0)
        ELFNOTE(Xen, XEN_ELFNOTE_LOADER,         .asciz "generic")


@@ -326,4 +326,13 @@ static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
 static inline void xen_smp_count_cpus(void) { }
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_XEN_PV
+void xen_hypercall_pv(void);
+#endif
+void xen_hypercall_hvm(void);
+void xen_hypercall_amd(void);
+void xen_hypercall_intel(void);
+void xen_hypercall_setfunc(void);
+void *__xen_hypercall_setfunc(void);
+
 #endif /* XEN_OPS_H */


@@ -1324,10 +1324,14 @@ void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
        struct blkcg *blkcg = css_to_blkcg(blkcg_css);
 
        do {
+               struct blkcg *parent;
+
                if (!refcount_dec_and_test(&blkcg->online_pin))
                        break;
+
+               parent = blkcg_parent(blkcg);
                blkcg_destroy_blkgs(blkcg);
-               blkcg = blkcg_parent(blkcg);
+               blkcg = parent;
        } while (blkcg);
 }
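The point of the change above is ordering: blkcg_destroy_blkgs() can drop the reference that keeps blkcg alive, so blkcg_parent() must be read before it, not after. The same walk-then-release pattern in generic form (sketch only; struct node and put_node() are invented):

struct node {
        struct node *parent;
};

static void put_node(struct node *n);   /* hypothetical release helper; may free n */

static void release_chain(struct node *n)
{
        while (n) {
                struct node *parent = n->parent;        /* read before n can be freed */

                put_node(n);
                n = parent;
        }
}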


@@ -1098,7 +1098,14 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
                inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
                                           iocg->child_active_sum);
        } else {
-               inuse = clamp_t(u32, inuse, 1, active);
+               /*
+                * It may be tempting to turn this into a clamp expression with
+                * a lower limit of 1 but active may be 0, which cannot be used
+                * as an upper limit in that situation. This expression allows
+                * active to clamp inuse unless it is 0, in which case inuse
+                * becomes 1.
+                */
+               inuse = min(inuse, active) ?: 1;
        }
 
        iocg->last_inuse = iocg->inuse;
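A worked illustration of the corner case the new comment describes (values made up). With active == 0 the old bounds were inverted (lower bound 1, upper bound 0), which clamp_t() does not permit; the replacement degrades to 1 instead. With a non-zero active both forms agree:

/*
 * active == 0, inuse == 5:  min(5, 0) ?: 1  ->  0 ?: 1  ->  1
 * active == 8, inuse == 5:  min(5, 8) ?: 1  ->  5           (old: clamp_t(u32, 5, 1, 8) == 5)
 * active == 8, inuse == 0:  min(0, 8) ?: 1  ->  0 ?: 1  -> 1 (old: clamp_t(u32, 0, 1, 8) == 1)
 */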


@@ -275,15 +275,13 @@ void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;
 
-       mutex_lock(&q->sysfs_dir_lock);
+       lockdep_assert_held(&q->sysfs_dir_lock);
+
        if (!q->mq_sysfs_init_done)
-               goto unlock;
+               return;
 
        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
-
-unlock:
-       mutex_unlock(&q->sysfs_dir_lock);
 }
 
 int blk_mq_sysfs_register_hctxs(struct request_queue *q)
@@ -292,9 +290,10 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
        unsigned long i;
        int ret = 0;
 
-       mutex_lock(&q->sysfs_dir_lock);
+       lockdep_assert_held(&q->sysfs_dir_lock);
+
        if (!q->mq_sysfs_init_done)
-               goto unlock;
+               return ret;
 
        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
@@ -302,8 +301,5 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
                        break;
        }
 
-unlock:
-       mutex_unlock(&q->sysfs_dir_lock);
-
        return ret;
 }


@@ -43,6 +43,7 @@
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
+static DEFINE_MUTEX(blk_mq_cpuhp_lock);
 
 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
 static void blk_mq_request_bypass_insert(struct request *rq,
@@ -3740,13 +3741,91 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
        return 0;
 }
 
-static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
+static void __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 {
-       if (!(hctx->flags & BLK_MQ_F_STACKING))
+       lockdep_assert_held(&blk_mq_cpuhp_lock);
+
+       if (!(hctx->flags & BLK_MQ_F_STACKING) &&
+           !hlist_unhashed(&hctx->cpuhp_online)) {
                cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
                                                    &hctx->cpuhp_online);
-       cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
-                                           &hctx->cpuhp_dead);
+               INIT_HLIST_NODE(&hctx->cpuhp_online);
+       }
+
+       if (!hlist_unhashed(&hctx->cpuhp_dead)) {
+               cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
+                                                   &hctx->cpuhp_dead);
+               INIT_HLIST_NODE(&hctx->cpuhp_dead);
+       }
+}
+
+static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
+{
+       mutex_lock(&blk_mq_cpuhp_lock);
+       __blk_mq_remove_cpuhp(hctx);
+       mutex_unlock(&blk_mq_cpuhp_lock);
+}
+
+static void __blk_mq_add_cpuhp(struct blk_mq_hw_ctx *hctx)
+{
+       lockdep_assert_held(&blk_mq_cpuhp_lock);
+
+       if (!(hctx->flags & BLK_MQ_F_STACKING) &&
+           hlist_unhashed(&hctx->cpuhp_online))
+               cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
+                                                &hctx->cpuhp_online);
+
+       if (hlist_unhashed(&hctx->cpuhp_dead))
+               cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD,
+                                                &hctx->cpuhp_dead);
+}
+
+static void __blk_mq_remove_cpuhp_list(struct list_head *head)
+{
+       struct blk_mq_hw_ctx *hctx;
+
+       lockdep_assert_held(&blk_mq_cpuhp_lock);
+
+       list_for_each_entry(hctx, head, hctx_list)
+               __blk_mq_remove_cpuhp(hctx);
+}
+
+/*
+ * Unregister cpuhp callbacks from exited hw queues
+ *
+ * Safe to call if this `request_queue` is live
+ */
+static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q)
+{
+       LIST_HEAD(hctx_list);
+
+       spin_lock(&q->unused_hctx_lock);
+       list_splice_init(&q->unused_hctx_list, &hctx_list);
+       spin_unlock(&q->unused_hctx_lock);
+
+       mutex_lock(&blk_mq_cpuhp_lock);
+       __blk_mq_remove_cpuhp_list(&hctx_list);
+       mutex_unlock(&blk_mq_cpuhp_lock);
+
+       spin_lock(&q->unused_hctx_lock);
+       list_splice(&hctx_list, &q->unused_hctx_list);
+       spin_unlock(&q->unused_hctx_lock);
+}
+
+/*
+ * Register cpuhp callbacks from all hw queues
+ *
+ * Safe to call if this `request_queue` is live
+ */
+static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned long i;
+
+       mutex_lock(&blk_mq_cpuhp_lock);
+       queue_for_each_hw_ctx(q, hctx, i)
+               __blk_mq_add_cpuhp(hctx);
+       mutex_unlock(&blk_mq_cpuhp_lock);
 }
 
 /*
@@ -3797,8 +3876,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);
 
-       blk_mq_remove_cpuhp(hctx);
-
        xa_erase(&q->hctx_table, hctx_idx);
 
        spin_lock(&q->unused_hctx_lock);
@@ -3815,6 +3892,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
        queue_for_each_hw_ctx(q, hctx, i) {
                if (i == nr_queue)
                        break;
+               blk_mq_remove_cpuhp(hctx);
                blk_mq_exit_hctx(q, set, hctx, i);
        }
 }
@@ -3878,6 +3956,8 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
        INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
        spin_lock_init(&hctx->lock);
        INIT_LIST_HEAD(&hctx->dispatch);
+       INIT_HLIST_NODE(&hctx->cpuhp_dead);
+       INIT_HLIST_NODE(&hctx->cpuhp_online);
        hctx->queue = q;
        hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
@@ -4382,7 +4462,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
        unsigned long i, j;
 
        /* protect against switching io scheduler */
-       mutex_lock(&q->sysfs_lock);
+       lockdep_assert_held(&q->sysfs_lock);
+
        for (i = 0; i < set->nr_hw_queues; i++) {
                int old_node;
                int node = blk_mq_get_hctx_node(set, i);
@@ -4415,7 +4496,12 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                xa_for_each_start(&q->hctx_table, j, hctx, j)
                        blk_mq_exit_hctx(q, set, hctx, j);
-       mutex_unlock(&q->sysfs_lock);
+
+       /* unregister cpuhp callbacks for exited hctxs */
+       blk_mq_remove_hw_queues_cpuhp(q);
+
+       /* register cpuhp for new initialized hctxs */
+       blk_mq_add_hw_queues_cpuhp(q);
 }
 
 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
@@ -4441,10 +4527,14 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
        xa_init(&q->hctx_table);
 
+       mutex_lock(&q->sysfs_lock);
+
        blk_mq_realloc_hw_ctxs(set, q);
        if (!q->nr_hw_queues)
                goto err_hctxs;
 
+       mutex_unlock(&q->sysfs_lock);
+
        INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
@@ -4463,6 +4553,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        return 0;
 
 err_hctxs:
+       mutex_unlock(&q->sysfs_lock);
        blk_mq_release(q);
 err_exit:
        q->mq_ops = NULL;
@@ -4843,12 +4934,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
                return false;
 
        /* q->elevator needs protection from ->sysfs_lock */
-       mutex_lock(&q->sysfs_lock);
+       lockdep_assert_held(&q->sysfs_lock);
 
        /* the check has to be done with holding sysfs_lock */
        if (!q->elevator) {
                kfree(qe);
-               goto unlock;
+               goto out;
        }
 
        INIT_LIST_HEAD(&qe->node);
@@ -4858,9 +4949,7 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
        __elevator_get(qe->type);
        list_add(&qe->node, head);
        elevator_disable(q);
-unlock:
-       mutex_unlock(&q->sysfs_lock);
-
+out:
        return true;
 }
@@ -4889,11 +4978,9 @@ static void blk_mq_elv_switch_back(struct list_head *head,
        list_del(&qe->node);
        kfree(qe);
 
-       mutex_lock(&q->sysfs_lock);
        elevator_switch(q, t);
        /* drop the reference acquired in blk_mq_elv_switch_none */
        elevator_put(t);
-       mutex_unlock(&q->sysfs_lock);
 }
 
 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
@@ -4913,8 +5000,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
        if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
                return;
 
-       list_for_each_entry(q, &set->tag_list, tag_set_list)
+       list_for_each_entry(q, &set->tag_list, tag_set_list) {
+               mutex_lock(&q->sysfs_dir_lock);
+               mutex_lock(&q->sysfs_lock);
                blk_mq_freeze_queue(q);
+       }
        /*
         * Switch IO scheduler to 'none', cleaning up the data associated
         * with the previous scheduler. We will switch back once we are done
@@ -4970,8 +5060,11 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_elv_switch_back(&head, q);
 
-       list_for_each_entry(q, &set->tag_list, tag_set_list)
+       list_for_each_entry(q, &set->tag_list, tag_set_list) {
                blk_mq_unfreeze_queue(q);
+               mutex_unlock(&q->sysfs_lock);
+               mutex_unlock(&q->sysfs_dir_lock);
+       }
 
        /* Free the excess tags when nr_hw_queues shrink. */
        for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)


@@ -690,11 +690,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
                return res;
        }
 
-       blk_mq_freeze_queue(q);
        mutex_lock(&q->sysfs_lock);
+       blk_mq_freeze_queue(q);
        res = entry->store(disk, page, length);
-       mutex_unlock(&q->sysfs_lock);
        blk_mq_unfreeze_queue(q);
+       mutex_unlock(&q->sysfs_lock);
        return res;
 }


@@ -18,7 +18,7 @@
 #include <linux/vmalloc.h>
 #include <linux/sched/mm.h>
 #include <linux/spinlock.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <linux/mempool.h>
 
 #include "blk.h"
@@ -41,7 +41,6 @@ static const char *const zone_cond_name[] = {
 /*
  * Per-zone write plug.
  * @node: hlist_node structure for managing the plug using a hash table.
- * @link: To list the plug in the zone write plug error list of the disk.
  * @ref: Zone write plug reference counter. A zone write plug reference is
  *      always at least 1 when the plug is hashed in the disk plug hash table.
  *      The reference is incremented whenever a new BIO needing plugging is
@@ -63,8 +62,7 @@ static const char *const zone_cond_name[] = {
  */
 struct blk_zone_wplug {
        struct hlist_node       node;
-       struct list_head        link;
-       atomic_t                ref;
+       refcount_t              ref;
        spinlock_t              lock;
        unsigned int            flags;
        unsigned int            zone_no;
@@ -80,8 +78,8 @@ struct blk_zone_wplug {
  *  - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged,
  *    that is, that write BIOs are being throttled due to a write BIO already
  *    being executed or the zone write plug bio list is not empty.
- *  - BLK_ZONE_WPLUG_ERROR: Indicates that a write error happened which will be
- *    recovered with a report zone to update the zone write pointer offset.
+ *  - BLK_ZONE_WPLUG_NEED_WP_UPDATE: Indicates that we lost track of a zone
+ *    write pointer offset and need to update it.
  *  - BLK_ZONE_WPLUG_UNHASHED: Indicates that the zone write plug was removed
  *    from the disk hash table and that the initial reference to the zone
  *    write plug set when the plug was first added to the hash table has been
@@ -91,11 +89,9 @@ struct blk_zone_wplug {
  *    freed once all remaining references from BIOs or functions are dropped.
  */
 #define BLK_ZONE_WPLUG_PLUGGED         (1U << 0)
-#define BLK_ZONE_WPLUG_ERROR           (1U << 1)
+#define BLK_ZONE_WPLUG_NEED_WP_UPDATE  (1U << 1)
 #define BLK_ZONE_WPLUG_UNHASHED                (1U << 2)
 
-#define BLK_ZONE_WPLUG_BUSY    (BLK_ZONE_WPLUG_PLUGGED | BLK_ZONE_WPLUG_ERROR)
-
 /**
  * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
  * @zone_cond: BLK_ZONE_COND_XXX.
@@ -115,6 +111,30 @@ const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
 }
 EXPORT_SYMBOL_GPL(blk_zone_cond_str);
 
+struct disk_report_zones_cb_args {
+       struct gendisk  *disk;
+       report_zones_cb user_cb;
+       void            *user_data;
+};
+
+static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk,
+                                          struct blk_zone *zone);
+
+static int disk_report_zones_cb(struct blk_zone *zone, unsigned int idx,
+                               void *data)
+{
+       struct disk_report_zones_cb_args *args = data;
+       struct gendisk *disk = args->disk;
+
+       if (disk->zone_wplugs_hash)
+               disk_zone_wplug_sync_wp_offset(disk, zone);
+
+       if (!args->user_cb)
+               return 0;
+
+       return args->user_cb(zone, idx, args->user_data);
+}
+
 /**
  * blkdev_report_zones - Get zones information
  * @bdev: Target block device
@@ -139,6 +159,11 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
 {
        struct gendisk *disk = bdev->bd_disk;
        sector_t capacity = get_capacity(disk);
+       struct disk_report_zones_cb_args args = {
+               .disk = disk,
+               .user_cb = cb,
+               .user_data = data,
+       };
 
        if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
                return -EOPNOTSUPP;
@@ -146,7 +171,8 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
        if (!nr_zones || sector >= capacity)
                return 0;
 
-       return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
+       return disk->fops->report_zones(disk, sector, nr_zones,
+                                       disk_report_zones_cb, &args);
 }
 EXPORT_SYMBOL_GPL(blkdev_report_zones);
@@ -417,7 +443,7 @@ static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
 
        hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[idx], node) {
                if (zwplug->zone_no == zno &&
-                   atomic_inc_not_zero(&zwplug->ref)) {
+                   refcount_inc_not_zero(&zwplug->ref)) {
                        rcu_read_unlock();
                        return zwplug;
                }
@@ -438,9 +464,9 @@ static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
 
 static inline void disk_put_zone_wplug(struct blk_zone_wplug *zwplug)
 {
-       if (atomic_dec_and_test(&zwplug->ref)) {
+       if (refcount_dec_and_test(&zwplug->ref)) {
                WARN_ON_ONCE(!bio_list_empty(&zwplug->bio_list));
-               WARN_ON_ONCE(!list_empty(&zwplug->link));
+               WARN_ON_ONCE(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
                WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_UNHASHED));
 
                call_rcu(&zwplug->rcu_head, disk_free_zone_wplug_rcu);
@@ -454,8 +480,8 @@ static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
        if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)
                return false;
 
-       /* If the zone write plug is still busy, it cannot be removed. */
-       if (zwplug->flags & BLK_ZONE_WPLUG_BUSY)
+       /* If the zone write plug is still plugged, it cannot be removed. */
+       if (zwplug->flags & BLK_ZONE_WPLUG_PLUGGED)
                return false;
 
        /*
@@ -469,7 +495,7 @@ static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
         * taken when the plug was allocated and another reference taken by the
         * caller context).
         */
-       if (atomic_read(&zwplug->ref) > 2)
+       if (refcount_read(&zwplug->ref) > 2)
                return false;
 
        /* We can remove zone write plugs for zones that are empty or full. */
@@ -538,12 +564,11 @@ static struct blk_zone_wplug *disk_get_and_lock_zone_wplug(struct gendisk *disk,
                return NULL;
 
        INIT_HLIST_NODE(&zwplug->node);
-       INIT_LIST_HEAD(&zwplug->link);
-       atomic_set(&zwplug->ref, 2);
+       refcount_set(&zwplug->ref, 2);
        spin_lock_init(&zwplug->lock);
        zwplug->flags = 0;
        zwplug->zone_no = zno;
-       zwplug->wp_offset = sector & (disk->queue->limits.chunk_sectors - 1);
+       zwplug->wp_offset = bdev_offset_from_zone_start(disk->part0, sector);
        bio_list_init(&zwplug->bio_list);
        INIT_WORK(&zwplug->bio_work, blk_zone_wplug_bio_work);
        zwplug->disk = disk;
@@ -587,115 +612,22 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
 }
 
 /*
- * Abort (fail) all plugged BIOs of a zone write plug that are not aligned
- * with the assumed write pointer location of the zone when the BIO will
- * be unplugged.
+ * Set a zone write plug write pointer offset to the specified value.
+ * This aborts all plugged BIOs, which is fine as this function is called for
+ * a zone reset operation, a zone finish operation or if the zone needs a wp
+ * update from a report zone after a write error.
  */
-static void disk_zone_wplug_abort_unaligned(struct gendisk *disk,
-                                           struct blk_zone_wplug *zwplug)
-{
-       unsigned int wp_offset = zwplug->wp_offset;
-       struct bio_list bl = BIO_EMPTY_LIST;
-       struct bio *bio;
-
-       while ((bio = bio_list_pop(&zwplug->bio_list))) {
-               if (disk_zone_is_full(disk, zwplug->zone_no, wp_offset) ||
-                   (bio_op(bio) != REQ_OP_ZONE_APPEND &&
-                    bio_offset_from_zone_start(bio) != wp_offset)) {
-                       blk_zone_wplug_bio_io_error(zwplug, bio);
-                       continue;
-               }
-
-               wp_offset += bio_sectors(bio);
-               bio_list_add(&bl, bio);
-       }
-
-       bio_list_merge(&zwplug->bio_list, &bl);
-}
-
-static inline void disk_zone_wplug_set_error(struct gendisk *disk,
-                                            struct blk_zone_wplug *zwplug)
-{
-       unsigned long flags;
-
-       if (zwplug->flags & BLK_ZONE_WPLUG_ERROR)
-               return;
-
-       /*
-        * At this point, we already have a reference on the zone write plug.
-        * However, since we are going to add the plug to the disk zone write
-        * plugs work list, increase its reference count. This reference will
-        * be dropped in disk_zone_wplugs_work() once the error state is
-        * handled, or in disk_zone_wplug_clear_error() if the zone is reset or
-        * finished.
-        */
-       zwplug->flags |= BLK_ZONE_WPLUG_ERROR;
-       atomic_inc(&zwplug->ref);
-
-       spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
-       list_add_tail(&zwplug->link, &disk->zone_wplugs_err_list);
-       spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
-}
-
-static inline void disk_zone_wplug_clear_error(struct gendisk *disk,
-                                              struct blk_zone_wplug *zwplug)
-{
-       unsigned long flags;
-
-       if (!(zwplug->flags & BLK_ZONE_WPLUG_ERROR))
-               return;
-
-       /*
-        * We are racing with the error handling work which drops the reference
-        * on the zone write plug after handling the error state. So remove the
-        * plug from the error list and drop its reference count only if the
-        * error handling has not yet started, that is, if the zone write plug
-        * is still listed.
-        */
-       spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
-       if (!list_empty(&zwplug->link)) {
-               list_del_init(&zwplug->link);
-               zwplug->flags &= ~BLK_ZONE_WPLUG_ERROR;
-               disk_put_zone_wplug(zwplug);
-       }
-       spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
-}
-
-/*
- * Set a zone write plug write pointer offset to either 0 (zone reset case)
- * or to the zone size (zone finish case). This aborts all plugged BIOs, which
- * is fine to do as doing a zone reset or zone finish while writes are in-flight
- * is a mistake from the user which will most likely cause all plugged BIOs to
- * fail anyway.
- */
 static void disk_zone_wplug_set_wp_offset(struct gendisk *disk,
                                          struct blk_zone_wplug *zwplug,
                                          unsigned int wp_offset)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&zwplug->lock, flags);
-
-       /*
-        * Make sure that a BIO completion or another zone reset or finish
-        * operation has not already removed the plug from the hash table.
-        */
-       if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) {
-               spin_unlock_irqrestore(&zwplug->lock, flags);
-               return;
-       }
+       lockdep_assert_held(&zwplug->lock);
 
        /* Update the zone write pointer and abort all plugged BIOs. */
+       zwplug->flags &= ~BLK_ZONE_WPLUG_NEED_WP_UPDATE;
        zwplug->wp_offset = wp_offset;
        disk_zone_wplug_abort(zwplug);
 
        /*
-        * Updating the write pointer offset puts back the zone
-        * in a good state. So clear the error flag and decrement the
-        * error count if we were in error state.
-        */
-       disk_zone_wplug_clear_error(disk, zwplug);
-
-       /*
         * The zone write plug now has no BIO plugged: remove it from the
         * hash table so that it cannot be seen. The plug will be freed
@@ -703,8 +635,58 @@ static void disk_zone_wplug_set_wp_offset(struct gendisk *disk,
         */
        if (disk_should_remove_zone_wplug(disk, zwplug))
                disk_remove_zone_wplug(disk, zwplug);
+}
 
-       spin_unlock_irqrestore(&zwplug->lock, flags);
+static unsigned int blk_zone_wp_offset(struct blk_zone *zone)
+{
+       switch (zone->cond) {
+       case BLK_ZONE_COND_IMP_OPEN:
+       case BLK_ZONE_COND_EXP_OPEN:
+       case BLK_ZONE_COND_CLOSED:
+               return zone->wp - zone->start;
+       case BLK_ZONE_COND_FULL:
+               return zone->len;
+       case BLK_ZONE_COND_EMPTY:
+               return 0;
+       case BLK_ZONE_COND_NOT_WP:
+       case BLK_ZONE_COND_OFFLINE:
+       case BLK_ZONE_COND_READONLY:
+       default:
+               /*
+                * Conventional, offline and read-only zones do not have a valid
+                * write pointer.
+                */
+               return UINT_MAX;
+       }
+}
+
+static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk,
+                                          struct blk_zone *zone)
+{
+       struct blk_zone_wplug *zwplug;
+       unsigned long flags;
+
+       zwplug = disk_get_zone_wplug(disk, zone->start);
+       if (!zwplug)
+               return;
+
+       spin_lock_irqsave(&zwplug->lock, flags);
+       if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE)
+               disk_zone_wplug_set_wp_offset(disk, zwplug,
+                                             blk_zone_wp_offset(zone));
        spin_unlock_irqrestore(&zwplug->lock, flags);
+
+       disk_put_zone_wplug(zwplug);
+}
+
+static int disk_zone_sync_wp_offset(struct gendisk *disk, sector_t sector)
+{
+       struct disk_report_zones_cb_args args = {
+               .disk = disk,
+       };
+
+       return disk->fops->report_zones(disk, sector, 1,
+                                       disk_report_zones_cb, &args);
 }
 
 static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
@@ -713,6 +695,7 @@ static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
        struct gendisk *disk = bio->bi_bdev->bd_disk;
        sector_t sector = bio->bi_iter.bi_sector;
        struct blk_zone_wplug *zwplug;
+       unsigned long flags;
 
        /* Conventional zones cannot be reset nor finished. */
        if (disk_zone_is_conv(disk, sector)) {
@@ -720,6 +703,15 @@ static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
                return true;
        }
 
+       /*
+        * No-wait reset or finish BIOs do not make much sense as the callers
+        * issue these as blocking operations in most cases. To avoid issues
+        * the BIO execution potentially failing with BLK_STS_AGAIN, warn about
+        * REQ_NOWAIT being set and ignore that flag.
+        */
+       if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT))
+               bio->bi_opf &= ~REQ_NOWAIT;
+
        /*
         * If we have a zone write plug, set its write pointer offset to 0
         * (reset case) or to the zone size (finish case). This will abort all
@@ -729,7 +721,9 @@ static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
         */
        zwplug = disk_get_zone_wplug(disk, sector);
        if (zwplug) {
+               spin_lock_irqsave(&zwplug->lock, flags);
                disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset);
+               spin_unlock_irqrestore(&zwplug->lock, flags);
                disk_put_zone_wplug(zwplug);
        }
@@ -740,6 +734,7 @@ static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
 {
        struct gendisk *disk = bio->bi_bdev->bd_disk;
        struct blk_zone_wplug *zwplug;
+       unsigned long flags;
        sector_t sector;
 
        /*
@@ -751,7 +746,9 @@ static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
             sector += disk->queue->limits.chunk_sectors) {
                zwplug = disk_get_zone_wplug(disk, sector);
                if (zwplug) {
+                       spin_lock_irqsave(&zwplug->lock, flags);
                        disk_zone_wplug_set_wp_offset(disk, zwplug, 0);
+                       spin_unlock_irqrestore(&zwplug->lock, flags);
                        disk_put_zone_wplug(zwplug);
                }
        }
@ -759,9 +756,25 @@ static bool blk_zone_wplug_handle_reset_all(struct bio *bio)
return false; return false;
} }
static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug, static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
struct bio *bio, unsigned int nr_segs) struct blk_zone_wplug *zwplug)
{ {
/*
* Take a reference on the zone write plug and schedule the submission
* of the next plugged BIO. blk_zone_wplug_bio_work() will release the
* reference we take here.
*/
WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
refcount_inc(&zwplug->ref);
queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
}
static inline void disk_zone_wplug_add_bio(struct gendisk *disk,
struct blk_zone_wplug *zwplug,
struct bio *bio, unsigned int nr_segs)
{
bool schedule_bio_work = false;
/* /*
* Grab an extra reference on the BIO request queue usage counter. * Grab an extra reference on the BIO request queue usage counter.
* This reference will be reused to submit a request for the BIO for * This reference will be reused to submit a request for the BIO for
@ -777,6 +790,16 @@ static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug,
*/ */
bio_clear_polled(bio); bio_clear_polled(bio);
/*
* REQ_NOWAIT BIOs are always handled using the zone write plug BIO
* work, which can block. So clear the REQ_NOWAIT flag and schedule the
* work if this is the first BIO we are plugging.
*/
if (bio->bi_opf & REQ_NOWAIT) {
schedule_bio_work = !(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED);
bio->bi_opf &= ~REQ_NOWAIT;
}
/* /*
* Reuse the poll cookie field to store the number of segments when * Reuse the poll cookie field to store the number of segments when
* split to the hardware limits. * split to the hardware limits.
@ -790,6 +813,11 @@ static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug,
* at the tail of the list to preserve the sequential write order. * at the tail of the list to preserve the sequential write order.
*/ */
bio_list_add(&zwplug->bio_list, bio); bio_list_add(&zwplug->bio_list, bio);
zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
if (schedule_bio_work)
disk_zone_wplug_schedule_bio_work(disk, zwplug);
} }
/* /*
@ -902,13 +930,23 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
{ {
struct gendisk *disk = bio->bi_bdev->bd_disk; struct gendisk *disk = bio->bi_bdev->bd_disk;
/*
* If we lost track of the zone write pointer due to a write error,
* the user must either execute a report zones, reset the zone or finish
	 * the zone to recover a reliable write pointer position. Fail BIOs if the
* user did not do that as we cannot handle emulated zone append
* otherwise.
*/
if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE)
return false;
/* /*
* Check that the user is not attempting to write to a full zone. * Check that the user is not attempting to write to a full zone.
* We know such BIO will fail, and that would potentially overflow our * We know such BIO will fail, and that would potentially overflow our
* write pointer offset beyond the end of the zone. * write pointer offset beyond the end of the zone.
*/ */
if (disk_zone_wplug_is_full(disk, zwplug)) if (disk_zone_wplug_is_full(disk, zwplug))
goto err; return false;
if (bio_op(bio) == REQ_OP_ZONE_APPEND) { if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
/* /*
@ -927,24 +965,18 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
bio_set_flag(bio, BIO_EMULATES_ZONE_APPEND); bio_set_flag(bio, BIO_EMULATES_ZONE_APPEND);
} else { } else {
/* /*
* Check for non-sequential writes early because we avoid a * Check for non-sequential writes early as we know that BIOs
* whole lot of error handling trouble if we don't send it off * with a start sector not unaligned to the zone write pointer
* to the driver. * will fail.
*/ */
if (bio_offset_from_zone_start(bio) != zwplug->wp_offset) if (bio_offset_from_zone_start(bio) != zwplug->wp_offset)
goto err; return false;
} }
/* Advance the zone write pointer offset. */ /* Advance the zone write pointer offset. */
zwplug->wp_offset += bio_sectors(bio); zwplug->wp_offset += bio_sectors(bio);
return true; return true;
err:
/* We detected an invalid write BIO: schedule error recovery. */
disk_zone_wplug_set_error(disk, zwplug);
kblockd_schedule_work(&disk->zone_wplugs_work);
return false;
} }
static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs) static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
@ -983,7 +1015,10 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
zwplug = disk_get_and_lock_zone_wplug(disk, sector, gfp_mask, &flags); zwplug = disk_get_and_lock_zone_wplug(disk, sector, gfp_mask, &flags);
if (!zwplug) { if (!zwplug) {
bio_io_error(bio); if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
else
bio_io_error(bio);
return true; return true;
} }
@ -991,18 +1026,20 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING); bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING);
/* /*
* If the zone is already plugged or has a pending error, add the BIO * If the zone is already plugged, add the BIO to the plug BIO list.
* to the plug BIO list. Otherwise, plug and let the BIO execute. * Do the same for REQ_NOWAIT BIOs to ensure that we will not see a
* BLK_STS_AGAIN failure if we let the BIO execute.
* Otherwise, plug and let the BIO execute.
*/ */
if (zwplug->flags & BLK_ZONE_WPLUG_BUSY) if ((zwplug->flags & BLK_ZONE_WPLUG_PLUGGED) ||
(bio->bi_opf & REQ_NOWAIT))
goto plug; goto plug;
/* if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
* If an error is detected when preparing the BIO, add it to the BIO spin_unlock_irqrestore(&zwplug->lock, flags);
* list so that error recovery can deal with it. bio_io_error(bio);
*/ return true;
if (!blk_zone_wplug_prepare_bio(zwplug, bio)) }
goto plug;
zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED; zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED;
@ -1011,8 +1048,7 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
return false; return false;
plug: plug:
zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED; disk_zone_wplug_add_bio(disk, zwplug, bio, nr_segs);
blk_zone_wplug_add_bio(zwplug, bio, nr_segs);
spin_unlock_irqrestore(&zwplug->lock, flags); spin_unlock_irqrestore(&zwplug->lock, flags);
@ -1096,19 +1132,6 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
} }
EXPORT_SYMBOL_GPL(blk_zone_plug_bio); EXPORT_SYMBOL_GPL(blk_zone_plug_bio);
static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
/*
* Take a reference on the zone write plug and schedule the submission
* of the next plugged BIO. blk_zone_wplug_bio_work() will release the
* reference we take here.
*/
WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
atomic_inc(&zwplug->ref);
queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
}
static void disk_zone_wplug_unplug_bio(struct gendisk *disk, static void disk_zone_wplug_unplug_bio(struct gendisk *disk,
struct blk_zone_wplug *zwplug) struct blk_zone_wplug *zwplug)
{ {
@ -1116,16 +1139,6 @@ static void disk_zone_wplug_unplug_bio(struct gendisk *disk,
spin_lock_irqsave(&zwplug->lock, flags); spin_lock_irqsave(&zwplug->lock, flags);
/*
* If we had an error, schedule error recovery. The recovery work
* will restart submission of plugged BIOs.
*/
if (zwplug->flags & BLK_ZONE_WPLUG_ERROR) {
spin_unlock_irqrestore(&zwplug->lock, flags);
kblockd_schedule_work(&disk->zone_wplugs_work);
return;
}
/* Schedule submission of the next plugged BIO if we have one. */ /* Schedule submission of the next plugged BIO if we have one. */
if (!bio_list_empty(&zwplug->bio_list)) { if (!bio_list_empty(&zwplug->bio_list)) {
disk_zone_wplug_schedule_bio_work(disk, zwplug); disk_zone_wplug_schedule_bio_work(disk, zwplug);
@ -1168,12 +1181,13 @@ void blk_zone_write_plug_bio_endio(struct bio *bio)
} }
/* /*
* If the BIO failed, mark the plug as having an error to trigger * If the BIO failed, abort all plugged BIOs and mark the plug as
* recovery. * needing a write pointer update.
*/ */
if (bio->bi_status != BLK_STS_OK) { if (bio->bi_status != BLK_STS_OK) {
spin_lock_irqsave(&zwplug->lock, flags); spin_lock_irqsave(&zwplug->lock, flags);
disk_zone_wplug_set_error(disk, zwplug); disk_zone_wplug_abort(zwplug);
zwplug->flags |= BLK_ZONE_WPLUG_NEED_WP_UPDATE;
spin_unlock_irqrestore(&zwplug->lock, flags); spin_unlock_irqrestore(&zwplug->lock, flags);
} }
@ -1229,6 +1243,7 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
*/ */
spin_lock_irqsave(&zwplug->lock, flags); spin_lock_irqsave(&zwplug->lock, flags);
again:
bio = bio_list_pop(&zwplug->bio_list); bio = bio_list_pop(&zwplug->bio_list);
if (!bio) { if (!bio) {
zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED; zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
@ -1237,10 +1252,8 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
} }
if (!blk_zone_wplug_prepare_bio(zwplug, bio)) { if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
/* Error recovery will decide what to do with the BIO. */ blk_zone_wplug_bio_io_error(zwplug, bio);
bio_list_add_head(&zwplug->bio_list, bio); goto again;
spin_unlock_irqrestore(&zwplug->lock, flags);
goto put_zwplug;
} }
spin_unlock_irqrestore(&zwplug->lock, flags); spin_unlock_irqrestore(&zwplug->lock, flags);
@ -1262,120 +1275,6 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
disk_put_zone_wplug(zwplug); disk_put_zone_wplug(zwplug);
} }
static unsigned int blk_zone_wp_offset(struct blk_zone *zone)
{
switch (zone->cond) {
case BLK_ZONE_COND_IMP_OPEN:
case BLK_ZONE_COND_EXP_OPEN:
case BLK_ZONE_COND_CLOSED:
return zone->wp - zone->start;
case BLK_ZONE_COND_FULL:
return zone->len;
case BLK_ZONE_COND_EMPTY:
return 0;
case BLK_ZONE_COND_NOT_WP:
case BLK_ZONE_COND_OFFLINE:
case BLK_ZONE_COND_READONLY:
default:
/*
* Conventional, offline and read-only zones do not have a valid
* write pointer.
*/
return UINT_MAX;
}
}
static int blk_zone_wplug_report_zone_cb(struct blk_zone *zone,
unsigned int idx, void *data)
{
struct blk_zone *zonep = data;
*zonep = *zone;
return 0;
}
static void disk_zone_wplug_handle_error(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
sector_t zone_start_sector =
bdev_zone_sectors(disk->part0) * zwplug->zone_no;
unsigned int noio_flag;
struct blk_zone zone;
unsigned long flags;
int ret;
/* Get the current zone information from the device. */
noio_flag = memalloc_noio_save();
ret = disk->fops->report_zones(disk, zone_start_sector, 1,
blk_zone_wplug_report_zone_cb, &zone);
memalloc_noio_restore(noio_flag);
spin_lock_irqsave(&zwplug->lock, flags);
/*
* A zone reset or finish may have cleared the error already. In such
* case, do nothing as the report zones may have seen the "old" write
* pointer value before the reset/finish operation completed.
*/
if (!(zwplug->flags & BLK_ZONE_WPLUG_ERROR))
goto unlock;
zwplug->flags &= ~BLK_ZONE_WPLUG_ERROR;
if (ret != 1) {
/*
* We failed to get the zone information, meaning that something
* is likely really wrong with the device. Abort all remaining
* plugged BIOs as otherwise we could endup waiting forever on
* plugged BIOs to complete if there is a queue freeze on-going.
*/
disk_zone_wplug_abort(zwplug);
goto unplug;
}
/* Update the zone write pointer offset. */
zwplug->wp_offset = blk_zone_wp_offset(&zone);
disk_zone_wplug_abort_unaligned(disk, zwplug);
/* Restart BIO submission if we still have any BIO left. */
if (!bio_list_empty(&zwplug->bio_list)) {
disk_zone_wplug_schedule_bio_work(disk, zwplug);
goto unlock;
}
unplug:
zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
if (disk_should_remove_zone_wplug(disk, zwplug))
disk_remove_zone_wplug(disk, zwplug);
unlock:
spin_unlock_irqrestore(&zwplug->lock, flags);
}
static void disk_zone_wplugs_work(struct work_struct *work)
{
struct gendisk *disk =
container_of(work, struct gendisk, zone_wplugs_work);
struct blk_zone_wplug *zwplug;
unsigned long flags;
spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
while (!list_empty(&disk->zone_wplugs_err_list)) {
zwplug = list_first_entry(&disk->zone_wplugs_err_list,
struct blk_zone_wplug, link);
list_del_init(&zwplug->link);
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
disk_zone_wplug_handle_error(disk, zwplug);
disk_put_zone_wplug(zwplug);
spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
}
spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
}
static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk) static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
{ {
return 1U << disk->zone_wplugs_hash_bits; return 1U << disk->zone_wplugs_hash_bits;
@ -1384,8 +1283,6 @@ static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk)
void disk_init_zone_resources(struct gendisk *disk) void disk_init_zone_resources(struct gendisk *disk)
{ {
spin_lock_init(&disk->zone_wplugs_lock); spin_lock_init(&disk->zone_wplugs_lock);
INIT_LIST_HEAD(&disk->zone_wplugs_err_list);
INIT_WORK(&disk->zone_wplugs_work, disk_zone_wplugs_work);
} }
/* /*
@ -1450,7 +1347,7 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
while (!hlist_empty(&disk->zone_wplugs_hash[i])) { while (!hlist_empty(&disk->zone_wplugs_hash[i])) {
zwplug = hlist_entry(disk->zone_wplugs_hash[i].first, zwplug = hlist_entry(disk->zone_wplugs_hash[i].first,
struct blk_zone_wplug, node); struct blk_zone_wplug, node);
atomic_inc(&zwplug->ref); refcount_inc(&zwplug->ref);
disk_remove_zone_wplug(disk, zwplug); disk_remove_zone_wplug(disk, zwplug);
disk_put_zone_wplug(zwplug); disk_put_zone_wplug(zwplug);
} }
@ -1484,8 +1381,6 @@ void disk_free_zone_resources(struct gendisk *disk)
if (!disk->zone_wplugs_pool) if (!disk->zone_wplugs_pool)
return; return;
cancel_work_sync(&disk->zone_wplugs_work);
if (disk->zone_wplugs_wq) { if (disk->zone_wplugs_wq) {
destroy_workqueue(disk->zone_wplugs_wq); destroy_workqueue(disk->zone_wplugs_wq);
disk->zone_wplugs_wq = NULL; disk->zone_wplugs_wq = NULL;
@ -1682,6 +1577,8 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
if (!disk->zone_wplugs_hash) if (!disk->zone_wplugs_hash)
return 0; return 0;
disk_zone_wplug_sync_wp_offset(disk, zone);
wp_offset = blk_zone_wp_offset(zone); wp_offset = blk_zone_wp_offset(zone);
if (!wp_offset || wp_offset >= zone->capacity) if (!wp_offset || wp_offset >= zone->capacity)
return 0; return 0;
@ -1818,6 +1715,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
memalloc_noio_restore(noio_flag); memalloc_noio_restore(noio_flag);
return ret; return ret;
} }
ret = disk->fops->report_zones(disk, 0, UINT_MAX, ret = disk->fops->report_zones(disk, 0, UINT_MAX,
blk_revalidate_zone_cb, &args); blk_revalidate_zone_cb, &args);
if (!ret) { if (!ret) {
@ -1854,6 +1752,48 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
} }
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones); EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
/**
* blk_zone_issue_zeroout - zero-fill a block range in a zone
* @bdev: blockdev to write
* @sector: start sector
* @nr_sects: number of sectors to write
* @gfp_mask: memory allocation flags (for bio_alloc)
*
* Description:
* Zero-fill a block range in a zone (@sector must be equal to the zone write
* pointer), handling potential errors due to the (initially unknown) lack of
* hardware offload (See blkdev_issue_zeroout()).
*/
int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask)
{
int ret;
if (WARN_ON_ONCE(!bdev_is_zoned(bdev)))
return -EIO;
ret = blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
BLKDEV_ZERO_NOFALLBACK);
if (ret != -EOPNOTSUPP)
return ret;
/*
* The failed call to blkdev_issue_zeroout() advanced the zone write
* pointer. Undo this using a report zone to update the zone write
* pointer to the correct current value.
*/
ret = disk_zone_sync_wp_offset(bdev->bd_disk, sector);
if (ret != 1)
return ret < 0 ? ret : -EIO;
/*
* Retry without BLKDEV_ZERO_NOFALLBACK to force the fallback to a
* regular write with zero-pages.
*/
return blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, 0);
}
EXPORT_SYMBOL_GPL(blk_zone_issue_zeroout);
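A minimal usage sketch for the helper exported above, modeled on the dm-zoned conversion later in this series; the wrapper name and the idea of padding from the zone write pointer up to a target sector are illustrative, not part of the patch:

/*
 * Illustrative only: zero-fill a zone from its current write pointer
 * (wp_sector) up to a target sector. blk_zone_issue_zeroout() retries
 * with regular zero-page writes if the device turns out not to offer a
 * zeroout offload, resyncing the zone write pointer in between.
 */
static int example_pad_zone(struct block_device *bdev, sector_t wp_sector,
			    sector_t target, gfp_t gfp_mask)
{
	if (target <= wp_sector)
		return 0;

	return blk_zone_issue_zeroout(bdev, wp_sector, target - wp_sector,
				      gfp_mask);
}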
#ifdef CONFIG_BLK_DEBUG_FS #ifdef CONFIG_BLK_DEBUG_FS
int queue_zone_wplugs_show(void *data, struct seq_file *m) int queue_zone_wplugs_show(void *data, struct seq_file *m)
@ -1876,7 +1816,7 @@ int queue_zone_wplugs_show(void *data, struct seq_file *m)
spin_lock_irqsave(&zwplug->lock, flags); spin_lock_irqsave(&zwplug->lock, flags);
zwp_zone_no = zwplug->zone_no; zwp_zone_no = zwplug->zone_no;
zwp_flags = zwplug->flags; zwp_flags = zwplug->flags;
zwp_ref = atomic_read(&zwplug->ref); zwp_ref = refcount_read(&zwplug->ref);
zwp_wp_offset = zwplug->wp_offset; zwp_wp_offset = zwplug->wp_offset;
zwp_bio_list_size = bio_list_size(&zwplug->bio_list); zwp_bio_list_size = bio_list_size(&zwplug->bio_list);
spin_unlock_irqrestore(&zwplug->lock, flags); spin_unlock_irqrestore(&zwplug->lock, flags);


@ -232,8 +232,6 @@ acpi_remove_address_space_handler(acpi_handle device,
/* Now we can delete the handler object */ /* Now we can delete the handler object */
acpi_os_release_mutex(handler_obj->address_space.
context_mutex);
acpi_ut_remove_reference(handler_obj); acpi_ut_remove_reference(handler_obj);
goto unlock_and_exit; goto unlock_and_exit;
} }


@ -454,8 +454,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
if (cmd_rc) if (cmd_rc)
*cmd_rc = -EINVAL; *cmd_rc = -EINVAL;
if (cmd == ND_CMD_CALL) if (cmd == ND_CMD_CALL) {
if (!buf || buf_len < sizeof(*call_pkg))
return -EINVAL;
call_pkg = buf; call_pkg = buf;
}
func = cmd_to_func(nfit_mem, cmd, call_pkg, &family); func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
if (func < 0) if (func < 0)
return func; return func;


@ -250,6 +250,9 @@ static bool acpi_decode_space(struct resource_win *win,
switch (addr->resource_type) { switch (addr->resource_type) {
case ACPI_MEMORY_RANGE: case ACPI_MEMORY_RANGE:
acpi_dev_memresource_flags(res, len, wp); acpi_dev_memresource_flags(res, len, wp);
if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
res->flags |= IORESOURCE_PREFETCH;
break; break;
case ACPI_IO_RANGE: case ACPI_IO_RANGE:
acpi_dev_ioresource_flags(res, len, iodec, acpi_dev_ioresource_flags(res, len, iodec,
@ -265,9 +268,6 @@ static bool acpi_decode_space(struct resource_win *win,
if (addr->producer_consumer == ACPI_PRODUCER) if (addr->producer_consumer == ACPI_PRODUCER)
res->flags |= IORESOURCE_WINDOW; res->flags |= IORESOURCE_WINDOW;
if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
res->flags |= IORESOURCE_PREFETCH;
return !(res->flags & IORESOURCE_DISABLED); return !(res->flags & IORESOURCE_DISABLED);
} }


@ -348,6 +348,7 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
phy_nodes[phy] = phy_data.np; phy_nodes[phy] = phy_data.np;
cphy_base[phy] = of_iomap(phy_nodes[phy], 0); cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
if (cphy_base[phy] == NULL) { if (cphy_base[phy] == NULL) {
of_node_put(phy_data.np);
return 0; return 0;
} }
phy_count += 1; phy_count += 1;


@ -395,6 +395,7 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
{ {
struct btmtk_data *data = hci_get_priv(hdev); struct btmtk_data *data = hci_get_priv(hdev);
int err; int err;
bool complete = false;
if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) { if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) {
kfree_skb(skb); kfree_skb(skb);
@ -416,19 +417,22 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
fallthrough; fallthrough;
case HCI_DEVCOREDUMP_ACTIVE: case HCI_DEVCOREDUMP_ACTIVE:
default: default:
/* Mediatek coredump data would be more than MTK_COREDUMP_NUM */
if (data->cd_info.cnt >= MTK_COREDUMP_NUM &&
skb->len > MTK_COREDUMP_END_LEN)
if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1))
complete = true;
err = hci_devcd_append(hdev, skb); err = hci_devcd_append(hdev, skb);
if (err < 0) if (err < 0)
break; break;
data->cd_info.cnt++; data->cd_info.cnt++;
/* Mediatek coredump data would be more than MTK_COREDUMP_NUM */ if (complete) {
if (data->cd_info.cnt > MTK_COREDUMP_NUM && bt_dev_info(hdev, "Mediatek coredump end");
skb->len > MTK_COREDUMP_END_LEN) hci_devcd_complete(hdev);
if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN], }
MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1)) {
bt_dev_info(hdev, "Mediatek coredump end");
hci_devcd_complete(hdev);
}
break; break;
} }


@ -92,6 +92,7 @@ static const u32 slic_base[] = { 100000000, 3125000 };
static const u32 npu_base[] = { 333000000, 400000000, 500000000 }; static const u32 npu_base[] = { 333000000, 400000000, 500000000 };
/* EN7581 */ /* EN7581 */
static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 }; static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 };
static const u32 bus7581_base[] = { 600000000, 540000000 };
static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 }; static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 };
static const u32 crypto_base[] = { 540000000, 480000000 }; static const u32 crypto_base[] = { 540000000, 480000000 };
@ -227,8 +228,8 @@ static const struct en_clk_desc en7581_base_clks[] = {
.base_reg = REG_BUS_CLK_DIV_SEL, .base_reg = REG_BUS_CLK_DIV_SEL,
.base_bits = 1, .base_bits = 1,
.base_shift = 8, .base_shift = 8,
.base_values = bus_base, .base_values = bus7581_base,
.n_base_values = ARRAY_SIZE(bus_base), .n_base_values = ARRAY_SIZE(bus7581_base),
.div_bits = 3, .div_bits = 3,
.div_shift = 0, .div_shift = 0,


@ -192,7 +192,7 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
down_read(&qm->qps_lock); down_read(&qm->qps_lock);
if (qm->sqc) { if (qm->sqc) {
memcpy(&sqc, qm->sqc + qp_id * sizeof(struct qm_sqc), sizeof(struct qm_sqc)); memcpy(&sqc, qm->sqc + qp_id, sizeof(struct qm_sqc));
sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK); sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK); sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, &sqc, sizeof(struct qm_sqc), "SOFT SQC"); dump_show(qm, &sqc, sizeof(struct qm_sqc), "SOFT SQC");
@ -229,7 +229,7 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
down_read(&qm->qps_lock); down_read(&qm->qps_lock);
if (qm->cqc) { if (qm->cqc) {
memcpy(&cqc, qm->cqc + qp_id * sizeof(struct qm_cqc), sizeof(struct qm_cqc)); memcpy(&cqc, qm->cqc + qp_id, sizeof(struct qm_cqc));
cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK); cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK); cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, &cqc, sizeof(struct qm_cqc), "SOFT CQC"); dump_show(qm, &cqc, sizeof(struct qm_cqc), "SOFT CQC");
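The two-line fix above works because qm->sqc and qm->cqc are typed pointers, so C pointer arithmetic already scales the index by the element size; multiplying by sizeof() again overshoots. A standalone sketch (stand-in struct and sizes assumed, not driver code) that prints the two resulting byte offsets:

#include <stdio.h>
#include <stdint.h>

struct qm_sqc_demo { uint32_t words[8]; };	/* 32-byte stand-in element */

int main(void)
{
	struct qm_sqc_demo array[4];
	struct qm_sqc_demo *base = array;
	size_t qp_id = 2;

	/* typed pointer arithmetic: one step advances by sizeof(*base) bytes */
	printf("&base[qp_id] offset: %zu bytes\n",
	       (size_t)((char *)&base[qp_id] - (char *)base));

	/* the buggy expression adds qp_id * sizeof(*base) ELEMENTS, i.e.: */
	printf("base + qp_id * sizeof(*base) offset: %zu bytes\n",
	       qp_id * sizeof(*base) * sizeof(*base));

	return 0;
}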


@ -32,12 +32,14 @@
#define GNR_PINS_PER_REG 32 #define GNR_PINS_PER_REG 32
#define GNR_NUM_REGS DIV_ROUND_UP(GNR_NUM_PINS, GNR_PINS_PER_REG) #define GNR_NUM_REGS DIV_ROUND_UP(GNR_NUM_PINS, GNR_PINS_PER_REG)
#define GNR_CFG_BAR 0x00 #define GNR_CFG_PADBAR 0x00
#define GNR_CFG_LOCK_OFFSET 0x04 #define GNR_CFG_LOCK_OFFSET 0x04
#define GNR_GPI_STATUS_OFFSET 0x20 #define GNR_GPI_STATUS_OFFSET 0x14
#define GNR_GPI_ENABLE_OFFSET 0x24 #define GNR_GPI_ENABLE_OFFSET 0x24
#define GNR_CFG_DW_RX_MASK GENMASK(25, 22) #define GNR_CFG_DW_HOSTSW_MODE BIT(27)
#define GNR_CFG_DW_RX_MASK GENMASK(23, 22)
#define GNR_CFG_DW_INTSEL_MASK GENMASK(21, 14)
#define GNR_CFG_DW_RX_DISABLE FIELD_PREP(GNR_CFG_DW_RX_MASK, 2) #define GNR_CFG_DW_RX_DISABLE FIELD_PREP(GNR_CFG_DW_RX_MASK, 2)
#define GNR_CFG_DW_RX_EDGE FIELD_PREP(GNR_CFG_DW_RX_MASK, 1) #define GNR_CFG_DW_RX_EDGE FIELD_PREP(GNR_CFG_DW_RX_MASK, 1)
#define GNR_CFG_DW_RX_LEVEL FIELD_PREP(GNR_CFG_DW_RX_MASK, 0) #define GNR_CFG_DW_RX_LEVEL FIELD_PREP(GNR_CFG_DW_RX_MASK, 0)
@ -50,6 +52,7 @@
* struct gnr_gpio - Intel Granite Rapids-D vGPIO driver state * struct gnr_gpio - Intel Granite Rapids-D vGPIO driver state
* @gc: GPIO controller interface * @gc: GPIO controller interface
* @reg_base: base address of the GPIO registers * @reg_base: base address of the GPIO registers
* @pad_base: base address of the vGPIO pad configuration registers
* @ro_bitmap: bitmap of read-only pins * @ro_bitmap: bitmap of read-only pins
* @lock: guard the registers * @lock: guard the registers
* @pad_backup: backup of the register state for suspend * @pad_backup: backup of the register state for suspend
@ -57,6 +60,7 @@
struct gnr_gpio { struct gnr_gpio {
struct gpio_chip gc; struct gpio_chip gc;
void __iomem *reg_base; void __iomem *reg_base;
void __iomem *pad_base;
DECLARE_BITMAP(ro_bitmap, GNR_NUM_PINS); DECLARE_BITMAP(ro_bitmap, GNR_NUM_PINS);
raw_spinlock_t lock; raw_spinlock_t lock;
u32 pad_backup[]; u32 pad_backup[];
@ -65,7 +69,7 @@ struct gnr_gpio {
static void __iomem *gnr_gpio_get_padcfg_addr(const struct gnr_gpio *priv, static void __iomem *gnr_gpio_get_padcfg_addr(const struct gnr_gpio *priv,
unsigned int gpio) unsigned int gpio)
{ {
return priv->reg_base + gpio * sizeof(u32); return priv->pad_base + gpio * sizeof(u32);
} }
static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio, static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio,
@ -88,6 +92,20 @@ static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio,
return 0; return 0;
} }
static int gnr_gpio_request(struct gpio_chip *gc, unsigned int gpio)
{
struct gnr_gpio *priv = gpiochip_get_data(gc);
u32 dw;
dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio));
if (!(dw & GNR_CFG_DW_HOSTSW_MODE)) {
dev_warn(gc->parent, "GPIO %u is not owned by host", gpio);
return -EBUSY;
}
return 0;
}
static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio) static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{ {
const struct gnr_gpio *priv = gpiochip_get_data(gc); const struct gnr_gpio *priv = gpiochip_get_data(gc);
@ -139,6 +157,7 @@ static int gnr_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio, in
static const struct gpio_chip gnr_gpio_chip = { static const struct gpio_chip gnr_gpio_chip = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.request = gnr_gpio_request,
.get = gnr_gpio_get, .get = gnr_gpio_get,
.set = gnr_gpio_set, .set = gnr_gpio_set,
.get_direction = gnr_gpio_get_direction, .get_direction = gnr_gpio_get_direction,
@ -166,7 +185,7 @@ static void gnr_gpio_irq_ack(struct irq_data *d)
guard(raw_spinlock_irqsave)(&priv->lock); guard(raw_spinlock_irqsave)(&priv->lock);
reg = readl(addr); reg = readl(addr);
reg &= ~BIT(bit_idx); reg |= BIT(bit_idx);
writel(reg, addr); writel(reg, addr);
} }
@ -209,10 +228,18 @@ static void gnr_gpio_irq_unmask(struct irq_data *d)
static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type) static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{ {
struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
irq_hw_number_t pin = irqd_to_hwirq(d); struct gnr_gpio *priv = gpiochip_get_data(gc);
u32 mask = GNR_CFG_DW_RX_MASK; irq_hw_number_t hwirq = irqd_to_hwirq(d);
u32 reg;
u32 set; u32 set;
/* Allow interrupts only if Interrupt Select field is non-zero */
reg = readl(gnr_gpio_get_padcfg_addr(priv, hwirq));
if (!(reg & GNR_CFG_DW_INTSEL_MASK)) {
dev_dbg(gc->parent, "GPIO %lu cannot be used as IRQ", hwirq);
return -EPERM;
}
/* Falling edge and level low triggers not supported by the GPIO controller */ /* Falling edge and level low triggers not supported by the GPIO controller */
switch (type) { switch (type) {
case IRQ_TYPE_NONE: case IRQ_TYPE_NONE:
@ -230,10 +257,11 @@ static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL; return -EINVAL;
} }
return gnr_gpio_configure_line(gc, pin, mask, set); return gnr_gpio_configure_line(gc, hwirq, GNR_CFG_DW_RX_MASK, set);
} }
static const struct irq_chip gnr_gpio_irq_chip = { static const struct irq_chip gnr_gpio_irq_chip = {
.name = "gpio-graniterapids",
.irq_ack = gnr_gpio_irq_ack, .irq_ack = gnr_gpio_irq_ack,
.irq_mask = gnr_gpio_irq_mask, .irq_mask = gnr_gpio_irq_mask,
.irq_unmask = gnr_gpio_irq_unmask, .irq_unmask = gnr_gpio_irq_unmask,
@ -291,6 +319,7 @@ static int gnr_gpio_probe(struct platform_device *pdev)
struct gnr_gpio *priv; struct gnr_gpio *priv;
void __iomem *regs; void __iomem *regs;
int irq, ret; int irq, ret;
u32 offset;
priv = devm_kzalloc(dev, struct_size(priv, pad_backup, num_backup_pins), GFP_KERNEL); priv = devm_kzalloc(dev, struct_size(priv, pad_backup, num_backup_pins), GFP_KERNEL);
if (!priv) if (!priv)
@ -302,6 +331,10 @@ static int gnr_gpio_probe(struct platform_device *pdev)
if (IS_ERR(regs)) if (IS_ERR(regs))
return PTR_ERR(regs); return PTR_ERR(regs);
priv->reg_base = regs;
offset = readl(priv->reg_base + GNR_CFG_PADBAR);
priv->pad_base = priv->reg_base + offset;
irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
if (irq < 0) if (irq < 0)
return irq; return irq;
@ -311,8 +344,6 @@ static int gnr_gpio_probe(struct platform_device *pdev)
if (ret) if (ret)
return dev_err_probe(dev, ret, "failed to request interrupt\n"); return dev_err_probe(dev, ret, "failed to request interrupt\n");
priv->reg_base = regs + readl(regs + GNR_CFG_BAR);
gnr_gpio_init_pin_ro_bits(dev, priv->reg_base + GNR_CFG_LOCK_OFFSET, gnr_gpio_init_pin_ro_bits(dev, priv->reg_base + GNR_CFG_LOCK_OFFSET,
priv->ro_bitmap); priv->ro_bitmap);
@ -324,7 +355,6 @@ static int gnr_gpio_probe(struct platform_device *pdev)
girq = &priv->gc.irq; girq = &priv->gc.irq;
gpio_irq_chip_set_chip(girq, &gnr_gpio_irq_chip); gpio_irq_chip_set_chip(girq, &gnr_gpio_irq_chip);
girq->chip->name = dev_name(dev);
girq->parent_handler = NULL; girq->parent_handler = NULL;
girq->num_parents = 0; girq->num_parents = 0;
girq->parents = NULL; girq->parents = NULL;

View File

@ -82,9 +82,9 @@ static int ljca_gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id,
int ret; int ret;
mutex_lock(&ljca_gpio->trans_lock); mutex_lock(&ljca_gpio->trans_lock);
packet->num = 1;
packet->item[0].index = gpio_id; packet->item[0].index = gpio_id;
packet->item[0].value = config | ljca_gpio->connect_mode[gpio_id]; packet->item[0].value = config | ljca_gpio->connect_mode[gpio_id];
packet->num = 1;
ret = ljca_transfer(ljca_gpio->ljca, LJCA_GPIO_CONFIG, (u8 *)packet, ret = ljca_transfer(ljca_gpio->ljca, LJCA_GPIO_CONFIG, (u8 *)packet,
struct_size(packet, item, packet->num), NULL, 0); struct_size(packet, item, packet->num), NULL, 0);


@ -1801,13 +1801,18 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket) if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
return -EINVAL; return -EINVAL;
/* Make sure VRAM is allocated contigiously */
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains); if ((*bo)->tbo.resource->mem_type == TTM_PL_VRAM &&
for (i = 0; i < (*bo)->placement.num_placement; i++) !((*bo)->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
(*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx); amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
if (r) for (i = 0; i < (*bo)->placement.num_placement; i++)
return r; (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
if (r)
return r;
}
return amdgpu_ttm_alloc_gart(&(*bo)->tbo); return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
} }


@ -551,6 +551,8 @@ static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
for (i = 0; i < abo->placement.num_placement; ++i) { for (i = 0; i < abo->placement.num_placement; ++i) {
abo->placements[i].fpfn = 0 >> PAGE_SHIFT; abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
if (abo->placements[i].mem_type == TTM_PL_VRAM)
abo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
} }
} }


@ -674,12 +674,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg; ring->funcs->emit_wreg;
if (adev->gfx.enable_cleaner_shader && if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
ring->funcs->emit_cleaner_shader && !(job->enforce_isolation && !job->vmid))
job->enforce_isolation)
ring->funcs->emit_cleaner_shader(ring);
if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
return 0; return 0;
amdgpu_ring_ib_begin(ring); amdgpu_ring_ib_begin(ring);
@ -690,6 +686,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
if (need_pipe_sync) if (need_pipe_sync)
amdgpu_ring_emit_pipeline_sync(ring); amdgpu_ring_emit_pipeline_sync(ring);
if (adev->gfx.enable_cleaner_shader &&
ring->funcs->emit_cleaner_shader &&
job->enforce_isolation)
ring->funcs->emit_cleaner_shader(ring);
if (vm_flush_needed) { if (vm_flush_needed) {
trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr); trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr); amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);


@ -1289,7 +1289,7 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job, struct amdgpu_job *job,
struct amdgpu_ib *ib) struct amdgpu_ib *ib)
{ {
struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); struct amdgpu_ring *ring = amdgpu_job_ring(job);
unsigned i; unsigned i;
/* No patching necessary for the first instance */ /* No patching necessary for the first instance */


@ -1422,6 +1422,7 @@ int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev, static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
bool cache_line_size_missing,
struct kfd_gpu_cache_info *pcache_info) struct kfd_gpu_cache_info *pcache_info)
{ {
struct amdgpu_device *adev = kdev->adev; struct amdgpu_device *adev = kdev->adev;
@ -1436,6 +1437,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE); CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_tcp_per_wpg / 2; pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_tcp_per_wpg / 2;
pcache_info[i].cache_line_size = adev->gfx.config.gc_tcp_cache_line_size; pcache_info[i].cache_line_size = adev->gfx.config.gc_tcp_cache_line_size;
if (cache_line_size_missing && !pcache_info[i].cache_line_size)
pcache_info[i].cache_line_size = 128;
i++; i++;
} }
/* Scalar L1 Instruction Cache per SQC */ /* Scalar L1 Instruction Cache per SQC */
@ -1448,6 +1451,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE); CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2; pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
pcache_info[i].cache_line_size = adev->gfx.config.gc_instruction_cache_line_size; pcache_info[i].cache_line_size = adev->gfx.config.gc_instruction_cache_line_size;
if (cache_line_size_missing && !pcache_info[i].cache_line_size)
pcache_info[i].cache_line_size = 128;
i++; i++;
} }
/* Scalar L1 Data Cache per SQC */ /* Scalar L1 Data Cache per SQC */
@ -1459,6 +1464,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE); CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2; pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
pcache_info[i].cache_line_size = adev->gfx.config.gc_scalar_data_cache_line_size; pcache_info[i].cache_line_size = adev->gfx.config.gc_scalar_data_cache_line_size;
if (cache_line_size_missing && !pcache_info[i].cache_line_size)
pcache_info[i].cache_line_size = 64;
i++; i++;
} }
/* GL1 Data Cache per SA */ /* GL1 Data Cache per SA */
@ -1471,7 +1478,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE); CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh; pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
pcache_info[i].cache_line_size = 0; if (cache_line_size_missing)
pcache_info[i].cache_line_size = 128;
i++; i++;
} }
/* L2 Data Cache per GPU (Total Tex Cache) */ /* L2 Data Cache per GPU (Total Tex Cache) */
@ -1483,6 +1491,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE); CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh; pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
pcache_info[i].cache_line_size = adev->gfx.config.gc_tcc_cache_line_size; pcache_info[i].cache_line_size = adev->gfx.config.gc_tcc_cache_line_size;
if (cache_line_size_missing && !pcache_info[i].cache_line_size)
pcache_info[i].cache_line_size = 128;
i++; i++;
} }
/* L3 Data Cache per GPU */ /* L3 Data Cache per GPU */
@ -1493,7 +1503,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_DATA_CACHE | CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE); CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh; pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
pcache_info[i].cache_line_size = 0; pcache_info[i].cache_line_size = 64;
i++; i++;
} }
return i; return i;
@ -1568,6 +1578,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info) int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info)
{ {
int num_of_cache_types = 0; int num_of_cache_types = 0;
bool cache_line_size_missing = false;
switch (kdev->adev->asic_type) { switch (kdev->adev->asic_type) {
case CHIP_KAVERI: case CHIP_KAVERI:
@ -1691,10 +1702,17 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
case IP_VERSION(11, 5, 0): case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1): case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2): case IP_VERSION(11, 5, 2):
/* Cacheline size not available in IP discovery for gc11.
		 * use kfd_fill_gpu_cache_info_from_gfx_config() to hard-code it
*/
cache_line_size_missing = true;
fallthrough;
case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1): case IP_VERSION(12, 0, 1):
num_of_cache_types = num_of_cache_types =
kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info); kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd,
cache_line_size_missing,
*pcache_info);
break; break;
default: default:
*pcache_info = dummy_cache_info; *pcache_info = dummy_cache_info;


@ -205,6 +205,21 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
if (!down_read_trylock(&adev->reset_domain->sem)) if (!down_read_trylock(&adev->reset_domain->sem))
return -EIO; return -EIO;
if (!pdd->proc_ctx_cpu_ptr) {
r = amdgpu_amdkfd_alloc_gtt_mem(adev,
AMDGPU_MES_PROC_CTX_SIZE,
&pdd->proc_ctx_bo,
&pdd->proc_ctx_gpu_addr,
&pdd->proc_ctx_cpu_ptr,
false);
if (r) {
dev_err(adev->dev,
"failed to allocate process context bo\n");
return r;
}
memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
}
memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input)); memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
queue_input.process_id = qpd->pqm->process->pasid; queue_input.process_id = qpd->pqm->process->pasid;
queue_input.page_table_base_addr = qpd->page_table_base; queue_input.page_table_base_addr = qpd->page_table_base;


@ -1076,7 +1076,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
kfd_free_process_doorbells(pdd->dev->kfd, pdd); kfd_free_process_doorbells(pdd->dev->kfd, pdd);
if (pdd->dev->kfd->shared_resources.enable_mes) if (pdd->dev->kfd->shared_resources.enable_mes &&
pdd->proc_ctx_cpu_ptr)
amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev, amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
&pdd->proc_ctx_bo); &pdd->proc_ctx_bo);
/* /*
@ -1610,7 +1611,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
struct kfd_process *p) struct kfd_process *p)
{ {
struct kfd_process_device *pdd = NULL; struct kfd_process_device *pdd = NULL;
int retval = 0;
if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE)) if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
return NULL; return NULL;
@ -1634,21 +1634,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
pdd->user_gpu_id = dev->id; pdd->user_gpu_id = dev->id;
atomic64_set(&pdd->evict_duration_counter, 0); atomic64_set(&pdd->evict_duration_counter, 0);
if (dev->kfd->shared_resources.enable_mes) {
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
AMDGPU_MES_PROC_CTX_SIZE,
&pdd->proc_ctx_bo,
&pdd->proc_ctx_gpu_addr,
&pdd->proc_ctx_cpu_ptr,
false);
if (retval) {
dev_err(dev->adev->dev,
"failed to allocate process context bo\n");
goto err_free_pdd;
}
memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
}
p->pdds[p->n_pdds++] = pdd; p->pdds[p->n_pdds++] = pdd;
if (kfd_dbg_is_per_vmid_supported(pdd->dev)) if (kfd_dbg_is_per_vmid_supported(pdd->dev))
pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap( pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
@ -1660,10 +1645,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
idr_init(&pdd->alloc_idr); idr_init(&pdd->alloc_idr);
return pdd; return pdd;
err_free_pdd:
kfree(pdd);
return NULL;
} }
/** /**


@ -212,13 +212,17 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
void pqm_uninit(struct process_queue_manager *pqm) void pqm_uninit(struct process_queue_manager *pqm)
{ {
struct process_queue_node *pqn, *next; struct process_queue_node *pqn, *next;
struct kfd_process_device *pdd;
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) { list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
if (pqn->q) { if (pqn->q) {
pdd = kfd_get_process_device_data(pqn->q->device, pqm->process); struct kfd_process_device *pdd = kfd_get_process_device_data(pqn->q->device,
kfd_queue_unref_bo_vas(pdd, &pqn->q->properties); pqm->process);
kfd_queue_release_buffers(pdd, &pqn->q->properties); if (pdd) {
kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
kfd_queue_release_buffers(pdd, &pqn->q->properties);
} else {
WARN_ON(!pdd);
}
pqm_clean_queue_resource(pqm, pqn); pqm_clean_queue_resource(pqm, pqn);
} }


@ -2717,4 +2717,5 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
smu->workload_map = smu_v13_0_7_workload_map; smu->workload_map = smu_v13_0_7_workload_map;
smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION; smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION;
smu_v13_0_set_smu_mailbox_registers(smu); smu_v13_0_set_smu_mailbox_registers(smu);
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
} }


@ -929,7 +929,6 @@ fn draw_all(&mut self, data: impl Iterator<Item = u8>) {
/// * `tmp` must be valid for reading and writing for `tmp_size` bytes. /// * `tmp` must be valid for reading and writing for `tmp_size` bytes.
/// ///
/// They must remain valid for the duration of the function call. /// They must remain valid for the duration of the function call.
#[no_mangle] #[no_mangle]
pub unsafe extern "C" fn drm_panic_qr_generate( pub unsafe extern "C" fn drm_panic_qr_generate(
url: *const i8, url: *const i8,


@ -1333,19 +1333,29 @@ static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
lut = blob->data; lut = blob->data;
/* /*
* DSB fails to correctly load the legacy LUT * DSB fails to correctly load the legacy LUT unless
* unless we either write each entry twice, * we either write each entry twice when using posted
* or use non-posted writes * writes, or we use non-posted writes.
*
* If palette anti-collision is active during LUT
* register writes:
* - posted writes simply get dropped and thus the LUT
* contents may not be correctly updated
* - non-posted writes are blocked and thus the LUT
* contents are always correct, but simultaneous CPU
* MMIO access will start to fail
*
* Choose the lesser of two evils and use posted writes.
* Using posted writes is also faster, even when having
* to write each register twice.
*/ */
if (crtc_state->dsb_color_vblank) for (i = 0; i < 256; i++) {
intel_dsb_nonpost_start(crtc_state->dsb_color_vblank);
for (i = 0; i < 256; i++)
ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i), ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i])); i9xx_lut_8(&lut[i]));
if (crtc_state->dsb_color_vblank)
if (crtc_state->dsb_color_vblank) ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
intel_dsb_nonpost_end(crtc_state->dsb_color_vblank); i9xx_lut_8(&lut[i]));
}
} }
static void ilk_load_lut_10(const struct intel_crtc_state *crtc_state, static void ilk_load_lut_10(const struct intel_crtc_state *crtc_state,


@ -1652,9 +1652,21 @@ capture_engine(struct intel_engine_cs *engine,
return NULL; return NULL;
intel_engine_get_hung_entity(engine, &ce, &rq); intel_engine_get_hung_entity(engine, &ce, &rq);
if (rq && !i915_request_started(rq)) if (rq && !i915_request_started(rq)) {
drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n", /*
engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id); * We want to know also what is the guc_id of the context,
* but if we don't have the context reference, then skip
* printing it.
*/
if (ce)
drm_info(&engine->gt->i915->drm,
"Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
else
drm_info(&engine->gt->i915->drm,
"Got hung context on %s with active request %lld:%lld not yet started\n",
engine->name, rq->fence.context, rq->fence.seqno);
}
if (rq) { if (rq) {
capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL); capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);


@ -506,6 +506,6 @@ int __init i915_scheduler_module_init(void)
return 0; return 0;
err_priorities: err_priorities:
kmem_cache_destroy(slab_priorities); kmem_cache_destroy(slab_dependencies);
return -ENOMEM; return -ENOMEM;
} }
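The rest of the function is not part of this hunk; a hedged sketch of the usual two-cache init/unwind shape the fix restores, with hypothetical struct and cache names, only to show why err_priorities must destroy slab_dependencies (the cache that was actually created) rather than the still-NULL slab_priorities:

struct example_dep { int a; };
struct example_prio { int prio; };

static struct kmem_cache *slab_dependencies;
static struct kmem_cache *slab_priorities;

static int __init example_two_slab_init(void)
{
	slab_dependencies = KMEM_CACHE(example_dep, SLAB_HWCACHE_ALIGN);
	if (!slab_dependencies)
		return -ENOMEM;

	slab_priorities = KMEM_CACHE(example_prio, 0);
	if (!slab_priorities)
		goto err_priorities;

	return 0;

err_priorities:
	/* slab_priorities is NULL here; free the cache that does exist */
	kmem_cache_destroy(slab_dependencies);
	return -ENOMEM;
}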


@ -224,8 +224,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_PINNED); XE_BO_FLAG_PINNED);
if (IS_ERR(tiny)) { if (IS_ERR(tiny)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n", KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
PTR_ERR(pt)); PTR_ERR(tiny));
goto free_pt; goto free_pt;
} }


@ -65,6 +65,14 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe
__invalidation_fence_signal(xe, fence); __invalidation_fence_signal(xe, fence);
} }
void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
{
if (WARN_ON_ONCE(!fence->gt))
return;
__invalidation_fence_signal(gt_to_xe(fence->gt), fence);
}
static void xe_gt_tlb_fence_timeout(struct work_struct *work) static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{ {
struct xe_gt *gt = container_of(work, struct xe_gt, struct xe_gt *gt = container_of(work, struct xe_gt,


@ -28,6 +28,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt, void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence, struct xe_gt_tlb_invalidation_fence *fence,
bool stack); bool stack);
void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence);
static inline void static inline void
xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence) xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)


@ -1333,8 +1333,7 @@ static void invalidation_fence_cb(struct dma_fence *fence,
queue_work(system_wq, &ifence->work); queue_work(system_wq, &ifence->work);
} else { } else {
ifence->base.base.error = ifence->fence->error; ifence->base.base.error = ifence->fence->error;
dma_fence_signal(&ifence->base.base); xe_gt_tlb_invalidation_fence_signal(&ifence->base);
dma_fence_put(&ifence->base.base);
} }
dma_fence_put(ifence->fence); dma_fence_put(ifence->fence);
} }

View File

@ -26,46 +26,27 @@
#include "xe_reg_whitelist.h" #include "xe_reg_whitelist.h"
#include "xe_rtp_types.h" #include "xe_rtp_types.h"
#define XE_REG_SR_GROW_STEP_DEFAULT 16
static void reg_sr_fini(struct drm_device *drm, void *arg) static void reg_sr_fini(struct drm_device *drm, void *arg)
{ {
struct xe_reg_sr *sr = arg; struct xe_reg_sr *sr = arg;
struct xe_reg_sr_entry *entry;
unsigned long reg;
xa_for_each(&sr->xa, reg, entry)
kfree(entry);
xa_destroy(&sr->xa); xa_destroy(&sr->xa);
kfree(sr->pool.arr);
memset(&sr->pool, 0, sizeof(sr->pool));
} }
int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe) int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe)
{ {
xa_init(&sr->xa); xa_init(&sr->xa);
memset(&sr->pool, 0, sizeof(sr->pool));
sr->pool.grow_step = XE_REG_SR_GROW_STEP_DEFAULT;
sr->name = name; sr->name = name;
return drmm_add_action_or_reset(&xe->drm, reg_sr_fini, sr); return drmm_add_action_or_reset(&xe->drm, reg_sr_fini, sr);
} }
EXPORT_SYMBOL_IF_KUNIT(xe_reg_sr_init); EXPORT_SYMBOL_IF_KUNIT(xe_reg_sr_init);
static struct xe_reg_sr_entry *alloc_entry(struct xe_reg_sr *sr)
{
if (sr->pool.used == sr->pool.allocated) {
struct xe_reg_sr_entry *arr;
arr = krealloc_array(sr->pool.arr,
ALIGN(sr->pool.allocated + 1, sr->pool.grow_step),
sizeof(*arr), GFP_KERNEL);
if (!arr)
return NULL;
sr->pool.arr = arr;
sr->pool.allocated += sr->pool.grow_step;
}
return &sr->pool.arr[sr->pool.used++];
}
static bool compatible_entries(const struct xe_reg_sr_entry *e1, static bool compatible_entries(const struct xe_reg_sr_entry *e1,
const struct xe_reg_sr_entry *e2) const struct xe_reg_sr_entry *e2)
{ {
@ -111,7 +92,7 @@ int xe_reg_sr_add(struct xe_reg_sr *sr,
return 0; return 0;
} }
pentry = alloc_entry(sr); pentry = kmalloc(sizeof(*pentry), GFP_KERNEL);
if (!pentry) { if (!pentry) {
ret = -ENOMEM; ret = -ENOMEM;
goto fail; goto fail;


@ -20,12 +20,6 @@ struct xe_reg_sr_entry {
}; };
struct xe_reg_sr { struct xe_reg_sr {
struct {
struct xe_reg_sr_entry *arr;
unsigned int used;
unsigned int allocated;
unsigned int grow_step;
} pool;
struct xarray xa; struct xarray xa;
const char *name; const char *name;


@ -339,7 +339,7 @@ tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
* one CPU at a time can enter the process, while the others * one CPU at a time can enter the process, while the others
* will be spinning at the same lock. * will be spinning at the same lock.
*/ */
lidx = smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf; lidx = raw_smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
vcmdq = vintf->lvcmdqs[lidx]; vcmdq = vintf->lvcmdqs[lidx];
if (!vcmdq || !READ_ONCE(vcmdq->enabled)) if (!vcmdq || !READ_ONCE(vcmdq->enabled))
return NULL; return NULL;
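A hedged note on the one-token change above: the CPU number is only a load-balancing hint here, so it does not need to stay stable across preemption, and raw_smp_processor_id() avoids the DEBUG_PREEMPT warning that smp_processor_id() emits when called from preemptible context. Illustrative sketch (helper name assumed):

/*
 * Spread callers across per-CPU logical queues. A stale CPU id merely
 * selects a different, still-valid queue, so the raw_ variant is
 * sufficient even with preemption enabled.
 */
static unsigned int example_pick_lvcmdq(unsigned int num_lvcmdqs)
{
	return raw_smp_processor_id() % num_lvcmdqs;
}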


@ -105,12 +105,35 @@ static void cache_tag_unassign(struct dmar_domain *domain, u16 did,
spin_unlock_irqrestore(&domain->cache_lock, flags); spin_unlock_irqrestore(&domain->cache_lock, flags);
} }
/* domain->qi_batch will be freed in iommu_free_domain() path. */
static int domain_qi_batch_alloc(struct dmar_domain *domain)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&domain->cache_lock, flags);
if (domain->qi_batch)
goto out_unlock;
domain->qi_batch = kzalloc(sizeof(*domain->qi_batch), GFP_ATOMIC);
if (!domain->qi_batch)
ret = -ENOMEM;
out_unlock:
spin_unlock_irqrestore(&domain->cache_lock, flags);
return ret;
}
static int __cache_tag_assign_domain(struct dmar_domain *domain, u16 did, static int __cache_tag_assign_domain(struct dmar_domain *domain, u16 did,
struct device *dev, ioasid_t pasid) struct device *dev, ioasid_t pasid)
{ {
struct device_domain_info *info = dev_iommu_priv_get(dev); struct device_domain_info *info = dev_iommu_priv_get(dev);
int ret; int ret;
ret = domain_qi_batch_alloc(domain);
if (ret)
return ret;
ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_IOTLB); ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_IOTLB);
if (ret || !info->ats_enabled) if (ret || !info->ats_enabled)
return ret; return ret;
@ -139,6 +162,10 @@ static int __cache_tag_assign_parent_domain(struct dmar_domain *domain, u16 did,
struct device_domain_info *info = dev_iommu_priv_get(dev); struct device_domain_info *info = dev_iommu_priv_get(dev);
int ret; int ret;
ret = domain_qi_batch_alloc(domain);
if (ret)
return ret;
ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB); ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB);
if (ret || !info->ats_enabled) if (ret || !info->ats_enabled)
return ret; return ret;
@ -190,13 +217,6 @@ int cache_tag_assign_domain(struct dmar_domain *domain,
u16 did = domain_get_id_for_dev(domain, dev); u16 did = domain_get_id_for_dev(domain, dev);
int ret; int ret;
/* domain->qi_bach will be freed in iommu_free_domain() path. */
if (!domain->qi_batch) {
domain->qi_batch = kzalloc(sizeof(*domain->qi_batch), GFP_KERNEL);
if (!domain->qi_batch)
return -ENOMEM;
}
ret = __cache_tag_assign_domain(domain, did, dev, pasid); ret = __cache_tag_assign_domain(domain, did, dev, pasid);
if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED) if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED)
return ret; return ret;


@ -3372,6 +3372,9 @@ void device_block_translation(struct device *dev)
struct intel_iommu *iommu = info->iommu; struct intel_iommu *iommu = info->iommu;
unsigned long flags; unsigned long flags;
if (info->domain)
cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
iommu_disable_pci_caps(info); iommu_disable_pci_caps(info);
if (!dev_is_real_dma_subdevice(dev)) { if (!dev_is_real_dma_subdevice(dev)) {
if (sm_supported(iommu)) if (sm_supported(iommu))
@ -3388,7 +3391,6 @@ void device_block_translation(struct device *dev)
list_del(&info->link); list_del(&info->link);
spin_unlock_irqrestore(&info->domain->lock, flags); spin_unlock_irqrestore(&info->domain->lock, flags);
cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
domain_detach_iommu(info->domain, iommu); domain_detach_iommu(info->domain, iommu);
info->domain = NULL; info->domain = NULL;
} }


@ -76,9 +76,9 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
* pointer and the requested position. * pointer and the requested position.
*/ */
nr_blocks = block - wp_block; nr_blocks = block - wp_block;
ret = blkdev_issue_zeroout(dev->bdev, ret = blk_zone_issue_zeroout(dev->bdev,
dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block), dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
dmz_blk2sect(nr_blocks), GFP_NOIO, 0); dmz_blk2sect(nr_blocks), GFP_NOIO);
if (ret) { if (ret) {
dmz_dev_err(dev, dmz_dev_err(dev,
"Align zone %u wp %llu to %llu (wp+%u) blocks failed %d", "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",


@ -1520,9 +1520,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
struct slave *slave; struct slave *slave;
mask = features; mask = features;
features = netdev_base_features(features);
features &= ~NETIF_F_ONE_FOR_ALL;
features |= NETIF_F_ALL_FOR_ALL;
bond_for_each_slave(bond, slave, iter) { bond_for_each_slave(bond, slave, iter) {
features = netdev_increment_features(features, features = netdev_increment_features(features,
@ -1536,6 +1534,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
#define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ #define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
NETIF_F_GSO_ENCAP_ALL | \
NETIF_F_HIGHDMA | NETIF_F_LRO) NETIF_F_HIGHDMA | NETIF_F_LRO)
#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ #define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
@ -1564,8 +1563,9 @@ static void bond_compute_features(struct bonding *bond)
if (!bond_has_slaves(bond)) if (!bond_has_slaves(bond))
goto done; goto done;
vlan_features &= NETIF_F_ALL_FOR_ALL;
mpls_features &= NETIF_F_ALL_FOR_ALL; vlan_features = netdev_base_features(vlan_features);
mpls_features = netdev_base_features(mpls_features);
bond_for_each_slave(bond, slave, iter) { bond_for_each_slave(bond, slave, iter) {
vlan_features = netdev_increment_features(vlan_features, vlan_features = netdev_increment_features(vlan_features,
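Both feature-computation paths above now call netdev_base_features() instead of open-coding the mask adjustment; judging from the removed lines in bond_fix_features(), the helper presumably clears NETIF_F_ONE_FOR_ALL and sets NETIF_F_ALL_FOR_ALL before the per-slave folding. A rough userspace sketch of that presumed behaviour, with stand-in flag bits rather than the real NETIF_F_* masks:

#include <stdio.h>
#include <stdint.h>

/* Stand-in feature bits; the real NETIF_F_* masks live in the kernel. */
#define F_ONE_FOR_ALL  (UINT64_C(1) << 0)
#define F_ALL_FOR_ALL  (UINT64_C(1) << 1)
#define F_CSUM         (UINT64_C(1) << 2)

/* Presumed shape of the helper: establish a sane baseline before
 * folding in each slave's feature set. */
static uint64_t base_features(uint64_t features)
{
    features &= ~F_ONE_FOR_ALL;
    features |= F_ALL_FOR_ALL;
    return features;
}

int main(void)
{
    uint64_t f = F_ONE_FOR_ALL | F_CSUM;
    printf("0x%llx -> 0x%llx\n",
           (unsigned long long)f,
           (unsigned long long)base_features(f));
    return 0;
}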
@ -1098,10 +1098,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x1030, 0x1030), regmap_reg_range(0x1030, 0x1030),
regmap_reg_range(0x1100, 0x1115), regmap_reg_range(0x1100, 0x1115),
regmap_reg_range(0x111a, 0x111f), regmap_reg_range(0x111a, 0x111f),
regmap_reg_range(0x1122, 0x1127), regmap_reg_range(0x1120, 0x112b),
regmap_reg_range(0x112a, 0x112b), regmap_reg_range(0x1134, 0x113b),
regmap_reg_range(0x1136, 0x1139), regmap_reg_range(0x113c, 0x113f),
regmap_reg_range(0x113e, 0x113f),
regmap_reg_range(0x1400, 0x1401), regmap_reg_range(0x1400, 0x1401),
regmap_reg_range(0x1403, 0x1403), regmap_reg_range(0x1403, 0x1403),
regmap_reg_range(0x1410, 0x1417), regmap_reg_range(0x1410, 0x1417),
@ -1128,10 +1127,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x2030, 0x2030), regmap_reg_range(0x2030, 0x2030),
regmap_reg_range(0x2100, 0x2115), regmap_reg_range(0x2100, 0x2115),
regmap_reg_range(0x211a, 0x211f), regmap_reg_range(0x211a, 0x211f),
regmap_reg_range(0x2122, 0x2127), regmap_reg_range(0x2120, 0x212b),
regmap_reg_range(0x212a, 0x212b), regmap_reg_range(0x2134, 0x213b),
regmap_reg_range(0x2136, 0x2139), regmap_reg_range(0x213c, 0x213f),
regmap_reg_range(0x213e, 0x213f),
regmap_reg_range(0x2400, 0x2401), regmap_reg_range(0x2400, 0x2401),
regmap_reg_range(0x2403, 0x2403), regmap_reg_range(0x2403, 0x2403),
regmap_reg_range(0x2410, 0x2417), regmap_reg_range(0x2410, 0x2417),
@ -1158,10 +1156,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x3030, 0x3030), regmap_reg_range(0x3030, 0x3030),
regmap_reg_range(0x3100, 0x3115), regmap_reg_range(0x3100, 0x3115),
regmap_reg_range(0x311a, 0x311f), regmap_reg_range(0x311a, 0x311f),
regmap_reg_range(0x3122, 0x3127), regmap_reg_range(0x3120, 0x312b),
regmap_reg_range(0x312a, 0x312b), regmap_reg_range(0x3134, 0x313b),
regmap_reg_range(0x3136, 0x3139), regmap_reg_range(0x313c, 0x313f),
regmap_reg_range(0x313e, 0x313f),
regmap_reg_range(0x3400, 0x3401), regmap_reg_range(0x3400, 0x3401),
regmap_reg_range(0x3403, 0x3403), regmap_reg_range(0x3403, 0x3403),
regmap_reg_range(0x3410, 0x3417), regmap_reg_range(0x3410, 0x3417),
@ -1188,10 +1185,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x4030, 0x4030), regmap_reg_range(0x4030, 0x4030),
regmap_reg_range(0x4100, 0x4115), regmap_reg_range(0x4100, 0x4115),
regmap_reg_range(0x411a, 0x411f), regmap_reg_range(0x411a, 0x411f),
regmap_reg_range(0x4122, 0x4127), regmap_reg_range(0x4120, 0x412b),
regmap_reg_range(0x412a, 0x412b), regmap_reg_range(0x4134, 0x413b),
regmap_reg_range(0x4136, 0x4139), regmap_reg_range(0x413c, 0x413f),
regmap_reg_range(0x413e, 0x413f),
regmap_reg_range(0x4400, 0x4401), regmap_reg_range(0x4400, 0x4401),
regmap_reg_range(0x4403, 0x4403), regmap_reg_range(0x4403, 0x4403),
regmap_reg_range(0x4410, 0x4417), regmap_reg_range(0x4410, 0x4417),
@ -1218,10 +1214,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x5030, 0x5030), regmap_reg_range(0x5030, 0x5030),
regmap_reg_range(0x5100, 0x5115), regmap_reg_range(0x5100, 0x5115),
regmap_reg_range(0x511a, 0x511f), regmap_reg_range(0x511a, 0x511f),
regmap_reg_range(0x5122, 0x5127), regmap_reg_range(0x5120, 0x512b),
regmap_reg_range(0x512a, 0x512b), regmap_reg_range(0x5134, 0x513b),
regmap_reg_range(0x5136, 0x5139), regmap_reg_range(0x513c, 0x513f),
regmap_reg_range(0x513e, 0x513f),
regmap_reg_range(0x5400, 0x5401), regmap_reg_range(0x5400, 0x5401),
regmap_reg_range(0x5403, 0x5403), regmap_reg_range(0x5403, 0x5403),
regmap_reg_range(0x5410, 0x5417), regmap_reg_range(0x5410, 0x5417),
@ -1248,10 +1243,9 @@ static const struct regmap_range ksz9896_valid_regs[] = {
regmap_reg_range(0x6030, 0x6030), regmap_reg_range(0x6030, 0x6030),
regmap_reg_range(0x6100, 0x6115), regmap_reg_range(0x6100, 0x6115),
regmap_reg_range(0x611a, 0x611f), regmap_reg_range(0x611a, 0x611f),
regmap_reg_range(0x6122, 0x6127), regmap_reg_range(0x6120, 0x612b),
regmap_reg_range(0x612a, 0x612b), regmap_reg_range(0x6134, 0x613b),
regmap_reg_range(0x6136, 0x6139), regmap_reg_range(0x613c, 0x613f),
regmap_reg_range(0x613e, 0x613f),
regmap_reg_range(0x6300, 0x6301), regmap_reg_range(0x6300, 0x6301),
regmap_reg_range(0x6400, 0x6401), regmap_reg_range(0x6400, 0x6401),
regmap_reg_range(0x6403, 0x6403), regmap_reg_range(0x6403, 0x6403),
@ -24,7 +24,7 @@
#define VSC9959_NUM_PORTS 6 #define VSC9959_NUM_PORTS 6
#define VSC9959_TAS_GCL_ENTRY_MAX 63 #define VSC9959_TAS_GCL_ENTRY_MAX 63
#define VSC9959_TAS_MIN_GATE_LEN_NS 33 #define VSC9959_TAS_MIN_GATE_LEN_NS 35
#define VSC9959_VCAP_POLICER_BASE 63 #define VSC9959_VCAP_POLICER_BASE 63
#define VSC9959_VCAP_POLICER_MAX 383 #define VSC9959_VCAP_POLICER_MAX 383
#define VSC9959_SWITCH_PCI_BAR 4 #define VSC9959_SWITCH_PCI_BAR 4
@ -1056,11 +1056,15 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
mdiobus_free(felix->imdio); mdiobus_free(felix->imdio);
} }
/* The switch considers any frame (regardless of size) as eligible for /* The switch considers any frame (regardless of size) as eligible
* transmission if the traffic class gate is open for at least 33 ns. * for transmission if the traffic class gate is open for at least
* VSC9959_TAS_MIN_GATE_LEN_NS.
*
* Overruns are prevented by cropping an interval at the end of the gate time * Overruns are prevented by cropping an interval at the end of the gate time
* slot for which egress scheduling is blocked, but we need to still keep 33 ns * slot for which egress scheduling is blocked, but we need to still keep
* available for one packet to be transmitted, otherwise the port tc will hang. * VSC9959_TAS_MIN_GATE_LEN_NS available for one packet to be transmitted,
* otherwise the port tc will hang.
*
* This function returns the size of a gate interval that remains available for * This function returns the size of a gate interval that remains available for
* setting the guard band, after reserving the space for one egress frame. * setting the guard band, after reserving the space for one egress frame.
*/ */
@ -1303,7 +1307,8 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
* per-tc static guard band lengths, so it reduces the * per-tc static guard band lengths, so it reduces the
* useful gate interval length. Therefore, be careful * useful gate interval length. Therefore, be careful
* to calculate a guard band (and therefore max_sdu) * to calculate a guard band (and therefore max_sdu)
* that still leaves 33 ns available in the time slot. * that still leaves VSC9959_TAS_MIN_GATE_LEN_NS
* available in the time slot.
*/ */
max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte); max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
/* A TC gate may be completely closed, which is a /* A TC gate may be completely closed, which is a
@ -1518,7 +1518,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
if (TPA_START_IS_IPV6(tpa_start1)) if (TPA_START_IS_IPV6(tpa_start1))
tpa_info->gso_type = SKB_GSO_TCPV6; tpa_info->gso_type = SKB_GSO_TCPV6;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP && else if (!BNXT_CHIP_P4_PLUS(bp) &&
TPA_START_HASH_TYPE(tpa_start) == 3) TPA_START_HASH_TYPE(tpa_start) == 3)
tpa_info->gso_type = SKB_GSO_TCPV6; tpa_info->gso_type = SKB_GSO_TCPV6;
tpa_info->rss_hash = tpa_info->rss_hash =
@ -2212,15 +2212,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
type = bnxt_rss_ext_op(bp, rxcmp); type = bnxt_rss_ext_op(bp, rxcmp);
} else { } else {
u32 hash_type = RX_CMP_HASH_TYPE(rxcmp); u32 itypes = RX_CMP_ITYPES(rxcmp);
/* RSS profiles 1 and 3 with extract code 0 for inner if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
* 4-tuple itypes == RX_CMP_FLAGS_ITYPE_UDP)
*/
if (hash_type != 1 && hash_type != 3)
type = PKT_HASH_TYPE_L3;
else
type = PKT_HASH_TYPE_L4; type = PKT_HASH_TYPE_L4;
else
type = PKT_HASH_TYPE_L3;
} }
skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
} }
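The rewritten branch derives the reported hash type from the completion's item type instead of the RSS profile number: TCP and UDP completions get an L4 hash, everything else L3. A tiny sketch of that mapping (the enum values are placeholders, not the hardware encoding):

#include <stdio.h>

enum itype { ITYPE_IP = 1, ITYPE_TCP = 2, ITYPE_UDP = 3 };
enum hash_type { HASH_L3, HASH_L4 };

/* TCP/UDP packets carry a 4-tuple hash, everything else only an L3 hash. */
static enum hash_type pkt_hash_type(enum itype it)
{
    return (it == ITYPE_TCP || it == ITYPE_UDP) ? HASH_L4 : HASH_L3;
}

int main(void)
{
    printf("tcp -> %s\n", pkt_hash_type(ITYPE_TCP) == HASH_L4 ? "L4" : "L3");
    printf("ip  -> %s\n", pkt_hash_type(ITYPE_IP)  == HASH_L4 ? "L4" : "L3");
    return 0;
}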
@ -267,6 +267,9 @@ struct rx_cmp {
(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\ (((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK) RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
#define RX_CMP_ITYPES(rxcmp) \
(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_FLAGS_ITYPES_MASK)
#define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp) \ #define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp) \
((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_LEGACY) >>\ ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_LEGACY) >>\
RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT) RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT)
@ -378,7 +381,7 @@ struct rx_agg_cmp {
u32 rx_agg_cmp_opaque; u32 rx_agg_cmp_opaque;
__le32 rx_agg_cmp_v; __le32 rx_agg_cmp_v;
#define RX_AGG_CMP_V (1 << 0) #define RX_AGG_CMP_V (1 << 0)
#define RX_AGG_CMP_AGG_ID (0xffff << 16) #define RX_AGG_CMP_AGG_ID (0x0fff << 16)
#define RX_AGG_CMP_AGG_ID_SHIFT 16 #define RX_AGG_CMP_AGG_ID_SHIFT 16
__le32 rx_agg_cmp_unused; __le32 rx_agg_cmp_unused;
}; };
@ -416,7 +419,7 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7 #define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7
#define RX_TPA_START_CMP_AGG_ID (0x7f << 25) #define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_START_CMP_AGG_ID_SHIFT 25 #define RX_TPA_START_CMP_AGG_ID_SHIFT 25
#define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16) #define RX_TPA_START_CMP_AGG_ID_P5 (0x0fff << 16)
#define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16 #define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16
#define RX_TPA_START_CMP_METADATA1 (0xf << 28) #define RX_TPA_START_CMP_METADATA1 (0xf << 28)
#define RX_TPA_START_CMP_METADATA1_SHIFT 28 #define RX_TPA_START_CMP_METADATA1_SHIFT 28
@ -540,7 +543,7 @@ struct rx_tpa_end_cmp {
#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16 #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
#define RX_TPA_END_CMP_AGG_ID (0x7f << 25) #define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_END_CMP_AGG_ID_SHIFT 25 #define RX_TPA_END_CMP_AGG_ID_SHIFT 25
#define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16) #define RX_TPA_END_CMP_AGG_ID_P5 (0x0fff << 16)
#define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16 #define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_end_cmp_tsdelta; __le32 rx_tpa_end_cmp_tsdelta;
@ -2077,7 +2077,7 @@ void t4_idma_monitor(struct adapter *adapter,
struct sge_idma_monitor_state *idma, struct sge_idma_monitor_state *idma,
int hz, int ticks); int hz, int ticks);
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
unsigned int naddr, u8 *addr); u8 start, unsigned int naddr, u8 *addr);
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs, void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
u32 start_index, bool sleep_ok); u32 start_index, bool sleep_ok);
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs, void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
@ -3246,7 +3246,7 @@ static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
dev_info(pi->adapter->pdev_dev, dev_info(pi->adapter->pdev_dev,
"Setting MAC %pM on VF %d\n", mac, vf); "Setting MAC %pM on VF %d\n", mac, vf);
ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac); ret = t4_set_vf_mac_acl(adap, vf + 1, pi->lport, 1, mac);
if (!ret) if (!ret)
ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac); ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
return ret; return ret;
@ -10215,11 +10215,12 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
* t4_set_vf_mac_acl - Set MAC address for the specified VF * t4_set_vf_mac_acl - Set MAC address for the specified VF
* @adapter: The adapter * @adapter: The adapter
* @vf: one of the VFs instantiated by the specified PF * @vf: one of the VFs instantiated by the specified PF
* @start: The start port id associated with specified VF
* @naddr: the number of MAC addresses * @naddr: the number of MAC addresses
* @addr: the MAC address(es) to be set to the specified VF * @addr: the MAC address(es) to be set to the specified VF
*/ */
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
unsigned int naddr, u8 *addr) u8 start, unsigned int naddr, u8 *addr)
{ {
struct fw_acl_mac_cmd cmd; struct fw_acl_mac_cmd cmd;
@ -10234,7 +10235,7 @@ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd)); cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
cmd.nmac = naddr; cmd.nmac = naddr;
switch (adapter->pf) { switch (start) {
case 3: case 3:
memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3)); memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
break; break;
@ -297,7 +297,9 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
if (ret) { if (ret) {
mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret); mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
kvfree(vport_caps); kvfree(vport_caps);
return ERR_PTR(ret); if (ret == -EBUSY)
return ERR_PTR(-EBUSY);
return NULL;
} }
return vport_caps; return vport_caps;
@ -693,12 +693,11 @@ static int sparx5_start(struct sparx5 *sparx5)
err = -ENXIO; err = -ENXIO;
if (sparx5->fdma_irq >= 0) { if (sparx5->fdma_irq >= 0) {
if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0) if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0)
err = devm_request_threaded_irq(sparx5->dev, err = devm_request_irq(sparx5->dev,
sparx5->fdma_irq, sparx5->fdma_irq,
NULL, sparx5_fdma_handler,
sparx5_fdma_handler, 0,
IRQF_ONESHOT, "sparx5-fdma", sparx5);
"sparx5-fdma", sparx5);
if (!err) if (!err)
err = sparx5_fdma_start(sparx5); err = sparx5_fdma_start(sparx5);
if (err) if (err)
@ -1119,7 +1119,7 @@ int sparx5_port_init(struct sparx5 *sparx5,
spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN), spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
DEV10G_MAC_MAXLEN_CFG_MAX_LEN, DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
devinst, devinst,
DEV10G_MAC_ENA_CFG(0)); DEV10G_MAC_MAXLEN_CFG(0));
/* Handle Signal Detect in 10G PCS */ /* Handle Signal Detect in 10G PCS */
spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) | spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
@ -1315,7 +1315,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
GFP_KERNEL); GFP_KERNEL);
if (!gc->irq_contexts) { if (!gc->irq_contexts) {
err = -ENOMEM; err = -ENOMEM;
goto free_irq_vector; goto free_irq_array;
} }
for (i = 0; i < nvec; i++) { for (i = 0; i < nvec; i++) {
@ -1372,6 +1372,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
gc->max_num_msix = nvec; gc->max_num_msix = nvec;
gc->num_msix_usable = nvec; gc->num_msix_usable = nvec;
cpus_read_unlock(); cpus_read_unlock();
kfree(irqs);
return 0; return 0;
free_irq: free_irq:
@ -1384,8 +1385,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
} }
kfree(gc->irq_contexts); kfree(gc->irq_contexts);
kfree(irqs);
gc->irq_contexts = NULL; gc->irq_contexts = NULL;
free_irq_array:
kfree(irqs);
free_irq_vector: free_irq_vector:
cpus_read_unlock(); cpus_read_unlock();
pci_free_irq_vectors(pdev); pci_free_irq_vectors(pdev);
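The fix above adds a free_irq_array label so the temporary irqs array is released both when the irq_contexts allocation fails and on the success path. A compact, self-contained illustration of that goto-unwind idiom (generic names, not the mana driver's structures):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct ctx { int *contexts; };

/* Kernel-style error unwinding: later failures jump to earlier labels so
 * everything allocated so far is released exactly once, and the scratch
 * array is freed on the success path as well. */
static int setup(struct ctx *gc)
{
    int *irqs;                    /* temporary scratch array */
    int err;

    irqs = calloc(8, sizeof(*irqs));
    if (!irqs)
        return -ENOMEM;

    gc->contexts = calloc(8, sizeof(*gc->contexts));
    if (!gc->contexts) {
        err = -ENOMEM;
        goto free_irq_array;      /* only irqs exists at this point */
    }

    /* ... request the interrupts listed in irqs ... */

    free(irqs);                   /* no longer needed once setup succeeded */
    return 0;

free_irq_array:
    free(irqs);
    return err;
}

int main(void)
{
    struct ctx gc = { 0 };
    printf("setup: %d\n", setup(&gc));
    free(gc.contexts);
    return 0;
}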
@ -14,6 +14,8 @@
#include <soc/mscc/ocelot.h> #include <soc/mscc/ocelot.h>
#include "ocelot.h" #include "ocelot.h"
#define OCELOT_PTP_TX_TSTAMP_TIMEOUT (5 * HZ)
int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts) int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{ {
struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info); struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
@ -495,6 +497,28 @@ static int ocelot_traps_to_ptp_rx_filter(unsigned int proto)
return HWTSTAMP_FILTER_NONE; return HWTSTAMP_FILTER_NONE;
} }
static int ocelot_ptp_tx_type_to_cmd(int tx_type, int *ptp_cmd)
{
switch (tx_type) {
case HWTSTAMP_TX_ON:
*ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
/* IFH_REW_OP_ONE_STEP_PTP updates the correctionField,
* while what we need to update here is the originTimestamp.
*/
*ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
break;
case HWTSTAMP_TX_OFF:
*ptp_cmd = 0;
break;
default:
return -ERANGE;
}
return 0;
}
int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr) int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
{ {
struct ocelot_port *ocelot_port = ocelot->ports[port]; struct ocelot_port *ocelot_port = ocelot->ports[port];
@ -521,30 +545,19 @@ EXPORT_SYMBOL(ocelot_hwstamp_get);
int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
{ {
struct ocelot_port *ocelot_port = ocelot->ports[port]; struct ocelot_port *ocelot_port = ocelot->ports[port];
int ptp_cmd, old_ptp_cmd = ocelot_port->ptp_cmd;
bool l2 = false, l4 = false; bool l2 = false, l4 = false;
struct hwtstamp_config cfg; struct hwtstamp_config cfg;
bool old_l2, old_l4;
int err; int err;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT; return -EFAULT;
/* Tx type sanity check */ /* Tx type sanity check */
switch (cfg.tx_type) { err = ocelot_ptp_tx_type_to_cmd(cfg.tx_type, &ptp_cmd);
case HWTSTAMP_TX_ON: if (err)
ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; return err;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
/* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we
* need to update the origin time.
*/
ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
break;
case HWTSTAMP_TX_OFF:
ocelot_port->ptp_cmd = 0;
break;
default:
return -ERANGE;
}
switch (cfg.rx_filter) { switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE: case HWTSTAMP_FILTER_NONE:
@ -569,13 +582,27 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
return -ERANGE; return -ERANGE;
} }
old_l2 = ocelot_port->trap_proto & OCELOT_PROTO_PTP_L2;
old_l4 = ocelot_port->trap_proto & OCELOT_PROTO_PTP_L4;
err = ocelot_setup_ptp_traps(ocelot, port, l2, l4); err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
if (err) if (err)
return err; return err;
ocelot_port->ptp_cmd = ptp_cmd;
cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto); cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg))) {
err = -EFAULT;
goto out_restore_ptp_traps;
}
return 0;
out_restore_ptp_traps:
ocelot_setup_ptp_traps(ocelot, port, old_l2, old_l4);
ocelot_port->ptp_cmd = old_ptp_cmd;
return err;
} }
EXPORT_SYMBOL(ocelot_hwstamp_set); EXPORT_SYMBOL(ocelot_hwstamp_set);
@ -603,34 +630,87 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
} }
EXPORT_SYMBOL(ocelot_get_ts_info); EXPORT_SYMBOL(ocelot_get_ts_info);
static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port, static struct sk_buff *ocelot_port_dequeue_ptp_tx_skb(struct ocelot *ocelot,
int port, u8 ts_id,
u32 seqid)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct ptp_header *hdr;
spin_lock(&ocelot->ts_id_lock);
skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
if (OCELOT_SKB_CB(skb)->ts_id != ts_id)
continue;
/* Check that the timestamp ID is for the expected PTP
* sequenceId. We don't have to test ptp_parse_header() against
* NULL, because we've pre-validated the packet's ptp_class.
*/
hdr = ptp_parse_header(skb, OCELOT_SKB_CB(skb)->ptp_class);
if (seqid != ntohs(hdr->sequence_id))
continue;
__skb_unlink(skb, &ocelot_port->tx_skbs);
ocelot->ptp_skbs_in_flight--;
skb_match = skb;
break;
}
spin_unlock(&ocelot->ts_id_lock);
return skb_match;
}
static int ocelot_port_queue_ptp_tx_skb(struct ocelot *ocelot, int port,
struct sk_buff *clone) struct sk_buff *clone)
{ {
struct ocelot_port *ocelot_port = ocelot->ports[port]; struct ocelot_port *ocelot_port = ocelot->ports[port];
unsigned long flags; DECLARE_BITMAP(ts_id_in_flight, OCELOT_MAX_PTP_ID);
struct sk_buff *skb, *skb_tmp;
unsigned long n;
spin_lock_irqsave(&ocelot->ts_id_lock, flags); spin_lock(&ocelot->ts_id_lock);
if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID || /* To get a better chance of acquiring a timestamp ID, first flush the
ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) { * stale packets still waiting in the TX timestamping queue. They are
spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); * probably lost.
*/
skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) {
if (time_before(OCELOT_SKB_CB(skb)->ptp_tx_time +
OCELOT_PTP_TX_TSTAMP_TIMEOUT, jiffies)) {
dev_warn_ratelimited(ocelot->dev,
"port %d invalidating stale timestamp ID %u which seems lost\n",
port, OCELOT_SKB_CB(skb)->ts_id);
__skb_unlink(skb, &ocelot_port->tx_skbs);
kfree_skb(skb);
ocelot->ptp_skbs_in_flight--;
} else {
__set_bit(OCELOT_SKB_CB(skb)->ts_id, ts_id_in_flight);
}
}
if (ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
spin_unlock(&ocelot->ts_id_lock);
return -EBUSY; return -EBUSY;
} }
skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; n = find_first_zero_bit(ts_id_in_flight, OCELOT_MAX_PTP_ID);
/* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */ if (n == OCELOT_MAX_PTP_ID) {
OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id; spin_unlock(&ocelot->ts_id_lock);
return -EBUSY;
}
ocelot_port->ts_id++; /* Found an available timestamp ID, use it */
if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID) OCELOT_SKB_CB(clone)->ts_id = n;
ocelot_port->ts_id = 0; OCELOT_SKB_CB(clone)->ptp_tx_time = jiffies;
ocelot_port->ptp_skbs_in_flight++;
ocelot->ptp_skbs_in_flight++; ocelot->ptp_skbs_in_flight++;
__skb_queue_tail(&ocelot_port->tx_skbs, clone);
skb_queue_tail(&ocelot_port->tx_skbs, clone); spin_unlock(&ocelot->ts_id_lock);
spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); dev_dbg_ratelimited(ocelot->dev, "port %d timestamp id %lu\n", port, n);
return 0; return 0;
} }
@ -687,10 +767,14 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
if (!(*clone)) if (!(*clone))
return -ENOMEM; return -ENOMEM;
err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone); /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
if (err) err = ocelot_port_queue_ptp_tx_skb(ocelot, port, *clone);
if (err) {
kfree_skb(*clone);
return err; return err;
}
skb_shinfo(*clone)->tx_flags |= SKBTX_IN_PROGRESS;
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
OCELOT_SKB_CB(*clone)->ptp_class = ptp_class; OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
} }
@ -726,28 +810,15 @@ static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
} }
static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
{
struct ptp_header *hdr;
hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
if (WARN_ON(!hdr))
return false;
return seqid == ntohs(hdr->sequence_id);
}
void ocelot_get_txtstamp(struct ocelot *ocelot) void ocelot_get_txtstamp(struct ocelot *ocelot)
{ {
int budget = OCELOT_PTP_QUEUE_SZ; int budget = OCELOT_PTP_QUEUE_SZ;
while (budget--) { while (budget--) {
struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct skb_shared_hwtstamps shhwtstamps; struct skb_shared_hwtstamps shhwtstamps;
u32 val, id, seqid, txport; u32 val, id, seqid, txport;
struct ocelot_port *port; struct sk_buff *skb_match;
struct timespec64 ts; struct timespec64 ts;
unsigned long flags;
val = ocelot_read(ocelot, SYS_PTP_STATUS); val = ocelot_read(ocelot, SYS_PTP_STATUS);
@ -762,36 +833,14 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val); txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val); seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
port = ocelot->ports[txport];
spin_lock(&ocelot->ts_id_lock);
port->ptp_skbs_in_flight--;
ocelot->ptp_skbs_in_flight--;
spin_unlock(&ocelot->ts_id_lock);
/* Retrieve its associated skb */ /* Retrieve its associated skb */
try_again: skb_match = ocelot_port_dequeue_ptp_tx_skb(ocelot, txport, id,
spin_lock_irqsave(&port->tx_skbs.lock, flags); seqid);
if (!skb_match) {
skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { dev_warn_ratelimited(ocelot->dev,
if (OCELOT_SKB_CB(skb)->ts_id != id) "port %d received TX timestamp (seqid %d, ts id %u) for packet previously declared stale\n",
continue; txport, seqid, id);
__skb_unlink(skb, &port->tx_skbs); goto next_ts;
skb_match = skb;
break;
}
spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
if (WARN_ON(!skb_match))
continue;
if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
dev_err_ratelimited(ocelot->dev,
"port %d received stale TX timestamp for seqid %d, discarding\n",
txport, seqid);
dev_kfree_skb_any(skb);
goto try_again;
} }
/* Get the h/w timestamp */ /* Get the h/w timestamp */
@ -802,7 +851,7 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
skb_complete_tx_timestamp(skb_match, &shhwtstamps); skb_complete_tx_timestamp(skb_match, &shhwtstamps);
/* Next ts */ next_ts:
ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
} }
} }
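The new queue/dequeue helpers above replace the free-running ts_id counter: before queuing, entries older than OCELOT_PTP_TX_TSTAMP_TIMEOUT are flushed as lost, the IDs still in flight are collected into a bitmap, and the first free bit becomes the new timestamp ID. A simplified userspace sketch of that allocation strategy (a plain uint64_t stands in for the kernel bitmap API, and the struct layout is invented for the example):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define MAX_PTP_ID 63            /* stand-in for OCELOT_MAX_PTP_ID   */
#define TSTAMP_TIMEOUT 5         /* seconds, like the 5 * HZ timeout */

struct pending { int used; uint8_t ts_id; time_t queued_at; };

/* Flush entries that waited too long, then pick the first free ID. */
static int pick_ts_id(struct pending *q, int qlen, time_t now)
{
    uint64_t in_flight = 0;
    int i;

    for (i = 0; i < qlen; i++) {
        if (!q[i].used)
            continue;
        if (now - q[i].queued_at > TSTAMP_TIMEOUT) {
            q[i].used = 0;                     /* timestamp presumed lost */
            continue;
        }
        in_flight |= UINT64_C(1) << q[i].ts_id;
    }

    for (i = 0; i < MAX_PTP_ID; i++)
        if (!(in_flight & (UINT64_C(1) << i)))
            return i;                          /* first free timestamp ID */

    return -1;                                 /* all IDs busy */
}

int main(void)
{
    struct pending q[4] = {
        { 1, 0, time(NULL) - 10 },             /* stale, will be flushed  */
        { 1, 1, time(NULL) },
    };
    printf("next ts_id = %d\n", pick_ts_id(q, 4, time(NULL)));
    return 0;
}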
@ -53,7 +53,7 @@ MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000."
#define QCASPI_PLUGGABLE_MIN 0 #define QCASPI_PLUGGABLE_MIN 0
#define QCASPI_PLUGGABLE_MAX 1 #define QCASPI_PLUGGABLE_MAX 1
static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN; static int qcaspi_pluggable = QCASPI_PLUGGABLE_MAX;
module_param(qcaspi_pluggable, int, 0); module_param(qcaspi_pluggable, int, 0);
MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no)."); MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");
@ -812,7 +812,6 @@ qcaspi_netdev_init(struct net_device *dev)
dev->mtu = QCAFRM_MAX_MTU; dev->mtu = QCAFRM_MAX_MTU;
dev->type = ARPHRD_ETHER; dev->type = ARPHRD_ETHER;
qca->clkspeed = qcaspi_clkspeed;
qca->burst_len = qcaspi_burst_len; qca->burst_len = qcaspi_burst_len;
qca->spi_thread = NULL; qca->spi_thread = NULL;
qca->buffer_size = (QCAFRM_MAX_MTU + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN + qca->buffer_size = (QCAFRM_MAX_MTU + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
@ -903,17 +902,15 @@ qca_spi_probe(struct spi_device *spi)
legacy_mode = of_property_read_bool(spi->dev.of_node, legacy_mode = of_property_read_bool(spi->dev.of_node,
"qca,legacy-mode"); "qca,legacy-mode");
if (qcaspi_clkspeed == 0) { if (qcaspi_clkspeed)
if (spi->max_speed_hz) spi->max_speed_hz = qcaspi_clkspeed;
qcaspi_clkspeed = spi->max_speed_hz; else if (!spi->max_speed_hz)
else spi->max_speed_hz = QCASPI_CLK_SPEED;
qcaspi_clkspeed = QCASPI_CLK_SPEED;
}
if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) || if (spi->max_speed_hz < QCASPI_CLK_SPEED_MIN ||
(qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) { spi->max_speed_hz > QCASPI_CLK_SPEED_MAX) {
dev_err(&spi->dev, "Invalid clkspeed: %d\n", dev_err(&spi->dev, "Invalid clkspeed: %u\n",
qcaspi_clkspeed); spi->max_speed_hz);
return -EINVAL; return -EINVAL;
} }
@ -938,14 +935,13 @@ qca_spi_probe(struct spi_device *spi)
return -EINVAL; return -EINVAL;
} }
dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n", dev_info(&spi->dev, "ver=%s, clkspeed=%u, burst_len=%d, pluggable=%d\n",
QCASPI_DRV_VERSION, QCASPI_DRV_VERSION,
qcaspi_clkspeed, spi->max_speed_hz,
qcaspi_burst_len, qcaspi_burst_len,
qcaspi_pluggable); qcaspi_pluggable);
spi->mode = SPI_MODE_3; spi->mode = SPI_MODE_3;
spi->max_speed_hz = qcaspi_clkspeed;
if (spi_setup(spi) < 0) { if (spi_setup(spi) < 0) {
dev_err(&spi->dev, "Unable to setup SPI device\n"); dev_err(&spi->dev, "Unable to setup SPI device\n");
return -EFAULT; return -EFAULT;
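With the clkspeed field gone, the probe path above resolves the SPI clock as: module-parameter override if given, otherwise the value already present in spi->max_speed_hz, otherwise QCASPI_CLK_SPEED, and then range-checks the result. A small sketch of that selection order (the limits below are stand-ins, not the driver's real constants):

#include <stdio.h>

#define CLK_DEFAULT 8000000U
#define CLK_MIN     1000000U
#define CLK_MAX     16000000U

/* Priority: explicit override > value already set on the device > default. */
static int pick_clkspeed(unsigned int override, unsigned int *dev_speed)
{
    if (override)
        *dev_speed = override;
    else if (!*dev_speed)
        *dev_speed = CLK_DEFAULT;

    if (*dev_speed < CLK_MIN || *dev_speed > CLK_MAX)
        return -1;               /* reject out-of-range settings */
    return 0;
}

int main(void)
{
    unsigned int speed = 0;
    if (pick_clkspeed(0, &speed) == 0)
        printf("using %u Hz\n", speed);
    return 0;
}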
@ -89,7 +89,6 @@ struct qcaspi {
#endif #endif
/* user configurable options */ /* user configurable options */
u32 clkspeed;
u8 legacy_mode; u8 legacy_mode;
u16 burst_len; u16 burst_len;
}; };
@ -862,13 +862,10 @@ static void rswitch_tx_free(struct net_device *ndev)
struct rswitch_ext_desc *desc; struct rswitch_ext_desc *desc;
struct sk_buff *skb; struct sk_buff *skb;
for (; rswitch_get_num_cur_queues(gq) > 0; desc = &gq->tx_ring[gq->dirty];
gq->dirty = rswitch_next_queue_index(gq, false, 1)) { while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) {
desc = &gq->tx_ring[gq->dirty];
if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
break;
dma_rmb(); dma_rmb();
skb = gq->skbs[gq->dirty]; skb = gq->skbs[gq->dirty];
if (skb) { if (skb) {
rdev->ndev->stats.tx_packets++; rdev->ndev->stats.tx_packets++;
@ -879,7 +876,10 @@ static void rswitch_tx_free(struct net_device *ndev)
dev_kfree_skb_any(gq->skbs[gq->dirty]); dev_kfree_skb_any(gq->skbs[gq->dirty]);
gq->skbs[gq->dirty] = NULL; gq->skbs[gq->dirty] = NULL;
} }
desc->desc.die_dt = DT_EEMPTY; desc->desc.die_dt = DT_EEMPTY;
gq->dirty = rswitch_next_queue_index(gq, false, 1);
desc = &gq->tx_ring[gq->dirty];
} }
} }
@ -908,8 +908,10 @@ static int rswitch_poll(struct napi_struct *napi, int budget)
if (napi_complete_done(napi, budget - quota)) { if (napi_complete_done(napi, budget - quota)) {
spin_lock_irqsave(&priv->lock, flags); spin_lock_irqsave(&priv->lock, flags);
rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true); if (test_bit(rdev->port, priv->opened_ports)) {
rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true); rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
}
spin_unlock_irqrestore(&priv->lock, flags); spin_unlock_irqrestore(&priv->lock, flags);
} }
@ -1114,25 +1116,40 @@ static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac) static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{ {
u32 val; u32 pis, lsc;
rswitch_etha_write_mac_address(etha, mac); rswitch_etha_write_mac_address(etha, mac);
switch (etha->speed) { switch (etha->phy_interface) {
case 100: case PHY_INTERFACE_MODE_SGMII:
val = MPIC_LSC_100M; pis = MPIC_PIS_GMII;
break; break;
case 1000: case PHY_INTERFACE_MODE_USXGMII:
val = MPIC_LSC_1G; case PHY_INTERFACE_MODE_5GBASER:
break; pis = MPIC_PIS_XGMII;
case 2500:
val = MPIC_LSC_2_5G;
break; break;
default: default:
return; pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC));
break;
} }
iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC); switch (etha->speed) {
case 100:
lsc = MPIC_LSC_100M;
break;
case 1000:
lsc = MPIC_LSC_1G;
break;
case 2500:
lsc = MPIC_LSC_2_5G;
break;
default:
lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC));
break;
}
rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC,
FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc));
} }
static void rswitch_etha_enable_mii(struct rswitch_etha *etha) static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
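rswitch_rmac_setting() now treats MPIC as two bitfields (MPIC_PIS and MPIC_LSC) and uses FIELD_GET()/FIELD_PREP() with a masked read-modify-write, preserving whatever is already programmed when the interface or speed is unknown. A self-contained sketch of that field handling, with simplified stand-ins for the GENMASK/FIELD_* macros (not the kernel's definitions):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for GENMASK()/FIELD_GET()/FIELD_PREP(). */
#define GENMASK32(h, l)   (((0xffffffffU) >> (31 - (h))) & ((0xffffffffU) << (l)))
#define FIELD_SHIFT(mask) (__builtin_ctz(mask))
#define FIELD_GET(mask, reg)  (((reg) & (mask)) >> FIELD_SHIFT(mask))
#define FIELD_PREP(mask, val) (((val) << FIELD_SHIFT(mask)) & (mask))

#define MPIC_PIS GENMASK32(2, 0)   /* PHY interface select, bits 2:0 */
#define MPIC_LSC GENMASK32(5, 3)   /* link speed,           bits 5:3 */

int main(void)
{
    uint32_t mpic = 0;

    /* Read-modify-write only the two fields, leaving other bits intact. */
    mpic &= ~(MPIC_PIS | MPIC_LSC);
    mpic |= FIELD_PREP(MPIC_PIS, 2) | FIELD_PREP(MPIC_LSC, 1);   /* GMII, 100M */

    printf("MPIC=0x%08x PIS=%u LSC=%u\n", mpic,
           FIELD_GET(MPIC_PIS, mpic), FIELD_GET(MPIC_LSC, mpic));
    return 0;
}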
@ -1538,20 +1555,20 @@ static int rswitch_open(struct net_device *ndev)
struct rswitch_device *rdev = netdev_priv(ndev); struct rswitch_device *rdev = netdev_priv(ndev);
unsigned long flags; unsigned long flags;
phy_start(ndev->phydev); if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
napi_enable(&rdev->napi); napi_enable(&rdev->napi);
netif_start_queue(ndev);
spin_lock_irqsave(&rdev->priv->lock, flags); spin_lock_irqsave(&rdev->priv->lock, flags);
bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true); rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true); rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
spin_unlock_irqrestore(&rdev->priv->lock, flags); spin_unlock_irqrestore(&rdev->priv->lock, flags);
if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) phy_start(ndev->phydev);
iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
bitmap_set(rdev->priv->opened_ports, rdev->port, 1); netif_start_queue(ndev);
return 0; return 0;
}; };
@ -1563,7 +1580,16 @@ static int rswitch_stop(struct net_device *ndev)
unsigned long flags; unsigned long flags;
netif_tx_stop_all_queues(ndev); netif_tx_stop_all_queues(ndev);
phy_stop(ndev->phydev);
spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
bitmap_clear(rdev->priv->opened_ports, rdev->port, 1); bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
spin_unlock_irqrestore(&rdev->priv->lock, flags);
napi_disable(&rdev->napi);
if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID); iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
@ -1576,14 +1602,6 @@ static int rswitch_stop(struct net_device *ndev)
kfree(ts_info); kfree(ts_info);
} }
spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
spin_unlock_irqrestore(&rdev->priv->lock, flags);
phy_stop(ndev->phydev);
napi_disable(&rdev->napi);
return 0; return 0;
}; };
@ -1681,8 +1699,11 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
if (dma_mapping_error(ndev->dev.parent, dma_addr_orig)) if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
goto err_kfree; goto err_kfree;
gq->skbs[gq->cur] = skb; /* Store the skb at the last descriptor to avoid freeing it before the hardware completes the send */
gq->unmap_addrs[gq->cur] = dma_addr_orig; gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb;
gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig;
dma_wmb();
/* DT_FSTART should be set at last. So, this is reverse order. */ /* DT_FSTART should be set at last. So, this is reverse order. */
for (i = nr_desc; i-- > 0; ) { for (i = nr_desc; i-- > 0; ) {
@ -1694,14 +1715,13 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
goto err_unmap; goto err_unmap;
} }
wmb(); /* gq->cur must be incremented after die_dt was set */
gq->cur = rswitch_next_queue_index(gq, true, nr_desc); gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32)); rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
return ret; return ret;
err_unmap: err_unmap:
gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL;
dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE); dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);
err_kfree: err_kfree:
@ -1889,7 +1909,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
rdev->np_port = rswitch_get_port_node(rdev); rdev->np_port = rswitch_get_port_node(rdev);
rdev->disabled = !rdev->np_port; rdev->disabled = !rdev->np_port;
err = of_get_ethdev_address(rdev->np_port, ndev); err = of_get_ethdev_address(rdev->np_port, ndev);
of_node_put(rdev->np_port);
if (err) { if (err) {
if (is_valid_ether_addr(rdev->etha->mac_addr)) if (is_valid_ether_addr(rdev->etha->mac_addr))
eth_hw_addr_set(ndev, rdev->etha->mac_addr); eth_hw_addr_set(ndev, rdev->etha->mac_addr);
@ -1919,6 +1938,7 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
out_rxdmac: out_rxdmac:
out_get_params: out_get_params:
of_node_put(rdev->np_port);
netif_napi_del(&rdev->napi); netif_napi_del(&rdev->napi);
free_netdev(ndev); free_netdev(ndev);
@ -1932,6 +1952,7 @@ static void rswitch_device_free(struct rswitch_private *priv, unsigned int index
rswitch_txdmac_free(ndev); rswitch_txdmac_free(ndev);
rswitch_rxdmac_free(ndev); rswitch_rxdmac_free(ndev);
of_node_put(rdev->np_port);
netif_napi_del(&rdev->napi); netif_napi_del(&rdev->napi);
free_netdev(ndev); free_netdev(ndev);

@ -724,13 +724,13 @@ enum rswitch_etha_mode {
#define EAVCC_VEM_SC_TAG (0x3 << 16) #define EAVCC_VEM_SC_TAG (0x3 << 16)
#define MPIC_PIS_MII 0x00 #define MPIC_PIS GENMASK(2, 0)
#define MPIC_PIS_GMII 0x02 #define MPIC_PIS_GMII 2
#define MPIC_PIS_XGMII 0x04 #define MPIC_PIS_XGMII 4
#define MPIC_LSC_SHIFT 3 #define MPIC_LSC GENMASK(5, 3)
#define MPIC_LSC_100M (1 << MPIC_LSC_SHIFT) #define MPIC_LSC_100M 1
#define MPIC_LSC_1G (2 << MPIC_LSC_SHIFT) #define MPIC_LSC_1G 2
#define MPIC_LSC_2_5G (3 << MPIC_LSC_SHIFT) #define MPIC_LSC_2_5G 3
#define MDIO_READ_C45 0x03 #define MDIO_READ_C45 0x03
#define MDIO_WRITE_C45 0x01 #define MDIO_WRITE_C45 0x01
@ -983,7 +983,8 @@ static void team_port_disable(struct team *team,
#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ #define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
NETIF_F_HIGHDMA | NETIF_F_LRO) NETIF_F_HIGHDMA | NETIF_F_LRO | \
NETIF_F_GSO_ENCAP_ALL)
#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE) NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
@ -991,13 +992,14 @@ static void team_port_disable(struct team *team,
static void __team_compute_features(struct team *team) static void __team_compute_features(struct team *team)
{ {
struct team_port *port; struct team_port *port;
netdev_features_t vlan_features = TEAM_VLAN_FEATURES & netdev_features_t vlan_features = TEAM_VLAN_FEATURES;
NETIF_F_ALL_FOR_ALL;
netdev_features_t enc_features = TEAM_ENC_FEATURES; netdev_features_t enc_features = TEAM_ENC_FEATURES;
unsigned short max_hard_header_len = ETH_HLEN; unsigned short max_hard_header_len = ETH_HLEN;
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM; IFF_XMIT_DST_RELEASE_PERM;
vlan_features = netdev_base_features(vlan_features);
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list) { list_for_each_entry_rcu(port, &team->port_list, list) {
vlan_features = netdev_increment_features(vlan_features, vlan_features = netdev_increment_features(vlan_features,
@ -2012,8 +2014,7 @@ static netdev_features_t team_fix_features(struct net_device *dev,
netdev_features_t mask; netdev_features_t mask;
mask = features; mask = features;
features &= ~NETIF_F_ONE_FOR_ALL; features = netdev_base_features(features);
features |= NETIF_F_ALL_FOR_ALL;
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list) { list_for_each_entry_rcu(port, &team->port_list, list) {

}; };
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf); static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq);
static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
struct net_device *dev, struct net_device *dev,
unsigned int *xdp_xmit, unsigned int *xdp_xmit,
@ -2898,7 +2899,6 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
if (err < 0) if (err < 0)
goto err_xdp_reg_mem_model; goto err_xdp_reg_mem_model;
netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qp_index));
virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
@ -3166,7 +3166,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
virtnet_rx_pause(vi, rq); virtnet_rx_pause(vi, rq);
err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf); err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL);
if (err) if (err)
netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
@ -3229,7 +3229,8 @@ static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
virtnet_tx_pause(vi, sq); virtnet_tx_pause(vi, sq);
err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf); err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf,
virtnet_sq_free_unused_buf_done);
if (err) if (err)
netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
@ -5997,6 +5998,14 @@ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
xdp_return_frame(ptr_to_xdp(buf)); xdp_return_frame(ptr_to_xdp(buf));
} }
static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq)
{
struct virtnet_info *vi = vq->vdev->priv;
int i = vq2txq(vq);
netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
}
static void free_unused_bufs(struct virtnet_info *vi) static void free_unused_bufs(struct virtnet_info *vi)
{ {
void *buf; void *buf;
@ -6728,11 +6737,20 @@ static int virtnet_probe(struct virtio_device *vdev)
static void remove_vq_common(struct virtnet_info *vi) static void remove_vq_common(struct virtnet_info *vi)
{ {
int i;
virtio_reset_device(vi->vdev); virtio_reset_device(vi->vdev);
/* Free unused buffers in both send and recv, if any. */ /* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi); free_unused_bufs(vi);
/*
* Rule of thumb is netdev_tx_reset_queue() should follow any
* skb freeing not followed by netdev_tx_completed_queue()
*/
for (i = 0; i < vi->max_queue_pairs; i++)
netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
free_receive_bufs(vi); free_receive_bufs(vi);
free_receive_page_frags(vi); free_receive_page_frags(vi);
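The comment above states the rule being enforced: bytes reported to the byte-queue-limit machinery via netdev_tx_sent_queue() must either be completed with netdev_tx_completed_queue() or the queue accounting must be reset, otherwise the counters stay unbalanced and the queue can stall. A toy model of that pairing (not the real BQL API):

#include <stdio.h>

/* Toy model of byte-queue-limit accounting: every byte reported as sent
 * must later be reported as completed, or the counters must be reset. */
struct txq { unsigned long sent, completed; };

static void tx_sent(struct txq *q, unsigned long bytes)      { q->sent += bytes; }
static void tx_completed(struct txq *q, unsigned long bytes) { q->completed += bytes; }
static void tx_reset(struct txq *q)                          { q->sent = q->completed = 0; }

int main(void)
{
    struct txq q = { 0, 0 };

    tx_sent(&q, 1500);
    /* Suppose the buffer is freed without a completion event (e.g. during
     * queue teardown): the counters would stay unbalanced forever ... */
    tx_reset(&q);      /* ... unless the queue accounting is reset. */

    printf("sent=%lu completed=%lu\n", q.sent, q.completed);
    return 0;
}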
@ -1967,7 +1967,7 @@ void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
if (csa_err_mask & (CS_ERR_COUNT_ERROR | if (csa_err_mask & (CS_ERR_COUNT_ERROR |
CS_ERR_LONG_DELAY_AFTER_CS | CS_ERR_LONG_DELAY_AFTER_CS |
CS_ERR_TX_BLOCK_TIMER_EXPIRED)) CS_ERR_TX_BLOCK_TIMER_EXPIRED))
ieee80211_channel_switch_disconnect(vif, true); ieee80211_channel_switch_disconnect(vif);
rcu_read_unlock(); rcu_read_unlock();
} }
@ -867,7 +867,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
static int xennet_close(struct net_device *dev) static int xennet_close(struct net_device *dev)
{ {
struct netfront_info *np = netdev_priv(dev); struct netfront_info *np = netdev_priv(dev);
unsigned int num_queues = dev->real_num_tx_queues; unsigned int num_queues = np->queues ? dev->real_num_tx_queues : 0;
unsigned int i; unsigned int i;
struct netfront_queue *queue; struct netfront_queue *queue;
netif_tx_stop_all_queues(np->netdev); netif_tx_stop_all_queues(np->netdev);
@ -882,6 +882,9 @@ static void xennet_destroy_queues(struct netfront_info *info)
{ {
unsigned int i; unsigned int i;
if (!info->queues)
return;
for (i = 0; i < info->netdev->real_num_tx_queues; i++) { for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
struct netfront_queue *queue = &info->queues[i]; struct netfront_queue *queue = &info->queues[i];
@ -26,7 +26,7 @@ int kvm_arch_ptp_init(void)
long ret; long ret;
if (!kvm_para_available()) if (!kvm_para_available())
return -ENODEV; return -EOPNOTSUPP;
if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
p = alloc_page(GFP_KERNEL | __GFP_ZERO); p = alloc_page(GFP_KERNEL | __GFP_ZERO);
@ -46,14 +46,14 @@ int kvm_arch_ptp_init(void)
clock_pair_gpa = slow_virt_to_phys(clock_pair); clock_pair_gpa = slow_virt_to_phys(clock_pair);
if (!pvclock_get_pvti_cpu0_va()) { if (!pvclock_get_pvti_cpu0_va()) {
ret = -ENODEV; ret = -EOPNOTSUPP;
goto err; goto err;
} }
ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa, ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
KVM_CLOCK_PAIRING_WALLCLOCK); KVM_CLOCK_PAIRING_WALLCLOCK);
if (ret == -KVM_ENOSYS) { if (ret == -KVM_ENOSYS) {
ret = -ENODEV; ret = -EOPNOTSUPP;
goto err; goto err;
} }
@ -371,8 +371,8 @@
.ops = &axp20x_ops, \ .ops = &axp20x_ops, \
} }
#define AXP_DESC(_family, _id, _match, _supply, _min, _max, _step, _vreg, \ #define AXP_DESC_DELAY(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
_vmask, _ereg, _emask) \ _vmask, _ereg, _emask, _ramp_delay) \
[_family##_##_id] = { \ [_family##_##_id] = { \
.name = (_match), \ .name = (_match), \
.supply_name = (_supply), \ .supply_name = (_supply), \
@ -388,9 +388,15 @@
.vsel_mask = (_vmask), \ .vsel_mask = (_vmask), \
.enable_reg = (_ereg), \ .enable_reg = (_ereg), \
.enable_mask = (_emask), \ .enable_mask = (_emask), \
.ramp_delay = (_ramp_delay), \
.ops = &axp20x_ops, \ .ops = &axp20x_ops, \
} }
#define AXP_DESC(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
_vmask, _ereg, _emask) \
AXP_DESC_DELAY(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
_vmask, _ereg, _emask, 0)
#define AXP_DESC_SW(_family, _id, _match, _supply, _ereg, _emask) \ #define AXP_DESC_SW(_family, _id, _match, _supply, _ereg, _emask) \
[_family##_##_id] = { \ [_family##_##_id] = { \
.name = (_match), \ .name = (_match), \
@ -419,8 +425,8 @@
.ops = &axp20x_ops_fixed \ .ops = &axp20x_ops_fixed \
} }
#define AXP_DESC_RANGES(_family, _id, _match, _supply, _ranges, _n_voltages, \ #define AXP_DESC_RANGES_DELAY(_family, _id, _match, _supply, _ranges, _n_voltages, \
_vreg, _vmask, _ereg, _emask) \ _vreg, _vmask, _ereg, _emask, _ramp_delay) \
[_family##_##_id] = { \ [_family##_##_id] = { \
.name = (_match), \ .name = (_match), \
.supply_name = (_supply), \ .supply_name = (_supply), \
@ -436,9 +442,15 @@
.enable_mask = (_emask), \ .enable_mask = (_emask), \
.linear_ranges = (_ranges), \ .linear_ranges = (_ranges), \
.n_linear_ranges = ARRAY_SIZE(_ranges), \ .n_linear_ranges = ARRAY_SIZE(_ranges), \
.ramp_delay = (_ramp_delay), \
.ops = &axp20x_ops_range, \ .ops = &axp20x_ops_range, \
} }
#define AXP_DESC_RANGES(_family, _id, _match, _supply, _ranges, _n_voltages, \
_vreg, _vmask, _ereg, _emask) \
AXP_DESC_RANGES_DELAY(_family, _id, _match, _supply, _ranges, \
_n_voltages, _vreg, _vmask, _ereg, _emask, 0)
static const int axp209_dcdc2_ldo3_slew_rates[] = { static const int axp209_dcdc2_ldo3_slew_rates[] = {
1600, 1600,
800, 800,
@ -781,21 +793,21 @@ static const struct linear_range axp717_dcdc3_ranges[] = {
}; };
static const struct regulator_desc axp717_regulators[] = { static const struct regulator_desc axp717_regulators[] = {
AXP_DESC_RANGES(AXP717, DCDC1, "dcdc1", "vin1", AXP_DESC_RANGES_DELAY(AXP717, DCDC1, "dcdc1", "vin1",
axp717_dcdc1_ranges, AXP717_DCDC1_NUM_VOLTAGES, axp717_dcdc1_ranges, AXP717_DCDC1_NUM_VOLTAGES,
AXP717_DCDC1_CONTROL, AXP717_DCDC_V_OUT_MASK, AXP717_DCDC1_CONTROL, AXP717_DCDC_V_OUT_MASK,
AXP717_DCDC_OUTPUT_CONTROL, BIT(0)), AXP717_DCDC_OUTPUT_CONTROL, BIT(0), 640),
AXP_DESC_RANGES(AXP717, DCDC2, "dcdc2", "vin2", AXP_DESC_RANGES_DELAY(AXP717, DCDC2, "dcdc2", "vin2",
axp717_dcdc2_ranges, AXP717_DCDC2_NUM_VOLTAGES, axp717_dcdc2_ranges, AXP717_DCDC2_NUM_VOLTAGES,
AXP717_DCDC2_CONTROL, AXP717_DCDC_V_OUT_MASK, AXP717_DCDC2_CONTROL, AXP717_DCDC_V_OUT_MASK,
AXP717_DCDC_OUTPUT_CONTROL, BIT(1)), AXP717_DCDC_OUTPUT_CONTROL, BIT(1), 640),
AXP_DESC_RANGES(AXP717, DCDC3, "dcdc3", "vin3", AXP_DESC_RANGES_DELAY(AXP717, DCDC3, "dcdc3", "vin3",
axp717_dcdc3_ranges, AXP717_DCDC3_NUM_VOLTAGES, axp717_dcdc3_ranges, AXP717_DCDC3_NUM_VOLTAGES,
AXP717_DCDC3_CONTROL, AXP717_DCDC_V_OUT_MASK, AXP717_DCDC3_CONTROL, AXP717_DCDC_V_OUT_MASK,
AXP717_DCDC_OUTPUT_CONTROL, BIT(2)), AXP717_DCDC_OUTPUT_CONTROL, BIT(2), 640),
AXP_DESC(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100, AXP_DESC_DELAY(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100,
AXP717_DCDC4_CONTROL, AXP717_DCDC_V_OUT_MASK, AXP717_DCDC4_CONTROL, AXP717_DCDC_V_OUT_MASK,
AXP717_DCDC_OUTPUT_CONTROL, BIT(3)), AXP717_DCDC_OUTPUT_CONTROL, BIT(3), 6400),
AXP_DESC(AXP717, ALDO1, "aldo1", "aldoin", 500, 3500, 100, AXP_DESC(AXP717, ALDO1, "aldo1", "aldoin", 500, 3500, 100,
AXP717_ALDO1_CONTROL, AXP717_LDO_V_OUT_MASK, AXP717_ALDO1_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(0)), AXP717_LDO0_OUTPUT_CONTROL, BIT(0)),
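The new AXP_DESC_DELAY/AXP_DESC_RANGES_DELAY macros add a ramp_delay argument while the original macros delegate to them with 0, so existing users stay untouched. A minimal illustration of that wrapper-with-default pattern (generic struct and field names, not the regulator framework's):

#include <stdio.h>

struct desc { const char *name; int min_uV; int max_uV; int ramp_delay; };

/* Full-featured initializer macro ... */
#define DESC_DELAY(_name, _min, _max, _delay) \
    { .name = (_name), .min_uV = (_min), .max_uV = (_max), .ramp_delay = (_delay) }

/* ... and a thin wrapper that keeps existing users unchanged by
 * defaulting the new field to 0. */
#define DESC(_name, _min, _max) DESC_DELAY(_name, _min, _max, 0)

static const struct desc regs[] = {
    DESC("ldo1", 500000, 3500000),
    DESC_DELAY("dcdc4", 1000000, 3700000, 6400),
};

int main(void)
{
    for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
        printf("%s ramp_delay=%d\n", regs[i].name, regs[i].ramp_delay);
    return 0;
}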
@ -239,7 +239,7 @@ static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip,
ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode); ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode);
if (ret < 0) if (ret < 0)
return ret; goto stop_user;
if (op->dummy.buswidth && op->dummy.nbytes) { if (op->dummy.buswidth && op->dummy.nbytes) {
for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++) for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++)
@ -249,8 +249,9 @@ static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip,
aspeed_spi_set_io_mode(chip, io_mode); aspeed_spi_set_io_mode(chip, io_mode);
aspeed_spi_read_from_ahb(buf, chip->ahb_base, len); aspeed_spi_read_from_ahb(buf, chip->ahb_base, len);
stop_user:
aspeed_spi_stop_user(chip); aspeed_spi_stop_user(chip);
return 0; return ret;
} }
static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip, static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip,
@ -261,10 +262,11 @@ static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip,
aspeed_spi_start_user(chip); aspeed_spi_start_user(chip);
ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode); ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode);
if (ret < 0) if (ret < 0)
return ret; goto stop_user;
aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes); aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes);
stop_user:
aspeed_spi_stop_user(chip); aspeed_spi_stop_user(chip);
return 0; return ret;
} }
/* support for 1-1-1, 1-1-2 or 1-1-4 */ /* support for 1-1-1, 1-1-2 or 1-1-4 */
@ -241,6 +241,20 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
struct spi_controller *ctlr = spi->controller; struct spi_controller *ctlr = spi->controller;
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable; bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable;
bool cs_actual;
/*
* SPI subsystem tries to avoid no-op calls that would break the PM
* refcount below. However, it cannot do so the first time the device is used.
* To detect this case we read it here and bail out early for no-ops.
*/
if (spi_get_csgpiod(spi, 0))
cs_actual = !!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & 1);
else
cs_actual = !!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) &
BIT(spi_get_chipselect(spi, 0)));
if (unlikely(cs_actual == cs_asserted))
return;
if (cs_asserted) { if (cs_asserted) {
/* Keep things powered as long as CS is asserted */ /* Keep things powered as long as CS is asserted */

bool has_rtscts; bool has_rtscts;
bool autorts; bool autorts;
bool tx_occurred;
}; };
#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
@ -850,6 +851,7 @@ static void sci_transmit_chars(struct uart_port *port)
{ {
struct tty_port *tport = &port->state->port; struct tty_port *tport = &port->state->port;
unsigned int stopped = uart_tx_stopped(port); unsigned int stopped = uart_tx_stopped(port);
struct sci_port *s = to_sci_port(port);
unsigned short status; unsigned short status;
unsigned short ctrl; unsigned short ctrl;
int count; int count;
@ -885,6 +887,7 @@ static void sci_transmit_chars(struct uart_port *port)
} }
sci_serial_out(port, SCxTDR, c); sci_serial_out(port, SCxTDR, c);
s->tx_occurred = true;
port->icount.tx++; port->icount.tx++;
} while (--count > 0); } while (--count > 0);
@ -1241,6 +1244,8 @@ static void sci_dma_tx_complete(void *arg)
if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
uart_write_wakeup(port); uart_write_wakeup(port);
s->tx_occurred = true;
if (!kfifo_is_empty(&tport->xmit_fifo)) { if (!kfifo_is_empty(&tport->xmit_fifo)) {
s->cookie_tx = 0; s->cookie_tx = 0;
schedule_work(&s->work_tx); schedule_work(&s->work_tx);
@ -1731,6 +1736,19 @@ static void sci_flush_buffer(struct uart_port *port)
s->cookie_tx = -EINVAL; s->cookie_tx = -EINVAL;
} }
} }
static void sci_dma_check_tx_occurred(struct sci_port *s)
{
struct dma_tx_state state;
enum dma_status status;
if (!s->chan_tx)
return;
status = dmaengine_tx_status(s->chan_tx, s->cookie_tx, &state);
if (status == DMA_COMPLETE || status == DMA_IN_PROGRESS)
s->tx_occurred = true;
}
#else /* !CONFIG_SERIAL_SH_SCI_DMA */ #else /* !CONFIG_SERIAL_SH_SCI_DMA */
static inline void sci_request_dma(struct uart_port *port) static inline void sci_request_dma(struct uart_port *port)
{ {
@ -1740,6 +1758,10 @@ static inline void sci_free_dma(struct uart_port *port)
{ {
} }
static void sci_dma_check_tx_occurred(struct sci_port *s)
{
}
#define sci_flush_buffer NULL #define sci_flush_buffer NULL
#endif /* !CONFIG_SERIAL_SH_SCI_DMA */ #endif /* !CONFIG_SERIAL_SH_SCI_DMA */
@ -2076,6 +2098,12 @@ static unsigned int sci_tx_empty(struct uart_port *port)
{ {
unsigned short status = sci_serial_in(port, SCxSR); unsigned short status = sci_serial_in(port, SCxSR);
unsigned short in_tx_fifo = sci_txfill(port); unsigned short in_tx_fifo = sci_txfill(port);
struct sci_port *s = to_sci_port(port);
sci_dma_check_tx_occurred(s);
if (!s->tx_occurred)
return TIOCSER_TEMT;
return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0; return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
} }
@ -2247,6 +2275,7 @@ static int sci_startup(struct uart_port *port)
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
s->tx_occurred = false;
sci_request_dma(port); sci_request_dma(port);
ret = sci_request_irq(s); ret = sci_request_irq(s);

lrbp = &hba->lrb[task_tag]; lrbp = &hba->lrb[task_tag];
lrbp->compl_time_stamp = ktime_get(); lrbp->compl_time_stamp = ktime_get();
lrbp->compl_time_stamp_local_clock = local_clock();
cmd = lrbp->cmd; cmd = lrbp->cmd;
if (cmd) { if (cmd) {
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))

int retval; int retval;
struct usb_device *rhdev; struct usb_device *rhdev;
struct usb_hcd *shared_hcd; struct usb_hcd *shared_hcd;
int skip_phy_initialization;
if (!hcd->skip_phy_initialization) { if (usb_hcd_is_primary_hcd(hcd))
skip_phy_initialization = hcd->skip_phy_initialization;
else
skip_phy_initialization = hcd->primary_hcd->skip_phy_initialization;
if (!skip_phy_initialization) {
if (usb_hcd_is_primary_hcd(hcd)) { if (usb_hcd_is_primary_hcd(hcd)) {
hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev); hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
if (IS_ERR(hcd->phy_roothub)) if (IS_ERR(hcd->phy_roothub))

port_status |= USB_PORT_STAT_C_OVERCURRENT << 16; port_status |= USB_PORT_STAT_C_OVERCURRENT << 16;
} }
if (!hsotg->flags.b.port_connect_status) { if (dwc2_is_device_mode(hsotg)) {
/* /*
* The port is disconnected, which means the core is * Just return 0's for the remainder of the port status
* either in device mode or it soon will be. Just
* return 0's for the remainder of the port status
* since the port register can't be read if the core * since the port register can't be read if the core
* is in device mode. * is in device mode.
*/ */
@ -3620,13 +3618,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1)) if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1))
goto error; goto error;
if (!hsotg->flags.b.port_connect_status) { if (dwc2_is_device_mode(hsotg)) {
/* /*
* The port is disconnected, which means the core is * Just return 0's for the remainder of the port status
* either in device mode or it soon will be. Just * since the port register can't be read if the core
* return without doing anything since the port * is in device mode.
* register can't be written if the core is in device
* mode.
*/ */
break; break;
} }
@ -4349,7 +4345,7 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
if (hsotg->bus_suspended) if (hsotg->bus_suspended)
goto skip_power_saving; goto skip_power_saving;
if (hsotg->flags.b.port_connect_status == 0) if (!(dwc2_read_hprt0(hsotg) & HPRT0_CONNSTS))
goto skip_power_saving; goto skip_power_saving;
switch (hsotg->params.power_down) { switch (hsotg->params.power_down) {
@ -4431,6 +4427,7 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd)
* Power Down mode. * Power Down mode.
*/ */
if (hprt0 & HPRT0_CONNSTS) { if (hprt0 & HPRT0_CONNSTS) {
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
hsotg->lx_state = DWC2_L0; hsotg->lx_state = DWC2_L0;
goto unlock; goto unlock;
} }


@@ -129,6 +129,16 @@ static void dwc3_imx8mp_wakeup_disable(struct dwc3_imx8mp *dwc3_imx)
 	writel(val, dwc3_imx->hsio_blk_base + USB_WAKEUP_CTRL);
 }
+static const struct property_entry dwc3_imx8mp_properties[] = {
+	PROPERTY_ENTRY_BOOL("xhci-missing-cas-quirk"),
+	PROPERTY_ENTRY_BOOL("xhci-skip-phy-init-quirk"),
+	{},
+};
+static const struct software_node dwc3_imx8mp_swnode = {
+	.properties = dwc3_imx8mp_properties,
+};
 static irqreturn_t dwc3_imx8mp_interrupt(int irq, void *_dwc3_imx)
 {
 	struct dwc3_imx8mp *dwc3_imx = _dwc3_imx;
@@ -148,17 +158,6 @@ static irqreturn_t dwc3_imx8mp_interrupt(int irq, void *_dwc3_imx)
 	return IRQ_HANDLED;
 }
-static int dwc3_imx8mp_set_software_node(struct device *dev)
-{
-	struct property_entry props[3] = { 0 };
-	int prop_idx = 0;
-	props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-missing-cas-quirk");
-	props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-skip-phy-init-quirk");
-	return device_create_managed_software_node(dev, props, NULL);
-}
 static int dwc3_imx8mp_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -221,17 +220,17 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
 	if (err < 0)
 		goto disable_rpm;
-	err = dwc3_imx8mp_set_software_node(dev);
+	err = device_add_software_node(dev, &dwc3_imx8mp_swnode);
 	if (err) {
 		err = -ENODEV;
-		dev_err(dev, "failed to create software node\n");
+		dev_err(dev, "failed to add software node\n");
 		goto disable_rpm;
 	}
 	err = of_platform_populate(node, NULL, NULL, dev);
 	if (err) {
 		dev_err(&pdev->dev, "failed to create dwc3 core\n");
-		goto disable_rpm;
+		goto remove_swnode;
 	}
 	dwc3_imx->dwc3 = of_find_device_by_node(dwc3_np);
@@ -255,6 +254,8 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
 depopulate:
 	of_platform_depopulate(dev);
+remove_swnode:
+	device_remove_software_node(dev);
 disable_rpm:
 	pm_runtime_disable(dev);
 	pm_runtime_put_noidle(dev);
@@ -268,6 +269,7 @@ static void dwc3_imx8mp_remove(struct platform_device *pdev)
 	pm_runtime_get_sync(dev);
 	of_platform_depopulate(dev);
+	device_remove_software_node(dev);
 	pm_runtime_disable(dev);
 	pm_runtime_put_noidle(dev);
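The quirk properties move from a devres-managed node into a static software_node, so the driver can drop the node explicitly with device_remove_software_node() both on the probe error path and at remove time. A hedged sketch of the add/remove pairing this relies on; the dwc3_foo_* names are hypothetical:

    #include <linux/property.h>

    static const struct property_entry dwc3_foo_properties[] = {
        PROPERTY_ENTRY_BOOL("xhci-missing-cas-quirk"),
        { }
    };

    static const struct software_node dwc3_foo_swnode = {
        .properties = dwc3_foo_properties,
    };

    static int dwc3_foo_attach_quirks(struct device *dev)
    {
        /* Attach the static node; it stays until explicitly removed. */
        return device_add_software_node(dev, &dwc3_foo_swnode);
    }

    static void dwc3_foo_detach_quirks(struct device *dev)
    {
        /* Call on every error path after a successful add, and again at
         * remove time, so add/remove stay balanced across rebinds.
         */
        device_remove_software_node(dev);
    }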


@@ -121,8 +121,11 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
 	 * in use but the usb3-phy entry is missing from the device tree.
 	 * Therefore, skip these operations in this case.
 	 */
-	if (!priv_data->usb3_phy)
+	if (!priv_data->usb3_phy) {
+		/* Deselect the PIPE Clock Select bit in FPD PIPE Clock register */
+		writel(PIPE_CLK_DESELECT, priv_data->regs + XLNX_USB_FPD_PIPE_CLK);
 		goto skip_usb3_phy;
+	}
 	crst = devm_reset_control_get_exclusive(dev, "usb_crst");
 	if (IS_ERR(crst)) {


@@ -1593,7 +1593,11 @@ static int f_midi2_create_card(struct f_midi2 *midi2)
 		fb->info.midi_ci_version = b->midi_ci_version;
 		fb->info.ui_hint = reverse_dir(b->ui_hint);
 		fb->info.sysex8_streams = b->sysex8_streams;
-		fb->info.flags |= b->is_midi1;
+		if (b->is_midi1 < 2)
+			fb->info.flags |= b->is_midi1;
+		else
+			fb->info.flags |= SNDRV_UMP_BLOCK_IS_MIDI1 |
+				SNDRV_UMP_BLOCK_IS_LOWSPEED;
 		strscpy(fb->info.name, ump_fb_name(b),
 			sizeof(fb->info.name));
 	}
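Instead of OR-ing the raw is_midi1 value into the block flags, the new branch distinguishes its three cases: 0 (UMP native), 1 (MIDI 1.0), and 2 or above (treated as MIDI 1.0 on a low-speed connection). A stand-alone sketch of that mapping; the flag bit values below are assumptions for illustration, the kernel uses the SNDRV_UMP_BLOCK_IS_MIDI1 / SNDRV_UMP_BLOCK_IS_LOWSPEED UAPI definitions:

    #include <stdint.h>

    #define BLOCK_IS_MIDI1      (1u << 0)   /* assumed bit positions */
    #define BLOCK_IS_LOWSPEED   (1u << 1)

    static uint32_t block_flags_from_is_midi1(uint32_t is_midi1)
    {
        if (is_midi1 < 2)
            return is_midi1;    /* 0: UMP native, 1: plain MIDI 1.0 */
        /* 2 (or above): MIDI 1.0 limited to a low-speed connection */
        return BLOCK_IS_MIDI1 | BLOCK_IS_LOWSPEED;
    }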


@@ -579,9 +579,12 @@ static int gs_start_io(struct gs_port *port)
 		 * we didn't in gs_start_tx() */
 		tty_wakeup(port->port.tty);
 	} else {
-		gs_free_requests(ep, head, &port->read_allocated);
-		gs_free_requests(port->port_usb->in, &port->write_pool,
-				 &port->write_allocated);
+		/* Free reqs only if we are still connected */
+		if (port->port_usb) {
+			gs_free_requests(ep, head, &port->read_allocated);
+			gs_free_requests(port->port_usb->in, &port->write_pool,
+					 &port->write_allocated);
+		}
 		status = -EIO;
 	}


@@ -119,8 +119,12 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->iclk))
 		priv->iclk = NULL;
-	clk_enable(priv->fclk);
-	clk_enable(priv->iclk);
+	ret = clk_enable(priv->fclk);
+	if (ret)
+		goto fail_request_resource;
+	ret = clk_enable(priv->iclk);
+	if (ret)
+		goto fail_iclk;
 	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
 	if (ret != 0) {
@@ -136,6 +140,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
 fail_add_hcd:
 	clk_disable(priv->iclk);
+fail_iclk:
 	clk_disable(priv->fclk);
 fail_request_resource:


@@ -779,11 +779,17 @@ max3421_check_unlink(struct usb_hcd *hcd)
 				retval = 1;
 				dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
 					__func__, urb, urb->unlinked);
-				usb_hcd_unlink_urb_from_ep(hcd, urb);
-				spin_unlock_irqrestore(&max3421_hcd->lock,
-						       flags);
-				usb_hcd_giveback_urb(hcd, urb, 0);
-				spin_lock_irqsave(&max3421_hcd->lock, flags);
+				if (urb == max3421_hcd->curr_urb) {
+					max3421_hcd->urb_done = 1;
+					max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) |
+							       BIT(MAX3421_HI_RCVDAV_BIT));
+				} else {
+					usb_hcd_unlink_urb_from_ep(hcd, urb);
+					spin_unlock_irqrestore(&max3421_hcd->lock,
+							       flags);
+					usb_hcd_giveback_urb(hcd, urb, 0);
+					spin_lock_irqsave(&max3421_hcd->lock, flags);
+				}
 			}
 		}
 	}


@@ -407,8 +407,10 @@ static int onboard_dev_probe(struct platform_device *pdev)
 		}
 		if (of_device_is_compatible(pdev->dev.of_node, "usb424,2744") ||
-		    of_device_is_compatible(pdev->dev.of_node, "usb424,5744"))
+		    of_device_is_compatible(pdev->dev.of_node, "usb424,5744")) {
 			err = onboard_dev_5744_i2c_init(client);
+			onboard_dev->always_powered_in_suspend = true;
+		}
 		put_device(&client->dev);
 		if (err < 0)


@@ -290,6 +290,8 @@ struct anx7411_data {
 	struct power_supply *psy;
 	struct power_supply_desc psy_desc;
 	struct device *dev;
+	struct fwnode_handle *switch_node;
+	struct fwnode_handle *mux_node;
 };
 static u8 snk_identity[] = {
@@ -1021,6 +1023,16 @@ static void anx7411_port_unregister_altmodes(struct typec_altmode **adev)
 	}
 }
+static void anx7411_port_unregister(struct typec_params *typecp)
+{
+	fwnode_handle_put(typecp->caps.fwnode);
+	anx7411_port_unregister_altmodes(typecp->port_amode);
+	if (typecp->port)
+		typec_unregister_port(typecp->port);
+	if (typecp->role_sw)
+		usb_role_switch_put(typecp->role_sw);
+}
 static int anx7411_usb_mux_set(struct typec_mux_dev *mux,
 			       struct typec_mux_state *state)
 {
@@ -1089,6 +1101,7 @@ static void anx7411_unregister_mux(struct anx7411_data *ctx)
 	if (ctx->typec.typec_mux) {
 		typec_mux_unregister(ctx->typec.typec_mux);
 		ctx->typec.typec_mux = NULL;
+		fwnode_handle_put(ctx->mux_node);
 	}
 }
@@ -1097,6 +1110,7 @@ static void anx7411_unregister_switch(struct anx7411_data *ctx)
 	if (ctx->typec.typec_switch) {
 		typec_switch_unregister(ctx->typec.typec_switch);
 		ctx->typec.typec_switch = NULL;
+		fwnode_handle_put(ctx->switch_node);
 	}
 }
@@ -1104,28 +1118,29 @@ static int anx7411_typec_switch_probe(struct anx7411_data *ctx,
 				      struct device *dev)
 {
 	int ret;
-	struct device_node *node;
-	node = of_get_child_by_name(dev->of_node, "orientation_switch");
-	if (!node)
+	ctx->switch_node = device_get_named_child_node(dev, "orientation_switch");
+	if (!ctx->switch_node)
 		return 0;
-	ret = anx7411_register_switch(ctx, dev, &node->fwnode);
+	ret = anx7411_register_switch(ctx, dev, ctx->switch_node);
 	if (ret) {
 		dev_err(dev, "failed register switch");
+		fwnode_handle_put(ctx->switch_node);
 		return ret;
 	}
-	node = of_get_child_by_name(dev->of_node, "mode_switch");
-	if (!node) {
+	ctx->mux_node = device_get_named_child_node(dev, "mode_switch");
+	if (!ctx->mux_node) {
 		dev_err(dev, "no typec mux exist");
 		ret = -ENODEV;
 		goto unregister_switch;
 	}
-	ret = anx7411_register_mux(ctx, dev, &node->fwnode);
+	ret = anx7411_register_mux(ctx, dev, ctx->mux_node);
 	if (ret) {
 		dev_err(dev, "failed register mode switch");
+		fwnode_handle_put(ctx->mux_node);
 		ret = -ENODEV;
 		goto unregister_switch;
 	}
@@ -1154,34 +1169,34 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx,
 	ret = fwnode_property_read_string(fwnode, "power-role", &buf);
 	if (ret) {
 		dev_err(dev, "power-role not found: %d\n", ret);
-		return ret;
+		goto put_fwnode;
 	}
 	ret = typec_find_port_power_role(buf);
 	if (ret < 0)
-		return ret;
+		goto put_fwnode;
 	cap->type = ret;
 	ret = fwnode_property_read_string(fwnode, "data-role", &buf);
 	if (ret) {
 		dev_err(dev, "data-role not found: %d\n", ret);
-		return ret;
+		goto put_fwnode;
 	}
 	ret = typec_find_port_data_role(buf);
 	if (ret < 0)
-		return ret;
+		goto put_fwnode;
 	cap->data = ret;
 	ret = fwnode_property_read_string(fwnode, "try-power-role", &buf);
 	if (ret) {
 		dev_err(dev, "try-power-role not found: %d\n", ret);
-		return ret;
+		goto put_fwnode;
 	}
 	ret = typec_find_power_role(buf);
 	if (ret < 0)
-		return ret;
+		goto put_fwnode;
 	cap->prefer_role = ret;
 	/* Get source pdos */
@@ -1193,7 +1208,7 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx,
 					  typecp->src_pdo_nr);
 	if (ret < 0) {
 		dev_err(dev, "source cap validate failed: %d\n", ret);
-		return -EINVAL;
+		goto put_fwnode;
 	}
 	typecp->caps_flags |= HAS_SOURCE_CAP;
@@ -1207,7 +1222,7 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx,
 					  typecp->sink_pdo_nr);
 	if (ret < 0) {
 		dev_err(dev, "sink cap validate failed: %d\n", ret);
-		return -EINVAL;
+		goto put_fwnode;
 	}
 	for (i = 0; i < typecp->sink_pdo_nr; i++) {
@@ -1251,13 +1266,21 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx,
 		ret = PTR_ERR(ctx->typec.port);
 		ctx->typec.port = NULL;
 		dev_err(dev, "Failed to register type c port %d\n", ret);
-		return ret;
+		goto put_usb_role_switch;
 	}
 	typec_port_register_altmodes(ctx->typec.port, NULL, ctx,
 				     ctx->typec.port_amode,
 				     MAX_ALTMODE);
 	return 0;
+put_usb_role_switch:
+	if (ctx->typec.role_sw)
+		usb_role_switch_put(ctx->typec.role_sw);
+put_fwnode:
+	fwnode_handle_put(fwnode);
+	return ret;
 }
@@ -1523,8 +1546,7 @@ static int anx7411_i2c_probe(struct i2c_client *client)
 	destroy_workqueue(plat->workqueue);
 free_typec_port:
-	typec_unregister_port(plat->typec.port);
-	anx7411_port_unregister_altmodes(plat->typec.port_amode);
+	anx7411_port_unregister(&plat->typec);
 free_typec_switch:
 	anx7411_unregister_switch(plat);
@@ -1548,17 +1570,11 @@ static void anx7411_i2c_remove(struct i2c_client *client)
 	i2c_unregister_device(plat->spi_client);
-	if (plat->typec.role_sw)
-		usb_role_switch_put(plat->typec.role_sw);
 	anx7411_unregister_mux(plat);
 	anx7411_unregister_switch(plat);
-	if (plat->typec.port)
-		typec_unregister_port(plat->typec.port);
-	anx7411_port_unregister_altmodes(plat->typec.port_amode);
+	anx7411_port_unregister(&plat->typec);
 }
 static const struct i2c_device_id anx7411_id[] = {
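The driver now stores the fwnode handles returned by device_get_named_child_node() and drops them with fwnode_handle_put() on every error path and in the matching unregister helpers: each successful lookup returns a counted reference that must be balanced by exactly one put. A minimal sketch of that pattern, with a hypothetical foo_register_switch() helper standing in for the real registration call:

    #include <linux/property.h>

    /* Hypothetical; only the get/put pairing is the point here. */
    static int foo_register_switch(struct device *dev, struct fwnode_handle *node)
    {
        return 0;
    }

    static int foo_probe_child(struct device *dev, struct fwnode_handle **out)
    {
        struct fwnode_handle *node;
        int ret;

        node = device_get_named_child_node(dev, "orientation_switch");
        if (!node)
            return 0;               /* optional node, nothing to do */

        ret = foo_register_switch(dev, node);
        if (ret) {
            fwnode_handle_put(node); /* drop the reference on failure */
            return ret;
        }

        *out = node;    /* keep it; the unregister path puts it later */
        return 0;
    }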


@@ -46,11 +46,11 @@ void ucsi_notify_common(struct ucsi *ucsi, u32 cci)
 		ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci));
 	if (cci & UCSI_CCI_ACK_COMPLETE &&
-	    test_bit(ACK_PENDING, &ucsi->flags))
+	    test_and_clear_bit(ACK_PENDING, &ucsi->flags))
 		complete(&ucsi->complete);
 	if (cci & UCSI_CCI_COMMAND_COMPLETE &&
-	    test_bit(COMMAND_PENDING, &ucsi->flags))
+	    test_and_clear_bit(COMMAND_PENDING, &ucsi->flags))
 		complete(&ucsi->complete);
 }
 EXPORT_SYMBOL_GPL(ucsi_notify_common);
@@ -65,6 +65,8 @@ int ucsi_sync_control_common(struct ucsi *ucsi, u64 command)
 	else
 		set_bit(COMMAND_PENDING, &ucsi->flags);
+	reinit_completion(&ucsi->complete);
 	ret = ucsi->ops->async_control(ucsi, command);
 	if (ret)
 		goto out_clear_bit;
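Two things change here: the notifier only completes the waiter if it atomically consumes the pending flag (test_and_clear_bit()), and the submission path re-arms the completion with reinit_completion() before kicking the command, so a stale or duplicated CCI notification cannot satisfy a later wait. The same pattern in isolation, as a sketch with hypothetical foo_* names:

    #include <linux/bitops.h>
    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    #define FOO_CMD_PENDING 0   /* bit number in flags */

    struct foo_dev {
        unsigned long flags;
        struct completion done;
    };

    /* Submission side: arm everything before the command can possibly complete. */
    static int foo_send_cmd(struct foo_dev *fd)
    {
        set_bit(FOO_CMD_PENDING, &fd->flags);
        reinit_completion(&fd->done);

        /* ...start the command in hardware here... */

        if (!wait_for_completion_timeout(&fd->done, msecs_to_jiffies(1000))) {
            clear_bit(FOO_CMD_PENDING, &fd->flags);
            return -ETIMEDOUT;
        }
        return 0;
    }

    /* Notification side: complete only if a command is actually pending. */
    static void foo_notify(struct foo_dev *fd)
    {
        if (test_and_clear_bit(FOO_CMD_PENDING, &fd->flags))
            complete(&fd->done);
    }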


@@ -2716,6 +2716,7 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
 * @_vq: the struct virtqueue we're talking about.
 * @num: new ring num
 * @recycle: callback to recycle unused buffers
+ * @recycle_done: callback to be invoked when recycle for all unused buffers done
 *
 * When it is really necessary to create a new vring, it will set the current vq
 * into the reset state. Then call the passed callback to recycle the buffer
@@ -2736,7 +2737,8 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
 *
 */
 int virtqueue_resize(struct virtqueue *_vq, u32 num,
-		     void (*recycle)(struct virtqueue *vq, void *buf))
+		     void (*recycle)(struct virtqueue *vq, void *buf),
+		     void (*recycle_done)(struct virtqueue *vq))
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	int err;
@@ -2753,6 +2755,8 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
 	err = virtqueue_disable_and_recycle(_vq, recycle);
 	if (err)
 		return err;
+	if (recycle_done)
+		recycle_done(_vq);
 	if (vq->packed_ring)
 		err = virtqueue_resize_packed(_vq, num);
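With the extra parameter, virtqueue_resize() lets a driver know the moment when every unused buffer has been recycled, before the ring itself is resized. A hedged sketch of a caller; the drv_* callbacks are hypothetical, and a real driver would unmap or refill its own buffers inside them:

    #include <linux/virtio.h>

    /* Called once per unused buffer still sitting in the old ring. */
    static void drv_recycle(struct virtqueue *vq, void *buf)
    {
        /* return @buf to the driver's pool / unmap it */
    }

    /* Called once after every unused buffer has been handed back. */
    static void drv_recycle_done(struct virtqueue *vq)
    {
        /* reset per-queue bookkeeping that assumed the old ring */
    }

    static int drv_resize_queue(struct virtqueue *vq, u32 new_num)
    {
        return virtqueue_resize(vq, new_num, drv_recycle, drv_recycle_done);
    }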


@@ -1925,6 +1925,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
 		goto unlink_out;
 	}
+	netfs_wait_for_outstanding_io(inode);
 	cifs_close_deferred_file_under_dentry(tcon, full_path);
 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
 	if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
@@ -2442,8 +2443,10 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
 	}
 	cifs_close_deferred_file_under_dentry(tcon, from_name);
-	if (d_inode(target_dentry) != NULL)
+	if (d_inode(target_dentry) != NULL) {
+		netfs_wait_for_outstanding_io(d_inode(target_dentry));
 		cifs_close_deferred_file_under_dentry(tcon, to_name);
+	}
 	rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
 			    to_name);

Some files were not shown because too many files have changed in this diff.