Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge branch 'sched/urgent' into sched/core, to pick up pending v6.7 fixes for the v6.8 merge window
This fix didn't make it upstream in time, pick it up for the v6.8 merge window.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit cdb3033e19

 .mailmap | 4
@@ -191,6 +191,10 @@ Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
Geliang Tang <geliang.tang@linux.dev> <geliang.tang@suse.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@xiaomi.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@gmail.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@163.com>
Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>

@@ -6050,10 +6050,8 @@ M: Mikulas Patocka <mpatocka@redhat.com>
M: dm-devel@lists.linux.dev
L: dm-devel@lists.linux.dev
S: Maintained
W: http://sources.redhat.com/dm
Q: http://patchwork.kernel.org/project/dm-devel/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git
T: quilt http://people.redhat.com/agk/patches/linux/editing/
F: Documentation/admin-guide/device-mapper/
F: drivers/md/Kconfig
F: drivers/md/Makefile

@@ -9526,6 +9524,7 @@ F: drivers/bus/hisi_lpc.c
HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
M: Yisen Zhuang <yisen.zhuang@huawei.com>
M: Salil Mehta <salil.mehta@huawei.com>
M: Jijie Shao <shaojijie@huawei.com>
L: netdev@vger.kernel.org
S: Maintained
W: http://www.hisilicon.com
@@ -49,7 +49,6 @@ config ARC
select OF
select OF_EARLY_FLATTREE
select PCI_SYSCALL if PCI
select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32
select TRACE_IRQFLAGS_SUPPORT

@@ -232,10 +231,6 @@ config ARC_CACHE_PAGES
Note that Global I/D ENABLE + Per Page DISABLE works but corollary
Global DISABLE + Per Page ENABLE won't work

config ARC_CACHE_VIPT_ALIASING
bool "Support VIPT Aliasing D$"
depends on ARC_HAS_DCACHE && ISA_ARCOMPACT

endif #ARC_CACHE

config ARC_HAS_ICCM
@@ -44,31 +44,10 @@ void dma_cache_wback(phys_addr_t start, unsigned long sz);

#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */

#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING

#define flush_cache_mm(mm) /* called on munmap/exit */
#define flush_cache_range(mm, u_vstart, u_vend)
#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */

#else /* VIPT aliasing dcache */

/* To clear out stale userspace mappings */
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start,unsigned long end);
void flush_cache_page(struct vm_area_struct *vma,
unsigned long user_addr, unsigned long page);

/*
 * To make sure that userspace mapping is flushed to memory before
 * get_user_pages() uses a kernel mapping to access the page
 */
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma,
struct page *page, unsigned long u_vaddr);

#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */

/*
 * A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
 * This works around some PIO based drivers which don't call flush_dcache_page
@@ -76,28 +55,6 @@ void flush_anon_page(struct vm_area_struct *vma,
 */
#define PG_dc_clean PG_arch_1

#define CACHE_COLORS_NUM 4
#define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1)
#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)

/*
 * Simple wrapper over config option
 * Bootup code ensures that hardware matches kernel configuration
 */
static inline int cache_is_vipt_aliasing(void)
{
return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
}

/*
 * checks if two addresses (after page aligning) index into same cache set
 */
#define addr_not_cache_congruent(addr1, addr2) \
({ \
cache_is_vipt_aliasing() ? \
(CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \
})

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
@@ -291,4 +291,36 @@
/* M = 8-1  N = 8 */
.endm

.macro SAVE_ABI_CALLEE_REGS
push r13
push r14
push r15
push r16
push r17
push r18
push r19
push r20
push r21
push r22
push r23
push r24
push r25
.endm

.macro RESTORE_ABI_CALLEE_REGS
pop r25
pop r24
pop r23
pop r22
pop r21
pop r20
pop r19
pop r18
pop r17
pop r16
pop r15
pop r14
pop r13
.endm

#endif
@@ -33,6 +33,91 @@
#include <asm/irqflags-compact.h>
#include <asm/thread_info.h> /* For THREAD_SIZE */

/* Note on the LD/ST addr modes with addr reg wback
 *
 * LD.a same as LD.aw
 *
 * LD.a    reg1, [reg2, x]  => Pre Incr
 *      Eff Addr for load = [reg2 + x]
 *
 * LD.ab   reg1, [reg2, x]  => Post Incr
 *      Eff Addr for load = [reg2]
 */

.macro PUSHAX aux
lr r9, [\aux]
push r9
.endm

.macro POPAX aux
pop r9
sr r9, [\aux]
.endm

.macro SAVE_R0_TO_R12
push r0
push r1
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
.endm

.macro RESTORE_R12_TO_R0
pop r12
pop r11
pop r10
pop r9
pop r8
pop r7
pop r6
pop r5
pop r4
pop r3
pop r2
pop r1
pop r0
.endm

.macro SAVE_ABI_CALLEE_REGS
push r13
push r14
push r15
push r16
push r17
push r18
push r19
push r20
push r21
push r22
push r23
push r24
push r25
.endm

.macro RESTORE_ABI_CALLEE_REGS
pop r25
pop r24
pop r23
pop r22
pop r21
pop r20
pop r19
pop r18
pop r17
pop r16
pop r15
pop r14
pop r13
.endm

/*--------------------------------------------------------------
 * Switch to Kernel Mode stack if SP points to User Mode stack
 *
@@ -235,7 +320,7 @@
SWITCH_TO_KERNEL_STK

PUSH 0x003\LVL\()abcd    /* Dummy ECR */
st.a 0x003\LVL\()abcd, [sp, -4] /* Dummy ECR */
sub sp, sp, 8   /* skip orig_r0 (not needed)
skip pt_regs->sp, already saved above */
@@ -21,114 +21,12 @@
#include <asm/entry-arcv2.h>
#endif

/* Note on the LD/ST addr modes with addr reg wback
 *
 * LD.a same as LD.aw
 *
 * LD.a    reg1, [reg2, x]  => Pre Incr
 *      Eff Addr for load = [reg2 + x]
 *
 * LD.ab   reg1, [reg2, x]  => Post Incr
 *      Eff Addr for load = [reg2]
 */

.macro PUSH reg
st.a \reg, [sp, -4]
.endm

.macro PUSHAX aux
lr r9, [\aux]
PUSH r9
.endm

.macro POP reg
ld.ab \reg, [sp, 4]
.endm

.macro POPAX aux
POP r9
sr r9, [\aux]
.endm

/*--------------------------------------------------------------
 * Helpers to save/restore Scratch Regs:
 * used by Interrupt/Exception Prologue/Epilogue
 *-------------------------------------------------------------*/
.macro SAVE_R0_TO_R12
PUSH r0
PUSH r1
PUSH r2
PUSH r3
PUSH r4
PUSH r5
PUSH r6
PUSH r7
PUSH r8
PUSH r9
PUSH r10
PUSH r11
PUSH r12
.endm

.macro RESTORE_R12_TO_R0
POP r12
POP r11
POP r10
POP r9
POP r8
POP r7
POP r6
POP r5
POP r4
POP r3
POP r2
POP r1
POP r0

.endm

/*--------------------------------------------------------------
 * Helpers to save/restore callee-saved regs:
 * used by several macros below
 *-------------------------------------------------------------*/
.macro SAVE_R13_TO_R25
PUSH r13
PUSH r14
PUSH r15
PUSH r16
PUSH r17
PUSH r18
PUSH r19
PUSH r20
PUSH r21
PUSH r22
PUSH r23
PUSH r24
PUSH r25
.endm

.macro RESTORE_R25_TO_R13
POP r25
POP r24
POP r23
POP r22
POP r21
POP r20
POP r19
POP r18
POP r17
POP r16
POP r15
POP r14
POP r13
.endm

/*
 * save user mode callee regs as struct callee_regs
 * - needed by fork/do_signal/unaligned-access-emulation.
 */
.macro SAVE_CALLEE_SAVED_USER
SAVE_R13_TO_R25
SAVE_ABI_CALLEE_REGS
.endm

/*
@@ -136,18 +34,18 @@
 * - could have been changed by ptrace tracer or unaligned-access fixup
 */
.macro RESTORE_CALLEE_SAVED_USER
RESTORE_R25_TO_R13
RESTORE_ABI_CALLEE_REGS
.endm

/*
 * save/restore kernel mode callee regs at the time of context switch
 */
.macro SAVE_CALLEE_SAVED_KERNEL
SAVE_R13_TO_R25
SAVE_ABI_CALLEE_REGS
.endm

.macro RESTORE_CALLEE_SAVED_KERNEL
RESTORE_R25_TO_R13
RESTORE_ABI_CALLEE_REGS
.endm

/*--------------------------------------------------------------
@@ -10,6 +10,13 @@
#include <linux/types.h>
#include <asm-generic/pgtable-nopmd.h>

/*
 * Hugetlb definitions.
 */
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))

static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte(pmd_val(pmd));
@@ -54,6 +54,10 @@ struct pt_regs {
ecr_reg ecr;
};

struct callee_regs {
unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};

#define MAX_REG_OFFSET offsetof(struct pt_regs, ecr)

#else
@@ -92,16 +96,14 @@ struct pt_regs {
unsigned long status32;
};

#define MAX_REG_OFFSET offsetof(struct pt_regs, status32)

#endif

/* Callee saved registers - need to be saved only when you are scheduled out */

struct callee_regs {
unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};

#define MAX_REG_OFFSET offsetof(struct pt_regs, status32)

#endif

#define instruction_pointer(regs) ((regs)->ret)
#define profile_pc(regs) instruction_pointer(regs)
@@ -153,7 +153,7 @@ static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
int n = 0;
#ifdef CONFIG_ISA_ARCV2
const char *release, *cpu_nm, *isa_nm = "ARCv2";
const char *release = "", *cpu_nm = "HS38", *isa_nm = "ARCv2";
int dual_issue = 0, dual_enb = 0, mpy_opt, present;
int bpu_full, bpu_cache, bpu_pred, bpu_ret_stk;
char mpy_nm[16], lpb_nm[32];
@@ -172,8 +172,6 @@ static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
 * releases only update it.
 */

cpu_nm = "HS38";

if (info->arcver > 0x50 && info->arcver <= 0x53) {
release = arc_hs_rel[info->arcver - 0x51].str;
} else {
@@ -62,7 +62,7 @@ struct rt_sigframe {
unsigned int sigret_magic;
};

static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
{
int err = 0;
#ifndef CONFIG_ISA_ARCOMPACT
@@ -75,12 +75,12 @@ static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
#else
v2abi.r58 = v2abi.r59 = 0;
#endif
err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi));
#endif
return err;
}

static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
static int restore_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
{
int err = 0;
#ifndef CONFIG_ISA_ARCOMPACT
@@ -145,10 +145,9 @@ int arc_cache_mumbojumbo(int c, char *buf, int len)
p_dc->sz_k = 1 << (dbcr.sz - 1);

n += scnprintf(buf + n, len - n,
"D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
"D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s\n",
p_dc->sz_k, assoc, p_dc->line_len,
vipt ? "VIPT" : "PIPT",
p_dc->colors > 1 ? " aliasing" : "",
IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));

slc_chk:
@@ -703,51 +702,10 @@ static inline void arc_slc_enable(void)
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, former needs flushing.
 */
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping;

if (!cache_is_vipt_aliasing()) {
clear_bit(PG_dc_clean, &folio->flags);
return;
}

/* don't handle anon pages here */
mapping = folio_flush_mapping(folio);
if (!mapping)
return;

/*
 * pagecache page, file not yet mapped to userspace
 * Make a note that K-mapping is dirty
 */
if (!mapping_mapped(mapping)) {
clear_bit(PG_dc_clean, &folio->flags);
} else if (folio_mapped(folio)) {
/* kernel reading from page with U-mapping */
phys_addr_t paddr = (unsigned long)folio_address(folio);
unsigned long vaddr = folio_pos(folio);

/*
 * vaddr is not actually the virtual address, but is
 * congruent to every user mapping.
 */
if (addr_not_cache_congruent(paddr, vaddr))
__flush_dcache_pages(paddr, vaddr,
folio_nr_pages(folio));
}
clear_bit(PG_dc_clean, &folio->flags);
return;
}
EXPORT_SYMBOL(flush_dcache_folio);

@@ -921,44 +879,6 @@ noinline void flush_cache_all(void)

}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
unsigned long pfn)
{
phys_addr_t paddr = pfn << PAGE_SHIFT;

u_vaddr &= PAGE_MASK;

__flush_dcache_pages(paddr, u_vaddr, 1);

if (vma->vm_flags & VM_EXEC)
__inv_icache_pages(paddr, u_vaddr, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
unsigned long u_vaddr)
{
/* TBD: do we really need to clear the kernel mapping */
__flush_dcache_pages((phys_addr_t)page_address(page), u_vaddr, 1);
__flush_dcache_pages((phys_addr_t)page_address(page),
(phys_addr_t)page_address(page), 1);

}

#endif

void copy_user_highpage(struct page *to, struct page *from,
unsigned long u_vaddr, struct vm_area_struct *vma)
{
@@ -966,46 +886,11 @@ void copy_user_highpage(struct page *to, struct page *from,
struct folio *dst = page_folio(to);
void *kfrom = kmap_atomic(from);
void *kto = kmap_atomic(to);
int clean_src_k_mappings = 0;

/*
 * If SRC page was already mapped in userspace AND it's U-mapping is
 * not congruent with K-mapping, sync former to physical page so that
 * K-mapping in memcpy below, sees the right data
 *
 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
 * equally valid for SRC page as well
 *
 * For !VIPT cache, all of this gets compiled out as
 * addr_not_cache_congruent() is 0
 */
if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
__flush_dcache_pages((unsigned long)kfrom, u_vaddr, 1);
clean_src_k_mappings = 1;
}

copy_page(kto, kfrom);

/*
 * Mark DST page K-mapping as dirty for a later finalization by
 * update_mmu_cache(). Although the finalization could have been done
 * here as well (given that both vaddr/paddr are available).
 * But update_mmu_cache() already has code to do that for other
 * non copied user pages (e.g. read faults which wire in pagecache page
 * directly).
 */
clear_bit(PG_dc_clean, &dst->flags);

/*
 * if SRC was already usermapped and non-congruent to kernel mapping
 * sync the kernel mapping back to physical page
 */
if (clean_src_k_mappings) {
__flush_dcache_pages((unsigned long)kfrom,
(unsigned long)kfrom, 1);
} else {
clear_bit(PG_dc_clean, &src->flags);
}
clear_bit(PG_dc_clean, &src->flags);

kunmap_atomic(kto);
kunmap_atomic(kfrom);
@@ -1140,17 +1025,8 @@ static noinline void __init arc_cache_init_master(void)
dc->line_len, L1_CACHE_BYTES);

/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
if (is_isa_arcompact()) {
int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

if (dc->colors > 1) {
if (!handled)
panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
if (CACHE_COLORS_NUM != dc->colors)
panic("CACHE_COLORS_NUM not optimized for config\n");
} else if (handled && dc->colors == 1) {
panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
}
if (is_isa_arcompact() && dc->colors > 1) {
panic("Aliasing VIPT cache not supported\n");
}
}
@@ -14,10 +14,6 @@

#include <asm/cacheflush.h>

#define COLOUR_ALIGN(addr, pgoff) \
((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
(((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

/*
 * Ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.
@@ -31,21 +27,13 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int do_align = 0;
int aliasing = cache_is_vipt_aliasing();
struct vm_unmapped_area_info info;

/*
 * We only need to do colour alignment if D cache aliases.
 */
if (aliasing)
do_align = filp || (flags & MAP_SHARED);

/*
 * We enforce the MAP_FIXED case.
 */
if (flags & MAP_FIXED) {
if (aliasing && flags & MAP_SHARED &&
if (flags & MAP_SHARED &&
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;
@@ -55,10 +43,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return -ENOMEM;

if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
addr = PAGE_ALIGN(addr);

vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
@@ -70,7 +55,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
@@ -478,21 +478,15 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,

create_tlb(vma, vaddr, ptep);

if (page == ZERO_PAGE(0)) {
if (page == ZERO_PAGE(0))
return;
}

/*
 * Exec page : Independent of aliasing/page-color considerations,
 *             since icache doesn't snoop dcache on ARC, any dirty
 *             K-mapping of a code page needs to be wback+inv so that
 *             icache fetch by userspace sees code correctly.
 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
 *             so userspace sees the right data.
 *  (Avoids the flush for Non-exec + congruent mapping case)
 * For executable pages, since icache doesn't snoop dcache, any
 * dirty K-mapping of a code page needs to be wback+inv so that
 * icache fetch by userspace sees code correctly.
 */
if ((vma->vm_flags & VM_EXEC) ||
addr_not_cache_congruent(paddr, vaddr)) {
if (vma->vm_flags & VM_EXEC) {
struct folio *folio = page_folio(page);
int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
if (dirty) {
@@ -359,6 +359,7 @@ usb: target-module@47400000 {
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
ti,sysc-delay-us = <2>;
clocks = <&l3s_clkctrl AM3_L3S_USB_OTG_HS_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;

@@ -147,7 +147,7 @@ ocp: ocp {

l3-noc@44000000 {
compatible = "ti,dra7-l3-noc";
reg = <0x44000000 0x1000>,
reg = <0x44000000 0x1000000>,
<0x45000000 0x1000>;
interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
<&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
@@ -793,11 +793,16 @@ void __init omap_soc_device_init(void)

soc_dev_attr->machine  = soc_name;
soc_dev_attr->family   = omap_get_family();
if (!soc_dev_attr->family) {
kfree(soc_dev_attr);
return;
}
soc_dev_attr->revision = soc_rev;
soc_dev_attr->custom_attr_group = omap_soc_groups[0];

soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr->family);
kfree(soc_dev_attr);
return;
}
@@ -68,10 +68,7 @@ &ehci1 {

&emac0 {
pinctrl-names = "default";
pinctrl-0 = <&ext_rgmii_pins>;
phy-mode = "rgmii";
phy-handle = <&ext_rgmii_phy>;
allwinner,rx-delay-ps = <3100>;
allwinner,tx-delay-ps = <700>;
status = "okay";
};

@@ -13,6 +13,9 @@ / {
};

&emac0 {
allwinner,rx-delay-ps = <3100>;
allwinner,tx-delay-ps = <700>;
phy-mode = "rgmii";
phy-supply = <&reg_dcdce>;
};

@@ -13,6 +13,8 @@ / {
};

&emac0 {
allwinner,tx-delay-ps = <700>;
phy-mode = "rgmii-rxid";
phy-supply = <&reg_dldo1>;
};

@@ -238,6 +238,7 @@ &i2c6 {
mt6360: pmic@34 {
compatible = "mediatek,mt6360";
reg = <0x34>;
interrupt-parent = <&pio>;
interrupts = <128 IRQ_TYPE_EDGE_FALLING>;
interrupt-names = "IRQB";
interrupt-controller;
@@ -44,9 +44,6 @@
return sys_ni_syscall(); \
}

#define COMPAT_SYS_NI(name) \
SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers);

#endif /* CONFIG_COMPAT */

#define __SYSCALL_DEFINEx(x, name, ...) \
@@ -81,6 +78,5 @@
}

asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused);
#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers);

#endif /* __ASM_SYSCALL_WRAPPER_H */
@@ -410,7 +410,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
kvm_timer_vcpu_terminate(vcpu);
kvm_pmu_vcpu_destroy(vcpu);

kvm_vgic_vcpu_destroy(vcpu);
kvm_arm_vcpu_destroy(vcpu);
}

@@ -368,7 +368,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
vgic_v4_teardown(kvm);
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

@@ -379,29 +379,39 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
vgic_flush_pending_lpis(vcpu);

INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
vgic_unregister_redist_iodev(vcpu);
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}
}

static void __kvm_vgic_destroy(struct kvm *kvm)
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu *vcpu;
unsigned long i;
struct kvm *kvm = vcpu->kvm;

lockdep_assert_held(&kvm->arch.config_lock);

vgic_debug_destroy(kvm);

kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vgic_vcpu_destroy(vcpu);

kvm_vgic_dist_destroy(kvm);
mutex_lock(&kvm->slots_lock);
__kvm_vgic_vcpu_destroy(vcpu);
mutex_unlock(&kvm->slots_lock);
}

void kvm_vgic_destroy(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long i;

mutex_lock(&kvm->slots_lock);

vgic_debug_destroy(kvm);

kvm_for_each_vcpu(i, vcpu, kvm)
__kvm_vgic_vcpu_destroy(vcpu);

mutex_lock(&kvm->arch.config_lock);
__kvm_vgic_destroy(kvm);

kvm_vgic_dist_destroy(kvm);

mutex_unlock(&kvm->arch.config_lock);
mutex_unlock(&kvm->slots_lock);
}

/**
@@ -469,25 +479,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
type = VGIC_V3;
}

if (ret) {
__kvm_vgic_destroy(kvm);
if (ret)
goto out;
}

dist->ready = true;
dist_base = dist->vgic_dist_base;
mutex_unlock(&kvm->arch.config_lock);

ret = vgic_register_dist_iodev(kvm, dist_base, type);
if (ret) {
if (ret)
kvm_err("Unable to register VGIC dist MMIO regions\n");
kvm_vgic_destroy(kvm);
}
mutex_unlock(&kvm->slots_lock);
return ret;

goto out_slots;
out:
mutex_unlock(&kvm->arch.config_lock);
out_slots:
mutex_unlock(&kvm->slots_lock);

if (ret)
kvm_vgic_destroy(kvm);

return ret;
}
@@ -820,7 +820,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
return ret;
}

static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;

@@ -833,6 +833,8 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
unsigned long c;
int ret = 0;

lockdep_assert_held(&kvm->slots_lock);

kvm_for_each_vcpu(c, vcpu, kvm) {
ret = vgic_register_redist_iodev(vcpu);
if (ret)

@@ -241,6 +241,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
int vgic_v3_save_pending_tables(struct kvm *kvm);
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu);
bool vgic_v3_check_base(struct kvm *kvm);

void vgic_v3_load(struct kvm_vcpu *vcpu);
@@ -46,9 +46,6 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
return sys_ni_syscall(); \
}

#define COMPAT_SYS_NI(name) \
SYSCALL_ALIAS(__riscv_compat_sys_##name, sys_ni_posix_timers);

#endif /* CONFIG_COMPAT */

#define __SYSCALL_DEFINEx(x, name, ...) \
@@ -82,6 +79,4 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
return sys_ni_syscall(); \
}

#define SYS_NI(name) SYSCALL_ALIAS(__riscv_sys_##name, sys_ni_posix_timers);

#endif /* __ASM_SYSCALL_WRAPPER_H */
@@ -55,6 +55,7 @@ struct imsic {
/* IMSIC SW-file */
struct imsic_mrif *swfile;
phys_addr_t swfile_pa;
spinlock_t swfile_extirq_lock;
};

#define imsic_vs_csr_read(__c) \
@@ -613,12 +614,23 @@ static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu)
{
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
struct imsic_mrif *mrif = imsic->swfile;
unsigned long flags;

/*
 * The critical section is necessary during external interrupt
 * updates to avoid the risk of losing interrupts due to potential
 * interruptions between reading topei and updating pending status.
 */

spin_lock_irqsave(&imsic->swfile_extirq_lock, flags);

if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) &&
imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
else
kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);

spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags);
}

static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear,
@@ -1039,6 +1051,7 @@ int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
}
imsic->swfile = page_to_virt(swfile_page);
imsic->swfile_pa = page_to_phys(swfile_page);
spin_lock_init(&imsic->swfile_extirq_lock);

/* Setup IO device */
kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);
@@ -44,8 +44,7 @@ CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
@@ -76,7 +75,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@@ -93,6 +91,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLUB_STATS=y
# CONFIG_COMPAT_BRK is not set
@@ -619,6 +618,9 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_BTRFS_DEBUG=y
CONFIG_BTRFS_ASSERT=y
CONFIG_NILFS2_FS=m
CONFIG_BCACHEFS_FS=y
CONFIG_BCACHEFS_QUOTA=y
CONFIG_BCACHEFS_POSIX_ACL=y
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
@@ -691,7 +693,6 @@ CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y

@@ -42,8 +42,7 @@ CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
@@ -71,7 +70,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@@ -88,6 +86,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
CONFIG_ZSMALLOC_STAT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
@@ -605,6 +604,9 @@ CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_BCACHEFS_QUOTA=y
CONFIG_BCACHEFS_POSIX_ACL=y
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
@@ -677,7 +679,6 @@ CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_LOCKDOWN_LSM=y

@@ -9,8 +9,7 @@ CONFIG_BPF_SYSCALL=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_CRASH_DUMP=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_MARCH_Z13=y
# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
CONFIG_HZ_100=y
@@ -79,7 +79,7 @@ static inline int test_fp_ctl(u32 fpc)
#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)

#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7)
#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW)

struct kernel_fpu;

@@ -63,10 +63,6 @@
cond_syscall(__s390x_sys_##name); \
cond_syscall(__s390_sys_##name)

#define SYS_NI(name) \
SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers); \
SYSCALL_ALIAS(__s390_sys_##name, sys_ni_posix_timers)

#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
long __s390_compat_sys##name(struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \
@@ -85,15 +81,11 @@

/*
 * As some compat syscalls may not be implemented, we need to expand
 * COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
 * kernel/time/posix-stubs.c to cover this case as well.
 * COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well.
 */
#define COND_SYSCALL_COMPAT(name) \
cond_syscall(__s390_compat_sys_##name)

#define COMPAT_SYS_NI(name) \
SYSCALL_ALIAS(__s390_compat_sys_##name, sys_ni_posix_timers)

#define __S390_SYS_STUBx(x, name, ...) \
long __s390_sys##name(struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \
@@ -124,9 +116,6 @@
#define COND_SYSCALL(name) \
cond_syscall(__s390x_sys_##name)

#define SYS_NI(name) \
SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers)

#define __S390_SYS_STUBx(x, fullname, name, ...)

#endif /* CONFIG_COMPAT */
@@ -86,9 +86,6 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
return sys_ni_syscall(); \
}

#define __SYS_NI(abi, name) \
SYSCALL_ALIAS(__##abi##_##name, sys_ni_posix_timers);

#ifdef CONFIG_X86_64
#define __X64_SYS_STUB0(name) \
__SYS_STUB0(x64, sys_##name)
@@ -100,13 +97,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __X64_COND_SYSCALL(name) \
__COND_SYSCALL(x64, sys_##name)

#define __X64_SYS_NI(name) \
__SYS_NI(x64, sys_##name)
#else /* CONFIG_X86_64 */
#define __X64_SYS_STUB0(name)
#define __X64_SYS_STUBx(x, name, ...)
#define __X64_COND_SYSCALL(name)
#define __X64_SYS_NI(name)
#endif /* CONFIG_X86_64 */

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
@@ -120,13 +114,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __IA32_COND_SYSCALL(name) \
__COND_SYSCALL(ia32, sys_##name)

#define __IA32_SYS_NI(name) \
__SYS_NI(ia32, sys_##name)
#else /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
#define __IA32_SYS_STUB0(name)
#define __IA32_SYS_STUBx(x, name, ...)
#define __IA32_COND_SYSCALL(name)
#define __IA32_SYS_NI(name)
#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */

#ifdef CONFIG_IA32_EMULATION
@@ -135,8 +126,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
 * additional wrappers (aptly named __ia32_sys_xyzzy) which decode the
 * ia32 regs in the proper order for shared or "common" syscalls. As some
 * syscalls may not be implemented, we need to expand COND_SYSCALL in
 * kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this
 * case as well.
 * kernel/sys_ni.c to cover this case as well.
 */
#define __IA32_COMPAT_SYS_STUB0(name) \
__SYS_STUB0(ia32, compat_sys_##name)
@@ -148,14 +138,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __IA32_COMPAT_COND_SYSCALL(name) \
__COND_SYSCALL(ia32, compat_sys_##name)

#define __IA32_COMPAT_SYS_NI(name) \
__SYS_NI(ia32, compat_sys_##name)

#else /* CONFIG_IA32_EMULATION */
#define __IA32_COMPAT_SYS_STUB0(name)
#define __IA32_COMPAT_SYS_STUBx(x, name, ...)
#define __IA32_COMPAT_COND_SYSCALL(name)
#define __IA32_COMPAT_SYS_NI(name)
#endif /* CONFIG_IA32_EMULATION */

@@ -175,13 +161,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __X32_COMPAT_COND_SYSCALL(name) \
__COND_SYSCALL(x64, compat_sys_##name)

#define __X32_COMPAT_SYS_NI(name) \
__SYS_NI(x64, compat_sys_##name)
#else /* CONFIG_X86_X32_ABI */
#define __X32_COMPAT_SYS_STUB0(name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...)
#define __X32_COMPAT_COND_SYSCALL(name)
#define __X32_COMPAT_SYS_NI(name)
#endif /* CONFIG_X86_X32_ABI */

@@ -212,17 +195,12 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);

/*
 * As some compat syscalls may not be implemented, we need to expand
 * COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
 * kernel/time/posix-stubs.c to cover this case as well.
 * COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well.
 */
#define COND_SYSCALL_COMPAT(name) \
__IA32_COMPAT_COND_SYSCALL(name) \
__X32_COMPAT_COND_SYSCALL(name)

#define COMPAT_SYS_NI(name) \
__IA32_COMPAT_SYS_NI(name) \
__X32_COMPAT_SYS_NI(name)

#endif /* CONFIG_COMPAT */

#define __SYSCALL_DEFINEx(x, name, ...) \
@@ -243,8 +221,8 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
 * As the generic SYSCALL_DEFINE0() macro does not decode any parameters for
 * obvious reasons, and passing struct pt_regs *regs to it in %rdi does not
 * hurt, we only need to re-define it here to keep the naming congruent to
 * SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() and SYS_NI()
 * macros to work correctly.
 * SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() macro
 * to work correctly.
 */
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
@@ -257,10 +235,6 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
__X64_COND_SYSCALL(name) \
__IA32_COND_SYSCALL(name)

#define SYS_NI(name) \
__X64_SYS_NI(name) \
__IA32_SYS_NI(name)

/*
 * For VSYSCALLS, we need to declare these three syscalls with the new
@@ -2972,6 +2972,25 @@ static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)

set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
}

/*
 * For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
 * the host/guest supports its use.
 *
 * guest_can_use() checks a number of requirements on the host/guest to
 * ensure that MSR_IA32_XSS is available, but it might report true even
 * if X86_FEATURE_XSAVES isn't configured in the guest to ensure host
 * MSR_IA32_XSS is always properly restored. For SEV-ES, it is better
 * to further check that the guest CPUID actually supports
 * X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaved
 * guests will still get intercepted and caught in the normal
 * kvm_emulate_rdmsr()/kvm_emulated_wrmsr() paths.
 */
if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
else
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0);
}

void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)

@@ -103,6 +103,7 @@ static const struct svm_direct_access_msrs {
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
{ .index = MSR_IA32_LASTINTTOIP, .always = false },
{ .index = MSR_IA32_XSS, .always = false },
{ .index = MSR_EFER, .always = false },
{ .index = MSR_IA32_CR_PAT, .always = false },
{ .index = MSR_AMD64_SEV_ES_GHCB, .always = true },

@@ -30,7 +30,7 @@
#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2

#define MAX_DIRECT_ACCESS_MSRS 46
#define MAX_DIRECT_ACCESS_MSRS 47
#define MSRPM_OFFSETS 32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;

@@ -9,6 +9,7 @@ config XEN
select PARAVIRT_CLOCK
select X86_HV_CALLBACK_VECTOR
depends on X86_64 || (X86_32 && X86_PAE)
depends on X86_64 || (X86_GENERIC || MPENTIUM4 || MCORE2 || MATOM || MK8)
depends on X86_LOCAL_APIC && X86_TSC
help
This is the Linux Xen port.  Enabling this will allow the
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <asm/unaligned.h>

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -44,6 +45,7 @@ struct vhci_data {
bool wakeup;
__u16 msft_opcode;
bool aosp_capable;
atomic_t initialized;
};

static int vhci_open_dev(struct hci_dev *hdev)
@@ -75,11 +77,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)

memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

mutex_lock(&data->open_mutex);
skb_queue_tail(&data->readq, skb);
mutex_unlock(&data->open_mutex);

wake_up_interruptible(&data->read_wait);
if (atomic_read(&data->initialized))
wake_up_interruptible(&data->read_wait);

return 0;
}

@@ -464,7 +465,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
skb_put_u8(skb, 0xff);
skb_put_u8(skb, opcode);
put_unaligned_le16(hdev->id, skb_put(skb, 2));
skb_queue_tail(&data->readq, skb);
skb_queue_head(&data->readq, skb);
atomic_inc(&data->initialized);

wake_up_interruptible(&data->read_wait);

return 0;
@@ -2158,13 +2158,23 @@ static int sysc_reset(struct sysc *ddata)
sysc_val = sysc_read_sysconfig(ddata);
sysc_val |= sysc_mask;
sysc_write(ddata, sysc_offset, sysc_val);
/* Flush posted write */

/*
 * Some devices need a delay before reading registers
 * after reset. Presumably a srst_udelay is not needed
 * for devices that use a rstctrl register reset.
 */
if (ddata->cfg.srst_udelay)
fsleep(ddata->cfg.srst_udelay);

/*
 * Flush posted write. For devices needing srst_udelay
 * this should trigger an interconnect error if the
 * srst_udelay value is needed but not configured.
 */
sysc_val = sysc_read_sysconfig(ddata);
}

if (ddata->cfg.srst_udelay)
fsleep(ddata->cfg.srst_udelay);

if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata);
@@ -282,13 +282,15 @@ static void dwapb_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
u32 val;

raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
val = dwapb_read(gpio, GPIO_INTEN);
val |= BIT(irqd_to_hwirq(d));
val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}

@@ -296,12 +298,14 @@ static void dwapb_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
u32 val;

raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
val = dwapb_read(gpio, GPIO_INTEN);
val &= ~BIT(irqd_to_hwirq(d));
val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
@@ -2481,10 +2481,7 @@ static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
return 0;
}

/*
 * gpio_ioctl() - ioctl handler for the GPIO chardev
 */
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static long gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg)
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
@@ -2521,6 +2518,17 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
}

/*
 * gpio_ioctl() - ioctl handler for the GPIO chardev
 */
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct gpio_chardev_data *cdev = file->private_data;

return call_ioctl_locked(file, cmd, arg, cdev->gdev,
gpio_ioctl_unlocked);
}

#ifdef CONFIG_COMPAT
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
@@ -285,6 +285,7 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;

vm_bo->moved = true;
if (!bo || bo->tbo.type != ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
else if (bo->parent)

@@ -1653,18 +1653,24 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
if (test_bit(gpuidx, prange->bitmap_access))
bitmap_set(ctx->bitmap, gpuidx, 1);
}

/*
 * If prange is already mapped or with always mapped flag,
 * update mapping on GPUs with ACCESS attribute
 */
if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
if (prange->mapped_to_gpu ||
prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
}
} else {
bitmap_or(ctx->bitmap, prange->bitmap_access,
prange->bitmap_aip, MAX_GPU_INSTANCE);
}

if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
if (!prange->mapped_to_gpu ||
bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
r = 0;
goto free_ctx;
}
r = 0;
goto free_ctx;
}

if (prange->actual_loc && !prange->ttm_res) {
@@ -1014,13 +1014,20 @@ static enum bp_result get_ss_info_v4_5(
DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_DISPLAY_PORT:
ss_info->spread_spectrum_percentage =
if (bp->base.integrated_info) {
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", bp->base.integrated_info->gpuclk_ss_percentage);
ss_info->spread_spectrum_percentage =
bp->base.integrated_info->gpuclk_ss_percentage;
ss_info->type.CENTER_MODE =
bp->base.integrated_info->gpuclk_ss_type;
} else {
ss_info->spread_spectrum_percentage =
disp_cntl_tbl->dp_ss_percentage;
ss_info->spread_spectrum_range =
ss_info->spread_spectrum_range =
disp_cntl_tbl->dp_ss_rate_10hz * 10;
if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;

if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
}
DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
@@ -2386,13 +2393,7 @@ static enum bp_result get_vram_info_v30(
return BP_RESULT_BADBIOSTABLE;

info->num_chans = info_v30->channel_num;
/* As suggested by VBIOS we should always use
 * dram_channel_width_bytes = 2 when using VRAM
 * table version 3.0. This is because the channel_width
 * param in the VRAM info table is changed in 7000 series and
 * no longer represents the memory channel width.
 */
info->dram_channel_width_bytes = 2;
info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8;

return result;
}
@@ -2820,6 +2821,8 @@ static enum bp_result get_integrated_info_v2_2(
info->ma_channel_number = info_v2_2->umachannelnumber;
info->dp_ss_control =
le16_to_cpu(info_v2_2->reserved1);
info->gpuclk_ss_percentage = info_v2_2->gpuclk_ss_percentage;
info->gpuclk_ss_type = info_v2_2->gpuclk_ss_type;

for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
info->ext_disp_conn_info.gu_id[i] =
@@ -5095,18 +5095,28 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
 */
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
switch (dc->ctx->asic_id.chip_family) {

if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
case FAMILY_YELLOW_CARP:
/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
break;

case AMDGPU_FAMILY_GC_11_0_1:
case AMDGPU_FAMILY_GC_11_5_0:
if (!dc->debug.dpia_debug.bits.disable_dpia)
return true;
break;

default:
break;
}

/* dmub aux needs dmub notifications to be enabled */
return dc->debug.enable_dmub_aux_for_legacy_ddc;

}

/**

@@ -5420,7 +5420,7 @@ static void CalculateOutputLink(
*OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (dml_uint_t)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);

if (OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
if (*OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
*OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
@@ -960,6 +960,12 @@ void dcn32_init_hw(struct dc *dc)
dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;

if (dc->ctx->dmub_srv->dmub->fw_version <
DMUB_FW_VERSION(7, 0, 35)) {
dc->debug.force_disable_subvp = true;
dc->debug.disable_fpo_optimizations = true;
}
}
}

@@ -417,6 +417,8 @@ struct integrated_info {
/* V2.1 */
struct edp_info edp1_info;
struct edp_info edp2_info;
uint32_t gpuclk_ss_percentage;
uint32_t gpuclk_ss_type;
};

/*
@@ -2465,7 +2465,8 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,

val |= XELPDP_FORWARD_CLOCK_UNGATE;

if (is_hdmi_frl(crtc_state->port_clock))
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
is_hdmi_frl(crtc_state->port_clock))
val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
@@ -3747,8 +3747,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (!active)
goto out;

intel_dsc_get_config(pipe_config);
intel_bigjoiner_get_config(pipe_config);
intel_dsc_get_config(pipe_config);

if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
DISPLAY_VER(dev_priv) >= 11)
@@ -6033,6 +6033,17 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
return -EINVAL;
}

/*
 * FIXME: Bigjoiner+async flip is busted currently.
 * Remove this check once the issues are fixed.
 */
if (new_crtc_state->bigjoiner_pipes) {
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s] async flip disallowed with bigjoiner\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}

for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
if (plane->pipe != crtc->pipe)
@ -389,7 +389,7 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
|
||||
enum intel_dmc_id dmc_id;
|
||||
|
||||
/* TODO: check if the following applies to all D13+ platforms. */
|
||||
if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
|
||||
if (!IS_TIGERLAKE(i915))
|
||||
return;
|
||||
|
||||
for_each_dmc_id(dmc_id) {
|
||||
@ -493,6 +493,45 @@ void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
|
||||
intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
|
||||
}
|
||||
|
||||
static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915,
|
||||
enum intel_dmc_id dmc_id, i915_reg_t reg)
|
||||
{
|
||||
u32 offset = i915_mmio_reg_offset(reg);
|
||||
u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, 0));
|
||||
u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
|
||||
|
||||
return offset >= start && offset < end;
|
||||
}
|
||||
|
||||
static bool disable_dmc_evt(struct drm_i915_private *i915,
|
||||
enum intel_dmc_id dmc_id,
|
||||
i915_reg_t reg, u32 data)
|
||||
{
|
||||
if (!is_dmc_evt_ctl_reg(i915, dmc_id, reg))
|
||||
return false;
|
||||
|
||||
/* keep all pipe DMC events disabled by default */
|
||||
if (dmc_id != DMC_FW_MAIN)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static u32 dmc_mmiodata(struct drm_i915_private *i915,
|
||||
struct intel_dmc *dmc,
|
||||
enum intel_dmc_id dmc_id, int i)
|
||||
{
|
||||
if (disable_dmc_evt(i915, dmc_id,
|
||||
dmc->dmc_info[dmc_id].mmioaddr[i],
|
||||
dmc->dmc_info[dmc_id].mmiodata[i]))
|
||||
return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
|
||||
DMC_EVT_CTL_TYPE_EDGE_0_1) |
|
||||
REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
|
||||
DMC_EVT_CTL_EVENT_ID_FALSE);
|
||||
else
|
||||
return dmc->dmc_info[dmc_id].mmiodata[i];
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_dmc_load_program() - write the firmware from memory to register.
|
||||
* @i915: i915 drm device.
|
||||
@ -532,7 +571,7 @@ void intel_dmc_load_program(struct drm_i915_private *i915)
|
||||
for_each_dmc_id(dmc_id) {
|
||||
for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
|
||||
intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i],
|
||||
dmc->dmc_info[dmc_id].mmiodata[i]);
|
||||
dmc_mmiodata(i915, dmc, dmc_id, i));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -175,7 +175,7 @@ hwm_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
* tau4 = (4 | x) << y
* but add 2 when doing the final right shift to account for units
*/
tau4 = ((1 << x_w) | x) << y;
tau4 = (u64)((1 << x_w) | x) << y;
/* val in hwmon interface units (millisec) */
out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

@ -211,7 +211,7 @@ hwm_power1_max_interval_store(struct device *dev,
r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
tau4 = ((1 << x_w) | x) << y;
tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
if (val > max_win)

@ -325,28 +325,28 @@ struct joycon_imu_cal {
* All the controller's button values are stored in a u32.
* They can be accessed with bitwise ANDs.
*/
static const u32 JC_BTN_Y = BIT(0);
static const u32 JC_BTN_X = BIT(1);
static const u32 JC_BTN_B = BIT(2);
static const u32 JC_BTN_A = BIT(3);
static const u32 JC_BTN_SR_R = BIT(4);
static const u32 JC_BTN_SL_R = BIT(5);
static const u32 JC_BTN_R = BIT(6);
static const u32 JC_BTN_ZR = BIT(7);
static const u32 JC_BTN_MINUS = BIT(8);
static const u32 JC_BTN_PLUS = BIT(9);
static const u32 JC_BTN_RSTICK = BIT(10);
static const u32 JC_BTN_LSTICK = BIT(11);
static const u32 JC_BTN_HOME = BIT(12);
static const u32 JC_BTN_CAP = BIT(13); /* capture button */
static const u32 JC_BTN_DOWN = BIT(16);
static const u32 JC_BTN_UP = BIT(17);
static const u32 JC_BTN_RIGHT = BIT(18);
static const u32 JC_BTN_LEFT = BIT(19);
static const u32 JC_BTN_SR_L = BIT(20);
static const u32 JC_BTN_SL_L = BIT(21);
static const u32 JC_BTN_L = BIT(22);
static const u32 JC_BTN_ZL = BIT(23);
#define JC_BTN_Y BIT(0)
#define JC_BTN_X BIT(1)
#define JC_BTN_B BIT(2)
#define JC_BTN_A BIT(3)
#define JC_BTN_SR_R BIT(4)
#define JC_BTN_SL_R BIT(5)
#define JC_BTN_R BIT(6)
#define JC_BTN_ZR BIT(7)
#define JC_BTN_MINUS BIT(8)
#define JC_BTN_PLUS BIT(9)
#define JC_BTN_RSTICK BIT(10)
#define JC_BTN_LSTICK BIT(11)
#define JC_BTN_HOME BIT(12)
#define JC_BTN_CAP BIT(13) /* capture button */
#define JC_BTN_DOWN BIT(16)
#define JC_BTN_UP BIT(17)
#define JC_BTN_RIGHT BIT(18)
#define JC_BTN_LEFT BIT(19)
#define JC_BTN_SR_L BIT(20)
#define JC_BTN_SL_L BIT(21)
#define JC_BTN_L BIT(22)
#define JC_BTN_ZL BIT(23)
enum joycon_msg_type {
JOYCON_MSG_TYPE_NONE,

@ -927,14 +927,27 @@ static int joycon_request_calibration(struct joycon_ctlr *ctlr)
*/
static void joycon_calc_imu_cal_divisors(struct joycon_ctlr *ctlr)
{
int i;
int i, divz = 0;
for (i = 0; i < 3; i++) {
ctlr->imu_cal_accel_divisor[i] = ctlr->accel_cal.scale[i] -
ctlr->accel_cal.offset[i];
ctlr->imu_cal_gyro_divisor[i] = ctlr->gyro_cal.scale[i] -
ctlr->gyro_cal.offset[i];
if (ctlr->imu_cal_accel_divisor[i] == 0) {
ctlr->imu_cal_accel_divisor[i] = 1;
divz++;
}
if (ctlr->imu_cal_gyro_divisor[i] == 0) {
ctlr->imu_cal_gyro_divisor[i] = 1;
divz++;
}
}
if (divz)
hid_warn(ctlr->hdev, "inaccurate IMU divisors (%d)\n", divz);
}
static const s16 DFLT_ACCEL_OFFSET /*= 0*/;
@ -1163,16 +1176,16 @@ static void joycon_parse_imu_report(struct joycon_ctlr *ctlr,
JC_IMU_SAMPLES_PER_DELTA_AVG) {
ctlr->imu_avg_delta_ms = ctlr->imu_delta_samples_sum /
ctlr->imu_delta_samples_count;
/* don't ever want divide by zero shenanigans */
if (ctlr->imu_avg_delta_ms == 0) {
ctlr->imu_avg_delta_ms = 1;
hid_warn(ctlr->hdev,
"calculated avg imu delta of 0\n");
}
ctlr->imu_delta_samples_count = 0;
ctlr->imu_delta_samples_sum = 0;
}
/* don't ever want divide by zero shenanigans */
if (ctlr->imu_avg_delta_ms == 0) {
ctlr->imu_avg_delta_ms = 1;
hid_warn(ctlr->hdev, "calculated avg imu delta of 0\n");
}
/* useful for debugging IMU sample rate */
hid_dbg(ctlr->hdev,
"imu_report: ms=%u last_ms=%u delta=%u avg_delta=%u\n",

@ -249,18 +249,46 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
if (!slave)
return 0;
command = readl(bus->base + ASPEED_I2C_CMD_REG);
/*
* Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive
* transfers with low enough latency between the nak/stop phase of the current
* command and the start/address phase of the following command that the
* interrupts are coalesced by the time we process them.
*/
if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
}
/* Slave was requested, restart state machine. */
if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
}
/* Propagate any stop conditions to the slave implementation. */
if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) {
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
}
/*
* Now that we've dealt with any potentially coalesced stop conditions,
* address any start conditions.
*/
if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
bus->slave_state = ASPEED_I2C_SLAVE_START;
}
/* Slave is not currently active, irq was for someone else. */
/*
* If the slave has been stopped and not started then slave interrupt
* handling is complete.
*/
if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
return irq_handled;
command = readl(bus->base + ASPEED_I2C_CMD_REG);
dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
irq_status, command);

@ -279,17 +307,6 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
}
/* Slave was asked to stop. */
if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
}
if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
}
switch (bus->slave_state) {
case ASPEED_I2C_SLAVE_READ_REQUESTED:
if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))

@ -324,8 +341,7 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
break;
case ASPEED_I2C_SLAVE_STOP:
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
/* Stop event handling is done early. Unreachable. */
break;
case ASPEED_I2C_SLAVE_START:
/* Slave was just started. Waiting for the next event. */;
@ -858,6 +858,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
ret = geni_se_resources_on(&gi2c->se);
if (ret) {
dev_err(dev, "Error turning on resources %d\n", ret);
clk_disable_unprepare(gi2c->core_clk);
return ret;
}
proto = geni_se_read_proto(&gi2c->se);

@ -877,8 +878,11 @@ static int geni_i2c_probe(struct platform_device *pdev)
/* FIFO is disabled, so we can only use GPI DMA */
gi2c->gpi_mode = true;
ret = setup_gpi_dma(gi2c);
if (ret)
if (ret) {
geni_se_resources_off(&gi2c->se);
clk_disable_unprepare(gi2c->core_clk);
return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");
}
dev_dbg(dev, "Using GPI DMA mode for I2C\n");
} else {

@ -891,6 +895,8 @@ static int geni_i2c_probe(struct platform_device *pdev)
if (!tx_depth) {
dev_err(dev, "Invalid TX FIFO depth\n");
geni_se_resources_off(&gi2c->se);
clk_disable_unprepare(gi2c->core_clk);
return -EINVAL;
}

@ -178,6 +178,7 @@ struct rk3x_i2c_soc_data {
* @clk: function clk for rk3399 or function & Bus clks for others
* @pclk: Bus clk for rk3399
* @clk_rate_nb: i2c clk rate change notify
* @irq: irq number
* @t: I2C known timing information
* @lock: spinlock for the i2c bus
* @wait: the waitqueue to wait for i2c transfer

@ -200,6 +201,7 @@ struct rk3x_i2c {
struct clk *clk;
struct clk *pclk;
struct notifier_block clk_rate_nb;
int irq;
/* Settings */
struct i2c_timings t;

@ -1087,13 +1089,18 @@ static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
spin_unlock_irqrestore(&i2c->lock, flags);
rk3x_i2c_start(i2c);
if (!polling) {
rk3x_i2c_start(i2c);
timeout = wait_event_timeout(i2c->wait, !i2c->busy,
msecs_to_jiffies(WAIT_TIMEOUT));
} else {
disable_irq(i2c->irq);
rk3x_i2c_start(i2c);
timeout = rk3x_i2c_wait_xfer_poll(i2c);
enable_irq(i2c->irq);
}
spin_lock_irqsave(&i2c->lock, flags);

@ -1310,6 +1317,8 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
return ret;
}
i2c->irq = irq;
platform_set_drvdata(pdev, i2c);
if (i2c->soc_data->calc_timings == rk3x_i2c_v0_calc_timings) {

@ -660,6 +660,7 @@ config DM_ZONED
config DM_AUDIT
bool "DM audit events"
depends on BLK_DEV_DM
depends on AUDIT
help
Generate audit events for device-mapper.
@ -1755,11 +1755,12 @@ static void integrity_metadata(struct work_struct *w)
sectors_to_process = dio->range.n_sectors;
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
struct bio_vec bv_copy = bv;
unsigned int pos;
char *mem, *checksums_ptr;
again:
mem = bvec_kmap_local(&bv);
mem = bvec_kmap_local(&bv_copy);
pos = 0;
checksums_ptr = checksums;
do {

@ -1768,7 +1769,7 @@ static void integrity_metadata(struct work_struct *w)
sectors_to_process -= ic->sectors_per_block;
pos += ic->sectors_per_block << SECTOR_SHIFT;
sector += ic->sectors_per_block;
} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
} while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
kunmap_local(mem);
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,

@ -1793,9 +1794,9 @@ static void integrity_metadata(struct work_struct *w)
if (!sectors_to_process)
break;
if (unlikely(pos < bv.bv_len)) {
bv.bv_offset += pos;
bv.bv_len -= pos;
if (unlikely(pos < bv_copy.bv_len)) {
bv_copy.bv_offset += pos;
bv_copy.bv_len -= pos;
goto again;
}
}

@ -3317,6 +3317,9 @@ static void raid_dtr(struct dm_target *ti)
mddev_lock_nointr(&rs->md);
md_stop(&rs->md);
mddev_unlock(&rs->md);
if (work_pending(&rs->md.event_work))
flush_work(&rs->md.event_work);
raid_set_free(rs);
}

@ -82,6 +82,14 @@ static struct module *md_cluster_mod;
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
/*
* This workqueue is used for sync_work to register new sync_thread, and for
* del_work to remove rdev, and for event_work that is only set by dm-raid.
*
* Noted that sync_work will grab reconfig_mutex, hence never flush this
* workqueue whith reconfig_mutex grabbed.
*/
static struct workqueue_struct *md_misc_wq;
struct workqueue_struct *md_bitmap_wq;

@ -6330,9 +6338,6 @@ static void __md_stop(struct mddev *mddev)
struct md_personality *pers = mddev->pers;
md_bitmap_destroy(mddev);
mddev_detach(mddev);
/* Ensure ->event_work is done */
if (mddev->event_work.func)
flush_workqueue(md_misc_wq);
spin_lock(&mddev->lock);
mddev->pers = NULL;
spin_unlock(&mddev->lock);

@ -866,10 +866,13 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
offset, adapter->ring_size);
err = -1;
goto failed;
goto free_buffer;
}
return 0;
free_buffer:
kfree(tx_ring->tx_buffer);
tx_ring->tx_buffer = NULL;
failed:
if (adapter->ring_vir_addr != NULL) {
dma_free_coherent(&pdev->dev, adapter->ring_size,

@ -59,7 +59,6 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
for (i = 0; i < num_frags ; i++) {
skb_frag_t *frag = &sinfo->frags[i];
struct bnxt_sw_tx_bd *frag_tx_buf;
struct pci_dev *pdev = bp->pdev;
dma_addr_t frag_mapping;
int frag_len;

@ -73,16 +72,10 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
frag_len = skb_frag_size(frag);
frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
frag_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
return NULL;
dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);
flags = frag_len << TX_BD_LEN_SHIFT;
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) +
skb_frag_off(frag);
txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
len = frag_len;
@ -207,7 +207,7 @@
#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
#define I40E_GLGEN_MSCA_OPCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_STCODE_SHIFT)
#define I40E_GLGEN_MSCA_STCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_STCODE_SHIFT)
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31

@ -37,11 +37,11 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
#define I40E_QTX_CTL_VM_QUEUE 0x1
#define I40E_QTX_CTL_PF_QUEUE 0x2
#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK
#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(1)
#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1)
#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(2)
#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK
#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(0)
#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_GLGEN_MSCA_OPCODE_MASK(0)
#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1)
#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(3)

@ -1850,14 +1850,14 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
linkmode_zero(ks->link_modes.supported);
linkmode_zero(ks->link_modes.advertising);
for (i = 0; i < BITS_PER_TYPE(u64); i++) {
for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) {
if (phy_types_low & BIT_ULL(i))
ice_linkmode_set_bit(&phy_type_low_lkup[i], ks,
req_speeds, advert_phy_type_lo,
i);
}
for (i = 0; i < BITS_PER_TYPE(u64); i++) {
for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) {
if (phy_types_high & BIT_ULL(i))
ice_linkmode_set_bit(&phy_type_high_lkup[i], ks,
req_speeds, advert_phy_type_hi,

@ -1981,6 +1981,8 @@ int ice_init_lag(struct ice_pf *pf)
int n, err;
ice_lag_init_feature_support_flag(pf);
if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
return 0;
pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
if (!pf->lag)

@ -2371,6 +2371,9 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
} else {
max_txqs[i] = vsi->alloc_txq;
}
if (vsi->type == ICE_VSI_PF)
max_txqs[i] += vsi->num_xdp_txq;
}
dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);

@ -2620,10 +2623,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_VF &&
vsi->agg_node && vsi->agg_node->valid)
vsi->agg_node->num_vsis--;
if (vsi->agg_node) {
vsi->agg_node->valid = false;
vsi->agg_node->agg_id = 0;
}
}
/**

@ -399,9 +399,10 @@ static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct otx2_nic *pfvf = netdev_priv(dev);
u8 old_pfc_en;
int err;
/* Save PFC configuration to interface */
old_pfc_en = pfvf->pfc_en;
pfvf->pfc_en = pfc->pfc_en;
if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)

@ -411,13 +412,17 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
* supported by the tx queue configuration
*/
err = otx2_check_pfc_config(pfvf);
if (err)
if (err) {
pfvf->pfc_en = old_pfc_en;
return err;
}
process_pfc:
err = otx2_config_priority_flow_ctrl(pfvf);
if (err)
if (err) {
pfvf->pfc_en = old_pfc_en;
return err;
}
/* Request Per channel Bpids */
if (pfc->pfc_en)

@ -425,6 +430,12 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
err = otx2_pfc_txschq_update(pfvf);
if (err) {
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, false);
otx2_pfc_txschq_stop(pfvf);
pfvf->pfc_en = old_pfc_en;
otx2_config_priority_flow_ctrl(pfvf);
dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
return err;
}
@ -291,6 +291,9 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
for (i = 0; i < q->n_desc; i++) {
struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
if (!entry->buf)
continue;
dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
DMA_TO_DEVICE);
skb_free_frag(entry->buf);

@ -156,15 +156,18 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
return token;
}
static int cmd_alloc_index(struct mlx5_cmd *cmd)
static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cmd->alloc_lock, flags);
ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
if (ret < cmd->vars.max_reg_cmds)
if (ret < cmd->vars.max_reg_cmds) {
clear_bit(ret, &cmd->vars.bitmask);
ent->idx = ret;
cmd->ent_arr[ent->idx] = ent;
}
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM;

@ -979,7 +982,7 @@ static void cmd_work_handler(struct work_struct *work)
sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
down(sem);
if (!ent->page_queue) {
alloc_ret = cmd_alloc_index(cmd);
alloc_ret = cmd_alloc_index(cmd, ent);
if (alloc_ret < 0) {
mlx5_core_err_rl(dev, "failed to allocate command entry\n");
if (ent->callback) {

@ -994,15 +997,14 @@ static void cmd_work_handler(struct work_struct *work)
up(sem);
return;
}
ent->idx = alloc_ret;
} else {
ent->idx = cmd->vars.max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
clear_bit(ent->idx, &cmd->vars.bitmask);
cmd->ent_arr[ent->idx] = ent;
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
cmd->ent_arr[ent->idx] = ent;
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));

@ -718,7 +718,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
while (block_timestamp > tracer->last_timestamp) {
/* Check block override if it's not the first block */
if (!tracer->last_timestamp) {
if (tracer->last_timestamp) {
u64 *ts_event;
/* To avoid block override be the HW in case of buffer
* wraparound, the time stamp of the previous block

@ -154,6 +154,7 @@ static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type ty
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kfree(ft->g);
ft->g = NULL;
kvfree(in);
return -ENOMEM;
}

@ -197,7 +197,7 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
}
esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
esw_attr->out_count++;
/* attr->dests[].rep is resolved when we handle encap */
/* attr->dests[].vport is resolved when we handle encap */
return 0;
}

@ -270,7 +270,8 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
esw_attr->dests[esw_attr->out_count].vport_valid = true;
esw_attr->dests[esw_attr->out_count].vport = rpriv->rep->vport;
esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
esw_attr->out_count++;
@ -300,6 +300,10 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
if (err)
goto destroy_neigh_entry;
e->encap_size = ipv4_encap_size;
e->encap_header = encap_header;
encap_header = NULL;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event

@ -310,8 +314,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
reformat_params.size = ipv4_encap_size;
reformat_params.data = encap_header;
reformat_params.size = e->encap_size;
reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {

@ -319,8 +323,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
goto destroy_neigh_entry;
}
e->encap_size = ipv4_encap_size;
e->encap_header = encap_header;
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv4_put(&attr);

@ -403,18 +405,23 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
if (err)
goto free_encap;
e->encap_size = ipv4_encap_size;
kfree(e->encap_header);
e->encap_header = encap_header;
encap_header = NULL;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
goto free_encap;
goto release_neigh;
}
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
reformat_params.size = ipv4_encap_size;
reformat_params.data = encap_header;
reformat_params.size = e->encap_size;
reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {

@ -422,10 +429,6 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
goto free_encap;
}
e->encap_size = ipv4_encap_size;
kfree(e->encap_header);
e->encap_header = encap_header;
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv4_put(&attr);

@ -567,6 +570,10 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
if (err)
goto destroy_neigh_entry;
e->encap_size = ipv6_encap_size;
e->encap_header = encap_header;
encap_header = NULL;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event

@ -577,8 +584,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
reformat_params.size = ipv6_encap_size;
reformat_params.data = encap_header;
reformat_params.size = e->encap_size;
reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {

@ -586,8 +593,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
goto destroy_neigh_entry;
}
e->encap_size = ipv6_encap_size;
e->encap_header = encap_header;
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv6_put(&attr);

@ -669,18 +674,23 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
if (err)
goto free_encap;
e->encap_size = ipv6_encap_size;
kfree(e->encap_header);
e->encap_header = encap_header;
encap_header = NULL;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
goto free_encap;
goto release_neigh;
}
memset(&reformat_params, 0, sizeof(reformat_params));
reformat_params.type = e->reformat_type;
reformat_params.size = ipv6_encap_size;
reformat_params.data = encap_header;
reformat_params.size = e->encap_size;
reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {

@ -688,10 +698,6 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
goto free_encap;
}
e->encap_size = ipv6_encap_size;
kfree(e->encap_header);
e->encap_header = encap_header;
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv6_put(&attr);
@ -1064,7 +1064,8 @@ int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv,
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
esw_attr->dests[out_index].rep = rpriv->rep;
esw_attr->dests[out_index].vport_valid = true;
esw_attr->dests[out_index].vport = rpriv->rep->vport;
esw_attr->dests[out_index].mdev = out_priv->mdev;
}

@ -493,6 +493,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
dma_addr_t dma_addr = xdptxd->dma_addr;
u32 dma_len = xdptxd->len;
u16 ds_cnt, inline_hdr_sz;
unsigned int frags_size;
u8 num_wqebbs = 1;
int num_frags = 0;
bool inline_ok;

@ -503,8 +504,9 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE ||
dma_len >= MLX5E_XDP_MIN_INLINE;
frags_size = xdptxd->has_frags ? xdptxdf->sinfo->xdp_frags_size : 0;
if (unlikely(!inline_ok || sq->hw_mtu < dma_len)) {
if (unlikely(!inline_ok || sq->hw_mtu < dma_len + frags_size)) {
stats->err++;
return false;
}

@ -2142,7 +2142,7 @@ static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
{
mdev->num_block_tc++;
mdev->num_block_tc--;
}
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)

@ -49,7 +49,7 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
if (count == sizeof(drvinfo->fw_version))
if (count >= sizeof(drvinfo->fw_version))
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d", fw_rev_maj(mdev),
fw_rev_min(mdev), fw_rev_sub(mdev));

@ -78,7 +78,7 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
if (count == sizeof(drvinfo->fw_version))
if (count >= sizeof(drvinfo->fw_version))
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d", fw_rev_maj(mdev),
fw_rev_min(mdev), fw_rev_sub(mdev));

@ -3778,7 +3778,8 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
break;
case FLOW_ACTION_ACCEPT:
case FLOW_ACTION_PIPE:
if (set_branch_dest_ft(flow->priv, attr))
err = set_branch_dest_ft(flow->priv, attr);
if (err)
goto out_err;
break;
case FLOW_ACTION_JUMP:

@ -3788,7 +3789,8 @@ alloc_branch_attr(struct mlx5e_tc_flow *flow,
goto out_err;
}
*jump_count = cond->extval;
if (set_branch_dest_ft(flow->priv, attr))
err = set_branch_dest_ft(flow->priv, attr);
if (err)
goto out_err;
break;
default:

@ -5736,8 +5738,10 @@ int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_a
esw = priv->mdev->priv.eswitch;
attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
if (IS_ERR(attr->act_id_restore_rule))
if (IS_ERR(attr->act_id_restore_rule)) {
err = PTR_ERR(attr->act_id_restore_rule);
goto err_rule;
}
return 0;

@ -526,7 +526,8 @@ struct mlx5_esw_flow_attr {
u8 total_vlan;
struct {
u32 flags;
struct mlx5_eswitch_rep *rep;
bool vport_valid;
u16 vport;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_core_dev *mdev;
struct mlx5_termtbl_handle *termtbl;
@ -287,10 +287,9 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_
for (i = from; i < to; i++)
if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
mlx5_chains_put_table(chains, 0, 1, 0);
else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
esw_attr->dests[i].mdev))
mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
false);
mlx5_esw_indir_table_put(esw, esw_attr->dests[i].vport, false);
}

static bool

@ -358,8 +357,8 @@ esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
* this criteria.
*/
for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
if (esw_attr->dests[i].rep &&
mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
if (esw_attr->dests[i].vport_valid &&
mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].vport,
esw_attr->dests[i].mdev)) {
result = true;
} else {

@ -388,7 +387,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
esw_attr->dests[j].rep->vport, false);
esw_attr->dests[j].vport, false);
if (IS_ERR(dest[*i].ft)) {
err = PTR_ERR(dest[*i].ft);
goto err_indir_tbl_get;

@ -432,11 +431,11 @@ static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
int attr_idx)
{
if (esw->offloads.ft_ipsec_tx_pol &&
esw_attr->dests[attr_idx].rep &&
esw_attr->dests[attr_idx].rep->vport == MLX5_VPORT_UPLINK &&
esw_attr->dests[attr_idx].vport_valid &&
esw_attr->dests[attr_idx].vport == MLX5_VPORT_UPLINK &&
/* To be aligned with software, encryption is needed only for tunnel device */
(esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) &&
esw_attr->dests[attr_idx].rep != esw_attr->in_rep &&
esw_attr->dests[attr_idx].vport != esw_attr->in_rep->vport &&
esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev))
return true;

@ -469,7 +468,7 @@ esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_ac
int attr_idx, int dest_idx, bool pkt_reformat)
{
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
dest[dest_idx].vport.num = esw_attr->dests[attr_idx].vport;
if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
dest[dest_idx].vport.vhca_id =
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);

@ -1177,9 +1176,9 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *flow;
struct mlx5_flow_spec *spec;
struct mlx5_vport *vport;
int err, pfindex;
unsigned long i;
void *misc;
int err;
if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
return 0;

@ -1255,7 +1254,15 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
flows[vport->index] = flow;
}
}
esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)] = flows;
pfindex = mlx5_get_dev_index(peer_dev);
if (pfindex >= MLX5_MAX_PORTS) {
esw_warn(esw->dev, "Peer dev index(%d) is over the max num defined(%d)\n",
pfindex, MLX5_MAX_PORTS);
err = -EINVAL;
goto add_ec_vf_flow_err;
}
esw->fdb_table.offloads.peer_miss_rules[pfindex] = flows;
kvfree(spec);
return 0;

@ -233,8 +233,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
/* hairpin */
for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
if (!esw_attr->dest_int_port && esw_attr->dests[i].rep &&
esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
if (!esw_attr->dest_int_port && esw_attr->dests[i].vport_valid &&
esw_attr->dests[i].vport == MLX5_VPORT_UPLINK)
return true;

return false;
@ -277,7 +277,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
req_list_size = max_list_size;
}
out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
out = kvzalloc(out_sz, GFP_KERNEL);

@ -350,6 +350,8 @@ union ks8851_tx_hdr {
* @rxd: Space for receiving SPI data, in DMA-able space.
* @txd: Space for transmitting SPI data, in DMA-able space.
* @msg_enable: The message flags controlling driver output (see ethtool).
* @tx_space: Free space in the hardware TX buffer (cached copy of KS_TXMIR).
* @queued_len: Space required in hardware TX buffer for queued packets in txq.
* @fid: Incrementing frame id tag.
* @rc_ier: Cached copy of KS_IER.
* @rc_ccr: Cached copy of KS_CCR.

@ -399,6 +401,7 @@ struct ks8851_net {
struct work_struct rxctrl_work;
struct sk_buff_head txq;
unsigned int queued_len;
struct eeprom_93cx6 eeprom;
struct regulator *vdd_reg;

@ -362,16 +362,18 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
handled |= IRQ_RXPSI;
if (status & IRQ_TXI) {
handled |= IRQ_TXI;
/* no lock here, tx queue should have been stopped */
/* update our idea of how much tx space is available to the
* system */
ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR);
netif_dbg(ks, intr, ks->netdev,
"%s: txspace %d\n", __func__, ks->tx_space);
"%s: txspace %d\n", __func__, tx_space);
spin_lock(&ks->statelock);
ks->tx_space = tx_space;
if (netif_queue_stopped(ks->netdev))
netif_wake_queue(ks->netdev);
spin_unlock(&ks->statelock);
handled |= IRQ_TXI;
}
if (status & IRQ_RXI)

@ -414,9 +416,6 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
if (status & IRQ_LCI)
mii_check_link(&ks->mii);
if (status & IRQ_TXI)
netif_wake_queue(ks->netdev);
return IRQ_HANDLED;
}

@ -500,6 +499,7 @@ static int ks8851_net_open(struct net_device *dev)
ks8851_wrreg16(ks, KS_ISR, ks->rc_ier);
ks8851_wrreg16(ks, KS_IER, ks->rc_ier);
ks->queued_len = 0;
netif_start_queue(ks->netdev);
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
@ -286,6 +286,18 @@ static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp,
netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
}
/**
* calc_txlen - calculate size of message to send packet
* @len: Length of data
*
* Returns the size of the TXFIFO message needed to send
* this packet.
*/
static unsigned int calc_txlen(unsigned int len)
{
return ALIGN(len + 4, 4);
}
/**
* ks8851_rx_skb_spi - receive skbuff
* @ks: The device state

@ -305,7 +317,9 @@ static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
*/
static void ks8851_tx_work(struct work_struct *work)
{
unsigned int dequeued_len = 0;
struct ks8851_net_spi *kss;
unsigned short tx_space;
struct ks8851_net *ks;
unsigned long flags;
struct sk_buff *txb;

@ -322,6 +336,8 @@ static void ks8851_tx_work(struct work_struct *work)
last = skb_queue_empty(&ks->txq);
if (txb) {
dequeued_len += calc_txlen(txb->len);
ks8851_wrreg16_spi(ks, KS_RXQCR,
ks->rc_rxqcr | RXQCR_SDA);
ks8851_wrfifo_spi(ks, txb, last);

@ -332,6 +348,13 @@ static void ks8851_tx_work(struct work_struct *work)
}
}
tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR);
spin_lock(&ks->statelock);
ks->queued_len -= dequeued_len;
ks->tx_space = tx_space;
spin_unlock(&ks->statelock);
ks8851_unlock_spi(ks, &flags);
}

@ -346,18 +369,6 @@ static void ks8851_flush_tx_work_spi(struct ks8851_net *ks)
flush_work(&kss->tx_work);
}
/**
* calc_txlen - calculate size of message to send packet
* @len: Length of data
*
* Returns the size of the TXFIFO message needed to send
* this packet.
*/
static unsigned int calc_txlen(unsigned int len)
{
return ALIGN(len + 4, 4);
}
/**
* ks8851_start_xmit_spi - transmit packet using SPI
* @skb: The buffer to transmit

@ -386,16 +397,17 @@ static netdev_tx_t ks8851_start_xmit_spi(struct sk_buff *skb,
spin_lock(&ks->statelock);
if (needed > ks->tx_space) {
if (ks->queued_len + needed > ks->tx_space) {
netif_stop_queue(dev);
ret = NETDEV_TX_BUSY;
} else {
ks->tx_space -= needed;
ks->queued_len += needed;
skb_queue_tail(&ks->txq, skb);
}
spin_unlock(&ks->statelock);
schedule_work(&kss->tx_work);
if (ret == NETDEV_TX_OK)
schedule_work(&kss->tx_work);
return ret;
}
@ -20,6 +20,7 @@ config MICROSOFT_MANA
depends on PCI_MSI && X86_64
depends on PCI_HYPERV
select AUXILIARY_BUS
select PAGE_POOL
help
This driver supports Microsoft Azure Network Adapter (MANA).
So far, the driver is only supported on X86_64.

@ -582,10 +582,10 @@ static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *pri
rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64];
rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127];
rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255];
rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_128_255];
rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_256_511];
rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_512_1023];
rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1024_1526];
rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511];
rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023];
rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526];
rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX];
}

static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port,

@ -610,10 +610,10 @@ static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port,
rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_PMAC_64];
rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_PMAC_65_127];
rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_PMAC_128_255];
rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_128_255];
rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_256_511];
rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_512_1023];
rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1024_1526];
rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_256_511];
rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_512_1023];
rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_1024_1526];
rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1527_MAX];
}

void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,

@ -237,7 +237,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
*/
ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS);
if (priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)
if (!(priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN))
return;
num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
@ -160,60 +160,6 @@ static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
/* avoid re-using remote and pfmemalloc pages */
if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#endif
/* If we have drained the page fragment pool we need to update
* the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
if (unlikely(pagecnt_bias == 1)) {
page_ref_add(page, USHRT_MAX - 1);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
return true;
}
/**
* wx_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on
* @old_buff: donor buffer to have page reused
*
* Synchronizes page for reuse by the adapter
**/
static void wx_reuse_rx_page(struct wx_ring *rx_ring,
struct wx_rx_buffer *old_buff)
{
u16 nta = rx_ring->next_to_alloc;
struct wx_rx_buffer *new_buff;
new_buff = &rx_ring->rx_buffer_info[nta];
/* update, and store next to alloc */
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
new_buff->page = old_buff->page;
new_buff->page_dma = old_buff->page_dma;
new_buff->page_offset = old_buff->page_offset;
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
static void wx_dma_sync_frag(struct wx_ring *rx_ring,
struct wx_rx_buffer *rx_buffer)
{

@ -270,8 +216,6 @@ static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
size,
DMA_FROM_DEVICE);
skip_sync:
rx_buffer->pagecnt_bias--;
return rx_buffer;
}

@ -280,19 +224,9 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring,
struct sk_buff *skb,
int rx_buffer_pgcnt)
{
if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
wx_reuse_rx_page(rx_ring, rx_buffer);
} else {
if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
/* the page has been released from the ring */
WX_CB(skb)->page_released = true;
else
page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
__page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias);
}
if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
/* the page has been released from the ring */
WX_CB(skb)->page_released = true;
/* clear contents of rx_buffer */
rx_buffer->page = NULL;

@ -335,11 +269,12 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
if (size <= WX_RXBUFFER_256) {
memcpy(__skb_put(skb, size), page_addr,
ALIGN(size, sizeof(long)));
rx_buffer->pagecnt_bias++;
page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true);
return skb;
}
skb_mark_for_recycle(skb);
if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))
WX_CB(skb)->dma = rx_buffer->dma;

@ -382,8 +317,6 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
bi->page_dma = dma;
bi->page = page;
bi->page_offset = 0;
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
return true;
}

@ -723,7 +656,6 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
}

@ -2248,8 +2180,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
/* free resources associated with mapping */
page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
__page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias);
i++;
rx_buffer++;

@ -787,7 +787,6 @@ struct wx_rx_buffer {
dma_addr_t page_dma;
struct page *page;
unsigned int page_offset;
u16 pagecnt_bias;
};

struct wx_queue_stats {
@ -1548,7 +1548,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
goto error;
phy_resume(phydev);
phy_led_triggers_register(phydev);
if (!phydev->is_on_sfp_module)
phy_led_triggers_register(phydev);
/**
* If the external phy used by current mac interface is managed by

@ -1817,7 +1818,8 @@ void phy_detach(struct phy_device *phydev)
}
phydev->phylink = NULL;
phy_led_triggers_unregister(phydev);
if (!phydev->is_on_sfp_module)
phy_led_triggers_unregister(phydev);
if (phydev->mdio.dev.driver)
module_put(phydev->mdio.dev.driver->owner);

@ -1385,7 +1385,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
* if it is true then one of the handlers took the page.
*/
if (reclaim) {
if (reclaim && txq) {
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
int index = SEQ_TO_INDEX(sequence);
int cmd_index = iwl_txq_get_cmd_index(txq, index);

@ -3106,7 +3106,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
u32 i, r, j, rb_len = 0;
spin_lock(&rxq->lock);
spin_lock_bh(&rxq->lock);
r = iwl_get_closed_rb_stts(trans, rxq);

@ -3130,7 +3130,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
*data = iwl_fw_error_next_data(*data);
}
spin_unlock(&rxq->lock);
spin_unlock_bh(&rxq->lock);
return rb_len;
}

@ -783,7 +783,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
int len, bool more, u32 info)
int len, bool more, u32 info, bool allow_direct)
{
struct sk_buff *skb = q->rx_head;
struct skb_shared_info *shinfo = skb_shinfo(skb);

@ -795,7 +795,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
} else {
mt76_put_page_pool_buf(data, true);
mt76_put_page_pool_buf(data, allow_direct);
}
if (more)

@ -815,6 +815,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
struct sk_buff *skb;
unsigned char *data;
bool check_ddone = false;
bool allow_direct = !mt76_queue_is_wed_rx(q);
bool more;
if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&

@ -855,7 +856,8 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
}
if (q->rx_head) {
mt76_add_fragment(dev, q, data, len, more, info);
mt76_add_fragment(dev, q, data, len, more, info,
allow_direct);
continue;
}

@ -884,7 +886,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
continue;
free_frag:
mt76_put_page_pool_buf(data, true);
mt76_put_page_pool_buf(data, allow_direct);
}
mt76_dma_rx_fill(dev, q, true);
@ -4137,6 +4137,8 @@ static void nvme_fw_act_work(struct work_struct *work)
struct nvme_ctrl, fw_act_work);
unsigned long fw_act_timeout;
nvme_auth_stop(ctrl);
if (ctrl->mtfa)
fw_act_timeout = jiffies +
msecs_to_jiffies(ctrl->mtfa * 100);

@ -4192,7 +4194,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
* firmware activation.
*/
if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
nvme_auth_stop(ctrl);
requeue = false;
queue_work(nvme_wq, &ctrl->fw_act_work);
}

@ -2548,24 +2548,17 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
* the controller. Abort any ios on the association and let the
* create_association error path resolve things.
*/
enum nvme_ctrl_state state;
unsigned long flags;
spin_lock_irqsave(&ctrl->lock, flags);
state = ctrl->ctrl.state;
if (state == NVME_CTRL_CONNECTING) {
set_bit(ASSOC_FAILED, &ctrl->flags);
spin_unlock_irqrestore(&ctrl->lock, flags);
if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
__nvme_fc_abort_outstanding_ios(ctrl, true);
set_bit(ASSOC_FAILED, &ctrl->flags);
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: transport error during (re)connect\n",
ctrl->cnum);
return;
}
spin_unlock_irqrestore(&ctrl->lock, flags);
/* Otherwise, only proceed if in LIVE state - e.g. on first error */
if (state != NVME_CTRL_LIVE)
if (ctrl->ctrl.state != NVME_CTRL_LIVE)
return;
dev_warn(ctrl->ctrl.device,

@ -3180,16 +3173,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
else
ret = nvme_fc_recreate_io_queues(ctrl);
}
spin_lock_irqsave(&ctrl->lock, flags);
if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
ret = -EIO;
if (ret) {
spin_unlock_irqrestore(&ctrl->lock, flags);
if (ret)
goto out_term_aen_ops;
}
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
spin_unlock_irqrestore(&ctrl->lock, flags);
ctrl->ctrl.nr_reconnects = 0;
@ -923,6 +923,15 @@ static int amd_gpio_suspend(struct device *dev)
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
/* mask any interrupts not intended to be a wake source */
if (!(gpio_dev->saved_regs[i] & WAKE_SOURCE)) {
writel(gpio_dev->saved_regs[i] & ~BIT(INTERRUPT_MASK_OFF),
gpio_dev->base + pin * 4);
pm_pr_dbg("Disabling GPIO #%d interrupt for suspend.\n",
pin);
}
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
Some files were not shown because too many files have changed in this diff.