Merge branch 'for-6.8/cxl-misc' into for-6.8/cxl

Pick up some miscellaneous fixups for v6.8.
commit 80dda9a69a
Author: Dan Williams
Date:   2024-01-05 19:03:06 -08:00
270 changed files with 2753 additions and 1911 deletions


@ -191,6 +191,10 @@ Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
Geliang Tang <geliang.tang@linux.dev> <geliang.tang@suse.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@xiaomi.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@gmail.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@163.com>
Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>


@ -15,9 +15,11 @@ allOf:
properties:
compatible:
enum:
- fsl,imx23-ocotp
- fsl,imx28-ocotp
items:
- enum:
- fsl,imx23-ocotp
- fsl,imx28-ocotp
- const: fsl,ocotp
reg:
maxItems: 1
@ -35,7 +37,7 @@ unevaluatedProperties: false
examples:
- |
ocotp: efuse@8002c000 {
compatible = "fsl,imx28-ocotp";
compatible = "fsl,imx28-ocotp", "fsl,ocotp";
#address-cells = <1>;
#size-cells = <1>;
reg = <0x8002c000 0x2000>;


@ -6050,10 +6050,8 @@ M: Mikulas Patocka <mpatocka@redhat.com>
M: dm-devel@lists.linux.dev
L: dm-devel@lists.linux.dev
S: Maintained
W: http://sources.redhat.com/dm
Q: http://patchwork.kernel.org/project/dm-devel/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git
T: quilt http://people.redhat.com/agk/patches/linux/editing/
F: Documentation/admin-guide/device-mapper/
F: drivers/md/Kconfig
F: drivers/md/Makefile
@ -9526,6 +9524,7 @@ F: drivers/bus/hisi_lpc.c
HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
M: Yisen Zhuang <yisen.zhuang@huawei.com>
M: Salil Mehta <salil.mehta@huawei.com>
M: Jijie Shao <shaojijie@huawei.com>
L: netdev@vger.kernel.org
S: Maintained
W: http://www.hisilicon.com


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -49,7 +49,6 @@ config ARC
select OF
select OF_EARLY_FLATTREE
select PCI_SYSCALL if PCI
select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32
select TRACE_IRQFLAGS_SUPPORT
@ -232,10 +231,6 @@ config ARC_CACHE_PAGES
Note that Global I/D ENABLE + Per Page DISABLE works but corollary
Global DISABLE + Per Page ENABLE won't work
config ARC_CACHE_VIPT_ALIASING
bool "Support VIPT Aliasing D$"
depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
endif #ARC_CACHE
config ARC_HAS_ICCM


@ -44,31 +44,10 @@ void dma_cache_wback(phys_addr_t start, unsigned long sz);
#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */
#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
#define flush_cache_mm(mm) /* called on munmap/exit */
#define flush_cache_range(mm, u_vstart, u_vend)
#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
#else /* VIPT aliasing dcache */
/* To clear out stale userspace mappings */
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start,unsigned long end);
void flush_cache_page(struct vm_area_struct *vma,
unsigned long user_addr, unsigned long page);
/*
* To make sure that userspace mapping is flushed to memory before
* get_user_pages() uses a kernel mapping to access the page
*/
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma,
struct page *page, unsigned long u_vaddr);
#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
/*
* A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
* This works around some PIO based drivers which don't call flush_dcache_page
@ -76,28 +55,6 @@ void flush_anon_page(struct vm_area_struct *vma,
*/
#define PG_dc_clean PG_arch_1
#define CACHE_COLORS_NUM 4
#define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1)
#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
/*
* Simple wrapper over config option
* Bootup code ensures that hardware matches kernel configuration
*/
static inline int cache_is_vipt_aliasing(void)
{
return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
}
/*
* checks if two addresses (after page aligning) index into same cache set
*/
#define addr_not_cache_congruent(addr1, addr2) \
({ \
cache_is_vipt_aliasing() ? \
(CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \
})
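Editor's note on the macros this hunk removes: with CACHE_COLORS_NUM = 4 and 4 KiB pages, an address's cache color is just bits 13:12, i.e. its page number modulo 4. A minimal standalone C sketch of the congruence test (the addresses are hypothetical):

#include <stdio.h>

#define PAGE_SHIFT       12                /* 4 KiB pages */
#define CACHE_COLORS_NUM 4                 /* from the hunk above */
#define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1)
#define CACHE_COLOR(a)   ((((unsigned long)(a)) >> PAGE_SHIFT) & CACHE_COLORS_MSK)

int main(void)
{
    /* Two mappings of one physical page are congruent (safe to alias
     * in a VIPT dcache) only when their colors match. */
    unsigned long kvaddr = 0x7000a000;  /* hypothetical kernel mapping */
    unsigned long uvaddr = 0x0000d000;  /* hypothetical user mapping */

    printf("k-color=%lu u-color=%lu congruent=%d\n",
           CACHE_COLOR(kvaddr), CACHE_COLOR(uvaddr),
           CACHE_COLOR(kvaddr) == CACHE_COLOR(uvaddr));
    return 0;
}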
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \


@ -291,4 +291,36 @@
/* M = 8-1 N = 8 */
.endm
.macro SAVE_ABI_CALLEE_REGS
push r13
push r14
push r15
push r16
push r17
push r18
push r19
push r20
push r21
push r22
push r23
push r24
push r25
.endm
.macro RESTORE_ABI_CALLEE_REGS
pop r25
pop r24
pop r23
pop r22
pop r21
pop r20
pop r19
pop r18
pop r17
pop r16
pop r15
pop r14
pop r13
.endm
#endif


@ -33,6 +33,91 @@
#include <asm/irqflags-compact.h>
#include <asm/thread_info.h> /* For THREAD_SIZE */
/* Note on the LD/ST addr modes with addr reg wback
*
* LD.a same as LD.aw
*
* LD.a reg1, [reg2, x] => Pre Incr
* Eff Addr for load = [reg2 + x]
*
* LD.ab reg1, [reg2, x] => Post Incr
* Eff Addr for load = [reg2]
*/
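The .a/.ab write-back modes described in this note correspond to the usual pre-update and post-update pointer idioms. A hedged C analogy of the PUSH (st.a \reg, [sp, -4]) and POP (ld.ab \reg, [sp, 4]) macros used in this series, illustrative only:

#include <stdio.h>

/* st.a \reg, [sp, -4]: update sp first, then store at the NEW sp (pre). */
static void push(unsigned int **sp, unsigned int reg)
{
    *sp -= 1;      /* sp -= 4 bytes */
    **sp = reg;    /* effective address is the updated sp */
}

/* ld.ab \reg, [sp, 4]: load at the CURRENT sp, then update sp (post). */
static unsigned int pop(unsigned int **sp)
{
    unsigned int reg = **sp;  /* effective address is the old sp */
    *sp += 1;                 /* sp += 4 bytes */
    return reg;
}

int main(void)
{
    unsigned int stack[4], *sp = &stack[4];  /* empty stack, grows down */

    push(&sp, 0xdead);
    push(&sp, 0xbeef);

    unsigned int a = pop(&sp);  /* 0xbeef: LIFO order preserved */
    unsigned int b = pop(&sp);  /* 0xdead */
    printf("%x %x\n", a, b);
    return 0;
}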
.macro PUSHAX aux
lr r9, [\aux]
push r9
.endm
.macro POPAX aux
pop r9
sr r9, [\aux]
.endm
.macro SAVE_R0_TO_R12
push r0
push r1
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
.endm
.macro RESTORE_R12_TO_R0
pop r12
pop r11
pop r10
pop r9
pop r8
pop r7
pop r6
pop r5
pop r4
pop r3
pop r2
pop r1
pop r0
.endm
.macro SAVE_ABI_CALLEE_REGS
push r13
push r14
push r15
push r16
push r17
push r18
push r19
push r20
push r21
push r22
push r23
push r24
push r25
.endm
.macro RESTORE_ABI_CALLEE_REGS
pop r25
pop r24
pop r23
pop r22
pop r21
pop r20
pop r19
pop r18
pop r17
pop r16
pop r15
pop r14
pop r13
.endm
/*--------------------------------------------------------------
* Switch to Kernel Mode stack if SP points to User Mode stack
*
@ -235,7 +320,7 @@
SWITCH_TO_KERNEL_STK
PUSH 0x003\LVL\()abcd /* Dummy ECR */
st.a 0x003\LVL\()abcd, [sp, -4] /* Dummy ECR */
sub sp, sp, 8 /* skip orig_r0 (not needed)
skip pt_regs->sp, already saved above */


@ -21,114 +21,12 @@
#include <asm/entry-arcv2.h>
#endif
/* Note on the LD/ST addr modes with addr reg wback
*
* LD.a same as LD.aw
*
* LD.a reg1, [reg2, x] => Pre Incr
* Eff Addr for load = [reg2 + x]
*
* LD.ab reg1, [reg2, x] => Post Incr
* Eff Addr for load = [reg2]
*/
.macro PUSH reg
st.a \reg, [sp, -4]
.endm
.macro PUSHAX aux
lr r9, [\aux]
PUSH r9
.endm
.macro POP reg
ld.ab \reg, [sp, 4]
.endm
.macro POPAX aux
POP r9
sr r9, [\aux]
.endm
/*--------------------------------------------------------------
* Helpers to save/restore Scratch Regs:
* used by Interrupt/Exception Prologue/Epilogue
*-------------------------------------------------------------*/
.macro SAVE_R0_TO_R12
PUSH r0
PUSH r1
PUSH r2
PUSH r3
PUSH r4
PUSH r5
PUSH r6
PUSH r7
PUSH r8
PUSH r9
PUSH r10
PUSH r11
PUSH r12
.endm
.macro RESTORE_R12_TO_R0
POP r12
POP r11
POP r10
POP r9
POP r8
POP r7
POP r6
POP r5
POP r4
POP r3
POP r2
POP r1
POP r0
.endm
/*--------------------------------------------------------------
* Helpers to save/restore callee-saved regs:
* used by several macros below
*-------------------------------------------------------------*/
.macro SAVE_R13_TO_R25
PUSH r13
PUSH r14
PUSH r15
PUSH r16
PUSH r17
PUSH r18
PUSH r19
PUSH r20
PUSH r21
PUSH r22
PUSH r23
PUSH r24
PUSH r25
.endm
.macro RESTORE_R25_TO_R13
POP r25
POP r24
POP r23
POP r22
POP r21
POP r20
POP r19
POP r18
POP r17
POP r16
POP r15
POP r14
POP r13
.endm
/*
* save user mode callee regs as struct callee_regs
* - needed by fork/do_signal/unaligned-access-emulation.
*/
.macro SAVE_CALLEE_SAVED_USER
SAVE_R13_TO_R25
SAVE_ABI_CALLEE_REGS
.endm
/*
@ -136,18 +34,18 @@
* - could have been changed by ptrace tracer or unaligned-access fixup
*/
.macro RESTORE_CALLEE_SAVED_USER
RESTORE_R25_TO_R13
RESTORE_ABI_CALLEE_REGS
.endm
/*
* save/restore kernel mode callee regs at the time of context switch
*/
.macro SAVE_CALLEE_SAVED_KERNEL
SAVE_R13_TO_R25
SAVE_ABI_CALLEE_REGS
.endm
.macro RESTORE_CALLEE_SAVED_KERNEL
RESTORE_R25_TO_R13
RESTORE_ABI_CALLEE_REGS
.endm
/*--------------------------------------------------------------


@ -10,6 +10,13 @@
#include <linux/types.h>
#include <asm-generic/pgtable-nopmd.h>
/*
* Hugetlb definitions.
*/
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte(pmd_val(pmd));


@ -54,6 +54,10 @@ struct pt_regs {
ecr_reg ecr;
};
struct callee_regs {
unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};
#define MAX_REG_OFFSET offsetof(struct pt_regs, ecr)
#else
@ -92,16 +96,14 @@ struct pt_regs {
unsigned long status32;
};
#define MAX_REG_OFFSET offsetof(struct pt_regs, status32)
#endif
/* Callee saved registers - need to be saved only when you are scheduled out */
struct callee_regs {
unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};
#define MAX_REG_OFFSET offsetof(struct pt_regs, status32)
#endif
#define instruction_pointer(regs) ((regs)->ret)
#define profile_pc(regs) instruction_pointer(regs)


@ -153,7 +153,7 @@ static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
int n = 0;
#ifdef CONFIG_ISA_ARCV2
const char *release, *cpu_nm, *isa_nm = "ARCv2";
const char *release = "", *cpu_nm = "HS38", *isa_nm = "ARCv2";
int dual_issue = 0, dual_enb = 0, mpy_opt, present;
int bpu_full, bpu_cache, bpu_pred, bpu_ret_stk;
char mpy_nm[16], lpb_nm[32];
@ -172,8 +172,6 @@ static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
* releases only update it.
*/
cpu_nm = "HS38";
if (info->arcver > 0x50 && info->arcver <= 0x53) {
release = arc_hs_rel[info->arcver - 0x51].str;
} else {


@ -62,7 +62,7 @@ struct rt_sigframe {
unsigned int sigret_magic;
};
static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
{
int err = 0;
#ifndef CONFIG_ISA_ARCOMPACT
@ -75,12 +75,12 @@ static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
#else
v2abi.r58 = v2abi.r59 = 0;
#endif
err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi));
#endif
return err;
}
static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
static int restore_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
{
int err = 0;
#ifndef CONFIG_ISA_ARCOMPACT


@ -145,10 +145,9 @@ int arc_cache_mumbojumbo(int c, char *buf, int len)
p_dc->sz_k = 1 << (dbcr.sz - 1);
n += scnprintf(buf + n, len - n,
"D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
"D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s\n",
p_dc->sz_k, assoc, p_dc->line_len,
vipt ? "VIPT" : "PIPT",
p_dc->colors > 1 ? " aliasing" : "",
IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
slc_chk:
@ -703,51 +702,10 @@ static inline void arc_slc_enable(void)
* Exported APIs
*/
/*
* Handle cache congruency of kernel and userspace mappings of page when kernel
* writes-to/reads-from
*
* The idea is to defer flushing of kernel mapping after a WRITE, possible if:
* -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
* -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
* -In SMP, if hardware caches are coherent
*
* There's a corollary case, where kernel READs from a userspace mapped page.
* If the U-mapping is not congruent to K-mapping, former needs flushing.
*/
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping;
if (!cache_is_vipt_aliasing()) {
clear_bit(PG_dc_clean, &folio->flags);
return;
}
/* don't handle anon pages here */
mapping = folio_flush_mapping(folio);
if (!mapping)
return;
/*
* pagecache page, file not yet mapped to userspace
* Make a note that K-mapping is dirty
*/
if (!mapping_mapped(mapping)) {
clear_bit(PG_dc_clean, &folio->flags);
} else if (folio_mapped(folio)) {
/* kernel reading from page with U-mapping */
phys_addr_t paddr = (unsigned long)folio_address(folio);
unsigned long vaddr = folio_pos(folio);
/*
* vaddr is not actually the virtual address, but is
* congruent to every user mapping.
*/
if (addr_not_cache_congruent(paddr, vaddr))
__flush_dcache_pages(paddr, vaddr,
folio_nr_pages(folio));
}
clear_bit(PG_dc_clean, &folio->flags);
return;
}
EXPORT_SYMBOL(flush_dcache_folio);
@ -921,44 +879,6 @@ noinline void flush_cache_all(void)
}
#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
void flush_cache_mm(struct mm_struct *mm)
{
flush_cache_all();
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
unsigned long pfn)
{
phys_addr_t paddr = pfn << PAGE_SHIFT;
u_vaddr &= PAGE_MASK;
__flush_dcache_pages(paddr, u_vaddr, 1);
if (vma->vm_flags & VM_EXEC)
__inv_icache_pages(paddr, u_vaddr, 1);
}
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
flush_cache_all();
}
void flush_anon_page(struct vm_area_struct *vma, struct page *page,
unsigned long u_vaddr)
{
/* TBD: do we really need to clear the kernel mapping */
__flush_dcache_pages((phys_addr_t)page_address(page), u_vaddr, 1);
__flush_dcache_pages((phys_addr_t)page_address(page),
(phys_addr_t)page_address(page), 1);
}
#endif
void copy_user_highpage(struct page *to, struct page *from,
unsigned long u_vaddr, struct vm_area_struct *vma)
{
@ -966,46 +886,11 @@ void copy_user_highpage(struct page *to, struct page *from,
struct folio *dst = page_folio(to);
void *kfrom = kmap_atomic(from);
void *kto = kmap_atomic(to);
int clean_src_k_mappings = 0;
/*
* If SRC page was already mapped in userspace AND it's U-mapping is
* not congruent with K-mapping, sync former to physical page so that
* K-mapping in memcpy below, sees the right data
*
* Note that while @u_vaddr refers to DST page's userspace vaddr, it is
* equally valid for SRC page as well
*
* For !VIPT cache, all of this gets compiled out as
* addr_not_cache_congruent() is 0
*/
if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
__flush_dcache_pages((unsigned long)kfrom, u_vaddr, 1);
clean_src_k_mappings = 1;
}
copy_page(kto, kfrom);
/*
* Mark DST page K-mapping as dirty for a later finalization by
* update_mmu_cache(). Although the finalization could have been done
* here as well (given that both vaddr/paddr are available).
* But update_mmu_cache() already has code to do that for other
* non copied user pages (e.g. read faults which wire in pagecache page
* directly).
*/
clear_bit(PG_dc_clean, &dst->flags);
/*
* if SRC was already usermapped and non-congruent to kernel mapping
* sync the kernel mapping back to physical page
*/
if (clean_src_k_mappings) {
__flush_dcache_pages((unsigned long)kfrom,
(unsigned long)kfrom, 1);
} else {
clear_bit(PG_dc_clean, &src->flags);
}
clear_bit(PG_dc_clean, &src->flags);
kunmap_atomic(kto);
kunmap_atomic(kfrom);
@ -1140,17 +1025,8 @@ static noinline void __init arc_cache_init_master(void)
dc->line_len, L1_CACHE_BYTES);
/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
if (is_isa_arcompact()) {
int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
if (dc->colors > 1) {
if (!handled)
panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
if (CACHE_COLORS_NUM != dc->colors)
panic("CACHE_COLORS_NUM not optimized for config\n");
} else if (handled && dc->colors == 1) {
panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
}
if (is_isa_arcompact() && dc->colors > 1) {
panic("Aliasing VIPT cache not supported\n");
}
}


@ -14,10 +14,6 @@
#include <asm/cacheflush.h>
#define COLOUR_ALIGN(addr, pgoff) \
((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
(((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
/*
* Ensure that shared mappings are correctly aligned to
* avoid aliasing issues with VIPT caches.
@ -31,21 +27,13 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int do_align = 0;
int aliasing = cache_is_vipt_aliasing();
struct vm_unmapped_area_info info;
/*
* We only need to do colour alignment if D cache aliases.
*/
if (aliasing)
do_align = filp || (flags & MAP_SHARED);
/*
* We enforce the MAP_FIXED case.
*/
if (flags & MAP_FIXED) {
if (aliasing && flags & MAP_SHARED &&
if (flags & MAP_SHARED &&
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;
@ -55,10 +43,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return -ENOMEM;
if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
@ -70,7 +55,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
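For context on the retained MAP_FIXED test: a fixed shared mapping is acceptable only if the address and file offset agree modulo SHMLBA, so every mapper of a given offset lands on the same cache color. A small sketch, with SHMLBA assumed here to be four 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define SHMLBA     (4UL << PAGE_SHIFT)  /* assumed: 4 colors x 4 KiB */

/* The MAP_FIXED test kept by this hunk: address and file offset must
 * agree modulo SHMLBA so all mappings of one offset share a color. */
static int fixed_shared_ok(unsigned long addr, unsigned long pgoff)
{
    return !((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1));
}

int main(void)
{
    printf("%d\n", fixed_shared_ok(0x20000000UL, 0)); /* 1: congruent */
    printf("%d\n", fixed_shared_ok(0x20001000UL, 0)); /* 0: off by a page */
    return 0;
}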


@ -478,21 +478,15 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
create_tlb(vma, vaddr, ptep);
if (page == ZERO_PAGE(0)) {
if (page == ZERO_PAGE(0))
return;
}
/*
* Exec page : Independent of aliasing/page-color considerations,
* since icache doesn't snoop dcache on ARC, any dirty
* K-mapping of a code page needs to be wback+inv so that
* icache fetch by userspace sees code correctly.
* !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
* so userspace sees the right data.
* (Avoids the flush for Non-exec + congruent mapping case)
* For executable pages, since icache doesn't snoop dcache, any
* dirty K-mapping of a code page needs to be wback+inv so that
* icache fetch by userspace sees code correctly.
*/
if ((vma->vm_flags & VM_EXEC) ||
addr_not_cache_congruent(paddr, vaddr)) {
if (vma->vm_flags & VM_EXEC) {
struct folio *folio = page_folio(page);
int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
if (dirty) {


@ -359,6 +359,7 @@ usb: target-module@47400000 {
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
ti,sysc-delay-us = <2>;
clocks = <&l3s_clkctrl AM3_L3S_USB_OTG_HS_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;


@ -147,7 +147,7 @@ ocp: ocp {
l3-noc@44000000 {
compatible = "ti,dra7-l3-noc";
reg = <0x44000000 0x1000>,
reg = <0x44000000 0x1000000>,
<0x45000000 0x1000>;
interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
<&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;


@ -793,11 +793,16 @@ void __init omap_soc_device_init(void)
soc_dev_attr->machine = soc_name;
soc_dev_attr->family = omap_get_family();
if (!soc_dev_attr->family) {
kfree(soc_dev_attr);
return;
}
soc_dev_attr->revision = soc_rev;
soc_dev_attr->custom_attr_group = omap_soc_groups[0];
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr->family);
kfree(soc_dev_attr);
return;
}


@ -68,10 +68,7 @@ &ehci1 {
&emac0 {
pinctrl-names = "default";
pinctrl-0 = <&ext_rgmii_pins>;
phy-mode = "rgmii";
phy-handle = <&ext_rgmii_phy>;
allwinner,rx-delay-ps = <3100>;
allwinner,tx-delay-ps = <700>;
status = "okay";
};


@ -13,6 +13,9 @@ / {
};
&emac0 {
allwinner,rx-delay-ps = <3100>;
allwinner,tx-delay-ps = <700>;
phy-mode = "rgmii";
phy-supply = <&reg_dcdce>;
};


@ -13,6 +13,8 @@ / {
};
&emac0 {
allwinner,tx-delay-ps = <700>;
phy-mode = "rgmii-rxid";
phy-supply = <&reg_dldo1>;
};


@ -238,6 +238,7 @@ &i2c6 {
mt6360: pmic@34 {
compatible = "mediatek,mt6360";
reg = <0x34>;
interrupt-parent = <&pio>;
interrupts = <128 IRQ_TYPE_EDGE_FALLING>;
interrupt-names = "IRQB";
interrupt-controller;


@ -44,9 +44,6 @@
return sys_ni_syscall(); \
}
#define COMPAT_SYS_NI(name) \
SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers);
#endif /* CONFIG_COMPAT */
#define __SYSCALL_DEFINEx(x, name, ...) \
@ -81,6 +78,5 @@
}
asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused);
#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers);
#endif /* __ASM_SYSCALL_WRAPPER_H */


@ -410,7 +410,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
kvm_timer_vcpu_terminate(vcpu);
kvm_pmu_vcpu_destroy(vcpu);
kvm_vgic_vcpu_destroy(vcpu);
kvm_arm_vcpu_destroy(vcpu);
}


@ -368,7 +368,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
vgic_v4_teardown(kvm);
}
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@ -379,29 +379,39 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
vgic_flush_pending_lpis(vcpu);
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
vgic_unregister_redist_iodev(vcpu);
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}
}
static void __kvm_vgic_destroy(struct kvm *kvm)
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu *vcpu;
unsigned long i;
struct kvm *kvm = vcpu->kvm;
lockdep_assert_held(&kvm->arch.config_lock);
vgic_debug_destroy(kvm);
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vgic_vcpu_destroy(vcpu);
kvm_vgic_dist_destroy(kvm);
mutex_lock(&kvm->slots_lock);
__kvm_vgic_vcpu_destroy(vcpu);
mutex_unlock(&kvm->slots_lock);
}
void kvm_vgic_destroy(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long i;
mutex_lock(&kvm->slots_lock);
vgic_debug_destroy(kvm);
kvm_for_each_vcpu(i, vcpu, kvm)
__kvm_vgic_vcpu_destroy(vcpu);
mutex_lock(&kvm->arch.config_lock);
__kvm_vgic_destroy(kvm);
kvm_vgic_dist_destroy(kvm);
mutex_unlock(&kvm->arch.config_lock);
mutex_unlock(&kvm->slots_lock);
}
/**
@ -469,25 +479,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
type = VGIC_V3;
}
if (ret) {
__kvm_vgic_destroy(kvm);
if (ret)
goto out;
}
dist->ready = true;
dist_base = dist->vgic_dist_base;
mutex_unlock(&kvm->arch.config_lock);
ret = vgic_register_dist_iodev(kvm, dist_base, type);
if (ret) {
if (ret)
kvm_err("Unable to register VGIC dist MMIO regions\n");
kvm_vgic_destroy(kvm);
}
mutex_unlock(&kvm->slots_lock);
return ret;
goto out_slots;
out:
mutex_unlock(&kvm->arch.config_lock);
out_slots:
mutex_unlock(&kvm->slots_lock);
if (ret)
kvm_vgic_destroy(kvm);
return ret;
}


@ -820,7 +820,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
return ret;
}
static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
@ -833,6 +833,8 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
unsigned long c;
int ret = 0;
lockdep_assert_held(&kvm->slots_lock);
kvm_for_each_vcpu(c, vcpu, kvm) {
ret = vgic_register_redist_iodev(vcpu);
if (ret)


@ -241,6 +241,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
int vgic_v3_save_pending_tables(struct kvm *kvm);
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu);
bool vgic_v3_check_base(struct kvm *kvm);
void vgic_v3_load(struct kvm_vcpu *vcpu);


@ -46,9 +46,6 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
return sys_ni_syscall(); \
}
#define COMPAT_SYS_NI(name) \
SYSCALL_ALIAS(__riscv_compat_sys_##name, sys_ni_posix_timers);
#endif /* CONFIG_COMPAT */
#define __SYSCALL_DEFINEx(x, name, ...) \
@ -82,6 +79,4 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
return sys_ni_syscall(); \
}
#define SYS_NI(name) SYSCALL_ALIAS(__riscv_sys_##name, sys_ni_posix_timers);
#endif /* __ASM_SYSCALL_WRAPPER_H */


@ -55,6 +55,7 @@ struct imsic {
/* IMSIC SW-file */
struct imsic_mrif *swfile;
phys_addr_t swfile_pa;
spinlock_t swfile_extirq_lock;
};
#define imsic_vs_csr_read(__c) \
@ -613,12 +614,23 @@ static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu)
{
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
struct imsic_mrif *mrif = imsic->swfile;
unsigned long flags;
/*
* The critical section is necessary during external interrupt
* updates to avoid the risk of losing interrupts due to potential
* interruptions between reading topei and updating pending status.
*/
spin_lock_irqsave(&imsic->swfile_extirq_lock, flags);
if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) &&
imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
else
kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags);
}
static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear,
@ -1039,6 +1051,7 @@ int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
}
imsic->swfile = page_to_virt(swfile_page);
imsic->swfile_pa = page_to_phys(swfile_page);
spin_lock_init(&imsic->swfile_extirq_lock);
/* Setup IO device */
kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);


@ -44,8 +44,7 @@ CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
@ -76,7 +75,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@ -93,6 +91,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLUB_STATS=y
# CONFIG_COMPAT_BRK is not set
@ -619,6 +618,9 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_BTRFS_DEBUG=y
CONFIG_BTRFS_ASSERT=y
CONFIG_NILFS2_FS=m
CONFIG_BCACHEFS_FS=y
CONFIG_BCACHEFS_QUOTA=y
CONFIG_BCACHEFS_POSIX_ACL=y
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
@ -691,7 +693,6 @@ CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y


@ -42,8 +42,7 @@ CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
@ -71,7 +70,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@ -88,6 +86,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
CONFIG_ZSMALLOC_STAT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
@ -605,6 +604,9 @@ CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_BCACHEFS_QUOTA=y
CONFIG_BCACHEFS_POSIX_ACL=y
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
@ -677,7 +679,6 @@ CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_LOCKDOWN_LSM=y


@ -9,8 +9,7 @@ CONFIG_BPF_SYSCALL=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_CRASH_DUMP=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_MARCH_Z13=y
# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
CONFIG_HZ_100=y


@ -79,7 +79,7 @@ static inline int test_fp_ctl(u32 fpc)
#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7)
#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW)
struct kernel_fpu;


@ -63,10 +63,6 @@
cond_syscall(__s390x_sys_##name); \
cond_syscall(__s390_sys_##name)
#define SYS_NI(name) \
SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers); \
SYSCALL_ALIAS(__s390_sys_##name, sys_ni_posix_timers)
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
long __s390_compat_sys##name(struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \
@ -85,15 +81,11 @@
/*
* As some compat syscalls may not be implemented, we need to expand
* COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
* kernel/time/posix-stubs.c to cover this case as well.
* COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well.
*/
#define COND_SYSCALL_COMPAT(name) \
cond_syscall(__s390_compat_sys_##name)
#define COMPAT_SYS_NI(name) \
SYSCALL_ALIAS(__s390_compat_sys_##name, sys_ni_posix_timers)
#define __S390_SYS_STUBx(x, name, ...) \
long __s390_sys##name(struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \
@ -124,9 +116,6 @@
#define COND_SYSCALL(name) \
cond_syscall(__s390x_sys_##name)
#define SYS_NI(name) \
SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers)
#define __S390_SYS_STUBx(x, fullname, name, ...)
#endif /* CONFIG_COMPAT */


@ -86,9 +86,6 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
return sys_ni_syscall(); \
}
#define __SYS_NI(abi, name) \
SYSCALL_ALIAS(__##abi##_##name, sys_ni_posix_timers);
#ifdef CONFIG_X86_64
#define __X64_SYS_STUB0(name) \
__SYS_STUB0(x64, sys_##name)
@ -100,13 +97,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __X64_COND_SYSCALL(name) \
__COND_SYSCALL(x64, sys_##name)
#define __X64_SYS_NI(name) \
__SYS_NI(x64, sys_##name)
#else /* CONFIG_X86_64 */
#define __X64_SYS_STUB0(name)
#define __X64_SYS_STUBx(x, name, ...)
#define __X64_COND_SYSCALL(name)
#define __X64_SYS_NI(name)
#endif /* CONFIG_X86_64 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
@ -120,13 +114,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __IA32_COND_SYSCALL(name) \
__COND_SYSCALL(ia32, sys_##name)
#define __IA32_SYS_NI(name) \
__SYS_NI(ia32, sys_##name)
#else /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
#define __IA32_SYS_STUB0(name)
#define __IA32_SYS_STUBx(x, name, ...)
#define __IA32_COND_SYSCALL(name)
#define __IA32_SYS_NI(name)
#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
#ifdef CONFIG_IA32_EMULATION
@ -135,8 +126,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
* additional wrappers (aptly named __ia32_sys_xyzzy) which decode the
* ia32 regs in the proper order for shared or "common" syscalls. As some
* syscalls may not be implemented, we need to expand COND_SYSCALL in
* kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this
* case as well.
* kernel/sys_ni.c to cover this case as well.
*/
#define __IA32_COMPAT_SYS_STUB0(name) \
__SYS_STUB0(ia32, compat_sys_##name)
@ -148,14 +138,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __IA32_COMPAT_COND_SYSCALL(name) \
__COND_SYSCALL(ia32, compat_sys_##name)
#define __IA32_COMPAT_SYS_NI(name) \
__SYS_NI(ia32, compat_sys_##name)
#else /* CONFIG_IA32_EMULATION */
#define __IA32_COMPAT_SYS_STUB0(name)
#define __IA32_COMPAT_SYS_STUBx(x, name, ...)
#define __IA32_COMPAT_COND_SYSCALL(name)
#define __IA32_COMPAT_SYS_NI(name)
#endif /* CONFIG_IA32_EMULATION */
@ -175,13 +161,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __X32_COMPAT_COND_SYSCALL(name) \
__COND_SYSCALL(x64, compat_sys_##name)
#define __X32_COMPAT_SYS_NI(name) \
__SYS_NI(x64, compat_sys_##name)
#else /* CONFIG_X86_X32_ABI */
#define __X32_COMPAT_SYS_STUB0(name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...)
#define __X32_COMPAT_COND_SYSCALL(name)
#define __X32_COMPAT_SYS_NI(name)
#endif /* CONFIG_X86_X32_ABI */
@ -212,17 +195,12 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
/*
* As some compat syscalls may not be implemented, we need to expand
* COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
* kernel/time/posix-stubs.c to cover this case as well.
* COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well.
*/
#define COND_SYSCALL_COMPAT(name) \
__IA32_COMPAT_COND_SYSCALL(name) \
__X32_COMPAT_COND_SYSCALL(name)
#define COMPAT_SYS_NI(name) \
__IA32_COMPAT_SYS_NI(name) \
__X32_COMPAT_SYS_NI(name)
#endif /* CONFIG_COMPAT */
#define __SYSCALL_DEFINEx(x, name, ...) \
@ -243,8 +221,8 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
* As the generic SYSCALL_DEFINE0() macro does not decode any parameters for
* obvious reasons, and passing struct pt_regs *regs to it in %rdi does not
* hurt, we only need to re-define it here to keep the naming congruent to
* SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() and SYS_NI()
* macros to work correctly.
* SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() macro
* to work correctly.
*/
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
@ -257,10 +235,6 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
__X64_COND_SYSCALL(name) \
__IA32_COND_SYSCALL(name)
#define SYS_NI(name) \
__X64_SYS_NI(name) \
__IA32_SYS_NI(name)
/*
* For VSYSCALLS, we need to declare these three syscalls with the new


@ -293,6 +293,7 @@ acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
processor->processor_id, /* ACPI ID */
processor->lapic_flags & ACPI_MADT_ENABLED);
has_lapic_cpus = true;
return 0;
}
@ -1134,7 +1135,6 @@ static int __init acpi_parse_madt_lapic_entries(void)
if (!count) {
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
acpi_parse_lapic, MAX_LOCAL_APIC);
has_lapic_cpus = count > 0;
x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
acpi_parse_x2apic, MAX_LOCAL_APIC);
}


@ -255,6 +255,16 @@ static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
}
}
static void __init_or_module noinline optimize_nops_inplace(u8 *instr, size_t len)
{
unsigned long flags;
local_irq_save(flags);
optimize_nops(instr, len);
sync_core();
local_irq_restore(flags);
}
/*
* In this context, "source" is where the instructions are placed in the
* section .altinstr_replacement, for example during kernel build by the
@ -438,7 +448,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
* patch if feature is *NOT* present.
*/
if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
optimize_nops(instr, a->instrlen);
optimize_nops_inplace(instr, a->instrlen);
continue;
}
@ -1685,8 +1695,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
} else {
local_irq_save(flags);
memcpy(addr, opcode, len);
local_irq_restore(flags);
sync_core();
local_irq_restore(flags);
/*
* Could also do a CLFLUSH here to speed up CPU recovery; but


@ -255,6 +255,22 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
testl $X2APIC_ENABLE, %eax
jnz .Lread_apicid_msr
#ifdef CONFIG_X86_X2APIC
/*
* If system is in X2APIC mode then MMIO base might not be
* mapped causing the MMIO read below to fault. Faults can't
* be handled at that point.
*/
cmpl $0, x2apic_mode(%rip)
jz .Lread_apicid_mmio
/* Force the AP into X2APIC mode. */
orl $X2APIC_ENABLE, %eax
wrmsr
jmp .Lread_apicid_msr
#endif
.Lread_apicid_mmio:
/* Read the APIC ID from the fix-mapped MMIO space. */
movq apic_mmio_base(%rip), %rcx
addq $APIC_ID, %rcx
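A C-level paraphrase of the assembly above may help: in x2APIC mode the xAPIC MMIO window may not be mapped, so the APIC ID must come from an MSR rather than the fix-mapped MMIO read. All helpers and register constants below are simulation stubs, not the kernel's API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MSR_X2APIC_APICID 0x802  /* assumed: x2APIC ID register MSR */
#define APIC_ID_MMIO_OFF  0x20   /* assumed: xAPIC ID MMIO offset */

static int x2apic_mode = 1;          /* pretend firmware enabled x2APIC */
static uint8_t fake_mmio[0x1000];    /* stand-in for the APIC fixmap */

/* Simulation stubs; the real paths are rdmsr and a fix-mapped load. */
static uint64_t rdmsr(uint32_t msr) { (void)msr; return 7; }
static uint32_t mmio_read32(const void *a)
{
    uint32_t v;
    memcpy(&v, a, sizeof(v));
    return v;
}

static uint32_t read_apicid(void)
{
    if (x2apic_mode)  /* MMIO window may be unmapped: must use the MSR */
        return (uint32_t)rdmsr(MSR_X2APIC_APICID);
    return mmio_read32(fake_mmio + APIC_ID_MMIO_OFF) >> 24;  /* bits 31:24 */
}

int main(void) { printf("apicid=%u\n", read_apicid()); return 0; }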


@ -2972,6 +2972,25 @@ static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
}
/*
* For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
* the host/guest supports its use.
*
* guest_can_use() checks a number of requirements on the host/guest to
* ensure that MSR_IA32_XSS is available, but it might report true even
* if X86_FEATURE_XSAVES isn't configured in the guest to ensure host
* MSR_IA32_XSS is always properly restored. For SEV-ES, it is better
* to further check that the guest CPUID actually supports
* X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaved
* guests will still get intercepted and caught in the normal
* kvm_emulate_rdmsr()/kvm_emulated_wrmsr() paths.
*/
if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
else
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0);
}
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)


@ -103,6 +103,7 @@ static const struct svm_direct_access_msrs {
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
{ .index = MSR_IA32_LASTINTTOIP, .always = false },
{ .index = MSR_IA32_XSS, .always = false },
{ .index = MSR_EFER, .always = false },
{ .index = MSR_IA32_CR_PAT, .always = false },
{ .index = MSR_AMD64_SEV_ES_GHCB, .always = true },


@ -30,7 +30,7 @@
#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2
#define MAX_DIRECT_ACCESS_MSRS 46
#define MAX_DIRECT_ACCESS_MSRS 47
#define MSRPM_OFFSETS 32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
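The bump from 46 to 47 tracks the new MSR_IA32_XSS entry added to direct_access_msrs above; keeping a hand-counted bound in sync with a table is error-prone. A hedged sketch of a compile-time guard for this kind of pairing (the kernel would typically use BUILD_BUG_ON; this is not KVM's actual code):

#include <assert.h>

#define MAX_DIRECT_ACCESS_MSRS 47

struct direct_access_msr { unsigned int index; unsigned char always; };

static const struct direct_access_msr msrs[] = {
    { .index = 0x0da0, .always = 0 },  /* MSR_IA32_XSS, the new entry */
    /* ... remaining entries elided in this sketch ... */
};

/* A compile-time guard instead of trusting a hand-counted constant. */
static_assert(sizeof(msrs) / sizeof(msrs[0]) <= MAX_DIRECT_ACCESS_MSRS,
              "direct-access MSR table outgrew MAX_DIRECT_ACCESS_MSRS");

int main(void) { return 0; }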


@ -9,6 +9,7 @@ config XEN
select PARAVIRT_CLOCK
select X86_HV_CALLBACK_VECTOR
depends on X86_64 || (X86_32 && X86_PAE)
depends on X86_64 || (X86_GENERIC || MPENTIUM4 || MCORE2 || MATOM || MK8)
depends on X86_LOCAL_APIC && X86_TSC
help
This is the Linux Xen port. Enabling this will allow the


@ -11,6 +11,7 @@
#include <linux/module.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
@ -44,6 +45,7 @@ struct vhci_data {
bool wakeup;
__u16 msft_opcode;
bool aosp_capable;
atomic_t initialized;
};
static int vhci_open_dev(struct hci_dev *hdev)
@ -75,11 +77,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
mutex_lock(&data->open_mutex);
skb_queue_tail(&data->readq, skb);
mutex_unlock(&data->open_mutex);
wake_up_interruptible(&data->read_wait);
if (atomic_read(&data->initialized))
wake_up_interruptible(&data->read_wait);
return 0;
}
@ -464,7 +465,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
skb_put_u8(skb, 0xff);
skb_put_u8(skb, opcode);
put_unaligned_le16(hdev->id, skb_put(skb, 2));
skb_queue_tail(&data->readq, skb);
skb_queue_head(&data->readq, skb);
atomic_inc(&data->initialized);
wake_up_interruptible(&data->read_wait);
return 0;


@ -2158,13 +2158,23 @@ static int sysc_reset(struct sysc *ddata)
sysc_val = sysc_read_sysconfig(ddata);
sysc_val |= sysc_mask;
sysc_write(ddata, sysc_offset, sysc_val);
/* Flush posted write */
/*
* Some devices need a delay before reading registers
* after reset. Presumably a srst_udelay is not needed
* for devices that use a rstctrl register reset.
*/
if (ddata->cfg.srst_udelay)
fsleep(ddata->cfg.srst_udelay);
/*
* Flush posted write. For devices needing srst_udelay
* this should trigger an interconnect error if the
* srst_udelay value is needed but not configured.
*/
sysc_val = sysc_read_sysconfig(ddata);
}
if (ddata->cfg.srst_udelay)
fsleep(ddata->cfg.srst_udelay);
if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata);


@ -397,7 +397,7 @@ static ssize_t interleave_ways_store(struct device *dev,
return rc;
/*
* Even for x3, x9, and x12 interleaves the region interleave must be a
* Even for x3, x6, and x12 interleaves the region interleave must be a
* power of 2 multiple of the host bridge interleave.
*/
if (!is_power_of_2(val / cxld->interleave_ways) ||
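The comment fix matters: against a x3 host bridge decode, region interleave values of 3, 6, and 12 are power-of-2 multiples (1x, 2x, 4x), while 9 is a 3x multiple and is rejected, which is why x9 was wrong. A standalone sketch of the check:

#include <stdio.h>

static int is_power_of_2(unsigned int n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

/* The interleave_ways_store() rule: region ways must be a power-of-2
 * multiple of the host bridge (root decoder) interleave ways. */
static int region_ways_ok(unsigned int region_ways, unsigned int hb_ways)
{
    return region_ways % hb_ways == 0 &&
           is_power_of_2(region_ways / hb_ways);
}

int main(void)
{
    unsigned int vals[] = { 3, 6, 9, 12 };

    for (int i = 0; i < 4; i++)  /* vs a x3 host bridge: 3, 6, 12 pass; 9 fails */
        printf("x%u -> %d\n", vals[i], region_ways_ok(vals[i], 3));
    return 0;
}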


@ -282,13 +282,15 @@ static void dwapb_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
u32 val;
raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
val = dwapb_read(gpio, GPIO_INTEN);
val |= BIT(irqd_to_hwirq(d));
val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
@ -296,12 +298,14 @@ static void dwapb_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
u32 val;
raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
val = dwapb_read(gpio, GPIO_INTEN);
val &= ~BIT(irqd_to_hwirq(d));
val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}


@ -2481,10 +2481,7 @@ static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
return 0;
}
/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static long gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg)
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
@ -2521,6 +2518,17 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
}
/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct gpio_chardev_data *cdev = file->private_data;
return call_ioctl_locked(file, cmd, arg, cdev->gdev,
gpio_ioctl_unlocked);
}
#ifdef CONFIG_COMPAT
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)


@ -285,6 +285,7 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
vm_bo->moved = true;
if (!bo || bo->tbo.type != ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
else if (bo->parent)


@ -1653,18 +1653,24 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
if (test_bit(gpuidx, prange->bitmap_access))
bitmap_set(ctx->bitmap, gpuidx, 1);
}
/*
* If prange is already mapped or with always mapped flag,
* update mapping on GPUs with ACCESS attribute
*/
if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
if (prange->mapped_to_gpu ||
prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
}
} else {
bitmap_or(ctx->bitmap, prange->bitmap_access,
prange->bitmap_aip, MAX_GPU_INSTANCE);
}
if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
if (!prange->mapped_to_gpu ||
bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
r = 0;
goto free_ctx;
}
r = 0;
goto free_ctx;
}
if (prange->actual_loc && !prange->ttm_res) {


@ -1014,13 +1014,20 @@ static enum bp_result get_ss_info_v4_5(
DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_DISPLAY_PORT:
ss_info->spread_spectrum_percentage =
if (bp->base.integrated_info) {
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", bp->base.integrated_info->gpuclk_ss_percentage);
ss_info->spread_spectrum_percentage =
bp->base.integrated_info->gpuclk_ss_percentage;
ss_info->type.CENTER_MODE =
bp->base.integrated_info->gpuclk_ss_type;
} else {
ss_info->spread_spectrum_percentage =
disp_cntl_tbl->dp_ss_percentage;
ss_info->spread_spectrum_range =
ss_info->spread_spectrum_range =
disp_cntl_tbl->dp_ss_rate_10hz * 10;
if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
}
DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
@ -2386,13 +2393,7 @@ static enum bp_result get_vram_info_v30(
return BP_RESULT_BADBIOSTABLE;
info->num_chans = info_v30->channel_num;
/* As suggested by VBIOS we should always use
* dram_channel_width_bytes = 2 when using VRAM
* table version 3.0. This is because the channel_width
* param in the VRAM info table is changed in 7000 series and
* no longer represents the memory channel width.
*/
info->dram_channel_width_bytes = 2;
info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8;
return result;
}
@ -2820,6 +2821,8 @@ static enum bp_result get_integrated_info_v2_2(
info->ma_channel_number = info_v2_2->umachannelnumber;
info->dp_ss_control =
le16_to_cpu(info_v2_2->reserved1);
info->gpuclk_ss_percentage = info_v2_2->gpuclk_ss_percentage;
info->gpuclk_ss_type = info_v2_2->gpuclk_ss_type;
for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
info->ext_disp_conn_info.gu_id[i] =


@ -5095,18 +5095,28 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
*/
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
switch (dc->ctx->asic_id.chip_family) {
if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
case FAMILY_YELLOW_CARP:
/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
break;
case AMDGPU_FAMILY_GC_11_0_1:
case AMDGPU_FAMILY_GC_11_5_0:
if (!dc->debug.dpia_debug.bits.disable_dpia)
return true;
break;
default:
break;
}
/* dmub aux needs dmub notifications to be enabled */
return dc->debug.enable_dmub_aux_for_legacy_ddc;
}
/**


@ -5420,7 +5420,7 @@ static void CalculateOutputLink(
*OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (dml_uint_t)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
if (OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
if (*OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
*OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
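The one-character fix above repairs a classic out-parameter bug: the old condition compared the pointer OutBpp against 0 instead of dereferencing it, so the DSC retry path could never trigger on a zero bpp result. A minimal illustration of the bug class:

#include <stdio.h>

static void calc_bpp(unsigned int *out_bpp)
{
    *out_bpp = 0;  /* the link cannot carry the mode without DSC */
}

int main(void)
{
    unsigned int bpp;
    unsigned int *OutBpp = &bpp;

    calc_bpp(OutBpp);

    if (*OutBpp == 0)  /* fixed form: tests the computed value */
        printf("retry with DSC\n");

    if (OutBpp == 0)   /* old form: tests the pointer, never true here */
        printf("unreachable\n");
    return 0;
}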


@ -960,6 +960,12 @@ void dcn32_init_hw(struct dc *dc)
dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
if (dc->ctx->dmub_srv->dmub->fw_version <
DMUB_FW_VERSION(7, 0, 35)) {
dc->debug.force_disable_subvp = true;
dc->debug.disable_fpo_optimizations = true;
}
}
}


@ -417,6 +417,8 @@ struct integrated_info {
/* V2.1 */
struct edp_info edp1_info;
struct edp_info edp2_info;
uint32_t gpuclk_ss_percentage;
uint32_t gpuclk_ss_type;
};
/*


@ -2465,7 +2465,8 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
val |= XELPDP_FORWARD_CLOCK_UNGATE;
if (is_hdmi_frl(crtc_state->port_clock))
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
is_hdmi_frl(crtc_state->port_clock))
val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);


@ -3747,8 +3747,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (!active)
goto out;
intel_dsc_get_config(pipe_config);
intel_bigjoiner_get_config(pipe_config);
intel_dsc_get_config(pipe_config);
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
DISPLAY_VER(dev_priv) >= 11)
@ -6033,6 +6033,17 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
return -EINVAL;
}
/*
* FIXME: Bigjoiner+async flip is busted currently.
* Remove this check once the issues are fixed.
*/
if (new_crtc_state->bigjoiner_pipes) {
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s] async flip disallowed with bigjoiner\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
if (plane->pipe != crtc->pipe)


@ -389,7 +389,7 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
enum intel_dmc_id dmc_id;
/* TODO: check if the following applies to all D13+ platforms. */
if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
if (!IS_TIGERLAKE(i915))
return;
for_each_dmc_id(dmc_id) {
@ -493,6 +493,45 @@ void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
}
static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915,
enum intel_dmc_id dmc_id, i915_reg_t reg)
{
u32 offset = i915_mmio_reg_offset(reg);
u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, 0));
u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
return offset >= start && offset < end;
}
static bool disable_dmc_evt(struct drm_i915_private *i915,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
{
if (!is_dmc_evt_ctl_reg(i915, dmc_id, reg))
return false;
/* keep all pipe DMC events disabled by default */
if (dmc_id != DMC_FW_MAIN)
return true;
return false;
}
static u32 dmc_mmiodata(struct drm_i915_private *i915,
struct intel_dmc *dmc,
enum intel_dmc_id dmc_id, int i)
{
if (disable_dmc_evt(i915, dmc_id,
dmc->dmc_info[dmc_id].mmioaddr[i],
dmc->dmc_info[dmc_id].mmiodata[i]))
return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
DMC_EVT_CTL_TYPE_EDGE_0_1) |
REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
DMC_EVT_CTL_EVENT_ID_FALSE);
else
return dmc->dmc_info[dmc_id].mmiodata[i];
}
/**
* intel_dmc_load_program() - write the firmware from memory to register.
* @i915: i915 drm device.
@ -532,7 +571,7 @@ void intel_dmc_load_program(struct drm_i915_private *i915)
for_each_dmc_id(dmc_id) {
for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i],
dmc->dmc_info[dmc_id].mmiodata[i]);
dmc_mmiodata(i915, dmc, dmc_id, i));
}
}


@ -175,7 +175,7 @@ hwm_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
* tau4 = (4 | x) << y
* but add 2 when doing the final right shift to account for units
*/
tau4 = ((1 << x_w) | x) << y;
tau4 = (u64)((1 << x_w) | x) << y;
/* val in hwmon interface units (millisec) */
out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
@ -211,7 +211,7 @@ hwm_power1_max_interval_store(struct device *dev,
r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
tau4 = ((1 << x_w) | x) << y;
tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
if (val > max_win)
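The (u64) cast matters because ((1 << x_w) | x) is a 32-bit int, and shifting it by a large y overflows before the assignment widens it. A small demonstration with plausible field values (x, x_w, y are illustrative, not the hardware's actual encoding):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned int x = 3, x_w = 2, y = 30;  /* illustrative field values */

    /* Old code: the shift happens at 32 bits and wraps for large y. */
    uint32_t truncated = ((1u << x_w) | x) << y;

    /* Fixed code: widen to 64 bits first, then shift. */
    uint64_t widened = (uint64_t)((1u << x_w) | x) << y;

    printf("32-bit: %u\n64-bit: %llu\n",
           truncated, (unsigned long long)widened);
    return 0;
}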


@ -325,28 +325,28 @@ struct joycon_imu_cal {
* All the controller's button values are stored in a u32.
* They can be accessed with bitwise ANDs.
*/
static const u32 JC_BTN_Y = BIT(0);
static const u32 JC_BTN_X = BIT(1);
static const u32 JC_BTN_B = BIT(2);
static const u32 JC_BTN_A = BIT(3);
static const u32 JC_BTN_SR_R = BIT(4);
static const u32 JC_BTN_SL_R = BIT(5);
static const u32 JC_BTN_R = BIT(6);
static const u32 JC_BTN_ZR = BIT(7);
static const u32 JC_BTN_MINUS = BIT(8);
static const u32 JC_BTN_PLUS = BIT(9);
static const u32 JC_BTN_RSTICK = BIT(10);
static const u32 JC_BTN_LSTICK = BIT(11);
static const u32 JC_BTN_HOME = BIT(12);
static const u32 JC_BTN_CAP = BIT(13); /* capture button */
static const u32 JC_BTN_DOWN = BIT(16);
static const u32 JC_BTN_UP = BIT(17);
static const u32 JC_BTN_RIGHT = BIT(18);
static const u32 JC_BTN_LEFT = BIT(19);
static const u32 JC_BTN_SR_L = BIT(20);
static const u32 JC_BTN_SL_L = BIT(21);
static const u32 JC_BTN_L = BIT(22);
static const u32 JC_BTN_ZL = BIT(23);
#define JC_BTN_Y BIT(0)
#define JC_BTN_X BIT(1)
#define JC_BTN_B BIT(2)
#define JC_BTN_A BIT(3)
#define JC_BTN_SR_R BIT(4)
#define JC_BTN_SL_R BIT(5)
#define JC_BTN_R BIT(6)
#define JC_BTN_ZR BIT(7)
#define JC_BTN_MINUS BIT(8)
#define JC_BTN_PLUS BIT(9)
#define JC_BTN_RSTICK BIT(10)
#define JC_BTN_LSTICK BIT(11)
#define JC_BTN_HOME BIT(12)
#define JC_BTN_CAP BIT(13) /* capture button */
#define JC_BTN_DOWN BIT(16)
#define JC_BTN_UP BIT(17)
#define JC_BTN_RIGHT BIT(18)
#define JC_BTN_LEFT BIT(19)
#define JC_BTN_SR_L BIT(20)
#define JC_BTN_SL_L BIT(21)
#define JC_BTN_L BIT(22)
#define JC_BTN_ZL BIT(23)
enum joycon_msg_type {
JOYCON_MSG_TYPE_NONE,
@ -927,14 +927,27 @@ static int joycon_request_calibration(struct joycon_ctlr *ctlr)
*/
static void joycon_calc_imu_cal_divisors(struct joycon_ctlr *ctlr)
{
int i;
int i, divz = 0;
for (i = 0; i < 3; i++) {
ctlr->imu_cal_accel_divisor[i] = ctlr->accel_cal.scale[i] -
ctlr->accel_cal.offset[i];
ctlr->imu_cal_gyro_divisor[i] = ctlr->gyro_cal.scale[i] -
ctlr->gyro_cal.offset[i];
if (ctlr->imu_cal_accel_divisor[i] == 0) {
ctlr->imu_cal_accel_divisor[i] = 1;
divz++;
}
if (ctlr->imu_cal_gyro_divisor[i] == 0) {
ctlr->imu_cal_gyro_divisor[i] = 1;
divz++;
}
}
if (divz)
hid_warn(ctlr->hdev, "inaccurate IMU divisors (%d)\n", divz);
}
static const s16 DFLT_ACCEL_OFFSET /*= 0*/;
@ -1163,16 +1176,16 @@ static void joycon_parse_imu_report(struct joycon_ctlr *ctlr,
JC_IMU_SAMPLES_PER_DELTA_AVG) {
ctlr->imu_avg_delta_ms = ctlr->imu_delta_samples_sum /
ctlr->imu_delta_samples_count;
/* don't ever want divide by zero shenanigans */
if (ctlr->imu_avg_delta_ms == 0) {
ctlr->imu_avg_delta_ms = 1;
hid_warn(ctlr->hdev,
"calculated avg imu delta of 0\n");
}
ctlr->imu_delta_samples_count = 0;
ctlr->imu_delta_samples_sum = 0;
}
/* don't ever want divide by zero shenanigans */
if (ctlr->imu_avg_delta_ms == 0) {
ctlr->imu_avg_delta_ms = 1;
hid_warn(ctlr->hdev, "calculated avg imu delta of 0\n");
}
/* useful for debugging IMU sample rate */
hid_dbg(ctlr->hdev,
"imu_report: ms=%u last_ms=%u delta=%u avg_delta=%u\n",


@ -249,18 +249,46 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
if (!slave)
return 0;
command = readl(bus->base + ASPEED_I2C_CMD_REG);
/*
* Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive
* transfers with low enough latency between the nak/stop phase of the current
* command and the start/address phase of the following command that the
* interrupts are coalesced by the time we process them.
*/
if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
}
/* Slave was requested, restart state machine. */
if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
}
/* Propagate any stop conditions to the slave implementation. */
if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) {
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
}
/*
* Now that we've dealt with any potentially coalesced stop conditions,
* address any start conditions.
*/
if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
bus->slave_state = ASPEED_I2C_SLAVE_START;
}
/* Slave is not currently active, irq was for someone else. */
/*
* If the slave has been stopped and not started then slave interrupt
* handling is complete.
*/
if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
return irq_handled;
command = readl(bus->base + ASPEED_I2C_CMD_REG);
dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
irq_status, command);
@ -279,17 +307,6 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
}
/* Slave was asked to stop. */
if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
}
if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
}
switch (bus->slave_state) {
case ASPEED_I2C_SLAVE_READ_REQUESTED:
if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
@ -324,8 +341,7 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
break;
case ASPEED_I2C_SLAVE_STOP:
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
/* Stop event handling is done early. Unreachable. */
break;
case ASPEED_I2C_SLAVE_START:
/* Slave was just started. Waiting for the next event. */;
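
The reordering in this aspeed-i2c hunk exists because a fast master can raise the STOP of one transfer and the SLAVE_MATCH of the next in a single coalesced status word; handling STOP (and the NAK-to-STOP promotion) before SLAVE_MATCH keeps the new start from being swallowed. A toy model of the reordered state machine, with stand-in enum and flag names rather than the driver's:

#include <stdio.h>

enum slave_state { INACTIVE, START, STOP };

#define INTR_STOP  (1 << 0)
#define INTR_MATCH (1 << 1)

/* When STOP and SLAVE_MATCH arrive coalesced in one status word, the
 * stop of the old transfer must be delivered before the new start is
 * recorded, otherwise the start would be thrown away. */
static enum slave_state handle(enum slave_state s, unsigned int irq)
{
    if (irq & INTR_STOP)
        s = STOP;
    if (s == STOP) {
        printf("deliver I2C_SLAVE_STOP\n");
        s = INACTIVE;
    }
    if (irq & INTR_MATCH)
        s = START;
    return s;
}

int main(void)
{
    enum slave_state s = INACTIVE;

    /* coalesced stop + start of the next transfer */
    s = handle(s, INTR_STOP | INTR_MATCH);
    printf("state after coalesced irq: %s\n", s == START ? "START" : "other");
    return 0;
}
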


@ -858,6 +858,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
ret = geni_se_resources_on(&gi2c->se);
if (ret) {
dev_err(dev, "Error turning on resources %d\n", ret);
clk_disable_unprepare(gi2c->core_clk);
return ret;
}
proto = geni_se_read_proto(&gi2c->se);
@ -877,8 +878,11 @@ static int geni_i2c_probe(struct platform_device *pdev)
/* FIFO is disabled, so we can only use GPI DMA */
gi2c->gpi_mode = true;
ret = setup_gpi_dma(gi2c);
if (ret)
if (ret) {
geni_se_resources_off(&gi2c->se);
clk_disable_unprepare(gi2c->core_clk);
return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");
}
dev_dbg(dev, "Using GPI DMA mode for I2C\n");
} else {
@ -891,6 +895,8 @@ static int geni_i2c_probe(struct platform_device *pdev)
if (!tx_depth) {
dev_err(dev, "Invalid TX FIFO depth\n");
geni_se_resources_off(&gi2c->se);
clk_disable_unprepare(gi2c->core_clk);
return -EINVAL;
}
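
Each added clk_disable_unprepare() in this geni-i2c hunk unwinds a clock prepared earlier in probe. When error paths multiply like this, the usual idiom is a goto unwind ladder so every exit releases exactly what was acquired; a self-contained sketch with stubbed acquire/release stages (all names hypothetical):

#include <stdio.h>

static int acquire_a(void) { puts("a on");    return 0; }
static int acquire_b(void) { puts("b on");    return 0; }
static int acquire_c(void) { puts("c fails"); return -1; }
static void release_a(void) { puts("a off"); }
static void release_b(void) { puts("b off"); }

/* Every failure jumps to the label that releases only what was
 * already acquired, in reverse order. */
static int probe_sketch(void)
{
    int ret;

    ret = acquire_a();
    if (ret)
        return ret;
    ret = acquire_b();
    if (ret)
        goto err_a;
    ret = acquire_c();
    if (ret)
        goto err_b;
    return 0;

err_b:
    release_b();
err_a:
    release_a();
    return ret;
}

int main(void) { return probe_sketch() ? 1 : 0; }
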


@ -178,6 +178,7 @@ struct rk3x_i2c_soc_data {
* @clk: function clk for rk3399 or function & Bus clks for others
* @pclk: Bus clk for rk3399
* @clk_rate_nb: i2c clk rate change notify
* @irq: irq number
* @t: I2C known timing information
* @lock: spinlock for the i2c bus
* @wait: the waitqueue to wait for i2c transfer
@ -200,6 +201,7 @@ struct rk3x_i2c {
struct clk *clk;
struct clk *pclk;
struct notifier_block clk_rate_nb;
int irq;
/* Settings */
struct i2c_timings t;
@ -1087,13 +1089,18 @@ static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
spin_unlock_irqrestore(&i2c->lock, flags);
rk3x_i2c_start(i2c);
if (!polling) {
rk3x_i2c_start(i2c);
timeout = wait_event_timeout(i2c->wait, !i2c->busy,
msecs_to_jiffies(WAIT_TIMEOUT));
} else {
disable_irq(i2c->irq);
rk3x_i2c_start(i2c);
timeout = rk3x_i2c_wait_xfer_poll(i2c);
enable_irq(i2c->irq);
}
spin_lock_irqsave(&i2c->lock, flags);
@ -1310,6 +1317,8 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
return ret;
}
i2c->irq = irq;
platform_set_drvdata(pdev, i2c);
if (i2c->soc_data->calc_timings == rk3x_i2c_v0_calc_timings) {


@ -393,17 +393,17 @@ static const unsigned int kx022a_odrs[] = {
* (range / 2^bits) * g = (range / 2^bits) * 9.80665 m/s^2
* => KX022A uses 16 bit (HiRes mode - assume the low 8 bits are zeroed
* in low-power mode(?) )
* => +/-2G => 4 / 2^16 * 9.80665 * 10^6 (to scale to micro)
* => +/-2G - 598.550415
* +/-4G - 1197.10083
* +/-8G - 2394.20166
* +/-16G - 4788.40332
* => +/-2G => 4 / 2^16 * 9.80665
* => +/-2G - 0.000598550415
* +/-4G - 0.00119710083
* +/-8G - 0.00239420166
* +/-16G - 0.00478840332
*/
static const int kx022a_scale_table[][2] = {
{ 598, 550415 },
{ 1197, 100830 },
{ 2394, 201660 },
{ 4788, 403320 },
{ 0, 598550 },
{ 0, 1197101 },
{ 0, 2394202 },
{ 0, 4788403 },
};
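
The table rows follow from the stated formula: ±2g maps 4 g of range onto 2^16 counts, i.e. 4 × 9.80665 / 65536 ≈ 0.000598550415 m/s² per LSB. Expressed as INT_PLUS_MICRO the old table read { 598, 550415 }, which sysfs renders as 598.550415 — off by a factor of 10^6; as INT_PLUS_NANO it becomes { 0, 598550 }. A quick standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
    /* +/-2g full scale over 16 bits, in m/s^2 per LSB */
    double scale = 4.0 * 9.80665 / 65536.0;   /* 0.000598550415 */

    /* IIO_VAL_INT_PLUS_NANO: integer part plus nano parts */
    long nano = (long)(scale * 1e9 + 0.5);

    printf("{ 0, %ld }  ->  %.12f m/s^2 per LSB\n", nano, nano / 1e9);
    return 0;
}
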
static int kx022a_read_avail(struct iio_dev *indio_dev,
@ -422,7 +422,7 @@ static int kx022a_read_avail(struct iio_dev *indio_dev,
*vals = (const int *)kx022a_scale_table;
*length = ARRAY_SIZE(kx022a_scale_table) *
ARRAY_SIZE(kx022a_scale_table[0]);
*type = IIO_VAL_INT_PLUS_MICRO;
*type = IIO_VAL_INT_PLUS_NANO;
return IIO_AVAIL_LIST;
default:
return -EINVAL;
@ -485,6 +485,20 @@ static int kx022a_turn_on_unlock(struct kx022a_data *data)
return ret;
}
static int kx022a_write_raw_get_fmt(struct iio_dev *idev,
struct iio_chan_spec const *chan,
long mask)
{
switch (mask) {
case IIO_CHAN_INFO_SCALE:
return IIO_VAL_INT_PLUS_NANO;
case IIO_CHAN_INFO_SAMP_FREQ:
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
}
static int kx022a_write_raw(struct iio_dev *idev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
@ -629,7 +643,7 @@ static int kx022a_read_raw(struct iio_dev *idev,
kx022a_reg2scale(regval, val, val2);
return IIO_VAL_INT_PLUS_MICRO;
return IIO_VAL_INT_PLUS_NANO;
}
return -EINVAL;
@ -856,6 +870,7 @@ static int kx022a_fifo_flush(struct iio_dev *idev, unsigned int samples)
static const struct iio_info kx022a_info = {
.read_raw = &kx022a_read_raw,
.write_raw = &kx022a_write_raw,
.write_raw_get_fmt = &kx022a_write_raw_get_fmt,
.read_avail = &kx022a_read_avail,
.validate_trigger = iio_validate_own_trigger,


@ -93,6 +93,10 @@ static const struct iio_chan_spec imx93_adc_iio_channels[] = {
IMX93_ADC_CHAN(1),
IMX93_ADC_CHAN(2),
IMX93_ADC_CHAN(3),
IMX93_ADC_CHAN(4),
IMX93_ADC_CHAN(5),
IMX93_ADC_CHAN(6),
IMX93_ADC_CHAN(7),
};
static void imx93_adc_power_down(struct imx93_adc *adc)


@ -918,7 +918,7 @@ static int mcp3564_write_raw(struct iio_dev *indio_dev,
mutex_unlock(&adc->lock);
return ret;
case IIO_CHAN_INFO_CALIBBIAS:
if (val < mcp3564_calib_bias[0] && val > mcp3564_calib_bias[2])
if (val < mcp3564_calib_bias[0] || val > mcp3564_calib_bias[2])
return -EINVAL;
mutex_lock(&adc->lock);
@ -928,7 +928,7 @@ static int mcp3564_write_raw(struct iio_dev *indio_dev,
mutex_unlock(&adc->lock);
return ret;
case IIO_CHAN_INFO_CALIBSCALE:
if (val < mcp3564_calib_scale[0] && val > mcp3564_calib_scale[2])
if (val < mcp3564_calib_scale[0] || val > mcp3564_calib_scale[2])
return -EINVAL;
if (adc->calib_scale == val)
@ -1122,7 +1122,7 @@ static int mcp3564_config(struct iio_dev *indio_dev)
enum mcp3564_ids ids;
int ret = 0;
unsigned int tmp = 0x01;
bool err = true;
bool err = false;
/*
* The address is set on a per-device basis by fuses in the factory,
@ -1509,5 +1509,5 @@ static struct spi_driver mcp3564_driver = {
module_spi_driver(mcp3564_driver);
MODULE_AUTHOR("Marius Cristea <marius.cristea@microchip.com>");
MODULE_DESCRIPTION("Microchip MCP346x/MCP346xR and MCP356x/MCP346xR ADCs");
MODULE_DESCRIPTION("Microchip MCP346x/MCP346xR and MCP356x/MCP356xR ADCs");
MODULE_LICENSE("GPL v2");
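
The mcp3564 range checks above flip && to ||: a value can never be simultaneously below the table minimum and above the table maximum, so the old condition rejected nothing and any out-of-range CALIBBIAS/CALIBSCALE value slipped through. A two-function demonstration:

#include <stdio.h>

static int in_range_buggy(int val, int min, int max)
{
    /* old check: never rejects anything, since val cannot be both
     * below min and above max at the same time */
    if (val < min && val > max)
        return 0;
    return 1;
}

static int in_range_fixed(int val, int min, int max)
{
    if (val < min || val > max)
        return 0;
    return 1;
}

int main(void)
{
    printf("buggy accepts 99999: %d\n", in_range_buggy(99999, -100, 100));
    printf("fixed accepts 99999: %d\n", in_range_fixed(99999, -100, 100));
    return 0;
}
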


@ -1241,6 +1241,20 @@ static const struct meson_sar_adc_param meson_sar_adc_gxl_param = {
.cmv_select = 1,
};
static const struct meson_sar_adc_param meson_sar_adc_axg_param = {
.has_bl30_integration = true,
.clock_rate = 1200000,
.bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
.disable_ring_counter = 1,
.has_reg11 = true,
.vref_volatge = 1,
.has_vref_select = true,
.vref_select = VREF_VDDA,
.cmv_select = 1,
};
static const struct meson_sar_adc_param meson_sar_adc_g12a_param = {
.has_bl30_integration = false,
.clock_rate = 1200000,
@ -1285,7 +1299,7 @@ static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
};
static const struct meson_sar_adc_data meson_sar_adc_axg_data = {
.param = &meson_sar_adc_gxl_param,
.param = &meson_sar_adc_axg_param,
.name = "meson-axg-saradc",
};


@ -670,8 +670,10 @@ static int tiadc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
err = tiadc_request_dma(pdev, adc_dev);
if (err && err == -EPROBE_DEFER)
if (err && err != -ENODEV) {
dev_err_probe(&pdev->dev, err, "DMA request failed\n");
goto err_dma;
}
return 0;


@ -46,6 +46,16 @@ int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
struct iio_buffer *buffer;
int ret;
/*
* iio_triggered_buffer_cleanup() assumes that the buffer allocated here
* is assigned to indio_dev->buffer but this is only the case if this
* function is the first caller to iio_device_attach_buffer(). If
* indio_dev->buffer is already set then we can't proceed, otherwise the
* cleanup function will try to free a buffer that was not allocated here.
*/
if (indio_dev->buffer)
return -EADDRINUSE;
buffer = iio_kfifo_allocate();
if (!buffer) {
ret = -ENOMEM;
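
The -EADDRINUSE guard added above makes attach-once explicit: iio_triggered_buffer_cleanup() frees indio_dev->buffer, so a second attach would hand the cleanup path a buffer it never allocated. A stripped-down sketch of the same contract (struct dev and attach_buffer() are invented; EADDRINUSE is 98 on Linux):

#include <errno.h>
#include <stdio.h>

struct dev { void *buffer; };

/* Refuse a second attach so the matching cleanup path can assume it
 * owns dev->buffer -- the contract the hunk above enforces. */
static int attach_buffer(struct dev *d, void *buf)
{
    if (d->buffer)
        return -EADDRINUSE;
    d->buffer = buf;
    return 0;
}

int main(void)
{
    struct dev d = { 0 };
    int b1, b2;

    printf("first:  %d\n", attach_buffer(&d, &b1));  /* 0 */
    printf("second: %d\n", attach_buffer(&d, &b2));  /* -EADDRINUSE */
    return 0;
}
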


@ -15,8 +15,8 @@
/* Conversion times in us */
static const u16 ms_sensors_ht_t_conversion_time[] = { 50000, 25000,
13000, 7000 };
static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 3000,
5000, 8000 };
static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 5000,
3000, 8000 };
static const u16 ms_sensors_tp_conversion_time[] = { 500, 1100, 2100,
4100, 8220, 16440 };


@ -70,8 +70,8 @@
#define ADIS16475_MAX_SCAN_DATA 20
/* spi max speed in burst mode */
#define ADIS16475_BURST_MAX_SPEED 1000000
#define ADIS16475_LSB_DEC_MASK BIT(0)
#define ADIS16475_LSB_FIR_MASK BIT(1)
#define ADIS16475_LSB_DEC_MASK 0
#define ADIS16475_LSB_FIR_MASK 1
#define ADIS16500_BURST_DATA_SEL_0_CHN_MASK GENMASK(5, 0)
#define ADIS16500_BURST_DATA_SEL_1_CHN_MASK GENMASK(12, 7)
@ -1406,6 +1406,59 @@ static int adis16475_config_irq_pin(struct adis16475 *st)
return 0;
}
static int adis16475_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct adis16475 *st;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
st->info = spi_get_device_match_data(spi);
if (!st->info)
return -EINVAL;
ret = adis_init(&st->adis, indio_dev, spi, &st->info->adis_data);
if (ret)
return ret;
indio_dev->name = st->info->name;
indio_dev->channels = st->info->channels;
indio_dev->num_channels = st->info->num_channels;
indio_dev->info = &adis16475_info;
indio_dev->modes = INDIO_DIRECT_MODE;
ret = __adis_initial_startup(&st->adis);
if (ret)
return ret;
ret = adis16475_config_irq_pin(st);
if (ret)
return ret;
ret = adis16475_config_sync_mode(st);
if (ret)
return ret;
ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
adis16475_trigger_handler);
if (ret)
return ret;
ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
return ret;
adis16475_debugfs_init(indio_dev);
return 0;
}
static const struct of_device_id adis16475_of_match[] = {
{ .compatible = "adi,adis16470",
.data = &adis16475_chip_info[ADIS16470] },
@ -1451,57 +1504,30 @@ static const struct of_device_id adis16475_of_match[] = {
};
MODULE_DEVICE_TABLE(of, adis16475_of_match);
static int adis16475_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct adis16475 *st;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
st->info = device_get_match_data(&spi->dev);
if (!st->info)
return -EINVAL;
ret = adis_init(&st->adis, indio_dev, spi, &st->info->adis_data);
if (ret)
return ret;
indio_dev->name = st->info->name;
indio_dev->channels = st->info->channels;
indio_dev->num_channels = st->info->num_channels;
indio_dev->info = &adis16475_info;
indio_dev->modes = INDIO_DIRECT_MODE;
ret = __adis_initial_startup(&st->adis);
if (ret)
return ret;
ret = adis16475_config_irq_pin(st);
if (ret)
return ret;
ret = adis16475_config_sync_mode(st);
if (ret)
return ret;
ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
adis16475_trigger_handler);
if (ret)
return ret;
ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
return ret;
adis16475_debugfs_init(indio_dev);
return 0;
}
static const struct spi_device_id adis16475_ids[] = {
{ "adis16470", (kernel_ulong_t)&adis16475_chip_info[ADIS16470] },
{ "adis16475-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_1] },
{ "adis16475-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_2] },
{ "adis16475-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_3] },
{ "adis16477-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_1] },
{ "adis16477-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_2] },
{ "adis16477-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_3] },
{ "adis16465-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_1] },
{ "adis16465-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_2] },
{ "adis16465-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_3] },
{ "adis16467-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_1] },
{ "adis16467-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_2] },
{ "adis16467-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_3] },
{ "adis16500", (kernel_ulong_t)&adis16475_chip_info[ADIS16500] },
{ "adis16505-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_1] },
{ "adis16505-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_2] },
{ "adis16505-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_3] },
{ "adis16507-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_1] },
{ "adis16507-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_2] },
{ "adis16507-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_3] },
{ }
};
MODULE_DEVICE_TABLE(spi, adis16475_ids);
static struct spi_driver adis16475_driver = {
.driver = {
@ -1509,6 +1535,7 @@ static struct spi_driver adis16475_driver = {
.of_match_table = adis16475_of_match,
},
.probe = adis16475_probe,
.id_table = adis16475_ids,
};
module_spi_driver(adis16475_driver);


@ -750,13 +750,13 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
ret = inv_mpu6050_sensor_show(st, st->reg->gyro_offset,
chan->channel2, val);
mutex_unlock(&st->lock);
return IIO_VAL_INT;
return ret;
case IIO_ACCEL:
mutex_lock(&st->lock);
ret = inv_mpu6050_sensor_show(st, st->reg->accl_offset,
chan->channel2, val);
mutex_unlock(&st->lock);
return IIO_VAL_INT;
return ret;
default:
return -EINVAL;


@ -14,11 +14,8 @@
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum {
CHANNEL_SCAN_INDEX_INTENSITY,
CHANNEL_SCAN_INDEX_ILLUM,
CHANNEL_SCAN_INDEX_COLOR_TEMP,
CHANNEL_SCAN_INDEX_CHROMATICITY_X,
CHANNEL_SCAN_INDEX_CHROMATICITY_Y,
CHANNEL_SCAN_INDEX_INTENSITY = 0,
CHANNEL_SCAN_INDEX_ILLUM = 1,
CHANNEL_SCAN_INDEX_MAX
};
@ -68,40 +65,6 @@ static const struct iio_chan_spec als_channels[] = {
BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
.scan_index = CHANNEL_SCAN_INDEX_ILLUM,
},
{
.type = IIO_COLORTEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS) |
BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
.scan_index = CHANNEL_SCAN_INDEX_COLOR_TEMP,
},
{
.type = IIO_CHROMATICITY,
.modified = 1,
.channel2 = IIO_MOD_X,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS) |
BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
.scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_X,
},
{
.type = IIO_CHROMATICITY,
.modified = 1,
.channel2 = IIO_MOD_Y,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS) |
BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
.scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_Y,
},
IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP)
};
@ -140,21 +103,6 @@ static int als_read_raw(struct iio_dev *indio_dev,
min = als_state->als[chan->scan_index].logical_minimum;
address = HID_USAGE_SENSOR_LIGHT_ILLUM;
break;
case CHANNEL_SCAN_INDEX_COLOR_TEMP:
report_id = als_state->als[chan->scan_index].report_id;
min = als_state->als[chan->scan_index].logical_minimum;
address = HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE;
break;
case CHANNEL_SCAN_INDEX_CHROMATICITY_X:
report_id = als_state->als[chan->scan_index].report_id;
min = als_state->als[chan->scan_index].logical_minimum;
address = HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X;
break;
case CHANNEL_SCAN_INDEX_CHROMATICITY_Y:
report_id = als_state->als[chan->scan_index].report_id;
min = als_state->als[chan->scan_index].logical_minimum;
address = HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y;
break;
default:
report_id = -1;
break;
@ -275,18 +223,6 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
als_state->scan.illum[CHANNEL_SCAN_INDEX_ILLUM] = sample_data;
ret = 0;
break;
case HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE:
als_state->scan.illum[CHANNEL_SCAN_INDEX_COLOR_TEMP] = sample_data;
ret = 0;
break;
case HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X:
als_state->scan.illum[CHANNEL_SCAN_INDEX_CHROMATICITY_X] = sample_data;
ret = 0;
break;
case HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y:
als_state->scan.illum[CHANNEL_SCAN_INDEX_CHROMATICITY_Y] = sample_data;
ret = 0;
break;
case HID_USAGE_SENSOR_TIME_TIMESTAMP:
als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes,
*(s64 *)raw_data);
@ -322,38 +258,6 @@ static int als_parse_report(struct platform_device *pdev,
st->als[i].report_id);
}
ret = sensor_hub_input_get_attribute_info(hsdev, HID_INPUT_REPORT,
usage_id,
HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE,
&st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP]);
if (ret < 0)
return ret;
als_adjust_channel_bit_mask(channels, CHANNEL_SCAN_INDEX_COLOR_TEMP,
st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].size);
dev_dbg(&pdev->dev, "als %x:%x\n",
st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].index,
st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].report_id);
for (i = 0; i < 2; i++) {
int next_scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_X + i;
ret = sensor_hub_input_get_attribute_info(hsdev,
HID_INPUT_REPORT, usage_id,
HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X + i,
&st->als[next_scan_index]);
if (ret < 0)
return ret;
als_adjust_channel_bit_mask(channels,
CHANNEL_SCAN_INDEX_CHROMATICITY_X + i,
st->als[next_scan_index].size);
dev_dbg(&pdev->dev, "als %x:%x\n",
st->als[next_scan_index].index,
st->als[next_scan_index].report_id);
}
st->scale_precision = hid_sensor_format_scale(usage_id,
&st->als[CHANNEL_SCAN_INDEX_INTENSITY],
&st->scale_pre_decml, &st->scale_post_decml);


@ -356,7 +356,7 @@ static int tmag5273_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_OFFSET:
switch (chan->type) {
case IIO_TEMP:
*val = -266314;
*val = -16005;
return IIO_VAL_INT;
default:
return -EINVAL;


@ -286,6 +286,7 @@ static const struct xpad_device {
{ 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
{ 0x1532, 0x0a29, "Razer Wolverine V2", 0, XTYPE_XBOXONE },
{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },


@ -765,6 +765,44 @@ static void atkbd_deactivate(struct atkbd *atkbd)
ps2dev->serio->phys);
}
#ifdef CONFIG_X86
static bool atkbd_is_portable_device(void)
{
static const char * const chassis_types[] = {
"8", /* Portable */
"9", /* Laptop */
"10", /* Notebook */
"14", /* Sub-Notebook */
"31", /* Convertible */
"32", /* Detachable */
};
int i;
for (i = 0; i < ARRAY_SIZE(chassis_types); i++)
if (dmi_match(DMI_CHASSIS_TYPE, chassis_types[i]))
return true;
return false;
}
/*
* On many modern laptops ATKBD_CMD_GETID may cause problems, on these laptops
* the controller is always in translated mode. In this mode mice/touchpads will
* not work. So in this case simply assume a keyboard is connected to avoid
* confusing some laptop keyboards.
*
* Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using a fake id is
* ok in translated mode, only atkbd_select_set() checks atkbd->id and in
* translated mode that is a no-op.
*/
static bool atkbd_skip_getid(struct atkbd *atkbd)
{
return atkbd->translated && atkbd_is_portable_device();
}
#else
static inline bool atkbd_skip_getid(struct atkbd *atkbd) { return false; }
#endif
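
The chassis-type strings matched above come from the SMBIOS enclosure type; outside the kernel the same byte is exported at /sys/class/dmi/id/chassis_type, which makes the quirk easy to check on a given machine. A hedged userspace analogue of atkbd_is_portable_device():

#include <stdio.h>
#include <string.h>

/* SMBIOS chassis type via sysfs; 8/9/10/14/31/32 are the portable
 * form factors the quirk matches. */
int main(void)
{
    static const char * const portable[] = { "8", "9", "10", "14", "31", "32" };
    char buf[16] = "";
    FILE *f = fopen("/sys/class/dmi/id/chassis_type", "r");
    size_t i;

    if (!f || !fgets(buf, sizeof(buf), f)) {
        if (f)
            fclose(f);
        return 1;
    }
    fclose(f);
    buf[strcspn(buf, "\n")] = '\0';

    for (i = 0; i < sizeof(portable) / sizeof(portable[0]); i++)
        if (!strcmp(buf, portable[i]))
            printf("portable chassis (type %s)\n", buf);
    return 0;
}
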
/*
* atkbd_probe() probes for an AT keyboard on a serio port.
*/
@ -794,12 +832,12 @@ static int atkbd_probe(struct atkbd *atkbd)
*/
param[0] = param[1] = 0xa5; /* initialize with invalid values */
if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
if (atkbd_skip_getid(atkbd) || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
/*
* If the get ID command failed, we check if we can at least set the LEDs on
* the keyboard. This should work on every keyboard out there. It also turns
* the LEDs off, which we want anyway.
* If the get ID command was skipped or failed, we check if we can at least set
* the LEDs on the keyboard. This should work on every keyboard out there.
* It also turns the LEDs off, which we want anyway.
*/
param[0] = 0;
if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))


@ -105,6 +105,9 @@ static int micro_key_probe(struct platform_device *pdev)
keys->codes = devm_kmemdup(&pdev->dev, micro_keycodes,
keys->input->keycodesize * keys->input->keycodemax,
GFP_KERNEL);
if (!keys->codes)
return -ENOMEM;
keys->input->keycode = keys->codes;
__set_bit(EV_KEY, keys->input->evbit);


@ -299,6 +299,11 @@ static int soc_button_parse_btn_desc(struct device *dev,
info->name = "power";
info->event_code = KEY_POWER;
info->wakeup = true;
} else if (upage == 0x01 && usage == 0xc6) {
info->name = "airplane mode switch";
info->event_type = EV_SW;
info->event_code = SW_RFKILL_ALL;
info->active_low = false;
} else if (upage == 0x01 && usage == 0xca) {
info->name = "rotation lock switch";
info->event_type = EV_SW;


@ -125,16 +125,15 @@ static int __init amimouse_probe(struct platform_device *pdev)
return 0;
}
static int __exit amimouse_remove(struct platform_device *pdev)
static void __exit amimouse_remove(struct platform_device *pdev)
{
struct input_dev *dev = platform_get_drvdata(pdev);
input_unregister_device(dev);
return 0;
}
static struct platform_driver amimouse_driver = {
.remove = __exit_p(amimouse_remove),
.remove_new = __exit_p(amimouse_remove),
.driver = {
.name = "amiga-mouse",
},


@ -183,6 +183,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN009b", /* T580 */
"LEN0402", /* X1 Extreme Gen 2 / P1 Gen 2 */
"LEN040f", /* P1 Gen 3 */
"LEN0411", /* L14 Gen 1 */
"LEN200f", /* T450s */
"LEN2044", /* L470 */
"LEN2054", /* E480 */


@ -360,6 +360,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
},
.driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
/* Acer TravelMate P459-G2-M */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate P459-G2-M"),
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
/* Amoi M636/A737 */
.matches = {


@ -395,6 +395,9 @@ struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
}
mutex_unlock(&icc_lock);
if (!node)
return ERR_PTR(-EINVAL);
if (IS_ERR(node))
return ERR_CAST(node);


@ -307,7 +307,7 @@ static u64 qcom_icc_calc_rate(struct qcom_icc_provider *qp, struct qcom_icc_node
if (qn->ib_coeff) {
agg_peak_rate = qn->max_peak[ctx] * 100;
agg_peak_rate = div_u64(qn->max_peak[ctx], qn->ib_coeff);
agg_peak_rate = div_u64(agg_peak_rate, qn->ib_coeff);
} else {
agg_peak_rate = qn->max_peak[ctx];
}


@ -1995,6 +1995,7 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8250",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);


@ -660,6 +660,7 @@ config DM_ZONED
config DM_AUDIT
bool "DM audit events"
depends on BLK_DEV_DM
depends on AUDIT
help
Generate audit events for device-mapper.


@ -1755,11 +1755,12 @@ static void integrity_metadata(struct work_struct *w)
sectors_to_process = dio->range.n_sectors;
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
struct bio_vec bv_copy = bv;
unsigned int pos;
char *mem, *checksums_ptr;
again:
mem = bvec_kmap_local(&bv);
mem = bvec_kmap_local(&bv_copy);
pos = 0;
checksums_ptr = checksums;
do {
@ -1768,7 +1769,7 @@ static void integrity_metadata(struct work_struct *w)
sectors_to_process -= ic->sectors_per_block;
pos += ic->sectors_per_block << SECTOR_SHIFT;
sector += ic->sectors_per_block;
} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
} while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
kunmap_local(mem);
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
@ -1793,9 +1794,9 @@ static void integrity_metadata(struct work_struct *w)
if (!sectors_to_process)
break;
if (unlikely(pos < bv.bv_len)) {
bv.bv_offset += pos;
bv.bv_len -= pos;
if (unlikely(pos < bv_copy.bv_len)) {
bv_copy.bv_offset += pos;
bv_copy.bv_len -= pos;
goto again;
}
}
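
The dm-integrity change works on bv_copy because the goto-again resume path adjusts bv_offset/bv_len as it goes; mutating the bio_vec handed out by the iterator would corrupt any later walk over the same bio. The pattern in isolation — process a local copy, leave the caller's descriptor untouched (struct vec is a stand-in for bio_vec):

#include <stdio.h>

struct vec { int off, len; };

/* Resume bookkeeping mutates a local copy, so the caller's vector,
 * which may be iterated again, stays intact -- the same reason the
 * fix above introduces bv_copy. */
static void process(const struct vec *v, int chunk)
{
    struct vec copy = *v;

    while (copy.len > 0) {
        printf("chunk at off=%d\n", copy.off);
        copy.off += chunk;
        copy.len -= chunk;
    }
}

int main(void)
{
    struct vec v = { 0, 4096 };

    process(&v, 1024);
    printf("caller still sees off=%d len=%d\n", v.off, v.len);
    return 0;
}
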


@ -3317,6 +3317,9 @@ static void raid_dtr(struct dm_target *ti)
mddev_lock_nointr(&rs->md);
md_stop(&rs->md);
mddev_unlock(&rs->md);
if (work_pending(&rs->md.event_work))
flush_work(&rs->md.event_work);
raid_set_free(rs);
}


@ -82,6 +82,14 @@ static struct module *md_cluster_mod;
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
/*
* This workqueue is used for sync_work to register new sync_thread, and for
* del_work to remove rdev, and for event_work that is only set by dm-raid.
*
* Note that sync_work will grab reconfig_mutex, hence never flush this
* workqueue with reconfig_mutex grabbed.
*/
static struct workqueue_struct *md_misc_wq;
struct workqueue_struct *md_bitmap_wq;
@ -6330,9 +6338,6 @@ static void __md_stop(struct mddev *mddev)
struct md_personality *pers = mddev->pers;
md_bitmap_destroy(mddev);
mddev_detach(mddev);
/* Ensure ->event_work is done */
if (mddev->event_work.func)
flush_workqueue(md_misc_wq);
spin_lock(&mddev->lock);
mddev->pers = NULL;
spin_unlock(&mddev->lock);


@ -866,10 +866,13 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
offset, adapter->ring_size);
err = -1;
goto failed;
goto free_buffer;
}
return 0;
free_buffer:
kfree(tx_ring->tx_buffer);
tx_ring->tx_buffer = NULL;
failed:
if (adapter->ring_vir_addr != NULL) {
dma_free_coherent(&pdev->dev, adapter->ring_size,


@ -59,7 +59,6 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
for (i = 0; i < num_frags ; i++) {
skb_frag_t *frag = &sinfo->frags[i];
struct bnxt_sw_tx_bd *frag_tx_buf;
struct pci_dev *pdev = bp->pdev;
dma_addr_t frag_mapping;
int frag_len;
@ -73,16 +72,10 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
frag_len = skb_frag_size(frag);
frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
frag_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
return NULL;
dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);
flags = frag_len << TX_BD_LEN_SHIFT;
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) +
skb_frag_off(frag);
txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
len = frag_len;


@ -207,7 +207,7 @@
#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
#define I40E_GLGEN_MSCA_OPCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_STCODE_SHIFT)
#define I40E_GLGEN_MSCA_STCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_STCODE_SHIFT)
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31


@ -37,11 +37,11 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
#define I40E_QTX_CTL_VM_QUEUE 0x1
#define I40E_QTX_CTL_PF_QUEUE 0x2
#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK
#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(1)
#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1)
#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(2)
#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK
#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK(0)
#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_GLGEN_MSCA_OPCODE_MASK(0)
#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1)
#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(3)
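
Parameterizing I40E_GLGEN_MSCA_STCODE_MASK lets clause-22 MDIO frames (ST code 1) and clause-45 frames (ST code 0) share one definition instead of hard-coding 0x1. The shift-and-mask pattern on its own, with simplified names:

#include <stdio.h>

#define STCODE_SHIFT   28
#define MASK(v, shift) ((unsigned int)(v) << (shift))
#define STCODE_MASK(v) MASK(v, STCODE_SHIFT)

int main(void)
{
    /* clause-22 frames use ST code 1, clause-45 frames ST code 0 */
    printf("c22: 0x%08x\n", STCODE_MASK(1));   /* 0x10000000 */
    printf("c45: 0x%08x\n", STCODE_MASK(0));   /* 0x00000000 */
    return 0;
}
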


@ -1850,14 +1850,14 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
linkmode_zero(ks->link_modes.supported);
linkmode_zero(ks->link_modes.advertising);
for (i = 0; i < BITS_PER_TYPE(u64); i++) {
for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) {
if (phy_types_low & BIT_ULL(i))
ice_linkmode_set_bit(&phy_type_low_lkup[i], ks,
req_speeds, advert_phy_type_lo,
i);
}
for (i = 0; i < BITS_PER_TYPE(u64); i++) {
for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) {
if (phy_types_high & BIT_ULL(i))
ice_linkmode_set_bit(&phy_type_high_lkup[i], ks,
req_speeds, advert_phy_type_hi,
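
Bounding these loops by ARRAY_SIZE() instead of BITS_PER_TYPE(u64) matters because the PHY-type bitmaps have 64 bits but the lookup tables have fewer entries, so the old bound could index past the end of phy_type_low_lkup/phy_type_high_lkup. A small reproduction of the off-the-end hazard (table contents are made up):

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
    static const char * const lkup[] = { "100M", "1G", "2.5G", "5G", "10G" };
    uint64_t phy_types = (1ULL << 1) | (1ULL << 4) | (1ULL << 40);
    size_t i;

    /* Iterating all 64 bits would read lkup[40], past the table;
     * the table size is the correct bound, so bit 40 is ignored. */
    for (i = 0; i < ARRAY_SIZE(lkup); i++)
        if (phy_types & (1ULL << i))
            printf("bit %zu -> %s\n", i, lkup[i]);
    return 0;
}
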


@ -1981,6 +1981,8 @@ int ice_init_lag(struct ice_pf *pf)
int n, err;
ice_lag_init_feature_support_flag(pf);
if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
return 0;
pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
if (!pf->lag)


@ -2371,6 +2371,9 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
} else {
max_txqs[i] = vsi->alloc_txq;
}
if (vsi->type == ICE_VSI_PF)
max_txqs[i] += vsi->num_xdp_txq;
}
dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
@ -2620,10 +2623,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_VF &&
vsi->agg_node && vsi->agg_node->valid)
vsi->agg_node->num_vsis--;
if (vsi->agg_node) {
vsi->agg_node->valid = false;
vsi->agg_node->agg_id = 0;
}
}
/**


@ -399,9 +399,10 @@ static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct otx2_nic *pfvf = netdev_priv(dev);
u8 old_pfc_en;
int err;
/* Save PFC configuration to interface */
old_pfc_en = pfvf->pfc_en;
pfvf->pfc_en = pfc->pfc_en;
if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
@ -411,13 +412,17 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
* supported by the tx queue configuration
*/
err = otx2_check_pfc_config(pfvf);
if (err)
if (err) {
pfvf->pfc_en = old_pfc_en;
return err;
}
process_pfc:
err = otx2_config_priority_flow_ctrl(pfvf);
if (err)
if (err) {
pfvf->pfc_en = old_pfc_en;
return err;
}
/* Request Per channel Bpids */
if (pfc->pfc_en)
@ -425,6 +430,12 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
err = otx2_pfc_txschq_update(pfvf);
if (err) {
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, false);
otx2_pfc_txschq_stop(pfvf);
pfvf->pfc_en = old_pfc_en;
otx2_config_priority_flow_ctrl(pfvf);
dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
return err;
}
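
The otx2 hunk snapshots pfc_en before applying the new configuration and restores it on every failure path, so the cached field never disagrees with what the hardware actually accepted. The snapshot-and-rollback pattern in miniature (struct nic and apply_to_hw() are stubs):

#include <stdio.h>

struct nic { int pfc_en; };

static int apply_to_hw(int cfg) { return cfg > 7 ? -1 : 0; }  /* stub */

/* Keep the cached field equal to what the hardware accepted, as the
 * setpfc fix above does. */
static int set_pfc(struct nic *n, int new_en)
{
    int old = n->pfc_en;
    int err;

    n->pfc_en = new_en;
    err = apply_to_hw(new_en);
    if (err)
        n->pfc_en = old;   /* roll back the cached value */
    return err;
}

int main(void)
{
    struct nic n = { 3 };

    set_pfc(&n, 15);       /* fails in the stub */
    printf("pfc_en still %d\n", n.pfc_en);
    return 0;
}
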


@ -291,6 +291,9 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
for (i = 0; i < q->n_desc; i++) {
struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
if (!entry->buf)
continue;
dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
DMA_TO_DEVICE);
skb_free_frag(entry->buf);


@ -156,15 +156,18 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
return token;
}
static int cmd_alloc_index(struct mlx5_cmd *cmd)
static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cmd->alloc_lock, flags);
ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
if (ret < cmd->vars.max_reg_cmds)
if (ret < cmd->vars.max_reg_cmds) {
clear_bit(ret, &cmd->vars.bitmask);
ent->idx = ret;
cmd->ent_arr[ent->idx] = ent;
}
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM;
@ -979,7 +982,7 @@ static void cmd_work_handler(struct work_struct *work)
sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
down(sem);
if (!ent->page_queue) {
alloc_ret = cmd_alloc_index(cmd);
alloc_ret = cmd_alloc_index(cmd, ent);
if (alloc_ret < 0) {
mlx5_core_err_rl(dev, "failed to allocate command entry\n");
if (ent->callback) {
@ -994,15 +997,14 @@ static void cmd_work_handler(struct work_struct *work)
up(sem);
return;
}
ent->idx = alloc_ret;
} else {
ent->idx = cmd->vars.max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
clear_bit(ent->idx, &cmd->vars.bitmask);
cmd->ent_arr[ent->idx] = ent;
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
cmd->ent_arr[ent->idx] = ent;
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
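
Moving the ent_arr[] assignment into cmd_alloc_index() puts the bitmap reservation and the pointer publication under one alloc_lock critical section, closing the window in which a completion could look up a reserved index and find a stale entry. A pthread sketch of reserve-and-publish-atomically (slot count and names invented; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

#define SLOTS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int bitmask = (1u << SLOTS) - 1;   /* 1 = free */
static void *ent_arr[SLOTS];

/* Reserve a slot and publish its owner in one critical section, so no
 * observer can ever see a reserved slot with a stale ent_arr pointer. */
static int alloc_index(void *ent)
{
    int i, ret = -1;

    pthread_mutex_lock(&lock);
    for (i = 0; i < SLOTS; i++) {
        if (bitmask & (1u << i)) {
            bitmask &= ~(1u << i);
            ent_arr[i] = ent;
            ret = i;
            break;
        }
    }
    pthread_mutex_unlock(&lock);
    return ret;
}

int main(void)
{
    int x, idx = alloc_index(&x);

    if (idx >= 0)
        printf("slot %d published owner %p\n", idx, ent_arr[idx]);
    return 0;
}
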


@ -718,7 +718,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
while (block_timestamp > tracer->last_timestamp) {
/* Check block override if it's not the first block */
if (!tracer->last_timestamp) {
if (tracer->last_timestamp) {
u64 *ts_event;
/* To avoid block override be the HW in case of buffer
* wraparound, the time stamp of the previous block


@ -154,6 +154,7 @@ static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type ty
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kfree(ft->g);
ft->g = NULL;
kvfree(in);
return -ENOMEM;
}
