Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts were easy to resolve, mostly using the immediate
context, except the cls_u32.c one, where I simply took the
entire HEAD chunk.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2018-10-12 21:38:46 -07:00
commit d864991b22
161 changed files with 1420 additions and 686 deletions
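For reference on the conflict note above, keeping the HEAD side of a conflicted file during a merge can look roughly like the following. This is only a sketch: the net/sched/ path for cls_u32.c is assumed, and "git checkout --ours" keeps the whole file from HEAD rather than a single conflict chunk.

    git checkout --ours -- net/sched/cls_u32.c   # keep HEAD's version of the conflicted file
    git add net/sched/cls_u32.c                  # mark the conflict as resolved
    git merge --continue                         # finish the merge commit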


@ -184,6 +184,11 @@ API for implementing a new FPGA Manager driver
API for programming an FPGA API for programming an FPGA
--------------------------- ---------------------------
FPGA Manager flags
.. kernel-doc:: include/linux/fpga/fpga-mgr.h
:doc: FPGA Manager flags
.. kernel-doc:: include/linux/fpga/fpga-mgr.h .. kernel-doc:: include/linux/fpga/fpga-mgr.h
:functions: fpga_image_info :functions: fpga_image_info


@ -9680,7 +9680,8 @@ MIPS/LOONGSON2 ARCHITECTURE
M: Jiaxun Yang <jiaxun.yang@flygoat.com> M: Jiaxun Yang <jiaxun.yang@flygoat.com>
L: linux-mips@linux-mips.org L: linux-mips@linux-mips.org
S: Maintained S: Maintained
F: arch/mips/loongson64/*{2e/2f}* F: arch/mips/loongson64/fuloong-2e/
F: arch/mips/loongson64/lemote-2f/
F: arch/mips/include/asm/mach-loongson64/ F: arch/mips/include/asm/mach-loongson64/
F: drivers/*/*loongson2* F: drivers/*/*loongson2*
F: drivers/*/*/*loongson2* F: drivers/*/*/*loongson2*
@ -9887,7 +9888,7 @@ M: Peter Rosin <peda@axentia.se>
S: Maintained S: Maintained
F: Documentation/ABI/testing/sysfs-class-mux* F: Documentation/ABI/testing/sysfs-class-mux*
F: Documentation/devicetree/bindings/mux/ F: Documentation/devicetree/bindings/mux/
F: include/linux/dt-bindings/mux/ F: include/dt-bindings/mux/
F: include/linux/mux/ F: include/linux/mux/
F: drivers/mux/ F: drivers/mux/


@ -2,7 +2,7 @@
VERSION = 4 VERSION = 4
PATCHLEVEL = 19 PATCHLEVEL = 19
SUBLEVEL = 0 SUBLEVEL = 0
EXTRAVERSION = -rc6 EXTRAVERSION = -rc7
NAME = Merciless Moray NAME = Merciless Moray
# *DOCUMENTATION* # *DOCUMENTATION*
@ -483,13 +483,15 @@ endif
ifeq ($(cc-name),clang) ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),) ifneq ($(CROSS_COMPILE),)
CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%)) CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
CLANG_PREFIX := --prefix=$(GCC_TOOLCHAIN_DIR)
GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif endif
ifneq ($(GCC_TOOLCHAIN),) ifneq ($(GCC_TOOLCHAIN),)
CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN) CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
endif endif
KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
KBUILD_CFLAGS += $(call cc-option, -no-integrated-as) KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
KBUILD_AFLAGS += $(call cc-option, -no-integrated-as) KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
endif endif


@ -149,7 +149,7 @@ config ARC_CPU_770
Support for ARC770 core introduced with Rel 4.10 (Summer 2011) Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
This core has a bunch of cool new features: This core has a bunch of cool new features:
-MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4) -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
Shared Address Spaces (for sharing TLB entires in MMU) Shared Address Spaces (for sharing TLB entries in MMU)
-Caches: New Prog Model, Region Flush -Caches: New Prog Model, Region Flush
-Insns: endian swap, load-locked/store-conditional, time-stamp-ctr -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr


@ -6,33 +6,11 @@
# published by the Free Software Foundation. # published by the Free Software Foundation.
# #
ifeq ($(CROSS_COMPILE),)
ifndef CONFIG_CPU_BIG_ENDIAN
CROSS_COMPILE := arc-linux-
else
CROSS_COMPILE := arceb-linux-
endif
endif
KBUILD_DEFCONFIG := nsim_700_defconfig KBUILD_DEFCONFIG := nsim_700_defconfig
cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7 cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs cflags-$(CONFIG_ISA_ARCV2) += -mcpu=hs38
is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
ifdef CONFIG_ISA_ARCOMPACT
ifeq ($(is_700), 0)
$(error Toolchain not configured for ARCompact builds)
endif
endif
ifdef CONFIG_ISA_ARCV2
ifeq ($(is_700), 1)
$(error Toolchain not configured for ARCv2 builds)
endif
endif
ifdef CONFIG_ARC_CURR_IN_REG ifdef CONFIG_ARC_CURR_IN_REG
# For a global register defintion, make sure it gets passed to every file # For a global register defintion, make sure it gets passed to every file
@ -79,7 +57,7 @@ cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
# Modules with short calls might break for calls into builtin-kernel # Modules with short calls might break for calls into builtin-kernel
KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode


@ -241,6 +241,26 @@ int copy_thread(unsigned long clone_flags,
task_thread_info(current)->thr_ptr; task_thread_info(current)->thr_ptr;
} }
/*
* setup usermode thread pointer #1:
* when child is picked by scheduler, __switch_to() uses @c_callee to
* populate usermode callee regs: this works (despite being in a kernel
* function) since special return path for child @ret_from_fork()
* ensures those regs are not clobbered all the way to RTIE to usermode
*/
c_callee->r25 = task_thread_info(p)->thr_ptr;
#ifdef CONFIG_ARC_CURR_IN_REG
/*
* setup usermode thread pointer #2:
* however for this special use of r25 in kernel, __switch_to() sets
* r25 for kernel needs and only in the final return path is usermode
* r25 setup, from pt_regs->user_r25. So set that up as well
*/
c_regs->user_r25 = c_callee->r25;
#endif
return 0; return 0;
} }


@ -123,6 +123,17 @@ sound {
}; };
}; };
&cpu0 {
/* CPU rated to 1GHz, not 1.2GHz as per the default settings */
operating-points = <
/* kHz uV */
166666 850000
400000 900000
800000 1050000
1000000 1200000
>;
};
&esdhc1 { &esdhc1 {
pinctrl-names = "default"; pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>; pinctrl-0 = <&pinctrl_esdhc1>;


@ -49,6 +49,8 @@
#define ARM_DISCARD \ #define ARM_DISCARD \
*(.ARM.exidx.exit.text) \ *(.ARM.exidx.exit.text) \
*(.ARM.extab.exit.text) \ *(.ARM.extab.exit.text) \
*(.ARM.exidx.text.exit) \
*(.ARM.extab.text.exit) \
ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \ ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \
ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \ ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \
ARM_EXIT_DISCARD(EXIT_TEXT) \ ARM_EXIT_DISCARD(EXIT_TEXT) \


@ -13,6 +13,7 @@
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/sizes.h>
#include <linux/threads.h> #include <linux/threads.h>
#include <asm/cachectl.h> #include <asm/cachectl.h>
@ -80,11 +81,10 @@ extern unsigned int vced_count, vcei_count;
#endif #endif
/* #define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M)
* One page above the stack is used for branch delay slot "emulation".
* See dsemul.c for details. extern unsigned long mips_stack_top(void);
*/ #define STACK_TOP mips_stack_top()
#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
/* /*
* This decides where the kernel will search for a free chunk of vm * This decides where the kernel will search for a free chunk of vm


@ -32,6 +32,7 @@
#include <linux/nmi.h> #include <linux/nmi.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <asm/abi.h>
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/bootinfo.h> #include <asm/bootinfo.h>
#include <asm/cpu.h> #include <asm/cpu.h>
@ -39,6 +40,7 @@
#include <asm/dsp.h> #include <asm/dsp.h>
#include <asm/fpu.h> #include <asm/fpu.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/mips-cps.h>
#include <asm/msa.h> #include <asm/msa.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/mipsregs.h> #include <asm/mipsregs.h>
@ -645,6 +647,29 @@ unsigned long get_wchan(struct task_struct *task)
return pc; return pc;
} }
unsigned long mips_stack_top(void)
{
unsigned long top = TASK_SIZE & PAGE_MASK;
/* One page for branch delay slot "emulation" */
top -= PAGE_SIZE;
/* Space for the VDSO, data page & GIC user page */
top -= PAGE_ALIGN(current->thread.abi->vdso->size);
top -= PAGE_SIZE;
top -= mips_gic_present() ? PAGE_SIZE : 0;
/* Space for cache colour alignment */
if (cpu_has_dc_aliases)
top -= shm_align_mask + 1;
/* Space to randomize the VDSO base */
if (current->flags & PF_RANDOMIZE)
top -= VDSO_RANDOMIZE_SIZE;
return top;
}
/* /*
* Don't forget that the stack pointer must be aligned on a 8 bytes * Don't forget that the stack pointer must be aligned on a 8 bytes
* boundary for 32-bits ABI and 16 bytes for 64-bits ABI. * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.


@ -846,6 +846,34 @@ static void __init arch_mem_init(char **cmdline_p)
struct memblock_region *reg; struct memblock_region *reg;
extern void plat_mem_setup(void); extern void plat_mem_setup(void);
/*
* Initialize boot_command_line to an innocuous but non-empty string in
* order to prevent early_init_dt_scan_chosen() from copying
* CONFIG_CMDLINE into it without our knowledge. We handle
* CONFIG_CMDLINE ourselves below & don't want to duplicate its
* content because repeating arguments can be problematic.
*/
strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);
/* call board setup routine */
plat_mem_setup();
/*
* Make sure all kernel memory is in the maps. The "UP" and
* "DOWN" are opposite for initdata since if it crosses over
* into another memory section you don't want that to be
* freed when the initdata is freed.
*/
arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
BOOT_MEM_RAM);
arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
BOOT_MEM_INIT_RAM);
pr_info("Determined physical RAM map:\n");
print_memory_map();
#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE) #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else #else
@ -873,26 +901,6 @@ static void __init arch_mem_init(char **cmdline_p)
} }
#endif #endif
#endif #endif
/* call board setup routine */
plat_mem_setup();
/*
* Make sure all kernel memory is in the maps. The "UP" and
* "DOWN" are opposite for initdata since if it crosses over
* into another memory section you don't want that to be
* freed when the initdata is freed.
*/
arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
BOOT_MEM_RAM);
arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
BOOT_MEM_INIT_RAM);
pr_info("Determined physical RAM map:\n");
print_memory_map();
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line; *cmdline_p = command_line;


@ -15,6 +15,7 @@
#include <linux/ioport.h> #include <linux/ioport.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/timekeeper_internal.h> #include <linux/timekeeper_internal.h>
@ -97,6 +98,21 @@ void update_vsyscall_tz(void)
} }
} }
static unsigned long vdso_base(void)
{
unsigned long base;
/* Skip the delay slot emulation page */
base = STACK_TOP + PAGE_SIZE;
if (current->flags & PF_RANDOMIZE) {
base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
base = PAGE_ALIGN(base);
}
return base;
}
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{ {
struct mips_vdso_image *image = current->thread.abi->vdso; struct mips_vdso_image *image = current->thread.abi->vdso;
@ -137,7 +153,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (cpu_has_dc_aliases) if (cpu_has_dc_aliases)
size += shm_align_mask + 1; size += shm_align_mask + 1;
base = get_unmapped_area(NULL, 0, size, 0, 0); base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
if (IS_ERR_VALUE(base)) { if (IS_ERR_VALUE(base)) {
ret = base; ret = base;
goto out; goto out;


@ -280,9 +280,11 @@
* unset_bytes = end_addr - current_addr + 1 * unset_bytes = end_addr - current_addr + 1
* a2 = t1 - a0 + 1 * a2 = t1 - a0 + 1
*/ */
.set reorder
PTR_SUBU a2, t1, a0 PTR_SUBU a2, t1, a0
PTR_ADDIU a2, 1
jr ra jr ra
PTR_ADDIU a2, 1 .set noreorder
.endm .endm


@ -1306,6 +1306,16 @@ void show_user_instructions(struct pt_regs *regs)
pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int)); pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));
/*
* Make sure the NIP points at userspace, not kernel text/data or
* elsewhere.
*/
if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) {
pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
current->comm, current->pid);
return;
}
pr_info("%s[%d]: code: ", current->comm, current->pid); pr_info("%s[%d]: code: ", current->comm, current->pid);
for (i = 0; i < instructions_to_print; i++) { for (i = 0; i < instructions_to_print; i++) {


@ -28,12 +28,6 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
{ {
int err; int err;
/* Make sure we aren't patching a freed init section */
if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
return 0;
}
__put_user_size(instr, patch_addr, 4, err); __put_user_size(instr, patch_addr, 4, err);
if (err) if (err)
return err; return err;
@ -148,7 +142,7 @@ static inline int unmap_patch_area(unsigned long addr)
return 0; return 0;
} }
int patch_instruction(unsigned int *addr, unsigned int instr) static int do_patch_instruction(unsigned int *addr, unsigned int instr)
{ {
int err; int err;
unsigned int *patch_addr = NULL; unsigned int *patch_addr = NULL;
@ -188,12 +182,22 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
} }
#else /* !CONFIG_STRICT_KERNEL_RWX */ #else /* !CONFIG_STRICT_KERNEL_RWX */
int patch_instruction(unsigned int *addr, unsigned int instr) static int do_patch_instruction(unsigned int *addr, unsigned int instr)
{ {
return raw_patch_instruction(addr, instr); return raw_patch_instruction(addr, instr);
} }
#endif /* CONFIG_STRICT_KERNEL_RWX */ #endif /* CONFIG_STRICT_KERNEL_RWX */
int patch_instruction(unsigned int *addr, unsigned int instr)
{
/* Make sure we aren't patching a freed init section */
if (init_mem_is_free && init_section_contains(addr, 4)) {
pr_debug("Skipping init section patching addr: 0x%px\n", addr);
return 0;
}
return do_patch_instruction(addr, instr);
}
NOKPROBE_SYMBOL(patch_instruction); NOKPROBE_SYMBOL(patch_instruction);
int patch_branch(unsigned int *addr, unsigned long target, int flags) int patch_branch(unsigned int *addr, unsigned long target, int flags)


@ -1217,9 +1217,10 @@ int find_and_online_cpu_nid(int cpu)
* Need to ensure that NODE_DATA is initialized for a node from * Need to ensure that NODE_DATA is initialized for a node from
* available memory (see memblock_alloc_try_nid). If unable to * available memory (see memblock_alloc_try_nid). If unable to
* init the node, then default to nearest node that has memory * init the node, then default to nearest node that has memory
* installed. * installed. Skip onlining a node if the subsystems are not
* yet initialized.
*/ */
if (try_online_node(new_nid)) if (!topology_inited || try_online_node(new_nid))
new_nid = first_online_node; new_nid = first_online_node;
#else #else
/* /*


@ -108,7 +108,8 @@ int sclp_early_get_core_info(struct sclp_core_info *info);
void sclp_early_get_ipl_info(struct sclp_ipl_info *info); void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
void sclp_early_detect(void); void sclp_early_detect(void);
void sclp_early_printk(const char *s); void sclp_early_printk(const char *s);
void __sclp_early_printk(const char *s, unsigned int len); void sclp_early_printk_force(const char *s);
void __sclp_early_printk(const char *s, unsigned int len, unsigned int force);
int _sclp_get_core_info(struct sclp_core_info *info); int _sclp_get_core_info(struct sclp_core_info *info);
int sclp_core_configure(u8 core); int sclp_core_configure(u8 core);


@ -10,7 +10,7 @@
static void sclp_early_write(struct console *con, const char *s, unsigned int len) static void sclp_early_write(struct console *con, const char *s, unsigned int len)
{ {
__sclp_early_printk(s, len); __sclp_early_printk(s, len, 0);
} }
static struct console sclp_early_console = { static struct console sclp_early_console = {


@ -198,12 +198,10 @@ pgm_check_entry:
/* Suspend CPU not available -> panic */ /* Suspend CPU not available -> panic */
larl %r15,init_thread_union larl %r15,init_thread_union
ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
aghi %r15,-STACK_FRAME_OVERHEAD
larl %r2,.Lpanic_string larl %r2,.Lpanic_string
lghi %r1,0 brasl %r14,sclp_early_printk_force
sam31
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
brasl %r14,sclp_early_printk
larl %r3,.Ldisabled_wait_31 larl %r3,.Ldisabled_wait_31
lpsw 0(%r3) lpsw 0(%r3)
4: 4:


@ -115,8 +115,8 @@ static int auxio_probe(struct platform_device *dev)
auxio_devtype = AUXIO_TYPE_SBUS; auxio_devtype = AUXIO_TYPE_SBUS;
size = 1; size = 1;
} else { } else {
printk("auxio: Unknown parent bus type [%s]\n", printk("auxio: Unknown parent bus type [%pOFn]\n",
dp->parent->name); dp->parent);
return -ENODEV; return -ENODEV;
} }
auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio"); auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio");


@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->pc = addr; linux_regs->pc = addr;
linux_regs->npc = addr + 4; linux_regs->npc = addr + 4;
} }
/* fallthru */ /* fall through */
case 'D': case 'D':
case 'k': case 'k':


@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->tpc = addr; linux_regs->tpc = addr;
linux_regs->tnpc = addr + 4; linux_regs->tnpc = addr + 4;
} }
/* fallthru */ /* fall through */
case 'D': case 'D':
case 'k': case 'k':


@ -41,8 +41,8 @@ static int power_probe(struct platform_device *op)
power_reg = of_ioremap(res, 0, 0x4, "power"); power_reg = of_ioremap(res, 0, 0x4, "power");
printk(KERN_INFO "%s: Control reg at %llx\n", printk(KERN_INFO "%pOFn: Control reg at %llx\n",
op->dev.of_node->name, res->start); op->dev.of_node, res->start);
if (has_button_interrupt(irq, op->dev.of_node)) { if (has_button_interrupt(irq, op->dev.of_node)) {
if (request_irq(irq, if (request_irq(irq,


@ -68,8 +68,8 @@ static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf)
return; return;
regs = rprop->value; regs = rprop->value;
sprintf(tmp_buf, "%s@%x,%x", sprintf(tmp_buf, "%pOFn@%x,%x",
dp->name, dp,
regs->which_io, regs->phys_addr); regs->which_io, regs->phys_addr);
} }
@ -84,8 +84,8 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
return; return;
regs = prop->value; regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x", sprintf(tmp_buf, "%pOFn@%x,%x",
dp->name, dp,
regs->which_io, regs->which_io,
regs->phys_addr); regs->phys_addr);
} }
@ -104,13 +104,13 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value; regs = prop->value;
devfn = (regs->phys_hi >> 8) & 0xff; devfn = (regs->phys_hi >> 8) & 0xff;
if (devfn & 0x07) { if (devfn & 0x07) {
sprintf(tmp_buf, "%s@%x,%x", sprintf(tmp_buf, "%pOFn@%x,%x",
dp->name, dp,
devfn >> 3, devfn >> 3,
devfn & 0x07); devfn & 0x07);
} else { } else {
sprintf(tmp_buf, "%s@%x", sprintf(tmp_buf, "%pOFn@%x",
dp->name, dp,
devfn >> 3); devfn >> 3);
} }
} }
@ -127,8 +127,8 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value; regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x", sprintf(tmp_buf, "%pOFn@%x,%x",
dp->name, dp,
regs->which_io, regs->phys_addr); regs->which_io, regs->phys_addr);
} }
@ -167,8 +167,8 @@ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
return; return;
device = prop->value; device = prop->value;
sprintf(tmp_buf, "%s:%d:%d@%x,%x", sprintf(tmp_buf, "%pOFn:%d:%d@%x,%x",
dp->name, *vendor, *device, dp, *vendor, *device,
*intr, reg0); *intr, reg0);
} }
@ -201,7 +201,7 @@ char * __init build_path_component(struct device_node *dp)
tmp_buf[0] = '\0'; tmp_buf[0] = '\0';
__build_path_component(dp, tmp_buf); __build_path_component(dp, tmp_buf);
if (tmp_buf[0] == '\0') if (tmp_buf[0] == '\0')
strcpy(tmp_buf, dp->name); snprintf(tmp_buf, sizeof(tmp_buf), "%pOFn", dp);
n = prom_early_alloc(strlen(tmp_buf) + 1); n = prom_early_alloc(strlen(tmp_buf) + 1);
strcpy(n, tmp_buf); strcpy(n, tmp_buf);


@ -82,8 +82,8 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
regs = rprop->value; regs = rprop->value;
if (!of_node_is_root(dp->parent)) { if (!of_node_is_root(dp->parent)) {
sprintf(tmp_buf, "%s@%x,%x", sprintf(tmp_buf, "%pOFn@%x,%x",
dp->name, dp,
(unsigned int) (regs->phys_addr >> 32UL), (unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL)); (unsigned int) (regs->phys_addr & 0xffffffffUL));
return; return;
@ -97,17 +97,17 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
const char *prefix = (type == 0) ? "m" : "i"; const char *prefix = (type == 0) ? "m" : "i";
if (low_bits) if (low_bits)
sprintf(tmp_buf, "%s@%s%x,%x", sprintf(tmp_buf, "%pOFn@%s%x,%x",
dp->name, prefix, dp, prefix,
high_bits, low_bits); high_bits, low_bits);
else else
sprintf(tmp_buf, "%s@%s%x", sprintf(tmp_buf, "%pOFn@%s%x",
dp->name, dp,
prefix, prefix,
high_bits); high_bits);
} else if (type == 12) { } else if (type == 12) {
sprintf(tmp_buf, "%s@%x", sprintf(tmp_buf, "%pOFn@%x",
dp->name, high_bits); dp, high_bits);
} }
} }
@ -122,8 +122,8 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value; regs = prop->value;
if (!of_node_is_root(dp->parent)) { if (!of_node_is_root(dp->parent)) {
sprintf(tmp_buf, "%s@%x,%x", sprintf(tmp_buf, "%pOFn@%x,%x",
dp->name, dp,
(unsigned int) (regs->phys_addr >> 32UL), (unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL)); (unsigned int) (regs->phys_addr & 0xffffffffUL));
return; return;
@ -138,8 +138,8 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
if (tlb_type >= cheetah) if (tlb_type >= cheetah)
mask = 0x7fffff; mask = 0x7fffff;
sprintf(tmp_buf, "%s@%x,%x", sprintf(tmp_buf, "%pOFn@%x,%x",
dp->name, dp,
*(u32 *)prop->value, *(u32 *)prop->value,
(unsigned int) (regs->phys_addr & mask)); (unsigned int) (regs->phys_addr & mask));
} }
@ -156,8 +156,8 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
return; return;
regs = prop->value; regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x", sprintf(tmp_buf, "%pOFn@%x,%x",
dp->name, dp,
regs->which_io, regs->which_io,
regs->phys_addr); regs->phys_addr);
} }
@ -176,13 +176,13 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value; regs = prop->value;
devfn = (regs->phys_hi >> 8) & 0xff; devfn = (regs->phys_hi >> 8) & 0xff;
if (devfn & 0x07) { if (devfn & 0x07) {
sprintf(tmp_buf, "%s@%x,%x", sprintf(tmp_buf, "%pOFn@%x,%x",
dp->name, dp,
devfn >> 3, devfn >> 3,
devfn & 0x07); devfn & 0x07);
} else { } else {
sprintf(tmp_buf, "%s@%x", sprintf(tmp_buf, "%pOFn@%x",
dp->name, dp,
devfn >> 3); devfn >> 3);
} }
} }
@ -203,8 +203,8 @@ static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
if (!prop) if (!prop)
return; return;
sprintf(tmp_buf, "%s@%x,%x", sprintf(tmp_buf, "%pOFn@%x,%x",
dp->name, dp,
*(u32 *) prop->value, *(u32 *) prop->value,
(unsigned int) (regs->phys_addr & 0xffffffffUL)); (unsigned int) (regs->phys_addr & 0xffffffffUL));
} }
@ -221,7 +221,7 @@ static void __init vdev_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value; regs = prop->value;
sprintf(tmp_buf, "%s@%x", dp->name, *regs); sprintf(tmp_buf, "%pOFn@%x", dp, *regs);
} }
/* "name@addrhi,addrlo" */ /* "name@addrhi,addrlo" */
@ -236,8 +236,8 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value; regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x", sprintf(tmp_buf, "%pOFn@%x,%x",
dp->name, dp,
(unsigned int) (regs->phys_addr >> 32UL), (unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL)); (unsigned int) (regs->phys_addr & 0xffffffffUL));
} }
@ -257,8 +257,8 @@ static void __init i2c_path_component(struct device_node *dp, char *tmp_buf)
/* This actually isn't right... should look at the #address-cells /* This actually isn't right... should look at the #address-cells
* property of the i2c bus node etc. etc. * property of the i2c bus node etc. etc.
*/ */
sprintf(tmp_buf, "%s@%x,%x", sprintf(tmp_buf, "%pOFn@%x,%x",
dp->name, regs[0], regs[1]); dp, regs[0], regs[1]);
} }
/* "name@reg0[,reg1]" */ /* "name@reg0[,reg1]" */
@ -274,11 +274,11 @@ static void __init usb_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value; regs = prop->value;
if (prop->length == sizeof(u32) || regs[1] == 1) { if (prop->length == sizeof(u32) || regs[1] == 1) {
sprintf(tmp_buf, "%s@%x", sprintf(tmp_buf, "%pOFn@%x",
dp->name, regs[0]); dp, regs[0]);
} else { } else {
sprintf(tmp_buf, "%s@%x,%x", sprintf(tmp_buf, "%pOFn@%x,%x",
dp->name, regs[0], regs[1]); dp, regs[0], regs[1]);
} }
} }
@ -295,11 +295,11 @@ static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf
regs = prop->value; regs = prop->value;
if (regs[2] || regs[3]) { if (regs[2] || regs[3]) {
sprintf(tmp_buf, "%s@%08x%08x,%04x%08x", sprintf(tmp_buf, "%pOFn@%08x%08x,%04x%08x",
dp->name, regs[0], regs[1], regs[2], regs[3]); dp, regs[0], regs[1], regs[2], regs[3]);
} else { } else {
sprintf(tmp_buf, "%s@%08x%08x", sprintf(tmp_buf, "%pOFn@%08x%08x",
dp->name, regs[0], regs[1]); dp, regs[0], regs[1]);
} }
} }
@ -361,7 +361,7 @@ char * __init build_path_component(struct device_node *dp)
tmp_buf[0] = '\0'; tmp_buf[0] = '\0';
__build_path_component(dp, tmp_buf); __build_path_component(dp, tmp_buf);
if (tmp_buf[0] == '\0') if (tmp_buf[0] == '\0')
strcpy(tmp_buf, dp->name); snprintf(tmp_buf, sizeof(tmp_buf), "%pOFn", dp);
n = prom_early_alloc(strlen(tmp_buf) + 1); n = prom_early_alloc(strlen(tmp_buf) + 1);
strcpy(n, tmp_buf); strcpy(n, tmp_buf);


@ -180,11 +180,17 @@ static int send_dreg(struct vio_driver_state *vio)
struct vio_dring_register pkt; struct vio_dring_register pkt;
char all[sizeof(struct vio_dring_register) + char all[sizeof(struct vio_dring_register) +
(sizeof(struct ldc_trans_cookie) * (sizeof(struct ldc_trans_cookie) *
dr->ncookies)]; VIO_MAX_RING_COOKIES)];
} u; } u;
size_t bytes = sizeof(struct vio_dring_register) +
(sizeof(struct ldc_trans_cookie) *
dr->ncookies);
int i; int i;
memset(&u, 0, sizeof(u)); if (WARN_ON(bytes > sizeof(u)))
return -EINVAL;
memset(&u, 0, bytes);
init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG); init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
u.pkt.dring_ident = 0; u.pkt.dring_ident = 0;
u.pkt.num_descr = dr->num_entries; u.pkt.num_descr = dr->num_entries;
@ -206,7 +212,7 @@ static int send_dreg(struct vio_driver_state *vio)
(unsigned long long) u.pkt.cookies[i].cookie_size); (unsigned long long) u.pkt.cookies[i].cookie_size);
} }
return send_ctrl(vio, &u.pkt.tag, sizeof(u)); return send_ctrl(vio, &u.pkt.tag, bytes);
} }
static int send_rdx(struct vio_driver_state *vio) static int send_rdx(struct vio_driver_state *vio)


@ -31,23 +31,21 @@ obj-y += $(vdso_img_objs)
targets += $(vdso_img_cfiles) targets += $(vdso_img_cfiles)
targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so) targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
export CPPFLAGS_vdso.lds += -P -C CPPFLAGS_vdso.lds += -P -C
VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
-Wl,--no-undefined \ -Wl,--no-undefined \
-Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \ -Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \
$(DISABLE_LTO) $(DISABLE_LTO)
$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso) $(call if_changed,vdso)
HOST_EXTRACFLAGS += -I$(srctree)/tools/include HOST_EXTRACFLAGS += -I$(srctree)/tools/include
hostprogs-y += vdso2c hostprogs-y += vdso2c
quiet_cmd_vdso2c = VDSO2C $@ quiet_cmd_vdso2c = VDSO2C $@
define cmd_vdso2c cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
$(obj)/vdso2c $< $(<:%.dbg=%) $@
endef
$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
$(call if_changed,vdso2c) $(call if_changed,vdso2c)


@ -529,14 +529,14 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
int rdtgroup_schemata_show(struct kernfs_open_file *of, int rdtgroup_schemata_show(struct kernfs_open_file *of,
struct seq_file *s, void *v); struct seq_file *s, void *v);
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
u32 _cbm, int closid, bool exclusive); unsigned long cbm, int closid, bool exclusive);
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
u32 cbm); unsigned long cbm);
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
int rdtgroup_tasks_assigned(struct rdtgroup *r); int rdtgroup_tasks_assigned(struct rdtgroup *r);
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm); bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm);
bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
int rdt_pseudo_lock_init(void); int rdt_pseudo_lock_init(void);
void rdt_pseudo_lock_release(void); void rdt_pseudo_lock_release(void);


@ -797,25 +797,27 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
/** /**
* rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
* @d: RDT domain * @d: RDT domain
* @_cbm: CBM to test * @cbm: CBM to test
* *
* @d represents a cache instance and @_cbm a capacity bitmask that is * @d represents a cache instance and @cbm a capacity bitmask that is
* considered for it. Determine if @_cbm overlaps with any existing * considered for it. Determine if @cbm overlaps with any existing
* pseudo-locked region on @d. * pseudo-locked region on @d.
* *
* Return: true if @_cbm overlaps with pseudo-locked region on @d, false * @cbm is unsigned long, even if only 32 bits are used, to make the
* bitmap functions work correctly.
*
* Return: true if @cbm overlaps with pseudo-locked region on @d, false
* otherwise. * otherwise.
*/ */
bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm) bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
{ {
unsigned long *cbm = (unsigned long *)&_cbm;
unsigned long *cbm_b;
unsigned int cbm_len; unsigned int cbm_len;
unsigned long cbm_b;
if (d->plr) { if (d->plr) {
cbm_len = d->plr->r->cache.cbm_len; cbm_len = d->plr->r->cache.cbm_len;
cbm_b = (unsigned long *)&d->plr->cbm; cbm_b = d->plr->cbm;
if (bitmap_intersects(cbm, cbm_b, cbm_len)) if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
return true; return true;
} }
return false; return false;


@ -975,33 +975,34 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
* is false then overlaps with any resource group or hardware entities * is false then overlaps with any resource group or hardware entities
* will be considered. * will be considered.
* *
* @cbm is unsigned long, even if only 32 bits are used, to make the
* bitmap functions work correctly.
*
* Return: false if CBM does not overlap, true if it does. * Return: false if CBM does not overlap, true if it does.
*/ */
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
u32 _cbm, int closid, bool exclusive) unsigned long cbm, int closid, bool exclusive)
{ {
unsigned long *cbm = (unsigned long *)&_cbm;
unsigned long *ctrl_b;
enum rdtgrp_mode mode; enum rdtgrp_mode mode;
unsigned long ctrl_b;
u32 *ctrl; u32 *ctrl;
int i; int i;
/* Check for any overlap with regions used by hardware directly */ /* Check for any overlap with regions used by hardware directly */
if (!exclusive) { if (!exclusive) {
if (bitmap_intersects(cbm, ctrl_b = r->cache.shareable_bits;
(unsigned long *)&r->cache.shareable_bits, if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
r->cache.cbm_len))
return true; return true;
} }
/* Check for overlap with other resource groups */ /* Check for overlap with other resource groups */
ctrl = d->ctrl_val; ctrl = d->ctrl_val;
for (i = 0; i < closids_supported(); i++, ctrl++) { for (i = 0; i < closids_supported(); i++, ctrl++) {
ctrl_b = (unsigned long *)ctrl; ctrl_b = *ctrl;
mode = rdtgroup_mode_by_closid(i); mode = rdtgroup_mode_by_closid(i);
if (closid_allocated(i) && i != closid && if (closid_allocated(i) && i != closid &&
mode != RDT_MODE_PSEUDO_LOCKSETUP) { mode != RDT_MODE_PSEUDO_LOCKSETUP) {
if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) { if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
if (exclusive) { if (exclusive) {
if (mode == RDT_MODE_EXCLUSIVE) if (mode == RDT_MODE_EXCLUSIVE)
return true; return true;
@ -1138,15 +1139,18 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
* computed by first dividing the total cache size by the CBM length to * computed by first dividing the total cache size by the CBM length to
* determine how many bytes each bit in the bitmask represents. The result * determine how many bytes each bit in the bitmask represents. The result
* is multiplied with the number of bits set in the bitmask. * is multiplied with the number of bits set in the bitmask.
*
* @cbm is unsigned long, even if only 32 bits are used to make the
* bitmap functions work correctly.
*/ */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
struct rdt_domain *d, u32 cbm) struct rdt_domain *d, unsigned long cbm)
{ {
struct cpu_cacheinfo *ci; struct cpu_cacheinfo *ci;
unsigned int size = 0; unsigned int size = 0;
int num_b, i; int num_b, i;
num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len); num_b = bitmap_weight(&cbm, r->cache.cbm_len);
ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
for (i = 0; i < ci->num_leaves; i++) { for (i = 0; i < ci->num_leaves; i++) {
if (ci->info_list[i].level == r->cache_level) { if (ci->info_list[i].level == r->cache_level) {
@ -2353,6 +2357,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
u32 used_b = 0, unused_b = 0; u32 used_b = 0, unused_b = 0;
u32 closid = rdtgrp->closid; u32 closid = rdtgrp->closid;
struct rdt_resource *r; struct rdt_resource *r;
unsigned long tmp_cbm;
enum rdtgrp_mode mode; enum rdtgrp_mode mode;
struct rdt_domain *d; struct rdt_domain *d;
int i, ret; int i, ret;
@ -2390,9 +2395,14 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
* modify the CBM based on system availability. * modify the CBM based on system availability.
*/ */
cbm_ensure_valid(&d->new_ctrl, r); cbm_ensure_valid(&d->new_ctrl, r);
if (bitmap_weight((unsigned long *) &d->new_ctrl, /*
r->cache.cbm_len) < * Assign the u32 CBM to an unsigned long to ensure
r->cache.min_cbm_bits) { * that bitmap_weight() does not access out-of-bound
* memory.
*/
tmp_cbm = d->new_ctrl;
if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
r->cache.min_cbm_bits) {
rdt_last_cmd_printf("no space on %s:%d\n", rdt_last_cmd_printf("no space on %s:%d\n",
r->name, d->id); r->name, d->id);
return -ENOSPC; return -ENOSPC;


@ -115,6 +115,8 @@ static inline void pgd_list_del(pgd_t *pgd)
#define UNSHARED_PTRS_PER_PGD \ #define UNSHARED_PTRS_PER_PGD \
(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
#define MAX_UNSHARED_PTRS_PER_PGD \
max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
@ -181,6 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
* and initialize the kernel pmds here. * and initialize the kernel pmds here.
*/ */
#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD #define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
#define MAX_PREALLOCATED_PMDS MAX_UNSHARED_PTRS_PER_PGD
/* /*
* We allocate separate PMDs for the kernel part of the user page-table * We allocate separate PMDs for the kernel part of the user page-table
@ -189,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
*/ */
#define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \ #define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \
KERNEL_PGD_PTRS : 0) KERNEL_PGD_PTRS : 0)
#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{ {
@ -210,7 +214,9 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
/* No need to prepopulate any pagetable entries in non-PAE modes. */ /* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS 0 #define PREALLOCATED_PMDS 0
#define MAX_PREALLOCATED_PMDS 0
#define PREALLOCATED_USER_PMDS 0 #define PREALLOCATED_USER_PMDS 0
#define MAX_PREALLOCATED_USER_PMDS 0
#endif /* CONFIG_X86_PAE */ #endif /* CONFIG_X86_PAE */
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count) static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
@ -428,8 +434,8 @@ static inline void _pgd_free(pgd_t *pgd)
pgd_t *pgd_alloc(struct mm_struct *mm) pgd_t *pgd_alloc(struct mm_struct *mm)
{ {
pgd_t *pgd; pgd_t *pgd;
pmd_t *u_pmds[PREALLOCATED_USER_PMDS]; pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
pmd_t *pmds[PREALLOCATED_PMDS]; pmd_t *pmds[MAX_PREALLOCATED_PMDS];
pgd = _pgd_alloc(); pgd = _pgd_alloc();


@ -226,8 +226,11 @@ static int alloc_lookup_fw_priv(const char *fw_name,
} }
tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size); tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
if (tmp && !(opt_flags & FW_OPT_NOCACHE)) if (tmp) {
list_add(&tmp->list, &fwc->head); INIT_LIST_HEAD(&tmp->list);
if (!(opt_flags & FW_OPT_NOCACHE))
list_add(&tmp->list, &fwc->head);
}
spin_unlock(&fwc->lock); spin_unlock(&fwc->lock);
*fw_priv = tmp; *fw_priv = tmp;


@ -1356,7 +1356,7 @@ static int qca_init_regulators(struct qca_power *qca,
{ {
int i; int i;
qca->vreg_bulk = devm_kzalloc(qca->dev, num_vregs * qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
sizeof(struct regulator_bulk_data), sizeof(struct regulator_bulk_data),
GFP_KERNEL); GFP_KERNEL);
if (!qca->vreg_bulk) if (!qca->vreg_bulk)


@ -1044,7 +1044,8 @@ static int safexcel_probe(struct platform_device *pdev)
safexcel_configure(priv); safexcel_configure(priv);
priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring), priv->ring = devm_kcalloc(dev, priv->config.rings,
sizeof(*priv->ring),
GFP_KERNEL); GFP_KERNEL);
if (!priv->ring) { if (!priv->ring) {
ret = -ENOMEM; ret = -ENOMEM;
@ -1063,8 +1064,9 @@ static int safexcel_probe(struct platform_device *pdev)
if (ret) if (ret)
goto err_reg_clk; goto err_reg_clk;
priv->ring[i].rdr_req = devm_kzalloc(dev, priv->ring[i].rdr_req = devm_kcalloc(dev,
sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE, EIP197_DEFAULT_RING_SIZE,
sizeof(priv->ring[i].rdr_req),
GFP_KERNEL); GFP_KERNEL);
if (!priv->ring[i].rdr_req) { if (!priv->ring[i].rdr_req) {
ret = -ENOMEM; ret = -ENOMEM;


@ -14,6 +14,7 @@
*/ */
#include <linux/module.h> #include <linux/module.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-region.h> #include <linux/fpga/fpga-region.h>
#include "dfl-fme-pr.h" #include "dfl-fme-pr.h"
@ -66,9 +67,10 @@ static int fme_region_probe(struct platform_device *pdev)
static int fme_region_remove(struct platform_device *pdev) static int fme_region_remove(struct platform_device *pdev)
{ {
struct fpga_region *region = dev_get_drvdata(&pdev->dev); struct fpga_region *region = dev_get_drvdata(&pdev->dev);
struct fpga_manager *mgr = region->mgr;
fpga_region_unregister(region); fpga_region_unregister(region);
fpga_mgr_put(region->mgr); fpga_mgr_put(mgr);
return 0; return 0;
} }


@ -125,7 +125,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
* *
* Given a device, get an exclusive reference to a fpga bridge. * Given a device, get an exclusive reference to a fpga bridge.
* *
* Return: fpga manager struct or IS_ERR() condition containing error code. * Return: fpga bridge struct or IS_ERR() condition containing error code.
*/ */
struct fpga_bridge *fpga_bridge_get(struct device *dev, struct fpga_bridge *fpga_bridge_get(struct device *dev,
struct fpga_image_info *info) struct fpga_image_info *info)


@ -437,9 +437,10 @@ static int of_fpga_region_probe(struct platform_device *pdev)
static int of_fpga_region_remove(struct platform_device *pdev) static int of_fpga_region_remove(struct platform_device *pdev)
{ {
struct fpga_region *region = platform_get_drvdata(pdev); struct fpga_region *region = platform_get_drvdata(pdev);
struct fpga_manager *mgr = region->mgr;
fpga_region_unregister(region); fpga_region_unregister(region);
fpga_mgr_put(region->mgr); fpga_mgr_put(mgr);
return 0; return 0;
} }


@ -1682,7 +1682,8 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
irq_set_chained_handler_and_data(parent_irq, parent_handler, irq_set_chained_handler_and_data(parent_irq, parent_handler,
gpiochip); gpiochip);
gpiochip->irq.parents = &parent_irq; gpiochip->irq.parent_irq = parent_irq;
gpiochip->irq.parents = &gpiochip->irq.parent_irq;
gpiochip->irq.num_parents = 1; gpiochip->irq.num_parents = 1;
} }


@ -600,7 +600,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
} }
mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr * mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
sizeof(struct drm_plane), sizeof(struct drm_plane),
GFP_KERNEL); GFP_KERNEL);


@ -153,8 +153,8 @@ int msm_dss_parse_clock(struct platform_device *pdev,
return 0; return 0;
} }
mp->clk_config = devm_kzalloc(&pdev->dev, mp->clk_config = devm_kcalloc(&pdev->dev,
sizeof(struct dss_clk) * num_clk, num_clk, sizeof(struct dss_clk),
GFP_KERNEL); GFP_KERNEL);
if (!mp->clk_config) if (!mp->clk_config)
return -ENOMEM; return -ENOMEM;


@ -900,9 +900,22 @@ static enum drm_connector_status
nv50_mstc_detect(struct drm_connector *connector, bool force) nv50_mstc_detect(struct drm_connector *connector, bool force)
{ {
struct nv50_mstc *mstc = nv50_mstc(connector); struct nv50_mstc *mstc = nv50_mstc(connector);
enum drm_connector_status conn_status;
int ret;
if (!mstc->port) if (!mstc->port)
return connector_status_disconnected; return connector_status_disconnected;
return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
ret = pm_runtime_get_sync(connector->dev->dev);
if (ret < 0 && ret != -EACCES)
return connector_status_disconnected;
conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
mstc->port);
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
return conn_status;
} }
static void static void


@ -76,6 +76,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
__u32 version) __u32 version)
{ {
int ret = 0; int ret = 0;
unsigned int cur_cpu;
struct vmbus_channel_initiate_contact *msg; struct vmbus_channel_initiate_contact *msg;
unsigned long flags; unsigned long flags;
@ -118,9 +119,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
* the CPU attempting to connect may not be CPU 0. * the CPU attempting to connect may not be CPU 0.
*/ */
if (version >= VERSION_WIN8_1) { if (version >= VERSION_WIN8_1) {
msg->target_vcpu = cur_cpu = get_cpu();
hv_cpu_number_to_vp_number(smp_processor_id()); msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
vmbus_connection.connect_cpu = smp_processor_id(); vmbus_connection.connect_cpu = cur_cpu;
put_cpu();
} else { } else {
msg->target_vcpu = 0; msg->target_vcpu = 0;
vmbus_connection.connect_cpu = 0; vmbus_connection.connect_cpu = 0;


@ -908,7 +908,7 @@ static int npcm7xx_en_pwm_fan(struct device *dev,
if (fan_cnt < 1) if (fan_cnt < 1)
return -EINVAL; return -EINVAL;
fan_ch = devm_kzalloc(dev, sizeof(*fan_ch) * fan_cnt, GFP_KERNEL); fan_ch = devm_kcalloc(dev, fan_cnt, sizeof(*fan_ch), GFP_KERNEL);
if (!fan_ch) if (!fan_ch)
return -ENOMEM; return -ENOMEM;


@ -34,11 +34,11 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{ {
u32 ic_clk = i2c_dw_clk_rate(dev);
const char *mode_str, *fp_str = ""; const char *mode_str, *fp_str = "";
u32 comp_param1; u32 comp_param1;
u32 sda_falling_time, scl_falling_time; u32 sda_falling_time, scl_falling_time;
struct i2c_timings *t = &dev->timings; struct i2c_timings *t = &dev->timings;
u32 ic_clk;
int ret; int ret;
ret = i2c_dw_acquire_lock(dev); ret = i2c_dw_acquire_lock(dev);
@ -53,6 +53,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
/* Calculate SCL timing parameters for standard mode if not set */ /* Calculate SCL timing parameters for standard mode if not set */
if (!dev->ss_hcnt || !dev->ss_lcnt) { if (!dev->ss_hcnt || !dev->ss_lcnt) {
ic_clk = i2c_dw_clk_rate(dev);
dev->ss_hcnt = dev->ss_hcnt =
i2c_dw_scl_hcnt(ic_clk, i2c_dw_scl_hcnt(ic_clk,
4000, /* tHD;STA = tHIGH = 4.0 us */ 4000, /* tHD;STA = tHIGH = 4.0 us */
@ -89,6 +90,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
* needed also in high speed mode. * needed also in high speed mode.
*/ */
if (!dev->fs_hcnt || !dev->fs_lcnt) { if (!dev->fs_hcnt || !dev->fs_lcnt) {
ic_clk = i2c_dw_clk_rate(dev);
dev->fs_hcnt = dev->fs_hcnt =
i2c_dw_scl_hcnt(ic_clk, i2c_dw_scl_hcnt(ic_clk,
600, /* tHD;STA = tHIGH = 0.6 us */ 600, /* tHD;STA = tHIGH = 0.6 us */


@ -164,7 +164,7 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr,
* run ~75 kHz instead which should do no harm. * run ~75 kHz instead which should do no harm.
*/ */
dev_notice(&sch_adapter.dev, dev_notice(&sch_adapter.dev,
"Clock divider unitialized. Setting defaults\n"); "Clock divider uninitialized. Setting defaults\n");
outw(backbone_speed / (4 * 100), SMBHSTCLK); outw(backbone_speed / (4 * 100), SMBHSTCLK);
} }


@ -367,20 +367,26 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_addr_t rx_dma; dma_addr_t rx_dma;
enum geni_se_xfer_mode mode; enum geni_se_xfer_mode mode;
unsigned long time_left = XFER_TIMEOUT; unsigned long time_left = XFER_TIMEOUT;
void *dma_buf;
gi2c->cur = msg; gi2c->cur = msg;
mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO; mode = GENI_SE_FIFO;
dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
if (dma_buf)
mode = GENI_SE_DMA;
geni_se_select_mode(&gi2c->se, mode); geni_se_select_mode(&gi2c->se, mode);
writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN); writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN);
geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param); geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param);
if (mode == GENI_SE_DMA) { if (mode == GENI_SE_DMA) {
int ret; int ret;
ret = geni_se_rx_dma_prep(&gi2c->se, msg->buf, msg->len, ret = geni_se_rx_dma_prep(&gi2c->se, dma_buf, msg->len,
&rx_dma); &rx_dma);
if (ret) { if (ret) {
mode = GENI_SE_FIFO; mode = GENI_SE_FIFO;
geni_se_select_mode(&gi2c->se, mode); geni_se_select_mode(&gi2c->se, mode);
i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
} }
} }
@ -393,6 +399,7 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
if (gi2c->err) if (gi2c->err)
geni_i2c_rx_fsm_rst(gi2c); geni_i2c_rx_fsm_rst(gi2c);
geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len); geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len);
i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
} }
return gi2c->err; return gi2c->err;
} }
@ -403,20 +410,26 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_addr_t tx_dma; dma_addr_t tx_dma;
enum geni_se_xfer_mode mode; enum geni_se_xfer_mode mode;
unsigned long time_left; unsigned long time_left;
void *dma_buf;
gi2c->cur = msg; gi2c->cur = msg;
mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO; mode = GENI_SE_FIFO;
dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
if (dma_buf)
mode = GENI_SE_DMA;
geni_se_select_mode(&gi2c->se, mode); geni_se_select_mode(&gi2c->se, mode);
writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN); writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN);
geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param); geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param);
if (mode == GENI_SE_DMA) { if (mode == GENI_SE_DMA) {
int ret; int ret;
ret = geni_se_tx_dma_prep(&gi2c->se, msg->buf, msg->len, ret = geni_se_tx_dma_prep(&gi2c->se, dma_buf, msg->len,
&tx_dma); &tx_dma);
if (ret) { if (ret) {
mode = GENI_SE_FIFO; mode = GENI_SE_FIFO;
geni_se_select_mode(&gi2c->se, mode); geni_se_select_mode(&gi2c->se, mode);
i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
} }
} }
@ -432,6 +445,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
if (gi2c->err) if (gi2c->err)
geni_i2c_tx_fsm_rst(gi2c); geni_i2c_tx_fsm_rst(gi2c);
geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len); geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len);
i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
} }
return gi2c->err; return gi2c->err;
} }


@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
mt_params[3].type = ACPI_TYPE_INTEGER; mt_params[3].type = ACPI_TYPE_INTEGER;
mt_params[3].integer.value = len; mt_params[3].integer.value = len;
mt_params[4].type = ACPI_TYPE_BUFFER; mt_params[4].type = ACPI_TYPE_BUFFER;
mt_params[4].buffer.length = len;
mt_params[4].buffer.pointer = data->block + 1; mt_params[4].buffer.pointer = data->block + 1;
} }
break; break;


@ -544,6 +544,9 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
int shrink = 0; int shrink = 0;
int c; int c;
if (!mr->allocated_from_cache)
return;
c = order2idx(dev, mr->order); c = order2idx(dev, mr->order);
if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c); mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
@ -1647,18 +1650,19 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
umem = NULL; umem = NULL;
} }
#endif #endif
clean_mr(dev, mr); clean_mr(dev, mr);
/*
* We should unregister the DMA address from the HCA before
* remove the DMA mapping.
*/
mlx5_mr_cache_free(dev, mr);
if (umem) { if (umem) {
ib_umem_release(umem); ib_umem_release(umem);
atomic_sub(npages, &dev->mdev->priv.reg_pages); atomic_sub(npages, &dev->mdev->priv.reg_pages);
} }
if (!mr->allocated_from_cache) if (!mr->allocated_from_cache)
kfree(mr); kfree(mr);
else
mlx5_mr_cache_free(dev, mr);
} }
int mlx5_ib_dereg_mr(struct ib_mr *ibmr) int mlx5_ib_dereg_mr(struct ib_mr *ibmr)


@ -564,6 +564,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
input_inject_event(&evdev->handle, input_inject_event(&evdev->handle,
event.type, event.code, event.value); event.type, event.code, event.value);
cond_resched();
} }
out: out:


@ -231,6 +231,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@ -530,6 +531,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2), XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1), XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1),
XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2), XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1),
XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),


@ -598,6 +598,7 @@ static ssize_t uinput_inject_events(struct uinput_device *udev,
input_event(udev->dev, ev.type, ev.code, ev.value); input_event(udev->dev, ev.type, ev.code, ev.value);
bytes += input_event_size(); bytes += input_event_size();
cond_resched();
} }
return bytes; return bytes;


@ -707,6 +707,7 @@ static ssize_t mousedev_write(struct file *file, const char __user *buffer,
mousedev_generate_response(client, c); mousedev_generate_response(client, c);
spin_unlock_irq(&client->packet_lock); spin_unlock_irq(&client->packet_lock);
cond_resched();
} }
kill_fasync(&client->fasync, SIGIO, POLL_IN); kill_fasync(&client->fasync, SIGIO, POLL_IN);


@ -1395,15 +1395,26 @@ static void __init i8042_register_ports(void)
for (i = 0; i < I8042_NUM_PORTS; i++) { for (i = 0; i < I8042_NUM_PORTS; i++) {
struct serio *serio = i8042_ports[i].serio; struct serio *serio = i8042_ports[i].serio;
if (serio) { if (!serio)
printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n", continue;
serio->name,
(unsigned long) I8042_DATA_REG, printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n",
(unsigned long) I8042_COMMAND_REG, serio->name,
i8042_ports[i].irq); (unsigned long) I8042_DATA_REG,
serio_register_port(serio); (unsigned long) I8042_COMMAND_REG,
device_set_wakeup_capable(&serio->dev, true); i8042_ports[i].irq);
} serio_register_port(serio);
device_set_wakeup_capable(&serio->dev, true);
/*
* On platforms using suspend-to-idle, allow the keyboard to
* wake up the system from sleep by enabling keyboard wakeups
* by default. This is consistent with keyboard wakeup
* behavior on many platforms using suspend-to-RAM (ACPI S3)
* by default.
*/
if (pm_suspend_via_s2idle() && i == I8042_KBD_PORT_NO)
device_set_wakeup_enable(&serio->dev, true);
} }
} }


@ -3484,14 +3484,13 @@ static int __init dm_cache_init(void)
int r; int r;
migration_cache = KMEM_CACHE(dm_cache_migration, 0); migration_cache = KMEM_CACHE(dm_cache_migration, 0);
if (!migration_cache) { if (!migration_cache)
dm_unregister_target(&cache_target);
return -ENOMEM; return -ENOMEM;
}
r = dm_register_target(&cache_target); r = dm_register_target(&cache_target);
if (r) { if (r) {
DMERR("cache target registration failed: %d", r); DMERR("cache target registration failed: %d", r);
kmem_cache_destroy(migration_cache);
return r; return r;
} }


@ -467,7 +467,9 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = { static struct target_type flakey_target = {
.name = "flakey", .name = "flakey",
.version = {1, 5, 0}, .version = {1, 5, 0},
#ifdef CONFIG_BLK_DEV_ZONED
.features = DM_TARGET_ZONED_HM, .features = DM_TARGET_ZONED_HM,
#endif
.module = THIS_MODULE, .module = THIS_MODULE,
.ctr = flakey_ctr, .ctr = flakey_ctr,
.dtr = flakey_dtr, .dtr = flakey_dtr,


@ -3462,7 +3462,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = -ENOMEM; r = -ENOMEM;
goto bad; goto bad;
} }
ic->recalc_tags = kvmalloc((RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size, GFP_KERNEL); ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
ic->tag_size, GFP_KERNEL);
if (!ic->recalc_tags) { if (!ic->recalc_tags) {
ti->error = "Cannot allocate tags for recalculating"; ti->error = "Cannot allocate tags for recalculating";
r = -ENOMEM; r = -ENOMEM;


@ -102,6 +102,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;
} }
#ifdef CONFIG_BLK_DEV_ZONED
static int linear_end_io(struct dm_target *ti, struct bio *bio, static int linear_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error) blk_status_t *error)
{ {
@ -112,6 +113,7 @@ static int linear_end_io(struct dm_target *ti, struct bio *bio,
return DM_ENDIO_DONE; return DM_ENDIO_DONE;
} }
#endif
static void linear_status(struct dm_target *ti, status_type_t type, static void linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen) unsigned status_flags, char *result, unsigned maxlen)
@ -208,12 +210,16 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
static struct target_type linear_target = { static struct target_type linear_target = {
.name = "linear", .name = "linear",
.version = {1, 4, 0}, .version = {1, 4, 0},
#ifdef CONFIG_BLK_DEV_ZONED
.end_io = linear_end_io,
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM, .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
#else
.features = DM_TARGET_PASSES_INTEGRITY,
#endif
.module = THIS_MODULE, .module = THIS_MODULE,
.ctr = linear_ctr, .ctr = linear_ctr,
.dtr = linear_dtr, .dtr = linear_dtr,
.map = linear_map, .map = linear_map,
.end_io = linear_end_io,
.status = linear_status, .status = linear_status,
.prepare_ioctl = linear_prepare_ioctl, .prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices, .iterate_devices = linear_iterate_devices,


@ -1155,12 +1155,14 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
EXPORT_SYMBOL_GPL(dm_accept_partial_bio); EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
/* /*
* The zone descriptors obtained with a zone report indicate * The zone descriptors obtained with a zone report indicate zone positions
* zone positions within the target device. The zone descriptors * within the target backing device, regardless of that device is a partition
* must be remapped to match their position within the dm device. * and regardless of the target mapping start sector on the device or partition.
* A target may call dm_remap_zone_report after completion of a * The zone descriptors start sector and write pointer position must be adjusted
* REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained * to match their relative position within the dm device.
* from the target device mapping to the dm device. * A target may call dm_remap_zone_report() after completion of a
* REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
* backing device.
*/ */
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start) void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{ {
@ -1171,6 +1173,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
struct blk_zone *zone; struct blk_zone *zone;
unsigned int nr_rep = 0; unsigned int nr_rep = 0;
unsigned int ofst; unsigned int ofst;
sector_t part_offset;
struct bio_vec bvec; struct bio_vec bvec;
struct bvec_iter iter; struct bvec_iter iter;
void *addr; void *addr;
@ -1178,6 +1181,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
if (bio->bi_status) if (bio->bi_status)
return; return;
/*
* bio sector was incremented by the request size on completion. Taking
* into account the original request sector, the target start offset on
* the backing device and the target mapping offset (ti->begin), the
* start sector of the backing device. The partition offset is always 0
* if the target uses a whole device.
*/
part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
/* /*
* Remap the start sector of the reported zones. For sequential zones, * Remap the start sector of the reported zones. For sequential zones,
* also remap the write pointer position. * also remap the write pointer position.
@ -1195,6 +1207,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
/* Set zones start sector */ /* Set zones start sector */
while (hdr->nr_zones && ofst < bvec.bv_len) { while (hdr->nr_zones && ofst < bvec.bv_len) {
zone = addr + ofst; zone = addr + ofst;
zone->start -= part_offset;
if (zone->start >= start + ti->len) { if (zone->start >= start + ti->len) {
hdr->nr_zones = 0; hdr->nr_zones = 0;
break; break;
@ -1206,7 +1219,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
else if (zone->cond == BLK_ZONE_COND_EMPTY) else if (zone->cond == BLK_ZONE_COND_EMPTY)
zone->wp = zone->start; zone->wp = zone->start;
else else
zone->wp = zone->wp + ti->begin - start; zone->wp = zone->wp + ti->begin - start - part_offset;
} }
ofst += sizeof(struct blk_zone); ofst += sizeof(struct blk_zone);
hdr->nr_zones--; hdr->nr_zones--;


@ -1370,6 +1370,16 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blocks = card->host->max_blk_count; brq->data.blocks = card->host->max_blk_count;
if (brq->data.blocks > 1) { if (brq->data.blocks > 1) {
/*
* Some SD cards in SPI mode return a CRC error or even lock up
* completely when trying to read the last block using a
* multiblock read command.
*/
if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
(blk_rq_pos(req) + blk_rq_sectors(req) ==
get_capacity(md->disk)))
brq->data.blocks--;
/* /*
* After a read error, we redo the request one sector * After a read error, we redo the request one sector
* at a time in order to accurately determine which * at a time in order to accurately determine which


@ -128,4 +128,4 @@ module_spi_driver(adgs1408_driver);
MODULE_AUTHOR("Mircea Caprioru <mircea.caprioru@analog.com>"); MODULE_AUTHOR("Mircea Caprioru <mircea.caprioru@analog.com>");
MODULE_DESCRIPTION("Analog Devices ADGS1408 MUX driver"); MODULE_DESCRIPTION("Analog Devices ADGS1408 MUX driver");
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL");

View File

@ -349,11 +349,11 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
ena_rx_ctx->l3_csum_err = ena_rx_ctx->l3_csum_err =
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT; ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
ena_rx_ctx->l4_csum_err = ena_rx_ctx->l4_csum_err =
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT; ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
ena_rx_ctx->l4_csum_checked = ena_rx_ctx->l4_csum_checked =
!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >> !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT); ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);


@ -1595,8 +1595,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
if (rc) if (rc)
return rc; return rc;
ena_init_napi(adapter);
ena_change_mtu(adapter->netdev, adapter->netdev->mtu); ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
ena_refill_all_rx_bufs(adapter); ena_refill_all_rx_bufs(adapter);
@ -1754,6 +1752,13 @@ static int ena_up(struct ena_adapter *adapter)
ena_setup_io_intr(adapter); ena_setup_io_intr(adapter);
/* napi poll functions should be initialized before running
* request_irq(), to handle a rare condition where there is a pending
* interrupt, causing the ISR to fire immediately while the poll
* function wasn't set yet, causing a null dereference
*/
ena_init_napi(adapter);
rc = ena_request_io_irq(adapter); rc = ena_request_io_irq(adapter);
if (rc) if (rc)
goto err_req_irq; goto err_req_irq;
@ -2686,7 +2691,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
ena_free_mgmnt_irq(adapter); ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter); ena_disable_msix(adapter);
err_device_destroy: err_device_destroy:
ena_com_abort_admin_commands(ena_dev);
ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev); ena_com_admin_destroy(ena_dev);
ena_com_mmio_reg_read_request_destroy(ena_dev);
ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
err: err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@ -3200,15 +3209,8 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{ {
int release_bars; int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
if (ena_dev->mem_bar)
devm_iounmap(&pdev->dev, ena_dev->mem_bar);
if (ena_dev->reg_bar)
devm_iounmap(&pdev->dev, ena_dev->reg_bar);
release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
pci_release_selected_regions(pdev, release_bars); pci_release_selected_regions(pdev, release_bars);
} }


@ -260,47 +260,34 @@ static const struct devlink_param mlx4_devlink_params[] = {
NULL, NULL, NULL), NULL, NULL, NULL),
}; };
static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id,
union devlink_param_value init_val)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
int err;
err = devlink_param_driverinit_value_set(devlink, param_id, init_val);
if (err)
mlx4_warn(dev,
"devlink set parameter %u value failed (err = %d)",
param_id, err);
}
static void mlx4_devlink_set_params_init_values(struct devlink *devlink) static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
{ {
union devlink_param_value value; union devlink_param_value value;
value.vbool = !!mlx4_internal_err_reset; value.vbool = !!mlx4_internal_err_reset;
mlx4_devlink_set_init_value(devlink, devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
value); value);
value.vu32 = 1UL << log_num_mac; value.vu32 = 1UL << log_num_mac;
mlx4_devlink_set_init_value(devlink, devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value); DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
value);
value.vbool = enable_64b_cqe_eqe; value.vbool = enable_64b_cqe_eqe;
mlx4_devlink_set_init_value(devlink, devlink_param_driverinit_value_set(devlink,
MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
value); value);
value.vbool = enable_4k_uar; value.vbool = enable_4k_uar;
mlx4_devlink_set_init_value(devlink, devlink_param_driverinit_value_set(devlink,
MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
value); value);
value.vbool = false; value.vbool = false;
mlx4_devlink_set_init_value(devlink, devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
value); value);
} }
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,


@ -4269,8 +4269,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
break; break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
case RTL_GIGA_MAC_VER_35: case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break; break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:


@ -735,8 +735,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
u16 idx = dring->tail; u16 idx = dring->tail;
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
/* reading the register clears the irq */
netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
break; break;
}
/* This barrier is needed to keep us from reading /* This barrier is needed to keep us from reading
* any other fields out of the netsec_de until we have * any other fields out of the netsec_de until we have


@ -163,8 +163,6 @@ static const enum gpiod_flags gpio_flags[] = {
/* Give this long for the PHY to reset. */ /* Give this long for the PHY to reset. */
#define T_PHY_RESET_MS 50 #define T_PHY_RESET_MS 50
static DEFINE_MUTEX(sfp_mutex);
struct sff_data { struct sff_data {
unsigned int gpios; unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id); bool (*module_supported)(const struct sfp_eeprom_id *id);


@ -1241,6 +1241,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
{QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/ {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/


@ -1317,6 +1317,10 @@ static int if_sdio_suspend(struct device *dev)
if (priv->wol_criteria == EHS_REMOVE_WAKEUP) { if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
dev_info(dev, "Suspend without wake params -- powering down card\n"); dev_info(dev, "Suspend without wake params -- powering down card\n");
if (priv->fw_ready) { if (priv->fw_ready) {
ret = lbs_suspend(priv);
if (ret)
return ret;
priv->power_up_on_resume = true; priv->power_up_on_resume = true;
if_sdio_power_off(card); if_sdio_power_off(card);
} }


@ -318,7 +318,7 @@ int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
if (!buf->urb) if (!buf->urb)
return -ENOMEM; return -ENOMEM;
buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg), buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
gfp); gfp);
if (!buf->urb->sg) if (!buf->urb->sg)
return -ENOMEM; return -ENOMEM;
@ -525,8 +525,8 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
spin_lock_init(&q->rx_page_lock); spin_lock_init(&q->rx_page_lock);
spin_lock_init(&q->lock); spin_lock_init(&q->lock);
q->entry = devm_kzalloc(dev->dev, q->entry = devm_kcalloc(dev->dev,
MT_NUM_RX_ENTRIES * sizeof(*q->entry), MT_NUM_RX_ENTRIES, sizeof(*q->entry),
GFP_KERNEL); GFP_KERNEL);
if (!q->entry) if (!q->entry)
return -ENOMEM; return -ENOMEM;
@ -755,8 +755,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
INIT_LIST_HEAD(&q->swq); INIT_LIST_HEAD(&q->swq);
q->hw_idx = mt76_ac_to_hwq(i); q->hw_idx = mt76_ac_to_hwq(i);
q->entry = devm_kzalloc(dev->dev, q->entry = devm_kcalloc(dev->dev,
MT_NUM_TX_ENTRIES * sizeof(*q->entry), MT_NUM_TX_ENTRIES, sizeof(*q->entry),
GFP_KERNEL); GFP_KERNEL);
if (!q->entry) if (!q->entry)
return -ENOMEM; return -ENOMEM;


@ -771,6 +771,9 @@ static void __init of_unittest_parse_interrupts(void)
struct of_phandle_args args; struct of_phandle_args args;
int i, rc; int i, rc;
if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
return;
np = of_find_node_by_path("/testcase-data/interrupts/interrupts0"); np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
if (!np) { if (!np) {
pr_err("missing testcase data\n"); pr_err("missing testcase data\n");
@ -845,6 +848,9 @@ static void __init of_unittest_parse_interrupts_extended(void)
struct of_phandle_args args; struct of_phandle_args args;
int i, rc; int i, rc;
if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
return;
np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0"); np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
if (!np) { if (!np) {
pr_err("missing testcase data\n"); pr_err("missing testcase data\n");
@ -1001,15 +1007,19 @@ static void __init of_unittest_platform_populate(void)
pdev = of_find_device_by_node(np); pdev = of_find_device_by_node(np);
unittest(pdev, "device 1 creation failed\n"); unittest(pdev, "device 1 creation failed\n");
irq = platform_get_irq(pdev, 0); if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq); irq = platform_get_irq(pdev, 0);
unittest(irq == -EPROBE_DEFER,
"device deferred probe failed - %d\n", irq);
/* Test that a parsing failure does not return -EPROBE_DEFER */ /* Test that a parsing failure does not return -EPROBE_DEFER */
np = of_find_node_by_path("/testcase-data/testcase-device2"); np = of_find_node_by_path("/testcase-data/testcase-device2");
pdev = of_find_device_by_node(np); pdev = of_find_device_by_node(np);
unittest(pdev, "device 2 creation failed\n"); unittest(pdev, "device 2 creation failed\n");
irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq); unittest(irq < 0 && irq != -EPROBE_DEFER,
"device parsing error failed - %d\n", irq);
}
np = of_find_node_by_path("/testcase-data/platform-tests"); np = of_find_node_by_path("/testcase-data/platform-tests");
unittest(np, "No testcase data in device tree\n"); unittest(np, "No testcase data in device tree\n");


@ -180,11 +180,11 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
return 0; return 0;
} }
phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL); phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
if (!phy) if (!phy)
return -ENOMEM; return -ENOMEM;
link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL); link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
if (!link) if (!link)
return -ENOMEM; return -ENOMEM;


@ -636,6 +636,14 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
return err; return err;
} }
return 0;
}
static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
{
struct gpio_chip *chip = &mcp->chip;
int err;
err = gpiochip_irqchip_add_nested(chip, err = gpiochip_irqchip_add_nested(chip,
&mcp23s08_irq_chip, &mcp23s08_irq_chip,
0, 0,
@ -912,7 +920,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
} }
if (mcp->irq && mcp->irq_controller) { if (mcp->irq && mcp->irq_controller) {
ret = mcp23s08_irq_setup(mcp); ret = mcp23s08_irqchip_setup(mcp);
if (ret) if (ret)
goto fail; goto fail;
} }
@ -944,6 +952,9 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
goto fail; goto fail;
} }
if (mcp->irq)
ret = mcp23s08_irq_setup(mcp);
fail: fail:
if (ret < 0) if (ret < 0)
dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret); dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret);


@ -520,7 +520,7 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
ret = cros_ec_cmd_xfer(ec_dev, msg); ret = cros_ec_cmd_xfer(ec_dev, msg);
if (ret > 0) { if (ret > 0) {
ec_dev->event_size = ret - 1; ec_dev->event_size = ret - 1;
memcpy(&ec_dev->event_data, msg->data, ec_dev->event_size); memcpy(&ec_dev->event_data, msg->data, ret);
} }
return ret; return ret;


@ -210,11 +210,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
* Output one or more lines of text on the SCLP console (VT220 and / * Output one or more lines of text on the SCLP console (VT220 and /
* or line-mode). * or line-mode).
*/ */
void __sclp_early_printk(const char *str, unsigned int len) void __sclp_early_printk(const char *str, unsigned int len, unsigned int force)
{ {
int have_linemode, have_vt220; int have_linemode, have_vt220;
if (sclp_init_state != sclp_init_state_uninitialized) if (!force && sclp_init_state != sclp_init_state_uninitialized)
return; return;
if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0) if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
return; return;
@ -227,5 +227,10 @@ void __sclp_early_printk(const char *str, unsigned int len)
void sclp_early_printk(const char *str) void sclp_early_printk(const char *str)
{ {
__sclp_early_printk(str, strlen(str)); __sclp_early_printk(str, strlen(str), 0);
}
void sclp_early_printk_force(const char *str)
{
__sclp_early_printk(str, strlen(str), 1);
} }


@ -163,7 +163,7 @@ static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
for (i = 0; i < pat->pat_nr; i++, pa++) for (i = 0; i < pat->pat_nr; i++, pa++)
for (j = 0; j < pa->pa_nr; j++) for (j = 0; j < pa->pa_nr; j++)
if (pa->pa_iova_pfn[i] == iova_pfn) if (pa->pa_iova_pfn[j] == iova_pfn)
return true; return true;
return false; return false;


@ -22,6 +22,7 @@
#include "vfio_ccw_private.h" #include "vfio_ccw_private.h"
struct workqueue_struct *vfio_ccw_work_q; struct workqueue_struct *vfio_ccw_work_q;
struct kmem_cache *vfio_ccw_io_region;
/* /*
* Helpers * Helpers
@ -79,7 +80,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
cp_update_scsw(&private->cp, &irb->scsw); cp_update_scsw(&private->cp, &irb->scsw);
cp_free(&private->cp); cp_free(&private->cp);
} }
memcpy(private->io_region.irb_area, irb, sizeof(*irb)); memcpy(private->io_region->irb_area, irb, sizeof(*irb));
if (private->io_trigger) if (private->io_trigger)
eventfd_signal(private->io_trigger, 1); eventfd_signal(private->io_trigger, 1);
@ -114,6 +115,14 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
if (!private) if (!private)
return -ENOMEM; return -ENOMEM;
private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
GFP_KERNEL | GFP_DMA);
if (!private->io_region) {
kfree(private);
return -ENOMEM;
}
private->sch = sch; private->sch = sch;
dev_set_drvdata(&sch->dev, private); dev_set_drvdata(&sch->dev, private);
@ -139,6 +148,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
cio_disable_subchannel(sch); cio_disable_subchannel(sch);
out_free: out_free:
dev_set_drvdata(&sch->dev, NULL); dev_set_drvdata(&sch->dev, NULL);
kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private); kfree(private);
return ret; return ret;
} }
@ -153,6 +163,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
dev_set_drvdata(&sch->dev, NULL); dev_set_drvdata(&sch->dev, NULL);
kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private); kfree(private);
return 0; return 0;
@ -232,10 +243,20 @@ static int __init vfio_ccw_sch_init(void)
if (!vfio_ccw_work_q) if (!vfio_ccw_work_q)
return -ENOMEM; return -ENOMEM;
vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
sizeof(struct ccw_io_region), 0,
SLAB_ACCOUNT, 0,
sizeof(struct ccw_io_region), NULL);
if (!vfio_ccw_io_region) {
destroy_workqueue(vfio_ccw_work_q);
return -ENOMEM;
}
isc_register(VFIO_CCW_ISC); isc_register(VFIO_CCW_ISC);
ret = css_driver_register(&vfio_ccw_sch_driver); ret = css_driver_register(&vfio_ccw_sch_driver);
if (ret) { if (ret) {
isc_unregister(VFIO_CCW_ISC); isc_unregister(VFIO_CCW_ISC);
kmem_cache_destroy(vfio_ccw_io_region);
destroy_workqueue(vfio_ccw_work_q); destroy_workqueue(vfio_ccw_work_q);
} }
@ -246,6 +267,7 @@ static void __exit vfio_ccw_sch_exit(void)
{ {
css_driver_unregister(&vfio_ccw_sch_driver); css_driver_unregister(&vfio_ccw_sch_driver);
isc_unregister(VFIO_CCW_ISC); isc_unregister(VFIO_CCW_ISC);
kmem_cache_destroy(vfio_ccw_io_region);
destroy_workqueue(vfio_ccw_work_q); destroy_workqueue(vfio_ccw_work_q);
} }
module_init(vfio_ccw_sch_init); module_init(vfio_ccw_sch_init);


@ -93,13 +93,13 @@ static void fsm_io_error(struct vfio_ccw_private *private,
enum vfio_ccw_event event) enum vfio_ccw_event event)
{ {
pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state); pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
private->io_region.ret_code = -EIO; private->io_region->ret_code = -EIO;
} }
static void fsm_io_busy(struct vfio_ccw_private *private, static void fsm_io_busy(struct vfio_ccw_private *private,
enum vfio_ccw_event event) enum vfio_ccw_event event)
{ {
private->io_region.ret_code = -EBUSY; private->io_region->ret_code = -EBUSY;
} }
static void fsm_disabled_irq(struct vfio_ccw_private *private, static void fsm_disabled_irq(struct vfio_ccw_private *private,
@ -126,7 +126,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
{ {
union orb *orb; union orb *orb;
union scsw *scsw = &private->scsw; union scsw *scsw = &private->scsw;
struct ccw_io_region *io_region = &private->io_region; struct ccw_io_region *io_region = private->io_region;
struct mdev_device *mdev = private->mdev; struct mdev_device *mdev = private->mdev;
char *errstr = "request"; char *errstr = "request";


@ -174,7 +174,7 @@ static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
return -EINVAL; return -EINVAL;
private = dev_get_drvdata(mdev_parent_dev(mdev)); private = dev_get_drvdata(mdev_parent_dev(mdev));
region = &private->io_region; region = private->io_region;
if (copy_to_user(buf, (void *)region + *ppos, count)) if (copy_to_user(buf, (void *)region + *ppos, count))
return -EFAULT; return -EFAULT;
@ -196,7 +196,7 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
if (private->state != VFIO_CCW_STATE_IDLE) if (private->state != VFIO_CCW_STATE_IDLE)
return -EACCES; return -EACCES;
region = &private->io_region; region = private->io_region;
if (copy_from_user((void *)region + *ppos, buf, count)) if (copy_from_user((void *)region + *ppos, buf, count))
return -EFAULT; return -EFAULT;


@ -41,7 +41,7 @@ struct vfio_ccw_private {
atomic_t avail; atomic_t avail;
struct mdev_device *mdev; struct mdev_device *mdev;
struct notifier_block nb; struct notifier_block nb;
struct ccw_io_region io_region; struct ccw_io_region *io_region;
struct channel_program cp; struct channel_program cp;
struct irb irb; struct irb irb;


@ -715,22 +715,13 @@ static struct miscdevice openprom_dev = {
static int __init openprom_init(void) static int __init openprom_init(void)
{ {
struct device_node *dp;
int err; int err;
err = misc_register(&openprom_dev); err = misc_register(&openprom_dev);
if (err) if (err)
return err; return err;
dp = of_find_node_by_path("/"); options_node = of_get_child_by_name(of_find_node_by_path("/"), "options");
dp = dp->child;
while (dp) {
if (!strcmp(dp->name, "options"))
break;
dp = dp->sibling;
}
options_node = dp;
if (!options_node) { if (!options_node) {
misc_deregister(&openprom_dev); misc_deregister(&openprom_dev);
return -EIO; return -EIO;


@ -689,8 +689,7 @@ static int dax_open(struct inode *inode, struct file *f)
alloc_error: alloc_error:
kfree(ctx->ccb_buf); kfree(ctx->ccb_buf);
done: done:
if (ctx != NULL) kfree(ctx);
kfree(ctx);
return -ENOMEM; return -ENOMEM;
} }


@ -2472,6 +2472,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
/* start qedi context */ /* start qedi context */
spin_lock_init(&qedi->hba_lock); spin_lock_init(&qedi->hba_lock);
spin_lock_init(&qedi->task_idx_lock); spin_lock_init(&qedi->task_idx_lock);
mutex_init(&qedi->stats_lock);
} }
qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi); qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
qedi_ops->ll2->start(qedi->cdev, &params); qedi_ops->ll2->start(qedi->cdev, &params);


@ -120,6 +120,7 @@ static void bm_set_memory(u64 ba, u32 size)
*/ */
static dma_addr_t fbpr_a; static dma_addr_t fbpr_a;
static size_t fbpr_sz; static size_t fbpr_sz;
static int __bman_probed;
static int bman_fbpr(struct reserved_mem *rmem) static int bman_fbpr(struct reserved_mem *rmem)
{ {
@ -166,6 +167,12 @@ static irqreturn_t bman_isr(int irq, void *ptr)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
int bman_is_probed(void)
{
return __bman_probed;
}
EXPORT_SYMBOL_GPL(bman_is_probed);
static int fsl_bman_probe(struct platform_device *pdev) static int fsl_bman_probe(struct platform_device *pdev)
{ {
int ret, err_irq; int ret, err_irq;
@ -175,6 +182,8 @@ static int fsl_bman_probe(struct platform_device *pdev)
u16 id, bm_pool_cnt; u16 id, bm_pool_cnt;
u8 major, minor; u8 major, minor;
__bman_probed = -1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) { if (!res) {
dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n", dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
@ -255,6 +264,8 @@ static int fsl_bman_probe(struct platform_device *pdev)
return ret; return ret;
} }
__bman_probed = 1;
return 0; return 0;
}; };


@ -273,6 +273,7 @@ static const struct qman_error_info_mdata error_mdata[] = {
static u32 __iomem *qm_ccsr_start; static u32 __iomem *qm_ccsr_start;
/* A SDQCR mask comprising all the available/visible pool channels */ /* A SDQCR mask comprising all the available/visible pool channels */
static u32 qm_pools_sdqcr; static u32 qm_pools_sdqcr;
static int __qman_probed;
static inline u32 qm_ccsr_in(u32 offset) static inline u32 qm_ccsr_in(u32 offset)
{ {
@ -686,6 +687,12 @@ static int qman_resource_init(struct device *dev)
return 0; return 0;
} }
int qman_is_probed(void)
{
return __qman_probed;
}
EXPORT_SYMBOL_GPL(qman_is_probed);
static int fsl_qman_probe(struct platform_device *pdev) static int fsl_qman_probe(struct platform_device *pdev)
{ {
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
@ -695,6 +702,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
u16 id; u16 id;
u8 major, minor; u8 major, minor;
__qman_probed = -1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) { if (!res) {
dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n", dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
@ -828,6 +837,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
if (ret) if (ret)
return ret; return ret;
__qman_probed = 1;
return 0; return 0;
} }


@ -227,6 +227,14 @@ static int qman_portal_probe(struct platform_device *pdev)
int irq, cpu, err; int irq, cpu, err;
u32 val; u32 val;
err = qman_is_probed();
if (!err)
return -EPROBE_DEFER;
if (err < 0) {
dev_err(&pdev->dev, "failing probe due to qman probe error\n");
return -ENODEV;
}
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
if (!pcfg) if (!pcfg)
return -ENOMEM; return -ENOMEM;


@ -738,14 +738,6 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
u8 link, depth; u8 link, depth;
u64 route; u64 route;
/*
* After NVM upgrade adding root switch device fails because we
* initiated reset. During that time ICM might still send
* XDomain connected message which we ignore here.
*/
if (!tb->root_switch)
return;
link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
ICM_LINK_INFO_DEPTH_SHIFT; ICM_LINK_INFO_DEPTH_SHIFT;
@ -1037,14 +1029,6 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
if (pkg->hdr.packet_id) if (pkg->hdr.packet_id)
return; return;
/*
* After NVM upgrade adding root switch device fails because we
* initiated reset. During that time ICM might still send device
* connected message which we ignore here.
*/
if (!tb->root_switch)
return;
route = get_route(pkg->route_hi, pkg->route_lo); route = get_route(pkg->route_hi, pkg->route_lo);
authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
@ -1408,19 +1392,26 @@ static void icm_handle_notification(struct work_struct *work)
mutex_lock(&tb->lock); mutex_lock(&tb->lock);
switch (n->pkg->code) { /*
case ICM_EVENT_DEVICE_CONNECTED: * When the domain is stopped we flush its workqueue but before
icm->device_connected(tb, n->pkg); * that the root switch is removed. In that case we should treat
break; * the queued events as being canceled.
case ICM_EVENT_DEVICE_DISCONNECTED: */
icm->device_disconnected(tb, n->pkg); if (tb->root_switch) {
break; switch (n->pkg->code) {
case ICM_EVENT_XDOMAIN_CONNECTED: case ICM_EVENT_DEVICE_CONNECTED:
icm->xdomain_connected(tb, n->pkg); icm->device_connected(tb, n->pkg);
break; break;
case ICM_EVENT_XDOMAIN_DISCONNECTED: case ICM_EVENT_DEVICE_DISCONNECTED:
icm->xdomain_disconnected(tb, n->pkg); icm->device_disconnected(tb, n->pkg);
break; break;
case ICM_EVENT_XDOMAIN_CONNECTED:
icm->xdomain_connected(tb, n->pkg);
break;
case ICM_EVENT_XDOMAIN_DISCONNECTED:
icm->xdomain_disconnected(tb, n->pkg);
break;
}
} }
mutex_unlock(&tb->lock); mutex_unlock(&tb->lock);


@ -1191,5 +1191,5 @@ static void __exit nhi_unload(void)
tb_domain_exit(); tb_domain_exit();
} }
fs_initcall(nhi_init); rootfs_initcall(nhi_init);
module_exit(nhi_unload); module_exit(nhi_unload);


@ -630,10 +630,6 @@ static int dw8250_probe(struct platform_device *pdev)
if (!data->skip_autocfg) if (!data->skip_autocfg)
dw8250_setup_port(p); dw8250_setup_port(p);
#ifdef CONFIG_PM
uart.capabilities |= UART_CAP_RPM;
#endif
/* If we have a valid fifosize, try hooking up DMA */ /* If we have a valid fifosize, try hooking up DMA */
if (p->fifosize) { if (p->fifosize) {
data->dma.rxconf.src_maxburst = p->fifosize / 4; data->dma.rxconf.src_maxburst = p->fifosize / 4;


@ -868,8 +868,8 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
geni_se_init(&port->se, port->rx_wm, port->rx_rfr); geni_se_init(&port->se, port->rx_wm, port->rx_rfr);
geni_se_select_mode(&port->se, port->xfer_mode); geni_se_select_mode(&port->se, port->xfer_mode);
if (!uart_console(uport)) { if (!uart_console(uport)) {
port->rx_fifo = devm_kzalloc(uport->dev, port->rx_fifo = devm_kcalloc(uport->dev,
port->rx_fifo_depth * sizeof(u32), GFP_KERNEL); port->rx_fifo_depth, sizeof(u32), GFP_KERNEL);
if (!port->rx_fifo) if (!port->rx_fifo)
return -ENOMEM; return -ENOMEM;
} }


@ -291,6 +291,33 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
.error_clear = SCIF_ERROR_CLEAR, .error_clear = SCIF_ERROR_CLEAR,
}, },
/*
* The "SCIFA" that is in RZ/T and RZ/A2.
* It looks like a normal SCIF with FIFO data, but with a
* compressed address space. Also, the break out of interrupts
* are different: ERI/BRI, RXI, TXI, TEI, DRI.
*/
[SCIx_RZ_SCIFA_REGTYPE] = {
.regs = {
[SCSMR] = { 0x00, 16 },
[SCBRR] = { 0x02, 8 },
[SCSCR] = { 0x04, 16 },
[SCxTDR] = { 0x06, 8 },
[SCxSR] = { 0x08, 16 },
[SCxRDR] = { 0x0A, 8 },
[SCFCR] = { 0x0C, 16 },
[SCFDR] = { 0x0E, 16 },
[SCSPTR] = { 0x10, 16 },
[SCLSR] = { 0x12, 16 },
},
.fifosize = 16,
.overrun_reg = SCLSR,
.overrun_mask = SCLSR_ORER,
.sampling_rate_mask = SCI_SR(32),
.error_mask = SCIF_DEFAULT_ERROR_MASK,
.error_clear = SCIF_ERROR_CLEAR,
},
/* /*
* Common SH-3 SCIF definitions. * Common SH-3 SCIF definitions.
*/ */
@ -319,15 +346,15 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
[SCIx_SH4_SCIF_REGTYPE] = { [SCIx_SH4_SCIF_REGTYPE] = {
.regs = { .regs = {
[SCSMR] = { 0x00, 16 }, [SCSMR] = { 0x00, 16 },
[SCBRR] = { 0x02, 8 }, [SCBRR] = { 0x04, 8 },
[SCSCR] = { 0x04, 16 }, [SCSCR] = { 0x08, 16 },
[SCxTDR] = { 0x06, 8 }, [SCxTDR] = { 0x0c, 8 },
[SCxSR] = { 0x08, 16 }, [SCxSR] = { 0x10, 16 },
[SCxRDR] = { 0x0a, 8 }, [SCxRDR] = { 0x14, 8 },
[SCFCR] = { 0x0c, 16 }, [SCFCR] = { 0x18, 16 },
[SCFDR] = { 0x0e, 16 }, [SCFDR] = { 0x1c, 16 },
[SCSPTR] = { 0x10, 16 }, [SCSPTR] = { 0x20, 16 },
[SCLSR] = { 0x12, 16 }, [SCLSR] = { 0x24, 16 },
}, },
.fifosize = 16, .fifosize = 16,
.overrun_reg = SCLSR, .overrun_reg = SCLSR,
@ -2810,7 +2837,7 @@ static int sci_init_single(struct platform_device *dev,
{ {
struct uart_port *port = &sci_port->port; struct uart_port *port = &sci_port->port;
const struct resource *res; const struct resource *res;
unsigned int i, regtype; unsigned int i;
int ret; int ret;
sci_port->cfg = p; sci_port->cfg = p;
@ -2847,7 +2874,6 @@ static int sci_init_single(struct platform_device *dev,
if (unlikely(sci_port->params == NULL)) if (unlikely(sci_port->params == NULL))
return -EINVAL; return -EINVAL;
regtype = sci_port->params - sci_port_params;
switch (p->type) { switch (p->type) {
case PORT_SCIFB: case PORT_SCIFB:
sci_port->rx_trigger = 48; sci_port->rx_trigger = 48;
@ -2902,10 +2928,6 @@ static int sci_init_single(struct platform_device *dev,
port->regshift = 1; port->regshift = 1;
} }
if (regtype == SCIx_SH4_SCIF_REGTYPE)
if (sci_port->reg_size >= 0x20)
port->regshift = 1;
/* /*
* The UART port needs an IRQ value, so we peg this to the RX IRQ * The UART port needs an IRQ value, so we peg this to the RX IRQ
* for the multi-IRQ ports, which is where we are primarily * for the multi-IRQ ports, which is where we are primarily
@ -3110,6 +3132,10 @@ static const struct of_device_id of_sci_match[] = {
.compatible = "renesas,scif-r7s72100", .compatible = "renesas,scif-r7s72100",
.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE), .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE),
}, },
{
.compatible = "renesas,scif-r7s9210",
.data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE),
},
/* Family-specific types */ /* Family-specific types */
{ {
.compatible = "renesas,rcar-gen1-scif", .compatible = "renesas,rcar-gen1-scif",


@ -1514,6 +1514,7 @@ static void acm_disconnect(struct usb_interface *intf)
{ {
struct acm *acm = usb_get_intfdata(intf); struct acm *acm = usb_get_intfdata(intf);
struct tty_struct *tty; struct tty_struct *tty;
int i;
/* sibling interface is already cleaning up */ /* sibling interface is already cleaning up */
if (!acm) if (!acm)
@ -1544,6 +1545,11 @@ static void acm_disconnect(struct usb_interface *intf)
tty_unregister_device(acm_tty_driver, acm->minor); tty_unregister_device(acm_tty_driver, acm->minor);
usb_free_urb(acm->ctrlurb);
for (i = 0; i < ACM_NW; i++)
usb_free_urb(acm->wb[i].urb);
for (i = 0; i < acm->rx_buflimit; i++)
usb_free_urb(acm->read_urbs[i]);
acm_write_buffers_free(acm); acm_write_buffers_free(acm);
usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma); usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
acm_read_buffers_free(acm); acm_read_buffers_free(acm);


@ -642,10 +642,10 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev)
xhci_mtk_host_enable(mtk); xhci_mtk_host_enable(mtk);
xhci_dbg(xhci, "%s: restart port polling\n", __func__); xhci_dbg(xhci, "%s: restart port polling\n", __func__);
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
usb_hcd_poll_rh_status(hcd);
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd); usb_hcd_poll_rh_status(xhci->shared_hcd);
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
usb_hcd_poll_rh_status(hcd);
return 0; return 0;
} }


@ -185,6 +185,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
} }
if (pdev->vendor == PCI_VENDOR_ID_INTEL && if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
xhci->quirks |= XHCI_MISSING_CAS; xhci->quirks |= XHCI_MISSING_CAS;


@ -561,6 +561,9 @@ static void option_instat_callback(struct urb *urb);
/* Interface is reserved */ /* Interface is reserved */
#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0) #define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)
/* Interface must have two endpoints */
#define NUMEP2 BIT(16)
static const struct usb_device_id option_ids[] = { static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@ -1081,8 +1084,9 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) }, .driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) }, .driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06), { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) | RSVD(5) }, .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@ -1999,6 +2003,13 @@ static int option_probe(struct usb_serial *serial,
if (device_flags & RSVD(iface_desc->bInterfaceNumber)) if (device_flags & RSVD(iface_desc->bInterfaceNumber))
return -ENODEV; return -ENODEV;
/*
* Allow matching on bNumEndpoints for devices whose interface numbers
* can change (e.g. Quectel EP06).
*/
if (device_flags & NUMEP2 && iface_desc->bNumEndpoints != 2)
return -ENODEV;
/* Store the device flags so we can use them during attach. */ /* Store the device flags so we can use them during attach. */
usb_set_serial_data(serial, (void *)device_flags); usb_set_serial_data(serial, (void *)device_flags);


@ -84,7 +84,8 @@ DEVICE(moto_modem, MOTO_IDS);
/* Motorola Tetra driver */ /* Motorola Tetra driver */
#define MOTOROLA_TETRA_IDS() \ #define MOTOROLA_TETRA_IDS() \
{ USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
{ USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
/* Novatel Wireless GPS driver */ /* Novatel Wireless GPS driver */


@ -333,6 +333,8 @@ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */
extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll); extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par); extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
extern const u8 aty_postdividers[8];
/* /*
* Hardware cursor support * Hardware cursor support
@ -359,7 +361,6 @@ static inline void wait_for_idle(struct atyfb_par *par)
extern void aty_reset_engine(const struct atyfb_par *par); extern void aty_reset_engine(const struct atyfb_par *par);
extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info); extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area); void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);


@ -3087,17 +3087,18 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
/* /*
* PLL Reference Divider M: * PLL Reference Divider M:
*/ */
M = pll_regs[2]; M = pll_regs[PLL_REF_DIV];
/* /*
* PLL Feedback Divider N (Dependent on CLOCK_CNTL): * PLL Feedback Divider N (Dependent on CLOCK_CNTL):
*/ */
N = pll_regs[7 + (clock_cntl & 3)]; N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
/* /*
* PLL Post Divider P (Dependent on CLOCK_CNTL): * PLL Post Divider P (Dependent on CLOCK_CNTL):
*/ */
P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1)); P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
/* /*
* PLL Divider Q: * PLL Divider Q:


@ -115,7 +115,7 @@ static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
*/ */
#define Maximum_DSP_PRECISION 7 #define Maximum_DSP_PRECISION 7
static u8 postdividers[] = {1,2,4,8,3}; const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll) static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
{ {
@ -222,7 +222,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
pll->vclk_post_div += (q < 64*8); pll->vclk_post_div += (q < 64*8);
pll->vclk_post_div += (q < 32*8); pll->vclk_post_div += (q < 32*8);
} }
pll->vclk_post_div_real = postdividers[pll->vclk_post_div]; pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
// pll->vclk_post_div <<= 6; // pll->vclk_post_div <<= 6;
pll->vclk_fb_div = q * pll->vclk_post_div_real / 8; pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
pllvclk = (1000000 * 2 * pll->vclk_fb_div) / pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
@ -513,7 +513,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
u8 mclk_fb_div, pll_ext_cntl; u8 mclk_fb_div, pll_ext_cntl;
pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par); pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par); pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07]; pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par); mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
if (pll_ext_cntl & PLL_MFB_TIMES_4_2B) if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
mclk_fb_div <<= 1; mclk_fb_div <<= 1;
@ -535,7 +535,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
xpost_div += (q < 64*8); xpost_div += (q < 64*8);
xpost_div += (q < 32*8); xpost_div += (q < 32*8);
} }
pll->ct.xclk_post_div_real = postdividers[xpost_div]; pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8; pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
#ifdef CONFIG_PPC #ifdef CONFIG_PPC
@ -584,7 +584,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
mpost_div += (q < 64*8); mpost_div += (q < 64*8);
mpost_div += (q < 32*8); mpost_div += (q < 32*8);
} }
sclk_post_div_real = postdividers[mpost_div]; sclk_post_div_real = aty_postdividers[mpost_div];
pll->ct.sclk_fb_div = q * sclk_post_div_real / 8; pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
pll->ct.spll_cntl2 = mpost_div << 4; pll->ct.spll_cntl2 = mpost_div << 4;
#ifdef DEBUG #ifdef DEBUG


@ -514,6 +514,8 @@ static int afs_alloc_anon_key(struct afs_cell *cell)
*/ */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell) static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{ {
struct hlist_node **p;
struct afs_cell *pcell;
int ret; int ret;
if (!cell->anonymous_key) { if (!cell->anonymous_key) {
@ -534,7 +536,18 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
return ret; return ret;
mutex_lock(&net->proc_cells_lock); mutex_lock(&net->proc_cells_lock);
list_add_tail(&cell->proc_link, &net->proc_cells); for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
pcell = hlist_entry(*p, struct afs_cell, proc_link);
if (strcmp(cell->name, pcell->name) < 0)
break;
}
cell->proc_link.pprev = p;
cell->proc_link.next = *p;
rcu_assign_pointer(*p, &cell->proc_link.next);
if (cell->proc_link.next)
cell->proc_link.next->pprev = &cell->proc_link.next;
afs_dynroot_mkdir(net, cell); afs_dynroot_mkdir(net, cell);
mutex_unlock(&net->proc_cells_lock); mutex_unlock(&net->proc_cells_lock);
return 0; return 0;
@ -550,7 +563,7 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
afs_proc_cell_remove(cell); afs_proc_cell_remove(cell);
mutex_lock(&net->proc_cells_lock); mutex_lock(&net->proc_cells_lock);
list_del_init(&cell->proc_link); hlist_del_rcu(&cell->proc_link);
afs_dynroot_rmdir(net, cell); afs_dynroot_rmdir(net, cell);
mutex_unlock(&net->proc_cells_lock); mutex_unlock(&net->proc_cells_lock);

View File

@ -265,7 +265,7 @@ int afs_dynroot_populate(struct super_block *sb)
return -ERESTARTSYS; return -ERESTARTSYS;
net->dynroot_sb = sb; net->dynroot_sb = sb;
list_for_each_entry(cell, &net->proc_cells, proc_link) { hlist_for_each_entry(cell, &net->proc_cells, proc_link) {
ret = afs_dynroot_mkdir(net, cell); ret = afs_dynroot_mkdir(net, cell);
if (ret < 0) if (ret < 0)
goto error; goto error;

Some files were not shown because too many files have changed in this diff Show More