Linux 5.15

-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmF/AjYeHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiG1hkIAJ6sFDbvb4M4LMwf
Slh2NVL9o5sLMBDzVwnVlyMSKDbMn1WBKreGssaLgZjGDc74lxsdSmw5l9MZm0JN
xlq95Q6XFiuu+0qDHPWwfDz3JFO4TqW2ZLLPWk9NnkNbRXqccSrlVRi1RpgE1t3/
NUtS8CQLu6A2BYMc6mkk3aV6IwSNKOkWbM5eBHSvU4j8B6lLbNQop0AfO/wyY1xB
U6LiVE1RpN/b7Yv+75ITtNzuHzVIBx6305FvSnOlKbMKKvIClt96Vd2OeuoEkK+6
wGU8JraB1+fc0GckAhynNrjWQWdvi0MAhFWWEJxjS20OGcV1rXDduNfkVNauO1Zn
+dNyJ3s=
=g9fz
-----END PGP SIGNATURE-----

BackMerge tag 'v5.15' into drm-next

I got a drm-fixes which had some 5.15 stuff in it, so to avoid the mess
just backmerge here.

Linux 5.15

Signed-off-by: Dave Airlie <airlied@redhat.com>

commit 447212bb4f
@@ -104,6 +104,7 @@ Code  Seq#    Include File                                   Comments
 '8'   all                                                    SNP8023 advanced NIC card
                                                              <mailto:mcr@solidum.com>
 ';'   64-7F  linux/vfio.h
+'='   00-3f  uapi/linux/ptp_clock.h                          <mailto:richardcochran@gmail.com>
 '@'   00-0F  linux/radeonfb.h                                conflict!
 '@'   00-0F  drivers/video/aty/aty128fb.c                    conflict!
 'A'   00-1F  linux/apm_bios.h                                conflict!
 MAINTAINERS | 14 +++++++++++++-
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5459,6 +5459,19 @@ F: include/net/devlink.h
 F:	include/uapi/linux/devlink.h
 F:	net/core/devlink.c
 
+DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT
+M:	Christoph Niedermaier <cniedermaier@dh-electronics.com>
+L:	kernel@dh-electronics.com
+S:	Maintained
+F:	arch/arm/boot/dts/imx6*-dhcom-*
+
+DH ELECTRONICS STM32MP1 DHCOM/DHCOR BOARD SUPPORT
+M:	Marek Vasut <marex@denx.de>
+L:	kernel@dh-electronics.com
+S:	Maintained
+F:	arch/arm/boot/dts/stm32mp1*-dhcom-*
+F:	arch/arm/boot/dts/stm32mp1*-dhcor-*
+
 DIALOG SEMICONDUCTOR DRIVERS
 M:	Support Opensource <support.opensource@diasemi.com>
 S:	Supported
@@ -11292,7 +11305,6 @@ F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
 F:	drivers/net/ethernet/marvell/octeontx2/af/
 
 MARVELL PRESTERA ETHERNET SWITCH DRIVER
-M:	Vadym Kochan <vkochan@marvell.com>
 M:	Taras Chornyi <tchornyi@marvell.com>
 S:	Supported
 W:	https://github.com/Marvell-switching/switchdev-prestera
 Makefile | 4 ++--
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
-NAME = Opossums on Parade
+EXTRAVERSION =
+NAME = Trick or Treat
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -112,7 +112,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&gmac_rgmii_pins>;
 	phy-handle = <&phy1>;
-	phy-mode = "rgmii";
+	phy-mode = "rgmii-id";
 	status = "okay";
 };
 
@@ -75,7 +75,7 @@
 	pinctrl-0 = <&emac_rgmii_pins>;
 	phy-supply = <&reg_gmac_3v3>;
 	phy-handle = <&ext_rgmii_phy>;
-	phy-mode = "rgmii";
+	phy-mode = "rgmii-id";
 	status = "okay";
 };
 
@@ -70,7 +70,9 @@
 	regulator-name = "rst-usb-eth2";
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usb_eth2>;
-	gpio = <&gpio3 2 GPIO_ACTIVE_LOW>;
+	gpio = <&gpio3 2 GPIO_ACTIVE_HIGH>;
+	enable-active-high;
+	regulator-always-on;
 };
 
 reg_vdd_5v: regulator-5v {
@@ -95,7 +97,7 @@
 	clocks = <&osc_can>;
 	interrupt-parent = <&gpio4>;
 	interrupts = <28 IRQ_TYPE_EDGE_FALLING>;
-	spi-max-frequency = <100000>;
+	spi-max-frequency = <10000000>;
 	vdd-supply = <&reg_vdd_3v3>;
 	xceiver-supply = <&reg_vdd_5v>;
 };
@@ -111,7 +113,7 @@
 &fec1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
-	phy-connection-type = "rgmii";
+	phy-connection-type = "rgmii-rxid";
 	phy-handle = <&ethphy>;
 	status = "okay";
 
@@ -91,10 +91,12 @@
 reg_vdd_soc: BUCK1 {
 	regulator-name = "buck1";
 	regulator-min-microvolt = <800000>;
-	regulator-max-microvolt = <900000>;
+	regulator-max-microvolt = <850000>;
 	regulator-boot-on;
 	regulator-always-on;
 	regulator-ramp-delay = <3125>;
+	nxp,dvs-run-voltage = <850000>;
+	nxp,dvs-standby-voltage = <800000>;
 };
 
 reg_vdd_arm: BUCK2 {
@@ -111,7 +113,7 @@
 reg_vdd_dram: BUCK3 {
 	regulator-name = "buck3";
 	regulator-min-microvolt = <850000>;
-	regulator-max-microvolt = <900000>;
+	regulator-max-microvolt = <950000>;
 	regulator-boot-on;
 	regulator-always-on;
 };
@@ -150,7 +152,7 @@
 
 reg_vdd_snvs: LDO2 {
 	regulator-name = "ldo2";
-	regulator-min-microvolt = <850000>;
+	regulator-min-microvolt = <800000>;
 	regulator-max-microvolt = <900000>;
 	regulator-boot-on;
 	regulator-always-on;
@@ -2590,9 +2590,10 @@
 	power-domains = <&dispcc MDSS_GDSC>;
 
 	clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+		 <&gcc GCC_DISP_HF_AXI_CLK>,
 		 <&gcc GCC_DISP_SF_AXI_CLK>,
 		 <&dispcc DISP_CC_MDSS_MDP_CLK>;
-	clock-names = "iface", "nrt_bus", "core";
+	clock-names = "iface", "bus", "nrt_bus", "core";
 
 	assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>;
 	assigned-clock-rates = <460000000>;
@@ -1136,6 +1136,11 @@ out:
 	return prog;
 }
 
+u64 bpf_jit_alloc_exec_limit(void)
+{
+	return BPF_JIT_REGION_SIZE;
+}
+
 void *bpf_jit_alloc_exec(unsigned long size)
 {
 	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
@@ -6,7 +6,7 @@
 
 #ifndef CONFIG_DYNAMIC_FTRACE
 extern void (*ftrace_trace_function)(unsigned long, unsigned long,
-				     struct ftrace_ops*, struct pt_regs*);
+				     struct ftrace_ops*, struct ftrace_regs*);
 extern void ftrace_graph_caller(void);
 
 noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
@@ -37,6 +37,7 @@ config NIOS2_DTB_PHYS_ADDR
 
 config NIOS2_DTB_SOURCE_BOOL
 	bool "Compile and link device tree into kernel image"
+	depends on !COMPILE_TEST
 	help
 	  This allows you to specify a dts (device tree source) file
 	  which will be compiled and linked into the kernel image.
@@ -1302,6 +1302,12 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	struct property *default_win;
 	int reset_win_ext;
 
+	/* DDW + IOMMU on single window may fail if there is any allocation */
+	if (iommu_table_in_use(tbl)) {
+		dev_warn(&dev->dev, "current IOMMU table in use, can't be replaced.\n");
+		goto out_failed;
+	}
+
 	default_win = of_find_property(pdn, "ibm,dma-window", NULL);
 	if (!default_win)
 		goto out_failed;
@@ -1356,12 +1362,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 			query.largest_available_block,
 			1ULL << page_shift);
 
-		/* DDW + IOMMU on single window may fail if there is any allocation */
-		if (default_win_removed && iommu_table_in_use(tbl)) {
-			dev_dbg(&dev->dev, "current IOMMU table in use, can't be replaced.\n");
-			goto out_failed;
-		}
-
 		len = order_base_2(query.largest_available_block << page_shift);
 		win_name = DMA64_PROPNAME;
 	} else {
|
|||||||
} else {
|
} else {
|
||||||
struct iommu_table *newtbl;
|
struct iommu_table *newtbl;
|
||||||
int i;
|
int i;
|
||||||
|
unsigned long start = 0, end = 0;
|
||||||
|
|
||||||
for (i = 0; i < ARRAY_SIZE(pci->phb->mem_resources); i++) {
|
for (i = 0; i < ARRAY_SIZE(pci->phb->mem_resources); i++) {
|
||||||
const unsigned long mask = IORESOURCE_MEM_64 | IORESOURCE_MEM;
|
const unsigned long mask = IORESOURCE_MEM_64 | IORESOURCE_MEM;
|
||||||
|
|
||||||
/* Look for MMIO32 */
|
/* Look for MMIO32 */
|
||||||
if ((pci->phb->mem_resources[i].flags & mask) == IORESOURCE_MEM)
|
if ((pci->phb->mem_resources[i].flags & mask) == IORESOURCE_MEM) {
|
||||||
|
start = pci->phb->mem_resources[i].start;
|
||||||
|
end = pci->phb->mem_resources[i].end;
|
||||||
break;
|
break;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (i == ARRAY_SIZE(pci->phb->mem_resources))
|
|
||||||
goto out_del_list;
|
|
||||||
|
|
||||||
/* New table for using DDW instead of the default DMA window */
|
/* New table for using DDW instead of the default DMA window */
|
||||||
newtbl = iommu_pseries_alloc_table(pci->phb->node);
|
newtbl = iommu_pseries_alloc_table(pci->phb->node);
|
||||||
if (!newtbl) {
|
if (!newtbl) {
|
||||||
@ -1432,15 +1433,15 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
|
|||||||
|
|
||||||
iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn, win_addr,
|
iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn, win_addr,
|
||||||
1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops);
|
1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops);
|
||||||
iommu_init_table(newtbl, pci->phb->node, pci->phb->mem_resources[i].start,
|
iommu_init_table(newtbl, pci->phb->node, start, end);
|
||||||
pci->phb->mem_resources[i].end);
|
|
||||||
|
|
||||||
pci->table_group->tables[1] = newtbl;
|
pci->table_group->tables[1] = newtbl;
|
||||||
|
|
||||||
/* Keep default DMA window stuct if removed */
|
/* Keep default DMA window stuct if removed */
|
||||||
if (default_win_removed) {
|
if (default_win_removed) {
|
||||||
tbl->it_size = 0;
|
tbl->it_size = 0;
|
||||||
kfree(tbl->it_map);
|
vfree(tbl->it_map);
|
||||||
|
tbl->it_map = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
set_iommu_table_base(&dev->dev, newtbl);
|
set_iommu_table_base(&dev->dev, newtbl);
|
||||||
|
@@ -163,6 +163,12 @@ config PAGE_OFFSET
 	default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
 	default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
 
+config KASAN_SHADOW_OFFSET
+	hex
+	depends on KASAN_GENERIC
+	default 0xdfffffc800000000 if 64BIT
+	default 0xffffffff if 32BIT
+
 config ARCH_FLATMEM_ENABLE
 	def_bool !NUMA
 
@@ -30,8 +30,7 @@
 #define KASAN_SHADOW_SIZE	(UL(1) << ((CONFIG_VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
 #define KASAN_SHADOW_START	KERN_VIRT_START
 #define KASAN_SHADOW_END	(KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
-#define KASAN_SHADOW_OFFSET	(KASAN_SHADOW_END - (1ULL << \
-					(64 - KASAN_SHADOW_SCALE_SHIFT)))
+#define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
 
 void kasan_init(void);
 asmlinkage void kasan_early_init(void);
@@ -193,6 +193,7 @@ setup_trap_vector:
 	csrw CSR_SCRATCH, zero
 	ret
 
+.align 2
 .Lsecondary_park:
 	/* We lack SMP support or have too many harts, so park this hart */
 	wfi
@@ -17,6 +17,9 @@ asmlinkage void __init kasan_early_init(void)
 	uintptr_t i;
 	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);
 
+	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
+		     KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
+
 	for (i = 0; i < PTRS_PER_PTE; ++i)
 		set_pte(kasan_early_shadow_pte + i,
 			mk_pte(virt_to_page(kasan_early_shadow_page),
@@ -172,21 +175,10 @@ void __init kasan_init(void)
 	phys_addr_t p_start, p_end;
 	u64 i;
 
-	/*
-	 * Populate all kernel virtual address space with kasan_early_shadow_page
-	 * except for the linear mapping and the modules/kernel/BPF mapping.
-	 */
-	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
-				    (void *)kasan_mem_to_shadow((void *)
-								VMEMMAP_END));
 	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
 		kasan_shallow_populate(
 			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
 			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
-	else
-		kasan_populate_early_shadow(
-			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
-			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
 
 	/* Populate the linear mapping */
 	for_each_mem_range(i, &p_start, &p_end) {
@@ -125,7 +125,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	if (i == NR_JIT_ITERATIONS) {
 		pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
-		bpf_jit_binary_free(jit_data->header);
+		if (jit_data->header)
+			bpf_jit_binary_free(jit_data->header);
 		prog = orig_prog;
 		goto out_offset;
 	}
@@ -166,6 +167,11 @@ out:
 	return prog;
 }
 
+u64 bpf_jit_alloc_exec_limit(void)
+{
+	return BPF_JIT_REGION_SIZE;
+}
+
 void *bpf_jit_alloc_exec(unsigned long size)
 {
 	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
|
|||||||
int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
|
int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
|
||||||
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
|
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
|
||||||
struct kvm_vcpu *vcpu;
|
struct kvm_vcpu *vcpu;
|
||||||
|
u8 vcpu_isc_mask;
|
||||||
|
|
||||||
for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
|
for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
|
||||||
vcpu = kvm_get_vcpu(kvm, vcpu_idx);
|
vcpu = kvm_get_vcpu(kvm, vcpu_idx);
|
||||||
if (psw_ioint_disabled(vcpu))
|
if (psw_ioint_disabled(vcpu))
|
||||||
continue;
|
continue;
|
||||||
deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
|
vcpu_isc_mask = (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
|
||||||
if (deliverable_mask) {
|
if (deliverable_mask & vcpu_isc_mask) {
|
||||||
/* lately kicked but not yet running */
|
/* lately kicked but not yet running */
|
||||||
if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
|
if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
|
||||||
return;
|
return;
|
||||||
|
@@ -3363,6 +3363,7 @@ out_free_sie_block:
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
+	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
 	return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
@@ -78,7 +78,7 @@
 	vpxor tmp0, x, x;
 
 
-.section	.rodata.cst164, "aM", @progbits, 164
+.section	.rodata.cst16, "aM", @progbits, 16
 .align 16
 
 /*
@@ -133,6 +133,10 @@
 .L0f0f0f0f:
 	.long 0x0f0f0f0f
 
+/* 12 bytes, only for padding */
+.Lpadding_deadbeef:
+	.long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef
+
 .text
 .align 16
 
@@ -93,7 +93,7 @@
 	vpxor tmp0, x, x;
 
 
-.section	.rodata.cst164, "aM", @progbits, 164
+.section	.rodata.cst16, "aM", @progbits, 16
 .align 16
 
 /*
@@ -148,6 +148,10 @@
 .L0f0f0f0f:
 	.long 0x0f0f0f0f
 
+/* 12 bytes, only for padding */
+.Lpadding_deadbeef:
+	.long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef
+
 .text
 .align 16
 
@@ -1098,7 +1098,7 @@ struct kvm_arch {
 	u64 cur_tsc_generation;
 	int nr_vcpus_matched_tsc;
 
-	spinlock_t pvclock_gtod_sync_lock;
+	raw_spinlock_t pvclock_gtod_sync_lock;
 	bool use_master_clock;
 	u64 master_kernel_ns;
 	u64 master_cycle_now;
@@ -2591,11 +2591,20 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 
 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 {
-	if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
+	int count;
+	int bytes;
+
+	if (svm->vmcb->control.exit_info_2 > INT_MAX)
 		return -EINVAL;
 
-	return kvm_sev_es_string_io(&svm->vcpu, size, port,
-				    svm->ghcb_sa, svm->ghcb_sa_len / size, in);
+	count = svm->vmcb->control.exit_info_2;
+	if (unlikely(check_mul_overflow(count, size, &bytes)))
+		return -EINVAL;
+
+	if (!setup_vmgexit_scratch(svm, in, bytes))
+		return -EINVAL;
+
+	return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->ghcb_sa, count, in);
 }
 
 void sev_es_init_vmcb(struct vcpu_svm *svm)
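Note: the hunk above stops trusting the guest-controlled exit_info_2 value and bounds the count-times-size computation with check_mul_overflow(). A minimal sketch of the same pattern in isolation; scratch_bytes() and its parameter names are illustrative, not from KVM:

#include <linux/overflow.h>	/* check_mul_overflow() */
#include <linux/limits.h>	/* INT_MAX */

/* Hypothetical helper: compute count * elem_size from an untrusted count. */
static int scratch_bytes(u64 untrusted_count, int elem_size, int *bytes)
{
	int count;

	if (untrusted_count > INT_MAX)	/* clamp before narrowing u64 -> int */
		return -EINVAL;
	count = untrusted_count;

	/* check_mul_overflow() returns true if the product wrapped. */
	if (unlikely(check_mul_overflow(count, elem_size, bytes)))
		return -EINVAL;

	return 0;
}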
@@ -2542,7 +2542,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 	kvm_vcpu_write_tsc_offset(vcpu, offset);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
-	spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
+	raw_spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
 	if (!matched) {
 		kvm->arch.nr_vcpus_matched_tsc = 0;
 	} else if (!already_matched) {
@@ -2550,7 +2550,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 	}
 
 	kvm_track_tsc_matching(vcpu);
-	spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
 }
 
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
@@ -2780,9 +2780,9 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 	kvm_make_mclock_inprogress_request(kvm);
 
 	/* no guest entries from this point */
-	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	pvclock_update_vm_gtod_copy(kvm);
-	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -2800,15 +2800,15 @@ u64 get_kvmclock_ns(struct kvm *kvm)
 	unsigned long flags;
 	u64 ret;
 
-	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	if (!ka->use_master_clock) {
-		spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+		raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 		return get_kvmclock_base_ns() + ka->kvmclock_offset;
 	}
 
 	hv_clock.tsc_timestamp = ka->master_cycle_now;
 	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
-	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	/* both __this_cpu_read() and rdtsc() should be on the same cpu */
 	get_cpu();
@@ -2902,13 +2902,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	 * If the host uses TSC clock, then passthrough TSC as stable
 	 * to the guest.
 	 */
-	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	use_master_clock = ka->use_master_clock;
 	if (use_master_clock) {
 		host_tsc = ka->master_cycle_now;
 		kernel_ns = ka->master_kernel_ns;
 	}
-	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+	raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	/* Keep irq disabled to prevent changes to the clock */
 	local_irq_save(flags);
@@ -6100,13 +6100,13 @@ set_pit2_out:
 	 * is slightly ahead) here we risk going negative on unsigned
 	 * 'system_time' when 'user_ns.clock' is very small.
 	 */
-	spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+	raw_spin_lock_irq(&ka->pvclock_gtod_sync_lock);
 	if (kvm->arch.use_master_clock)
 		now_ns = ka->master_kernel_ns;
 	else
 		now_ns = get_kvmclock_base_ns();
 	ka->kvmclock_offset = user_ns.clock - now_ns;
-	spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+	raw_spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
 
 	kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
 	break;
@@ -8156,9 +8156,9 @@ static void kvm_hyperv_tsc_notifier(void)
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		struct kvm_arch *ka = &kvm->arch;
 
-		spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+		raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 		pvclock_update_vm_gtod_copy(kvm);
-		spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+		raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 		kvm_for_each_vcpu(cpu, vcpu, kvm)
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -8800,9 +8800,17 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
+
+	/*
+	 * The call to kvm_ready_for_interrupt_injection() may end up in
+	 * kvm_xen_has_interrupt() which may require the srcu lock to be
+	 * held, to protect against changes in the vcpu_info address.
+	 */
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 	kvm_run->ready_for_interrupt_injection =
 		pic_in_kernel(vcpu->kvm) ||
 		kvm_vcpu_ready_for_interrupt_injection(vcpu);
+	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
 	if (is_smm(vcpu))
 		kvm_run->flags |= KVM_RUN_X86_SMM;
@@ -11199,7 +11207,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
 	mutex_init(&kvm->arch.apic_map_lock);
-	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
+	raw_spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
 
 	kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
 	pvclock_update_vm_gtod_copy(kvm);
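Note: the run of hunks above converts kvm->arch.pvclock_gtod_sync_lock from spinlock_t to raw_spinlock_t throughout. The diff itself does not state the motivation; a likely one — assumed here — is PREEMPT_RT, where spinlock_t becomes a sleeping lock and therefore must not be taken in contexts that cannot sleep, e.g. with interrupts disabled or nested under another raw lock such as tsc_write_lock. A minimal sketch of that nesting rule (lock names are illustrative):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(outer_lock);	/* always a true spinning lock */
static DEFINE_RAW_SPINLOCK(inner_lock);	/* taken under outer_lock, so on
					 * PREEMPT_RT it must be raw too */

static void nested_update(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&outer_lock, flags);
	raw_spin_lock(&inner_lock);	/* a sleeping lock here would be a bug */
	/* ... update shared timekeeping state ... */
	raw_spin_unlock(&inner_lock);
	raw_spin_unlock_irqrestore(&outer_lock, flags);
}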
@@ -190,6 +190,7 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
 
 int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 {
+	int err;
 	u8 rc = 0;
 
 	/*
@@ -216,13 +217,29 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 	if (likely(slots->generation == ghc->generation &&
 		   !kvm_is_error_hva(ghc->hva) && ghc->memslot)) {
 		/* Fast path */
-		__get_user(rc, (u8 __user *)ghc->hva + offset);
-	} else {
-		/* Slow path */
-		kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
-					     sizeof(rc));
+		pagefault_disable();
+		err = __get_user(rc, (u8 __user *)ghc->hva + offset);
+		pagefault_enable();
+		if (!err)
+			return rc;
 	}
 
+	/* Slow path */
+
+	/*
+	 * This function gets called from kvm_vcpu_block() after setting the
+	 * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
+	 * from a HLT. So we really mustn't sleep. If the page ended up absent
+	 * at that point, just return 1 in order to trigger an immediate wake,
+	 * and we'll end up getting called again from a context where we *can*
+	 * fault in the page and wait for it.
+	 */
+	if (in_atomic() || !task_is_running(current))
+		return 1;
+
+	kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
+				     sizeof(rc));
+
 	return rc;
 }
 
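Note: the fast path above now brackets __get_user() with pagefault_disable()/pagefault_enable(): with faults disabled, an absent page makes __get_user() return -EFAULT immediately instead of sleeping to fault it in, which is what lets the wakeup check run from kvm_vcpu_block(). A stand-alone sketch of the idiom; peek_user_byte() is an illustrative name:

#include <linux/uaccess.h>

/* Opportunistic user-memory read from a context that must not sleep. */
static int peek_user_byte(const u8 __user *ptr, u8 *val)
{
	int err;

	pagefault_disable();
	err = __get_user(*val, ptr);	/* fails fast rather than faulting in */
	pagefault_enable();

	return err;	/* 0 on success, -EFAULT if the page was absent */
}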
@@ -1325,6 +1325,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 	int errors, queued;
 	blk_status_t ret = BLK_STS_OK;
 	LIST_HEAD(zone_list);
+	bool needs_resource = false;
 
 	if (list_empty(list))
 		return false;
@@ -1370,6 +1371,8 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 			queued++;
 			break;
 		case BLK_STS_RESOURCE:
+			needs_resource = true;
+			fallthrough;
 		case BLK_STS_DEV_RESOURCE:
 			blk_mq_handle_dev_resource(rq, list);
 			goto out;
@@ -1380,6 +1383,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 			 * accept.
 			 */
 			blk_mq_handle_zone_resource(rq, &zone_list);
+			needs_resource = true;
 			break;
 		default:
 			errors++;
@@ -1406,7 +1410,6 @@ out:
 		/* For non-shared tags, the RESTART check will suffice */
 		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
 			(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
-		bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET;
 
 		if (nr_budgets)
 			blk_mq_release_budgets(q, list);
@@ -1447,14 +1450,16 @@ out:
 		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
 		 * bit is set, run queue after a delay to avoid IO stalls
 		 * that could otherwise occur if the queue is idle.  We'll do
-		 * similar if we couldn't get budget and SCHED_RESTART is set.
+		 * similar if we couldn't get budget or couldn't lock a zone
+		 * and SCHED_RESTART is set.
 		 */
 		needs_restart = blk_mq_sched_needs_restart(hctx);
+		if (prep == PREP_DISPATCH_NO_BUDGET)
+			needs_resource = true;
 		if (!needs_restart ||
 		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
 			blk_mq_run_hw_queue(hctx, true);
-		else if (needs_restart && (ret == BLK_STS_RESOURCE ||
-					   no_budget_avail))
+		else if (needs_restart && needs_resource)
 			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
 
 		blk_mq_update_dispatch_busy(hctx, true);
@@ -842,6 +842,24 @@ bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
 
+static bool disk_has_partitions(struct gendisk *disk)
+{
+	unsigned long idx;
+	struct block_device *part;
+	bool ret = false;
+
+	rcu_read_lock();
+	xa_for_each(&disk->part_tbl, idx, part) {
+		if (bdev_is_partition(part)) {
+			ret = true;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
 /**
  * blk_queue_set_zoned - configure a disk queue zoned model.
  * @disk: the gendisk of the queue to configure
@@ -876,7 +894,7 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
 		 * we do nothing special as far as the block layer is concerned.
 		 */
 		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
-		    !xa_empty(&disk->part_tbl))
+		    disk_has_partitions(disk))
 			model = BLK_ZONED_NONE;
 		break;
 	case BLK_ZONED_NONE:
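Note: disk_has_partitions() above replaces the !xa_empty() test, presumably because part_tbl is never empty — it also holds the whole-device bdev at partition number 0 — so only entries for which bdev_is_partition() is true should count. A generic sketch of the same RCU-protected XArray walk; any_entry_matches(), table and match are illustrative names:

#include <linux/xarray.h>
#include <linux/rcupdate.h>

static bool any_entry_matches(struct xarray *table, bool (*match)(void *))
{
	unsigned long idx;
	void *entry;
	bool ret = false;

	rcu_read_lock();
	xa_for_each(table, idx, entry) {	/* safe to walk under RCU */
		if (match(entry)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}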
@@ -588,16 +588,6 @@ void del_gendisk(struct gendisk *disk)
 	 * Prevent new I/O from crossing bio_queue_enter().
 	 */
 	blk_queue_start_drain(q);
-	blk_mq_freeze_queue_wait(q);
-
-	rq_qos_exit(q);
-	blk_sync_queue(q);
-	blk_flush_integrity();
-	/*
-	 * Allow using passthrough request again after the queue is torn down.
-	 */
-	blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
-	__blk_mq_unfreeze_queue(q, true);
 
 	if (!(disk->flags & GENHD_FL_HIDDEN)) {
 		sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
@@ -620,6 +610,18 @@ void del_gendisk(struct gendisk *disk)
 	sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
 	pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
 	device_del(disk_to_dev(disk));
+
+	blk_mq_freeze_queue_wait(q);
+
+	rq_qos_exit(q);
+	blk_sync_queue(q);
+	blk_flush_integrity();
+	/*
+	 * Allow using passthrough request again after the queue is torn down.
+	 */
+	blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
+	__blk_mq_unfreeze_queue(q, true);
+
 }
 EXPORT_SYMBOL(del_gendisk);
 
@@ -281,14 +281,14 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
 	if (!blk)
 		return -ENOMEM;
 
+	rbnode->block = blk;
+
 	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
 		present = krealloc(rbnode->cache_present,
 				   BITS_TO_LONGS(blklen) * sizeof(*present),
 				   GFP_KERNEL);
-		if (!present) {
-			kfree(blk);
+		if (!present)
 			return -ENOMEM;
-		}
 
 		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
 		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
@@ -305,7 +305,6 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
 	}
 
 	/* update the rbnode block, its size and the base register */
-	rbnode->block = blk;
 	rbnode->blklen = blklen;
 	rbnode->base_reg = base_reg;
 	rbnode->cache_present = present;
@@ -58,11 +58,8 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
 	long rate;
 	int i;
 
-	if (rate_hw && rate_ops && rate_ops->determine_rate) {
-		__clk_hw_set_clk(rate_hw, hw);
-		return rate_ops->determine_rate(rate_hw, req);
-	} else if (rate_hw && rate_ops && rate_ops->round_rate &&
-		   mux_hw && mux_ops && mux_ops->set_parent) {
+	if (rate_hw && rate_ops && rate_ops->round_rate &&
+	    mux_hw && mux_ops && mux_ops->set_parent) {
 		req->best_parent_hw = NULL;
 
 		if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) {
@@ -107,6 +104,9 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
 
 		req->rate = best_rate;
 		return 0;
+	} else if (rate_hw && rate_ops && rate_ops->determine_rate) {
+		__clk_hw_set_clk(rate_hw, hw);
+		return rate_ops->determine_rate(rate_hw, req);
 	} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
 		__clk_hw_set_clk(mux_hw, hw);
 		return mux_ops->determine_rate(mux_hw, req);
@@ -256,6 +256,11 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
 			NULL,
 			0);
 
+	if (ret) {
+		dev_err(dev, "bgpio_init failed\n");
+		return ret;
+	}
+
 	gc->direction_input = mlxbf2_gpio_direction_input;
 	gc->direction_output = mlxbf2_gpio_direction_output;
 	gc->ngpio = npins;
@@ -224,7 +224,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
 	}
 
 	chip->gc.label = dev_name(dev);
-	if (of_property_read_u32(dn, "ngpios", &num_gpios))
+	if (!of_property_read_u32(dn, "ngpios", &num_gpios))
 		chip->gc.ngpio = num_gpios;
 
 	irq = platform_get_irq(pdev, 0);
@@ -228,7 +228,7 @@ enum {
 #define FAMILY_YELLOW_CARP	146
 
 #define YELLOW_CARP_A0 0x01
-#define YELLOW_CARP_B0 0x1A
+#define YELLOW_CARP_B0 0x20
 #define YELLOW_CARP_UNKNOWN 0xFF
 
 #ifndef ASICREV_IS_YELLOW_CARP
@@ -140,6 +140,12 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
 		},
 		.driver_data = (void *)&lcd800x1280_rightside_up,
+	}, {	/* AYA NEO 2021 */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
+		},
+		.driver_data = (void *)&lcd800x1280_rightside_up,
 	}, {	/* Chuwi HiBook (CWI514) */
 		.matches = {
 		  DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
@@ -205,6 +211,12 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
 		},
 		.driver_data = (void *)&gpd_win2,
+	}, {	/* GPD Win 3 */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03")
+		},
+		.driver_data = (void *)&lcd720x1280_rightside_up,
 	}, {	/* I.T.Works TW891 */
 		.matches = {
 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
@@ -2019,6 +2019,9 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+	if (!crtc_state)
+		return;
+
 	/*
 	 * Don't clobber DPCD if it's been already read out during output
 	 * setup (eDP) or detect.
@@ -30,6 +30,7 @@ static void mock_setup(struct drm_plane_state *state)
 	mock_device.driver = &mock_driver;
 	mock_device.mode_config.prop_fb_damage_clips = &mock_prop;
 	mock_plane.dev = &mock_device;
+	mock_obj_props.count = 0;
 	mock_plane.base.properties = &mock_obj_props;
 	mock_prop.base.id = 1; /* 0 is an invalid id */
 	mock_prop.dev = &mock_device;
@@ -189,6 +189,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
 	struct ttm_transfer_obj *fbo;
 
 	fbo = container_of(bo, struct ttm_transfer_obj, base);
+	dma_resv_fini(&fbo->base.base._resv);
 	ttm_bo_put(fbo->bo);
 	kfree(fbo);
 }
@@ -706,8 +706,9 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
 
 	/* Construct the family header first */
 	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
-	memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
-	       LS_DEVICE_NAME_MAX);
+	strscpy_pad(header->device_name,
+		    dev_name(&query->port->agent->device->dev),
+		    LS_DEVICE_NAME_MAX);
 	header->port_num = query->port->port_num;
 
 	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
@@ -878,6 +878,7 @@ void sc_disable(struct send_context *sc)
 {
 	u64 reg;
 	struct pio_buf *pbuf;
+	LIST_HEAD(wake_list);
 
 	if (!sc)
 		return;
@@ -912,19 +913,21 @@ void sc_disable(struct send_context *sc)
 	spin_unlock(&sc->release_lock);
 
 	write_seqlock(&sc->waitlock);
-	while (!list_empty(&sc->piowait)) {
+	if (!list_empty(&sc->piowait))
+		list_move(&sc->piowait, &wake_list);
+	write_sequnlock(&sc->waitlock);
+	while (!list_empty(&wake_list)) {
 		struct iowait *wait;
 		struct rvt_qp *qp;
 		struct hfi1_qp_priv *priv;
 
-		wait = list_first_entry(&sc->piowait, struct iowait, list);
+		wait = list_first_entry(&wake_list, struct iowait, list);
 		qp = iowait_to_qp(wait);
 		priv = qp->priv;
 		list_del_init(&priv->s_iowait.list);
 		priv->s_iowait.lock = NULL;
 		hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
 	}
-	write_sequnlock(&sc->waitlock);
 
 	spin_unlock_irq(&sc->alloc_lock);
 }
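Note: sc_disable() above now detaches the whole piowait list while the seqlock is held and performs the wakeups afterwards, so hfi1_qp_wakeup() no longer runs under the lock. A generic sketch of that detach-then-process pattern with an ordinary spinlock; struct waiter and drain_and_wake() are illustrative, and list_splice_init() is used as the common spelling of the transplant the hunk performs with list_move() on the list head:

#include <linux/list.h>
#include <linux/spinlock.h>

struct waiter {
	struct list_head list;
};

static void drain_and_wake(spinlock_t *lock, struct list_head *waitlist)
{
	LIST_HEAD(wake_list);
	struct waiter *w, *tmp;

	spin_lock(lock);
	list_splice_init(waitlist, &wake_list);	/* O(1) detach under the lock */
	spin_unlock(lock);

	/* Per-entry work now runs without the lock held. */
	list_for_each_entry_safe(w, tmp, &wake_list, list) {
		list_del_init(&w->list);
		/* e.g. wake_up_process(...) for the owner of 'w' */
	}
}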
@@ -1092,12 +1092,12 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
 		if (cq->avoid_mem_cflct) {
 			ext_cqe = (__le64 *)((u8 *)cqe + 32);
 			get_64bit_val(ext_cqe, 24, &qword7);
-			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
 		} else {
 			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
 			ext_cqe = cq->cq_base[peek_head].buf;
 			get_64bit_val(ext_cqe, 24, &qword7);
-			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
 			if (!peek_head)
 				polarity ^= 1;
 		}
@@ -3399,9 +3399,13 @@ static void irdma_process_cqe(struct ib_wc *entry,
 	}
 
 	if (cq_poll_info->ud_vlan_valid) {
-		entry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK;
-		entry->wc_flags |= IB_WC_WITH_VLAN;
+		u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
+
 		entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
+		if (vlan) {
+			entry->vlan_id = vlan;
+			entry->wc_flags |= IB_WC_WITH_VLAN;
+		}
 	} else {
 		entry->sl = 0;
 	}
|
|||||||
|
|
||||||
tc_node->enable = true;
|
tc_node->enable = true;
|
||||||
ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
|
ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
|
||||||
if (ret)
|
if (ret) {
|
||||||
|
vsi->unregister_qset(vsi, tc_node);
|
||||||
goto reg_err;
|
goto reg_err;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
ibdev_dbg(to_ibdev(vsi->dev),
|
ibdev_dbg(to_ibdev(vsi->dev),
|
||||||
"WS: Using node %d which represents VSI %d TC %d\n",
|
"WS: Using node %d which represents VSI %d TC %d\n",
|
||||||
@ -350,6 +352,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
|
|||||||
}
|
}
|
||||||
goto exit;
|
goto exit;
|
||||||
|
|
||||||
|
reg_err:
|
||||||
|
irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
|
||||||
|
list_del(&tc_node->siblings);
|
||||||
|
irdma_free_node(vsi, tc_node);
|
||||||
leaf_add_err:
|
leaf_add_err:
|
||||||
if (list_empty(&vsi_node->child_list_head)) {
|
if (list_empty(&vsi_node->child_list_head)) {
|
||||||
if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
|
if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
|
||||||
@ -369,11 +375,6 @@ vsi_add_err:
|
|||||||
exit:
|
exit:
|
||||||
mutex_unlock(&vsi->dev->ws_mutex);
|
mutex_unlock(&vsi->dev->ws_mutex);
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
reg_err:
|
|
||||||
mutex_unlock(&vsi->dev->ws_mutex);
|
|
||||||
irdma_ws_remove(vsi, user_pri);
|
|
||||||
return ret;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@@ -1339,7 +1339,6 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
 		goto err_2;
 	}
 	mr->mmkey.type = MLX5_MKEY_MR;
-	mr->desc_size = sizeof(struct mlx5_mtt);
 	mr->umem = umem;
 	set_mr_fields(dev, mr, umem->length, access_flags);
 	kvfree(in);
@@ -1533,6 +1532,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
 		ib_umem_release(&odp->umem);
 		return ERR_CAST(mr);
 	}
+	xa_init(&mr->implicit_children);
 
 	odp->private = mr;
 	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
@@ -4458,6 +4458,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
 		MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
 		MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
+		if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+			MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
 
 		err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
 					   MLX5_ST_SZ_BYTES(create_dct_in), out,
@@ -455,6 +455,7 @@ struct qedr_qp {
 	/* synchronization objects used with iwarp ep */
 	struct kref refcnt;
 	struct completion iwarp_cm_comp;
+	struct completion qp_rel_comp;
 	unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
 };
 
@@ -83,7 +83,7 @@ static void qedr_iw_free_qp(struct kref *ref)
 {
 	struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
 
-	kfree(qp);
+	complete(&qp->qp_rel_comp);
 }
 
 static void
@@ -1357,6 +1357,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
 		kref_init(&qp->refcnt);
 		init_completion(&qp->iwarp_cm_comp);
+		init_completion(&qp->qp_rel_comp);
 	}
 
 	qp->pd = pd;
@@ -2857,8 +2858,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 
 	qedr_free_qp_resources(dev, qp, udata);
 
-	if (rdma_protocol_iwarp(&dev->ibdev, 1))
+	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
 		qedr_iw_qp_rem_ref(&qp->ibqp);
+		wait_for_completion(&qp->qp_rel_comp);
+	}
 
 	return 0;
 }
@@ -602,7 +602,7 @@ done:
 /*
  * How many pages in this iovec element?
  */
-static int qib_user_sdma_num_pages(const struct iovec *iov)
+static size_t qib_user_sdma_num_pages(const struct iovec *iov)
 {
 	const unsigned long addr = (unsigned long) iov->iov_base;
 	const unsigned long len = iov->iov_len;
@@ -658,7 +658,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
 static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
 				   struct qib_user_sdma_queue *pq,
 				   struct qib_user_sdma_pkt *pkt,
-				   unsigned long addr, int tlen, int npages)
+				   unsigned long addr, int tlen, size_t npages)
 {
 	struct page *pages[8];
 	int i, j;
@@ -722,7 +722,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
 	unsigned long idx;
 
 	for (idx = 0; idx < niov; idx++) {
-		const int npages = qib_user_sdma_num_pages(iov + idx);
+		const size_t npages = qib_user_sdma_num_pages(iov + idx);
 		const unsigned long addr = (unsigned long) iov[idx].iov_base;
 
 		ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
@@ -824,8 +824,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 	unsigned pktnw;
 	unsigned pktnwc;
 	int nfrags = 0;
-	int npages = 0;
-	int bytes_togo = 0;
+	size_t npages = 0;
+	size_t bytes_togo = 0;
 	int tiddma = 0;
 	int cfur;
 
@@ -885,7 +885,11 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 
 		npages += qib_user_sdma_num_pages(&iov[idx]);
 
-		bytes_togo += slen;
+		if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
+		    bytes_togo > type_max(typeof(pkt->bytes_togo))) {
+			ret = -EINVAL;
+			goto free_pbc;
+		}
 		pktnwc += slen >> 2;
 		idx++;
 		nfrags++;
@@ -904,8 +908,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 	}
 
 	if (frag_size) {
-		int tidsmsize, n;
-		size_t pktsize;
+		size_t tidsmsize, n, pktsize, sz, addrlimit;
 
 		n = npages*((2*PAGE_SIZE/frag_size)+1);
 		pktsize = struct_size(pkt, addr, n);
@@ -923,14 +926,24 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 		else
 			tidsmsize = 0;
 
-		pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
+		if (check_add_overflow(pktsize, tidsmsize, &sz)) {
+			ret = -EINVAL;
+			goto free_pbc;
+		}
+		pkt = kmalloc(sz, GFP_KERNEL);
 		if (!pkt) {
 			ret = -ENOMEM;
 			goto free_pbc;
 		}
 		pkt->largepkt = 1;
 		pkt->frag_size = frag_size;
-		pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
+		if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
+				       &addrlimit) ||
+		    addrlimit > type_max(typeof(pkt->addrlimit))) {
+			ret = -EINVAL;
+			goto free_pbc;
+		}
+		pkt->addrlimit = addrlimit;
 
 		if (tiddma) {
 			char *tidsm = (char *)pkt + pktsize;
@ -1223,7 +1223,7 @@ int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
|
|||||||
spin_lock(&rdi->n_qps_lock);
|
spin_lock(&rdi->n_qps_lock);
|
||||||
if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
|
if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
|
||||||
spin_unlock(&rdi->n_qps_lock);
|
spin_unlock(&rdi->n_qps_lock);
|
||||||
ret = ENOMEM;
|
ret = -ENOMEM;
|
||||||
goto bail_ip;
|
goto bail_ip;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -506,7 +506,7 @@ config MMC_OMAP_HS
|
|||||||
|
|
||||||
config MMC_WBSD
|
config MMC_WBSD
|
||||||
tristate "Winbond W83L51xD SD/MMC Card Interface support"
|
tristate "Winbond W83L51xD SD/MMC Card Interface support"
|
||||||
depends on ISA_DMA_API
|
depends on ISA_DMA_API && !M68K
|
||||||
help
|
help
|
||||||
This selects the Winbond(R) W83L51xD Secure digital and
|
This selects the Winbond(R) W83L51xD Secure digital and
|
||||||
Multimedia card Interface.
|
Multimedia card Interface.
|
||||||
|
@ -282,6 +282,9 @@ static void __cqhci_enable(struct cqhci_host *cq_host)
|
|||||||
|
|
||||||
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
|
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
|
||||||
|
|
||||||
|
if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
|
||||||
|
cqhci_writel(cq_host, 0, CQHCI_CTL);
|
||||||
|
|
||||||
mmc->cqe_on = true;
|
mmc->cqe_on = true;
|
||||||
|
|
||||||
if (cq_host->ops->enable)
|
if (cq_host->ops->enable)
|
||||||
|
@ -464,6 +464,18 @@ static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If there is no cadiates value, then it needs to return -EIO.
|
||||||
|
* If there are candiates values and don't find bset clk sample value,
|
||||||
|
* then use a first candiates clock sample value.
|
||||||
|
*/
|
||||||
|
for (i = 0; i < iter; i++) {
|
||||||
|
__c = ror8(candiates, i);
|
||||||
|
if ((__c & 0x1) == 0x1) {
|
||||||
|
loc = i;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
}
|
||||||
out:
|
out:
|
||||||
return loc;
|
return loc;
|
||||||
}
|
}
|
||||||
@ -494,6 +506,8 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
|
|||||||
priv->tuned_sample = found;
|
priv->tuned_sample = found;
|
||||||
} else {
|
} else {
|
||||||
ret = -EIO;
|
ret = -EIO;
|
||||||
|
dev_warn(&mmc->class_dev,
|
||||||
|
"There is no candiates value about clksmpl!\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -2577,6 +2577,25 @@ static int msdc_drv_probe(struct platform_device *pdev)
|
|||||||
host->dma_mask = DMA_BIT_MASK(32);
|
host->dma_mask = DMA_BIT_MASK(32);
|
||||||
mmc_dev(mmc)->dma_mask = &host->dma_mask;
|
mmc_dev(mmc)->dma_mask = &host->dma_mask;
|
||||||
|
|
||||||
|
host->timeout_clks = 3 * 1048576;
|
||||||
|
host->dma.gpd = dma_alloc_coherent(&pdev->dev,
|
||||||
|
2 * sizeof(struct mt_gpdma_desc),
|
||||||
|
&host->dma.gpd_addr, GFP_KERNEL);
|
||||||
|
host->dma.bd = dma_alloc_coherent(&pdev->dev,
|
||||||
|
MAX_BD_NUM * sizeof(struct mt_bdma_desc),
|
||||||
|
&host->dma.bd_addr, GFP_KERNEL);
|
||||||
|
if (!host->dma.gpd || !host->dma.bd) {
|
||||||
|
ret = -ENOMEM;
|
||||||
|
goto release_mem;
|
||||||
|
}
|
||||||
|
msdc_init_gpd_bd(host, &host->dma);
|
||||||
|
INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
|
||||||
|
spin_lock_init(&host->lock);
|
||||||
|
|
||||||
|
platform_set_drvdata(pdev, mmc);
|
||||||
|
msdc_ungate_clock(host);
|
||||||
|
msdc_init_hw(host);
|
||||||
|
|
||||||
if (mmc->caps2 & MMC_CAP2_CQE) {
|
if (mmc->caps2 & MMC_CAP2_CQE) {
|
||||||
host->cq_host = devm_kzalloc(mmc->parent,
|
host->cq_host = devm_kzalloc(mmc->parent,
|
||||||
sizeof(*host->cq_host),
|
sizeof(*host->cq_host),
|
||||||
@ -2597,25 +2616,6 @@ static int msdc_drv_probe(struct platform_device *pdev)
|
|||||||
mmc->max_seg_size = 64 * 1024;
|
mmc->max_seg_size = 64 * 1024;
|
||||||
}
|
}
|
||||||
|
|
||||||
host->timeout_clks = 3 * 1048576;
|
|
||||||
host->dma.gpd = dma_alloc_coherent(&pdev->dev,
|
|
||||||
2 * sizeof(struct mt_gpdma_desc),
|
|
||||||
&host->dma.gpd_addr, GFP_KERNEL);
|
|
||||||
host->dma.bd = dma_alloc_coherent(&pdev->dev,
|
|
||||||
MAX_BD_NUM * sizeof(struct mt_bdma_desc),
|
|
||||||
&host->dma.bd_addr, GFP_KERNEL);
|
|
||||||
if (!host->dma.gpd || !host->dma.bd) {
|
|
||||||
ret = -ENOMEM;
|
|
||||||
goto release_mem;
|
|
||||||
}
|
|
||||||
msdc_init_gpd_bd(host, &host->dma);
|
|
||||||
INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
|
|
||||||
spin_lock_init(&host->lock);
|
|
||||||
|
|
||||||
platform_set_drvdata(pdev, mmc);
|
|
||||||
msdc_ungate_clock(host);
|
|
||||||
msdc_init_hw(host);
|
|
||||||
|
|
||||||
ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
|
ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
|
||||||
IRQF_TRIGGER_NONE, pdev->name, host);
|
IRQF_TRIGGER_NONE, pdev->name, host);
|
||||||
if (ret)
|
if (ret)
|
||||||
|
@ -1187,6 +1187,7 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
|
|||||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||||
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
|
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
|
||||||
u32 ctrl;
|
u32 ctrl;
|
||||||
|
int ret;
|
||||||
|
|
||||||
/* Reset the tuning circuit */
|
/* Reset the tuning circuit */
|
||||||
if (esdhc_is_usdhc(imx_data)) {
|
if (esdhc_is_usdhc(imx_data)) {
|
||||||
@ -1199,7 +1200,22 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
|
|||||||
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
|
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
|
||||||
ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
|
ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
|
||||||
ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
|
ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
|
||||||
|
ctrl &= ~ESDHC_MIX_CTRL_EXE_TUNE;
|
||||||
writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
|
writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
|
||||||
|
/* Make sure ESDHC_MIX_CTRL_EXE_TUNE cleared */
|
||||||
|
ret = readl_poll_timeout(host->ioaddr + SDHCI_AUTO_CMD_STATUS,
|
||||||
|
ctrl, !(ctrl & ESDHC_MIX_CTRL_EXE_TUNE), 1, 50);
|
||||||
|
if (ret == -ETIMEDOUT)
|
||||||
|
dev_warn(mmc_dev(host->mmc),
|
||||||
|
"Warning! clear execute tuning bit failed\n");
|
||||||
|
/*
|
||||||
|
* SDHCI_INT_DATA_AVAIL is W1C bit, set this bit will clear the
|
||||||
|
* usdhc IP internal logic flag execute_tuning_with_clr_buf, which
|
||||||
|
* will finally make sure the normal data transfer logic correct.
|
||||||
|
*/
|
||||||
|
ctrl = readl(host->ioaddr + SDHCI_INT_STATUS);
|
||||||
|
ctrl |= SDHCI_INT_DATA_AVAIL;
|
||||||
|
writel(ctrl, host->ioaddr + SDHCI_INT_STATUS);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -616,16 +616,12 @@ static int intel_select_drive_strength(struct mmc_card *card,
|
|||||||
return intel_host->drv_strength;
|
return intel_host->drv_strength;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int bxt_get_cd(struct mmc_host *mmc)
|
static int sdhci_get_cd_nogpio(struct mmc_host *mmc)
|
||||||
{
|
{
|
||||||
int gpio_cd = mmc_gpio_get_cd(mmc);
|
|
||||||
struct sdhci_host *host = mmc_priv(mmc);
|
struct sdhci_host *host = mmc_priv(mmc);
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
if (!gpio_cd)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
spin_lock_irqsave(&host->lock, flags);
|
spin_lock_irqsave(&host->lock, flags);
|
||||||
|
|
||||||
if (host->flags & SDHCI_DEVICE_DEAD)
|
if (host->flags & SDHCI_DEVICE_DEAD)
|
||||||
@ -638,6 +634,21 @@ out:
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int bxt_get_cd(struct mmc_host *mmc)
|
||||||
|
{
|
||||||
|
int gpio_cd = mmc_gpio_get_cd(mmc);
|
||||||
|
|
||||||
|
if (!gpio_cd)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
return sdhci_get_cd_nogpio(mmc);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int mrfld_get_cd(struct mmc_host *mmc)
|
||||||
|
{
|
||||||
|
return sdhci_get_cd_nogpio(mmc);
|
||||||
|
}
|
||||||
|
|
||||||
#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
|
#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
|
||||||
#define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
|
#define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
|
||||||
|
|
||||||
@ -1341,6 +1352,14 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
|
|||||||
MMC_CAP_1_8V_DDR;
|
MMC_CAP_1_8V_DDR;
|
||||||
break;
|
break;
|
||||||
case INTEL_MRFLD_SD:
|
case INTEL_MRFLD_SD:
|
||||||
|
slot->cd_idx = 0;
|
||||||
|
slot->cd_override_level = true;
|
||||||
|
/*
|
||||||
|
* There are two PCB designs of SD card slot with the opposite
|
||||||
|
* card detection sense. Quirk this out by ignoring GPIO state
|
||||||
|
* completely in the custom ->get_cd() callback.
|
||||||
|
*/
|
||||||
|
slot->host->mmc_host_ops.get_cd = mrfld_get_cd;
|
||||||
slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
|
slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
|
||||||
break;
|
break;
|
||||||
case INTEL_MRFLD_SDIO:
|
case INTEL_MRFLD_SDIO:
|
||||||
|
@ -2042,6 +2042,12 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
|
|||||||
break;
|
break;
|
||||||
case MMC_VDD_32_33:
|
case MMC_VDD_32_33:
|
||||||
case MMC_VDD_33_34:
|
case MMC_VDD_33_34:
|
||||||
|
/*
|
||||||
|
* 3.4 ~ 3.6V are valid only for those platforms where it's
|
||||||
|
* known that the voltage range is supported by hardware.
|
||||||
|
*/
|
||||||
|
case MMC_VDD_34_35:
|
||||||
|
case MMC_VDD_35_36:
|
||||||
pwr = SDHCI_POWER_330;
|
pwr = SDHCI_POWER_330;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
|
@ -195,6 +195,10 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
|
|||||||
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
|
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
|
||||||
host->sdcard_irq_mask = host->sdcard_irq_mask_all;
|
host->sdcard_irq_mask = host->sdcard_irq_mask_all;
|
||||||
|
|
||||||
|
if (host->native_hotplug)
|
||||||
|
tmio_mmc_enable_mmc_irqs(host,
|
||||||
|
TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
|
||||||
|
|
||||||
tmio_mmc_set_bus_width(host, host->mmc->ios.bus_width);
|
tmio_mmc_set_bus_width(host, host->mmc->ios.bus_width);
|
||||||
|
|
||||||
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
|
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
|
||||||
@ -956,8 +960,15 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
|||||||
case MMC_POWER_OFF:
|
case MMC_POWER_OFF:
|
||||||
tmio_mmc_power_off(host);
|
tmio_mmc_power_off(host);
|
||||||
/* For R-Car Gen2+, we need to reset SDHI specific SCC */
|
/* For R-Car Gen2+, we need to reset SDHI specific SCC */
|
||||||
if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
|
if (host->pdata->flags & TMIO_MMC_MIN_RCAR2) {
|
||||||
host->reset(host);
|
host->reset(host);
|
||||||
|
|
||||||
|
if (host->native_hotplug)
|
||||||
|
tmio_mmc_enable_mmc_irqs(host,
|
||||||
|
TMIO_STAT_CARD_REMOVE |
|
||||||
|
TMIO_STAT_CARD_INSERT);
|
||||||
|
}
|
||||||
|
|
||||||
host->set_clock(host, 0);
|
host->set_clock(host, 0);
|
||||||
break;
|
break;
|
||||||
case MMC_POWER_UP:
|
case MMC_POWER_UP:
|
||||||
@ -1185,10 +1196,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
|
|||||||
_host->set_clock(_host, 0);
|
_host->set_clock(_host, 0);
|
||||||
tmio_mmc_reset(_host);
|
tmio_mmc_reset(_host);
|
||||||
|
|
||||||
if (_host->native_hotplug)
|
|
||||||
tmio_mmc_enable_mmc_irqs(_host,
|
|
||||||
TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
|
|
||||||
|
|
||||||
spin_lock_init(&_host->lock);
|
spin_lock_init(&_host->lock);
|
||||||
mutex_init(&_host->ios_lock);
|
mutex_init(&_host->ios_lock);
|
||||||
|
|
||||||
|
@ -576,7 +576,7 @@ static void check_vub300_port_status(struct vub300_mmc_host *vub300)
|
|||||||
GET_SYSTEM_PORT_STATUS,
|
GET_SYSTEM_PORT_STATUS,
|
||||||
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||||
0x0000, 0x0000, &vub300->system_port_status,
|
0x0000, 0x0000, &vub300->system_port_status,
|
||||||
sizeof(vub300->system_port_status), HZ);
|
sizeof(vub300->system_port_status), 1000);
|
||||||
if (sizeof(vub300->system_port_status) == retval)
|
if (sizeof(vub300->system_port_status) == retval)
|
||||||
new_system_port_status(vub300);
|
new_system_port_status(vub300);
|
||||||
}
|
}
|
||||||
@ -1241,7 +1241,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
|
|||||||
SET_INTERRUPT_PSEUDOCODE,
|
SET_INTERRUPT_PSEUDOCODE,
|
||||||
USB_DIR_OUT | USB_TYPE_VENDOR |
|
USB_DIR_OUT | USB_TYPE_VENDOR |
|
||||||
USB_RECIP_DEVICE, 0x0000, 0x0000,
|
USB_RECIP_DEVICE, 0x0000, 0x0000,
|
||||||
xfer_buffer, xfer_length, HZ);
|
xfer_buffer, xfer_length, 1000);
|
||||||
kfree(xfer_buffer);
|
kfree(xfer_buffer);
|
||||||
if (retval < 0)
|
if (retval < 0)
|
||||||
goto copy_error_message;
|
goto copy_error_message;
|
||||||
@ -1284,7 +1284,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
|
|||||||
SET_TRANSFER_PSEUDOCODE,
|
SET_TRANSFER_PSEUDOCODE,
|
||||||
USB_DIR_OUT | USB_TYPE_VENDOR |
|
USB_DIR_OUT | USB_TYPE_VENDOR |
|
||||||
USB_RECIP_DEVICE, 0x0000, 0x0000,
|
USB_RECIP_DEVICE, 0x0000, 0x0000,
|
||||||
xfer_buffer, xfer_length, HZ);
|
xfer_buffer, xfer_length, 1000);
|
||||||
kfree(xfer_buffer);
|
kfree(xfer_buffer);
|
||||||
if (retval < 0)
|
if (retval < 0)
|
||||||
goto copy_error_message;
|
goto copy_error_message;
|
||||||
@ -1991,7 +1991,7 @@ static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8],
|
|||||||
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
|
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
|
||||||
SET_CLOCK_SPEED,
|
SET_CLOCK_SPEED,
|
||||||
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||||
0x00, 0x00, buf, buf_array_size, HZ);
|
0x00, 0x00, buf, buf_array_size, 1000);
|
||||||
if (retval != 8) {
|
if (retval != 8) {
|
||||||
dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED"
|
dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED"
|
||||||
" %dkHz failed with retval=%d\n", kHzClock, retval);
|
" %dkHz failed with retval=%d\n", kHzClock, retval);
|
||||||
@ -2013,14 +2013,14 @@ static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
|||||||
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
|
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
|
||||||
SET_SD_POWER,
|
SET_SD_POWER,
|
||||||
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||||
0x0000, 0x0000, NULL, 0, HZ);
|
0x0000, 0x0000, NULL, 0, 1000);
|
||||||
/* must wait for the VUB300 u-proc to boot up */
|
/* must wait for the VUB300 u-proc to boot up */
|
||||||
msleep(600);
|
msleep(600);
|
||||||
} else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) {
|
} else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) {
|
||||||
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
|
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
|
||||||
SET_SD_POWER,
|
SET_SD_POWER,
|
||||||
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||||
0x0001, 0x0000, NULL, 0, HZ);
|
0x0001, 0x0000, NULL, 0, 1000);
|
||||||
msleep(600);
|
msleep(600);
|
||||||
vub300->card_powered = 1;
|
vub300->card_powered = 1;
|
||||||
} else if (ios->power_mode == MMC_POWER_ON) {
|
} else if (ios->power_mode == MMC_POWER_ON) {
|
||||||
@ -2275,14 +2275,14 @@ static int vub300_probe(struct usb_interface *interface,
|
|||||||
GET_HC_INF0,
|
GET_HC_INF0,
|
||||||
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||||
0x0000, 0x0000, &vub300->hc_info,
|
0x0000, 0x0000, &vub300->hc_info,
|
||||||
sizeof(vub300->hc_info), HZ);
|
sizeof(vub300->hc_info), 1000);
|
||||||
if (retval < 0)
|
if (retval < 0)
|
||||||
goto error5;
|
goto error5;
|
||||||
retval =
|
retval =
|
||||||
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
|
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
|
||||||
SET_ROM_WAIT_STATES,
|
SET_ROM_WAIT_STATES,
|
||||||
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||||
firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
|
firmware_rom_wait_states, 0x0000, NULL, 0, 1000);
|
||||||
if (retval < 0)
|
if (retval < 0)
|
||||||
goto error5;
|
goto error5;
|
||||||
dev_info(&vub300->udev->dev,
|
dev_info(&vub300->udev->dev,
|
||||||
@ -2297,7 +2297,7 @@ static int vub300_probe(struct usb_interface *interface,
|
|||||||
GET_SYSTEM_PORT_STATUS,
|
GET_SYSTEM_PORT_STATUS,
|
||||||
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||||
0x0000, 0x0000, &vub300->system_port_status,
|
0x0000, 0x0000, &vub300->system_port_status,
|
||||||
sizeof(vub300->system_port_status), HZ);
|
sizeof(vub300->system_port_status), 1000);
|
||||||
if (retval < 0) {
|
if (retval < 0) {
|
||||||
goto error4;
|
goto error4;
|
||||||
} else if (sizeof(vub300->system_port_status) == retval) {
|
} else if (sizeof(vub300->system_port_status) == retval) {
|
||||||
|
@ -137,7 +137,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
|
|||||||
.name = "uc",
|
.name = "uc",
|
||||||
.cmd = HNAE3_DBG_CMD_MAC_UC,
|
.cmd = HNAE3_DBG_CMD_MAC_UC,
|
||||||
.dentry = HNS3_DBG_DENTRY_MAC,
|
.dentry = HNS3_DBG_DENTRY_MAC,
|
||||||
.buf_len = HNS3_DBG_READ_LEN,
|
.buf_len = HNS3_DBG_READ_LEN_128KB,
|
||||||
.init = hns3_dbg_common_file_init,
|
.init = hns3_dbg_common_file_init,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -256,7 +256,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
|
|||||||
.name = "tqp",
|
.name = "tqp",
|
||||||
.cmd = HNAE3_DBG_CMD_REG_TQP,
|
.cmd = HNAE3_DBG_CMD_REG_TQP,
|
||||||
.dentry = HNS3_DBG_DENTRY_REG,
|
.dentry = HNS3_DBG_DENTRY_REG,
|
||||||
.buf_len = HNS3_DBG_READ_LEN,
|
.buf_len = HNS3_DBG_READ_LEN_128KB,
|
||||||
.init = hns3_dbg_common_file_init,
|
.init = hns3_dbg_common_file_init,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -298,7 +298,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
|
|||||||
.name = "fd_tcam",
|
.name = "fd_tcam",
|
||||||
.cmd = HNAE3_DBG_CMD_FD_TCAM,
|
.cmd = HNAE3_DBG_CMD_FD_TCAM,
|
||||||
.dentry = HNS3_DBG_DENTRY_FD,
|
.dentry = HNS3_DBG_DENTRY_FD,
|
||||||
.buf_len = HNS3_DBG_READ_LEN,
|
.buf_len = HNS3_DBG_READ_LEN_1MB,
|
||||||
.init = hns3_dbg_common_file_init,
|
.init = hns3_dbg_common_file_init,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -462,7 +462,7 @@ static const struct hns3_dbg_item rx_queue_info_items[] = {
|
|||||||
{ "TAIL", 2 },
|
{ "TAIL", 2 },
|
||||||
{ "HEAD", 2 },
|
{ "HEAD", 2 },
|
||||||
{ "FBDNUM", 2 },
|
{ "FBDNUM", 2 },
|
||||||
{ "PKTNUM", 2 },
|
{ "PKTNUM", 5 },
|
||||||
{ "COPYBREAK", 2 },
|
{ "COPYBREAK", 2 },
|
||||||
{ "RING_EN", 2 },
|
{ "RING_EN", 2 },
|
||||||
{ "RX_RING_EN", 2 },
|
{ "RX_RING_EN", 2 },
|
||||||
@ -565,7 +565,7 @@ static const struct hns3_dbg_item tx_queue_info_items[] = {
|
|||||||
{ "HEAD", 2 },
|
{ "HEAD", 2 },
|
||||||
{ "FBDNUM", 2 },
|
{ "FBDNUM", 2 },
|
||||||
{ "OFFSET", 2 },
|
{ "OFFSET", 2 },
|
||||||
{ "PKTNUM", 2 },
|
{ "PKTNUM", 5 },
|
||||||
{ "RING_EN", 2 },
|
{ "RING_EN", 2 },
|
||||||
{ "TX_RING_EN", 2 },
|
{ "TX_RING_EN", 2 },
|
||||||
{ "BASE_ADDR", 10 },
|
{ "BASE_ADDR", 10 },
|
||||||
@ -790,13 +790,13 @@ static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static const struct hns3_dbg_item tx_bd_info_items[] = {
|
static const struct hns3_dbg_item tx_bd_info_items[] = {
|
||||||
{ "BD_IDX", 5 },
|
{ "BD_IDX", 2 },
|
||||||
{ "ADDRESS", 2 },
|
{ "ADDRESS", 13 },
|
||||||
{ "VLAN_TAG", 2 },
|
{ "VLAN_TAG", 2 },
|
||||||
{ "SIZE", 2 },
|
{ "SIZE", 2 },
|
||||||
{ "T_CS_VLAN_TSO", 2 },
|
{ "T_CS_VLAN_TSO", 2 },
|
||||||
{ "OT_VLAN_TAG", 3 },
|
{ "OT_VLAN_TAG", 3 },
|
||||||
{ "TV", 2 },
|
{ "TV", 5 },
|
||||||
{ "OLT_VLAN_LEN", 2 },
|
{ "OLT_VLAN_LEN", 2 },
|
||||||
{ "PAYLEN_OL4CS", 2 },
|
{ "PAYLEN_OL4CS", 2 },
|
||||||
{ "BD_FE_SC_VLD", 2 },
|
{ "BD_FE_SC_VLD", 2 },
|
||||||
|
@ -391,7 +391,7 @@ static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
|
|||||||
static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
|
static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
|
||||||
int *pos)
|
int *pos)
|
||||||
{
|
{
|
||||||
struct hclge_dbg_bitmap_cmd *bitmap;
|
struct hclge_dbg_bitmap_cmd req;
|
||||||
struct hclge_desc desc;
|
struct hclge_desc desc;
|
||||||
u16 qset_id, qset_num;
|
u16 qset_id, qset_num;
|
||||||
int ret;
|
int ret;
|
||||||
@ -408,12 +408,12 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
|
|||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
|
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
|
||||||
|
|
||||||
*pos += scnprintf(buf + *pos, len - *pos,
|
*pos += scnprintf(buf + *pos, len - *pos,
|
||||||
"%04u %#x %#x %#x %#x\n",
|
"%04u %#x %#x %#x %#x\n",
|
||||||
qset_id, bitmap->bit0, bitmap->bit1,
|
qset_id, req.bit0, req.bit1, req.bit2,
|
||||||
bitmap->bit2, bitmap->bit3);
|
req.bit3);
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@ -422,7 +422,7 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
|
|||||||
static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
|
static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
|
||||||
int *pos)
|
int *pos)
|
||||||
{
|
{
|
||||||
struct hclge_dbg_bitmap_cmd *bitmap;
|
struct hclge_dbg_bitmap_cmd req;
|
||||||
struct hclge_desc desc;
|
struct hclge_desc desc;
|
||||||
u8 pri_id, pri_num;
|
u8 pri_id, pri_num;
|
||||||
int ret;
|
int ret;
|
||||||
@ -439,12 +439,11 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
|
|||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
|
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
|
||||||
|
|
||||||
*pos += scnprintf(buf + *pos, len - *pos,
|
*pos += scnprintf(buf + *pos, len - *pos,
|
||||||
"%03u %#x %#x %#x\n",
|
"%03u %#x %#x %#x\n",
|
||||||
pri_id, bitmap->bit0, bitmap->bit1,
|
pri_id, req.bit0, req.bit1, req.bit2);
|
||||||
bitmap->bit2);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@ -453,7 +452,7 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
|
|||||||
static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
|
static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
|
||||||
int *pos)
|
int *pos)
|
||||||
{
|
{
|
||||||
struct hclge_dbg_bitmap_cmd *bitmap;
|
struct hclge_dbg_bitmap_cmd req;
|
||||||
struct hclge_desc desc;
|
struct hclge_desc desc;
|
||||||
u8 pg_id;
|
u8 pg_id;
|
||||||
int ret;
|
int ret;
|
||||||
@ -466,12 +465,11 @@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
|
|||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
|
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
|
||||||
|
|
||||||
*pos += scnprintf(buf + *pos, len - *pos,
|
*pos += scnprintf(buf + *pos, len - *pos,
|
||||||
"%03u %#x %#x %#x\n",
|
"%03u %#x %#x %#x\n",
|
||||||
pg_id, bitmap->bit0, bitmap->bit1,
|
pg_id, req.bit0, req.bit1, req.bit2);
|
||||||
bitmap->bit2);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@ -511,7 +509,7 @@ static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
|
|||||||
static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
|
static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
|
||||||
int *pos)
|
int *pos)
|
||||||
{
|
{
|
||||||
struct hclge_dbg_bitmap_cmd *bitmap;
|
struct hclge_dbg_bitmap_cmd req;
|
||||||
struct hclge_desc desc;
|
struct hclge_desc desc;
|
||||||
u8 port_id = 0;
|
u8 port_id = 0;
|
||||||
int ret;
|
int ret;
|
||||||
@ -521,12 +519,12 @@ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
|
|||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
|
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
|
||||||
|
|
||||||
*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
|
*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
|
||||||
bitmap->bit0);
|
req.bit0);
|
||||||
*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
|
*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
|
||||||
bitmap->bit1);
|
req.bit1);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -2847,33 +2847,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
|
|||||||
{
|
{
|
||||||
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
|
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
|
||||||
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
|
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
|
||||||
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
|
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
|
||||||
hclge_wq, &hdev->service_task, 0);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
|
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
|
||||||
{
|
{
|
||||||
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
|
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
|
||||||
|
test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
|
||||||
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
|
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
|
||||||
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
|
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
|
||||||
hclge_wq, &hdev->service_task, 0);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
|
static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
|
||||||
{
|
{
|
||||||
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
|
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
|
||||||
!test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
|
!test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
|
||||||
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
|
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
|
||||||
hclge_wq, &hdev->service_task, 0);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
|
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
|
||||||
{
|
{
|
||||||
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
|
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
|
||||||
!test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
|
!test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
|
||||||
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
|
mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
|
||||||
hclge_wq, &hdev->service_task,
|
|
||||||
delay_time);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
|
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
|
||||||
@ -3491,33 +3487,14 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
|
|||||||
hdev->num_msi_used += 1;
|
hdev->num_msi_used += 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
|
|
||||||
const cpumask_t *mask)
|
|
||||||
{
|
|
||||||
struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
|
|
||||||
affinity_notify);
|
|
||||||
|
|
||||||
cpumask_copy(&hdev->affinity_mask, mask);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void hclge_irq_affinity_release(struct kref *ref)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
|
static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
|
||||||
{
|
{
|
||||||
irq_set_affinity_hint(hdev->misc_vector.vector_irq,
|
irq_set_affinity_hint(hdev->misc_vector.vector_irq,
|
||||||
&hdev->affinity_mask);
|
&hdev->affinity_mask);
|
||||||
|
|
||||||
hdev->affinity_notify.notify = hclge_irq_affinity_notify;
|
|
||||||
hdev->affinity_notify.release = hclge_irq_affinity_release;
|
|
||||||
irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
|
|
||||||
&hdev->affinity_notify);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
|
static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
|
||||||
{
|
{
|
||||||
irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
|
|
||||||
irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
|
irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -13052,7 +13029,7 @@ static int hclge_init(void)
|
|||||||
{
|
{
|
||||||
pr_info("%s is initializing\n", HCLGE_NAME);
|
pr_info("%s is initializing\n", HCLGE_NAME);
|
||||||
|
|
||||||
hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
|
hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
|
||||||
if (!hclge_wq) {
|
if (!hclge_wq) {
|
||||||
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
|
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
@ -944,7 +944,6 @@ struct hclge_dev {
|
|||||||
|
|
||||||
/* affinity mask and notify for misc interrupt */
|
/* affinity mask and notify for misc interrupt */
|
||||||
cpumask_t affinity_mask;
|
cpumask_t affinity_mask;
|
||||||
struct irq_affinity_notify affinity_notify;
|
|
||||||
struct hclge_ptp *ptp;
|
struct hclge_ptp *ptp;
|
||||||
struct devlink *devlink;
|
struct devlink *devlink;
|
||||||
};
|
};
|
||||||
|
@ -2232,6 +2232,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
|
|||||||
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
|
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
|
||||||
{
|
{
|
||||||
if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
|
if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
|
||||||
|
test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
|
||||||
!test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
|
!test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
|
||||||
&hdev->state))
|
&hdev->state))
|
||||||
mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
|
mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
|
||||||
@ -3449,6 +3450,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
|
|||||||
|
|
||||||
hclgevf_init_rxd_adv_layout(hdev);
|
hclgevf_init_rxd_adv_layout(hdev);
|
||||||
|
|
||||||
|
set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
|
||||||
|
|
||||||
hdev->last_reset_time = jiffies;
|
hdev->last_reset_time = jiffies;
|
||||||
dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
|
dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
|
||||||
HCLGEVF_DRIVER_NAME);
|
HCLGEVF_DRIVER_NAME);
|
||||||
@ -3899,7 +3902,7 @@ static int hclgevf_init(void)
|
|||||||
{
|
{
|
||||||
pr_info("%s is initializing\n", HCLGEVF_NAME);
|
pr_info("%s is initializing\n", HCLGEVF_NAME);
|
||||||
|
|
||||||
hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
|
hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
|
||||||
if (!hclgevf_wq) {
|
if (!hclgevf_wq) {
|
||||||
pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
|
pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
@ -146,6 +146,7 @@ enum hclgevf_states {
|
|||||||
HCLGEVF_STATE_REMOVING,
|
HCLGEVF_STATE_REMOVING,
|
||||||
HCLGEVF_STATE_NIC_REGISTERED,
|
HCLGEVF_STATE_NIC_REGISTERED,
|
||||||
HCLGEVF_STATE_ROCE_REGISTERED,
|
HCLGEVF_STATE_ROCE_REGISTERED,
|
||||||
|
HCLGEVF_STATE_SERVICE_INITED,
|
||||||
/* task states */
|
/* task states */
|
||||||
HCLGEVF_STATE_RST_SERVICE_SCHED,
|
HCLGEVF_STATE_RST_SERVICE_SCHED,
|
||||||
HCLGEVF_STATE_RST_HANDLING,
|
HCLGEVF_STATE_RST_HANDLING,
|
||||||
|
@ -100,9 +100,9 @@ static void ice_display_lag_info(struct ice_lag *lag)
|
|||||||
*/
|
*/
|
||||||
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
|
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
|
||||||
{
|
{
|
||||||
struct net_device *event_netdev, *netdev_tmp;
|
|
||||||
struct netdev_notifier_bonding_info *info;
|
struct netdev_notifier_bonding_info *info;
|
||||||
struct netdev_bonding_info *bonding_info;
|
struct netdev_bonding_info *bonding_info;
|
||||||
|
struct net_device *event_netdev;
|
||||||
const char *lag_netdev_name;
|
const char *lag_netdev_name;
|
||||||
|
|
||||||
event_netdev = netdev_notifier_info_to_dev(ptr);
|
event_netdev = netdev_notifier_info_to_dev(ptr);
|
||||||
@ -123,19 +123,6 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
|
|||||||
goto lag_out;
|
goto lag_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
rcu_read_lock();
|
|
||||||
for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) {
|
|
||||||
if (!netif_is_ice(netdev_tmp))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (netdev_tmp && netdev_tmp != lag->netdev &&
|
|
||||||
lag->peer_netdev != netdev_tmp) {
|
|
||||||
dev_hold(netdev_tmp);
|
|
||||||
lag->peer_netdev = netdev_tmp;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
rcu_read_unlock();
|
|
||||||
|
|
||||||
if (bonding_info->slave.state)
|
if (bonding_info->slave.state)
|
||||||
ice_lag_set_backup(lag);
|
ice_lag_set_backup(lag);
|
||||||
else
|
else
|
||||||
@ -319,6 +306,9 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
|
|||||||
case NETDEV_BONDING_INFO:
|
case NETDEV_BONDING_INFO:
|
||||||
ice_lag_info_event(lag, ptr);
|
ice_lag_info_event(lag, ptr);
|
||||||
break;
|
break;
|
||||||
|
case NETDEV_UNREGISTER:
|
||||||
|
ice_lag_unlink(lag, ptr);
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -1571,6 +1571,9 @@ err_kworker:
|
|||||||
*/
|
*/
|
||||||
void ice_ptp_release(struct ice_pf *pf)
|
void ice_ptp_release(struct ice_pf *pf)
|
||||||
{
|
{
|
||||||
|
if (!test_bit(ICE_FLAG_PTP, pf->flags))
|
||||||
|
return;
|
||||||
|
|
||||||
/* Disable timestamping for both Tx and Rx */
|
/* Disable timestamping for both Tx and Rx */
|
||||||
ice_ptp_cfg_timestamp(pf, false);
|
ice_ptp_cfg_timestamp(pf, false);
|
||||||
|
|
||||||
|
@ -226,18 +226,85 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
|
|||||||
|
|
||||||
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
|
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
|
||||||
|
|
||||||
|
static void get_lf_str_list(struct rvu_block block, int pcifunc,
|
||||||
|
char *lfs)
|
||||||
|
{
|
||||||
|
int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
|
||||||
|
|
||||||
|
for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
|
||||||
|
if (lf >= block.lf.max)
|
||||||
|
break;
|
||||||
|
|
||||||
|
if (block.fn_map[lf] != pcifunc)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
if (lf == prev_lf + 1) {
|
||||||
|
prev_lf = lf;
|
||||||
|
seq = 1;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (seq)
|
||||||
|
len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
|
||||||
|
else
|
||||||
|
len += (len ? sprintf(lfs + len, ",%d", lf) :
|
||||||
|
sprintf(lfs + len, "%d", lf));
|
||||||
|
|
||||||
|
prev_lf = lf;
|
||||||
|
seq = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (seq)
|
||||||
|
len += sprintf(lfs + len, "-%d", prev_lf);
|
||||||
|
|
||||||
|
lfs[len] = '\0';
|
||||||
|
}
|
||||||
|
|
||||||
|
static int get_max_column_width(struct rvu *rvu)
|
||||||
|
{
|
||||||
|
int index, pf, vf, lf_str_size = 12, buf_size = 256;
|
||||||
|
struct rvu_block block;
|
||||||
|
u16 pcifunc;
|
||||||
|
char *buf;
|
||||||
|
|
||||||
|
buf = kzalloc(buf_size, GFP_KERNEL);
|
||||||
|
if (!buf)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
|
||||||
|
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
|
||||||
|
pcifunc = pf << 10 | vf;
|
||||||
|
if (!pcifunc)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
for (index = 0; index < BLK_COUNT; index++) {
|
||||||
|
block = rvu->hw->block[index];
|
||||||
|
if (!strlen(block.name))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
get_lf_str_list(block, pcifunc, buf);
|
||||||
|
if (lf_str_size <= strlen(buf))
|
||||||
|
lf_str_size = strlen(buf) + 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
kfree(buf);
|
||||||
|
return lf_str_size;
|
||||||
|
}
|
||||||
|
|
||||||
/* Dumps current provisioning status of all RVU block LFs */
|
/* Dumps current provisioning status of all RVU block LFs */
|
||||||
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
|
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
|
||||||
char __user *buffer,
|
char __user *buffer,
|
||||||
size_t count, loff_t *ppos)
|
size_t count, loff_t *ppos)
|
||||||
{
|
{
|
||||||
int index, off = 0, flag = 0, go_back = 0, len = 0;
|
int index, off = 0, flag = 0, len = 0, i = 0;
|
||||||
struct rvu *rvu = filp->private_data;
|
struct rvu *rvu = filp->private_data;
|
||||||
int lf, pf, vf, pcifunc;
|
int bytes_not_copied = 0;
|
||||||
struct rvu_block block;
|
struct rvu_block block;
|
||||||
int bytes_not_copied;
|
int pf, vf, pcifunc;
|
||||||
int lf_str_size = 12;
|
|
||||||
int buf_size = 2048;
|
int buf_size = 2048;
|
||||||
|
int lf_str_size;
|
||||||
char *lfs;
|
char *lfs;
|
||||||
char *buf;
|
char *buf;
|
||||||
|
|
||||||
@ -249,6 +316,9 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
|
|||||||
if (!buf)
|
if (!buf)
|
||||||
return -ENOSPC;
|
return -ENOSPC;
|
||||||
|
|
||||||
|
/* Get the maximum width of a column */
|
||||||
|
lf_str_size = get_max_column_width(rvu);
|
||||||
|
|
||||||
lfs = kzalloc(lf_str_size, GFP_KERNEL);
|
lfs = kzalloc(lf_str_size, GFP_KERNEL);
|
||||||
if (!lfs) {
|
if (!lfs) {
|
||||||
kfree(buf);
|
kfree(buf);
|
||||||
@ -262,65 +332,69 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
|
|||||||
"%-*s", lf_str_size,
|
"%-*s", lf_str_size,
|
||||||
rvu->hw->block[index].name);
|
rvu->hw->block[index].name);
|
||||||
}
|
}
|
||||||
|
|
||||||
off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
|
off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
|
||||||
|
bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
|
||||||
|
if (bytes_not_copied)
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
i++;
|
||||||
|
*ppos += off;
|
||||||
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
|
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
|
||||||
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
|
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
|
||||||
|
off = 0;
|
||||||
|
flag = 0;
|
||||||
pcifunc = pf << 10 | vf;
|
pcifunc = pf << 10 | vf;
|
||||||
if (!pcifunc)
|
if (!pcifunc)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (vf) {
|
if (vf) {
|
||||||
sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
|
sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
|
||||||
go_back = scnprintf(&buf[off],
|
off = scnprintf(&buf[off],
|
||||||
buf_size - 1 - off,
|
buf_size - 1 - off,
|
||||||
"%-*s", lf_str_size, lfs);
|
"%-*s", lf_str_size, lfs);
|
||||||
} else {
|
} else {
|
||||||
sprintf(lfs, "PF%d", pf);
|
sprintf(lfs, "PF%d", pf);
|
||||||
go_back = scnprintf(&buf[off],
|
off = scnprintf(&buf[off],
|
||||||
buf_size - 1 - off,
|
buf_size - 1 - off,
|
||||||
"%-*s", lf_str_size, lfs);
|
"%-*s", lf_str_size, lfs);
|
||||||
}
|
}
|
||||||
|
|
||||||
off += go_back;
|
for (index = 0; index < BLK_COUNT; index++) {
|
||||||
for (index = 0; index < BLKTYPE_MAX; index++) {
|
|
||||||
block = rvu->hw->block[index];
|
block = rvu->hw->block[index];
|
||||||
if (!strlen(block.name))
|
if (!strlen(block.name))
|
||||||
continue;
|
continue;
|
||||||
len = 0;
|
len = 0;
|
||||||
lfs[len] = '\0';
|
lfs[len] = '\0';
|
||||||
for (lf = 0; lf < block.lf.max; lf++) {
|
get_lf_str_list(block, pcifunc, lfs);
|
||||||
if (block.fn_map[lf] != pcifunc)
|
if (strlen(lfs))
|
||||||
continue;
|
|
||||||
flag = 1;
|
flag = 1;
|
||||||
len += sprintf(&lfs[len], "%d,", lf);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (flag)
|
|
||||||
len--;
|
|
||||||
lfs[len] = '\0';
|
|
||||||
off += scnprintf(&buf[off], buf_size - 1 - off,
|
off += scnprintf(&buf[off], buf_size - 1 - off,
|
||||||
"%-*s", lf_str_size, lfs);
|
"%-*s", lf_str_size, lfs);
|
||||||
if (!strlen(lfs))
|
|
||||||
go_back += lf_str_size;
|
|
||||||
}
|
}
|
||||||
if (!flag)
|
if (flag) {
|
||||||
off -= go_back;
|
off += scnprintf(&buf[off],
|
||||||
else
|
buf_size - 1 - off, "\n");
|
||||||
flag = 0;
|
bytes_not_copied = copy_to_user(buffer +
|
||||||
off--;
|
(i * off),
|
||||||
off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
|
buf, off);
|
||||||
|
if (bytes_not_copied)
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
i++;
|
||||||
|
*ppos += off;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bytes_not_copied = copy_to_user(buffer, buf, off);
|
out:
|
||||||
kfree(lfs);
|
kfree(lfs);
|
||||||
kfree(buf);
|
kfree(buf);
|
||||||
|
|
||||||
if (bytes_not_copied)
|
if (bytes_not_copied)
|
||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
|
|
||||||
*ppos = off;
|
return *ppos;
|
||||||
return off;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
|
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
|
||||||
@ -504,7 +578,7 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
|
|||||||
if (cmd_buf)
|
if (cmd_buf)
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
|
|
||||||
if (!strncmp(subtoken, "help", 4) || ret < 0) {
|
if (ret < 0 || !strncmp(subtoken, "help", 4)) {
|
||||||
dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
|
dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
|
||||||
goto qsize_write_done;
|
goto qsize_write_done;
|
||||||
}
|
}
|
||||||
@ -1719,6 +1793,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
|
|||||||
u16 pcifunc;
|
u16 pcifunc;
|
||||||
char *str;
|
char *str;
|
||||||
|
|
||||||
|
/* Ingress policers do not exist on all platforms */
|
||||||
|
if (!nix_hw->ipolicer)
|
||||||
|
return 0;
|
||||||
|
|
||||||
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
|
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
|
||||||
if (layer == BAND_PROF_INVAL_LAYER)
|
if (layer == BAND_PROF_INVAL_LAYER)
|
||||||
continue;
|
continue;
|
||||||
@ -1768,6 +1846,10 @@ static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
|
|||||||
int layer;
|
int layer;
|
||||||
char *str;
|
char *str;
|
||||||
|
|
||||||
|
/* Ingress policers do not exist on all platforms */
|
||||||
|
if (!nix_hw->ipolicer)
|
||||||
|
return 0;
|
||||||
|
|
||||||
seq_puts(m, "\nBandwidth profile resource free count\n");
|
seq_puts(m, "\nBandwidth profile resource free count\n");
|
||||||
seq_puts(m, "=====================================\n");
|
seq_puts(m, "=====================================\n");
|
||||||
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
|
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
|
||||||
|
@ -2507,6 +2507,9 @@ static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
nix_hw = get_nix_hw(rvu->hw, blkaddr);
|
nix_hw = get_nix_hw(rvu->hw, blkaddr);
|
||||||
|
if (!nix_hw)
|
||||||
|
return;
|
||||||
|
|
||||||
vlan = &nix_hw->txvlan;
|
vlan = &nix_hw->txvlan;
|
||||||
|
|
||||||
mutex_lock(&vlan->rsrc_lock);
|
mutex_lock(&vlan->rsrc_lock);
|
||||||
|
@ -353,13 +353,10 @@ static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
|
|||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
elem_info->u.rdq.skb = NULL;
|
|
||||||
skb = netdev_alloc_skb_ip_align(NULL, buf_len);
|
skb = netdev_alloc_skb_ip_align(NULL, buf_len);
|
||||||
if (!skb)
|
if (!skb)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
/* Assume that wqe was previously zeroed. */
|
|
||||||
|
|
||||||
err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
|
err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
|
||||||
buf_len, DMA_FROM_DEVICE);
|
buf_len, DMA_FROM_DEVICE);
|
||||||
if (err)
|
if (err)
|
||||||
@ -597,21 +594,26 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
|
|||||||
struct pci_dev *pdev = mlxsw_pci->pdev;
|
struct pci_dev *pdev = mlxsw_pci->pdev;
|
||||||
struct mlxsw_pci_queue_elem_info *elem_info;
|
struct mlxsw_pci_queue_elem_info *elem_info;
|
||||||
struct mlxsw_rx_info rx_info = {};
|
struct mlxsw_rx_info rx_info = {};
|
||||||
char *wqe;
|
char wqe[MLXSW_PCI_WQE_SIZE];
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
u16 byte_count;
|
u16 byte_count;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
|
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
|
||||||
skb = elem_info->u.sdq.skb;
|
skb = elem_info->u.rdq.skb;
|
||||||
if (!skb)
|
memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);
|
||||||
return;
|
|
||||||
wqe = elem_info->elem;
|
|
||||||
mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
|
|
||||||
|
|
||||||
if (q->consumer_counter++ != consumer_counter_limit)
|
if (q->consumer_counter++ != consumer_counter_limit)
|
||||||
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
|
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
|
||||||
|
|
||||||
|
err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
|
||||||
|
if (err) {
|
||||||
|
dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
|
mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
|
||||||
|
|
||||||
if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
|
if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
|
||||||
rx_info.is_lag = true;
|
rx_info.is_lag = true;
|
||||||
rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
|
rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
|
||||||
@ -647,10 +649,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
|
|||||||
skb_put(skb, byte_count);
|
skb_put(skb, byte_count);
|
||||||
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
|
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
|
||||||
|
|
||||||
memset(wqe, 0, q->elem_size);
|
out:
|
||||||
err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
|
|
||||||
if (err)
|
|
||||||
dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
|
|
||||||
/* Everything is set up, ring doorbell to pass elem to HW */
|
/* Everything is set up, ring doorbell to pass elem to HW */
|
||||||
q->producer_counter++;
|
q->producer_counter++;
|
||||||
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
|
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
|
||||||
|
@ -1743,6 +1743,16 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx)
|
|||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
goto cleanup;
|
goto cleanup;
|
||||||
}
|
}
|
||||||
|
if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
|
||||||
|
DMA_BIT_MASK(64))) {
|
||||||
|
if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
|
||||||
|
DMA_BIT_MASK(32))) {
|
||||||
|
dev_warn(&tx->adapter->pdev->dev,
|
||||||
|
"lan743x_: No suitable DMA available\n");
|
||||||
|
ret = -ENOMEM;
|
||||||
|
goto cleanup;
|
||||||
|
}
|
||||||
|
}
|
||||||
ring_allocation_size = ALIGN(tx->ring_size *
|
ring_allocation_size = ALIGN(tx->ring_size *
|
||||||
sizeof(struct lan743x_tx_descriptor),
|
sizeof(struct lan743x_tx_descriptor),
|
||||||
PAGE_SIZE);
|
PAGE_SIZE);
|
||||||
@ -1934,7 +1944,8 @@ static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
|
|||||||
index);
|
index);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
|
static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
|
||||||
|
gfp_t gfp)
|
||||||
{
|
{
|
||||||
struct net_device *netdev = rx->adapter->netdev;
|
struct net_device *netdev = rx->adapter->netdev;
|
||||||
struct device *dev = &rx->adapter->pdev->dev;
|
struct device *dev = &rx->adapter->pdev->dev;
|
||||||
@ -1948,7 +1959,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
|
|||||||
|
|
||||||
descriptor = &rx->ring_cpu_ptr[index];
|
descriptor = &rx->ring_cpu_ptr[index];
|
||||||
buffer_info = &rx->buffer_info[index];
|
buffer_info = &rx->buffer_info[index];
|
||||||
skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA);
|
skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
|
||||||
if (!skb)
|
if (!skb)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
|
dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
|
||||||
@ -2110,7 +2121,8 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
|
|||||||
|
|
||||||
/* save existing skb, allocate new skb and map to dma */
|
/* save existing skb, allocate new skb and map to dma */
|
||||||
skb = buffer_info->skb;
|
skb = buffer_info->skb;
|
||||||
if (lan743x_rx_init_ring_element(rx, rx->last_head)) {
|
if (lan743x_rx_init_ring_element(rx, rx->last_head,
|
||||||
|
GFP_ATOMIC | GFP_DMA)) {
|
||||||
/* failed to allocate next skb.
|
/* failed to allocate next skb.
|
||||||
* Memory is very low.
|
* Memory is very low.
|
||||||
* Drop this packet and reuse buffer.
|
* Drop this packet and reuse buffer.
|
||||||
@ -2276,6 +2288,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
|
|||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
goto cleanup;
|
goto cleanup;
|
||||||
}
|
}
|
||||||
|
if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
|
||||||
|
DMA_BIT_MASK(64))) {
|
||||||
|
if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
|
||||||
|
DMA_BIT_MASK(32))) {
|
||||||
|
dev_warn(&rx->adapter->pdev->dev,
|
||||||
|
"lan743x_: No suitable DMA available\n");
|
||||||
|
ret = -ENOMEM;
|
||||||
|
goto cleanup;
|
||||||
|
}
|
||||||
|
}
|
||||||
ring_allocation_size = ALIGN(rx->ring_size *
|
ring_allocation_size = ALIGN(rx->ring_size *
|
||||||
sizeof(struct lan743x_rx_descriptor),
|
sizeof(struct lan743x_rx_descriptor),
|
||||||
PAGE_SIZE);
|
PAGE_SIZE);
|
||||||
@ -2315,13 +2337,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
|
|||||||
|
|
||||||
rx->last_head = 0;
|
rx->last_head = 0;
|
||||||
for (index = 0; index < rx->ring_size; index++) {
|
for (index = 0; index < rx->ring_size; index++) {
|
||||||
ret = lan743x_rx_init_ring_element(rx, index);
|
ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto cleanup;
|
goto cleanup;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
cleanup:
|
cleanup:
|
||||||
|
netif_warn(rx->adapter, ifup, rx->adapter->netdev,
|
||||||
|
"Error allocating memory for LAN743x\n");
|
||||||
|
|
||||||
lan743x_rx_ring_cleanup(rx);
|
lan743x_rx_ring_cleanup(rx);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
@ -3019,6 +3044,8 @@ static int lan743x_pm_resume(struct device *dev)
|
|||||||
if (ret) {
|
if (ret) {
|
||||||
netif_err(adapter, probe, adapter->netdev,
|
netif_err(adapter, probe, adapter->netdev,
|
||||||
"lan743x_hardware_init returned %d\n", ret);
|
"lan743x_hardware_init returned %d\n", ret);
|
||||||
|
lan743x_pci_cleanup(adapter);
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* open netdev when netdev is at running state while resume.
|
/* open netdev when netdev is at running state while resume.
|
||||||
@@ -182,15 +182,21 @@ static int
 nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
-	unsigned int max_mtu;
+	struct nfp_bpf_vnic *bv;
+	struct bpf_prog *prog;
 
 	if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
 		return 0;
 
-	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
-	if (new_mtu > max_mtu) {
-		nn_info(nn, "BPF offload active, MTU over %u not supported\n",
-			max_mtu);
+	if (nn->xdp_hw.prog) {
+		prog = nn->xdp_hw.prog;
+	} else {
+		bv = nn->app_priv;
+		prog = bv->tc_prog;
+	}
+
+	if (nfp_bpf_offload_check_mtu(nn, prog, new_mtu)) {
+		nn_info(nn, "BPF offload active, potential packet access beyond hardware packet boundary");
 		return -EBUSY;
 	}
 	return 0;
@@ -560,6 +560,8 @@ bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
 int nfp_bpf_jit(struct nfp_prog *prog);
 bool nfp_bpf_supported_opcode(u8 code);
+bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
+			       unsigned int mtu);
 
 int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
 		    int prev_insn_idx);
@@ -481,19 +481,28 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
 	return 0;
 }
 
+bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
+			       unsigned int mtu)
+{
+	unsigned int fw_mtu, pkt_off;
+
+	fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+	pkt_off = min(prog->aux->max_pkt_offset, mtu);
+
+	return fw_mtu < pkt_off;
+}
+
 static int
 nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
 		 struct netlink_ext_ack *extack)
 {
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
-	unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
+	unsigned int max_stack, max_prog_len;
 	dma_addr_t dma_addr;
 	void *img;
 	int err;
 
-	fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
-	pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
-	if (fw_mtu < pkt_off) {
+	if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) {
 		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
 		return -EOPNOTSUPP;
 	}
@@ -1015,9 +1015,6 @@ static int lpc_eth_close(struct net_device *ndev)
 	napi_disable(&pldat->napi);
 	netif_stop_queue(ndev);
 
-	if (ndev->phydev)
-		phy_stop(ndev->phydev);
-
 	spin_lock_irqsave(&pldat->lock, flags);
 	__lpc_eth_reset(pldat);
 	netif_carrier_off(ndev);
@@ -1025,6 +1022,8 @@ static int lpc_eth_close(struct net_device *ndev)
 	writel(0, LPC_ENET_MAC2(pldat->net_base));
 	spin_unlock_irqrestore(&pldat->lock, flags);
 
+	if (ndev->phydev)
+		phy_stop(ndev->phydev);
 	clk_disable_unprepare(pldat->clk);
 
 	return 0;
@@ -157,6 +157,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
 	{ PCI_VDEVICE(REALTEK,	0x8129) },
 	{ PCI_VDEVICE(REALTEK,	0x8136), RTL_CFG_NO_GBIT },
 	{ PCI_VDEVICE(REALTEK,	0x8161) },
+	{ PCI_VDEVICE(REALTEK,	0x8162) },
 	{ PCI_VDEVICE(REALTEK,	0x8167) },
 	{ PCI_VDEVICE(REALTEK,	0x8168) },
 	{ PCI_VDEVICE(NCUBE,	0x8168) },
@ -243,62 +243,10 @@ static void phy_sanitize_settings(struct phy_device *phydev)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int phy_ethtool_ksettings_set(struct phy_device *phydev,
|
|
||||||
const struct ethtool_link_ksettings *cmd)
|
|
||||||
{
|
|
||||||
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
|
|
||||||
u8 autoneg = cmd->base.autoneg;
|
|
||||||
u8 duplex = cmd->base.duplex;
|
|
||||||
u32 speed = cmd->base.speed;
|
|
||||||
|
|
||||||
if (cmd->base.phy_address != phydev->mdio.addr)
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
linkmode_copy(advertising, cmd->link_modes.advertising);
|
|
||||||
|
|
||||||
/* We make sure that we don't pass unsupported values in to the PHY */
|
|
||||||
linkmode_and(advertising, advertising, phydev->supported);
|
|
||||||
|
|
||||||
/* Verify the settings we care about. */
|
|
||||||
if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
if (autoneg == AUTONEG_DISABLE &&
|
|
||||||
((speed != SPEED_1000 &&
|
|
||||||
speed != SPEED_100 &&
|
|
||||||
speed != SPEED_10) ||
|
|
||||||
(duplex != DUPLEX_HALF &&
|
|
||||||
duplex != DUPLEX_FULL)))
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
phydev->autoneg = autoneg;
|
|
||||||
|
|
||||||
if (autoneg == AUTONEG_DISABLE) {
|
|
||||||
phydev->speed = speed;
|
|
||||||
phydev->duplex = duplex;
|
|
||||||
}
|
|
||||||
|
|
||||||
linkmode_copy(phydev->advertising, advertising);
|
|
||||||
|
|
||||||
linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
|
|
||||||
phydev->advertising, autoneg == AUTONEG_ENABLE);
|
|
||||||
|
|
||||||
phydev->master_slave_set = cmd->base.master_slave_cfg;
|
|
||||||
phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
|
|
||||||
|
|
||||||
/* Restart the PHY */
|
|
||||||
phy_start_aneg(phydev);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(phy_ethtool_ksettings_set);
|
|
||||||
|
|
||||||
void phy_ethtool_ksettings_get(struct phy_device *phydev,
|
void phy_ethtool_ksettings_get(struct phy_device *phydev,
|
||||||
struct ethtool_link_ksettings *cmd)
|
struct ethtool_link_ksettings *cmd)
|
||||||
{
|
{
|
||||||
|
mutex_lock(&phydev->lock);
|
||||||
linkmode_copy(cmd->link_modes.supported, phydev->supported);
|
linkmode_copy(cmd->link_modes.supported, phydev->supported);
|
||||||
linkmode_copy(cmd->link_modes.advertising, phydev->advertising);
|
linkmode_copy(cmd->link_modes.advertising, phydev->advertising);
|
||||||
linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising);
|
linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising);
|
||||||
@ -317,6 +265,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
|
|||||||
cmd->base.autoneg = phydev->autoneg;
|
cmd->base.autoneg = phydev->autoneg;
|
||||||
cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
|
cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
|
||||||
cmd->base.eth_tp_mdix = phydev->mdix;
|
cmd->base.eth_tp_mdix = phydev->mdix;
|
||||||
|
mutex_unlock(&phydev->lock);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(phy_ethtool_ksettings_get);
|
EXPORT_SYMBOL(phy_ethtool_ksettings_get);
|
||||||
|
|
||||||
@ -750,6 +699,37 @@ static int phy_check_link_status(struct phy_device *phydev)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* _phy_start_aneg - start auto-negotiation for this PHY device
|
||||||
|
* @phydev: the phy_device struct
|
||||||
|
*
|
||||||
|
* Description: Sanitizes the settings (if we're not autonegotiating
|
||||||
|
* them), and then calls the driver's config_aneg function.
|
||||||
|
* If the PHYCONTROL Layer is operating, we change the state to
|
||||||
|
* reflect the beginning of Auto-negotiation or forcing.
|
||||||
|
*/
|
||||||
|
static int _phy_start_aneg(struct phy_device *phydev)
|
||||||
|
{
|
||||||
|
int err;
|
||||||
|
|
||||||
|
lockdep_assert_held(&phydev->lock);
|
||||||
|
|
||||||
|
if (!phydev->drv)
|
||||||
|
return -EIO;
|
||||||
|
|
||||||
|
if (AUTONEG_DISABLE == phydev->autoneg)
|
||||||
|
phy_sanitize_settings(phydev);
|
||||||
|
|
||||||
|
err = phy_config_aneg(phydev);
|
||||||
|
if (err < 0)
|
||||||
|
return err;
|
||||||
|
|
||||||
|
if (phy_is_started(phydev))
|
||||||
|
err = phy_check_link_status(phydev);
|
||||||
|
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* phy_start_aneg - start auto-negotiation for this PHY device
|
* phy_start_aneg - start auto-negotiation for this PHY device
|
||||||
* @phydev: the phy_device struct
|
* @phydev: the phy_device struct
|
||||||
@ -763,21 +743,8 @@ int phy_start_aneg(struct phy_device *phydev)
|
|||||||
{
|
{
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
if (!phydev->drv)
|
|
||||||
return -EIO;
|
|
||||||
|
|
||||||
mutex_lock(&phydev->lock);
|
mutex_lock(&phydev->lock);
|
||||||
|
err = _phy_start_aneg(phydev);
|
||||||
if (AUTONEG_DISABLE == phydev->autoneg)
|
|
||||||
phy_sanitize_settings(phydev);
|
|
||||||
|
|
||||||
err = phy_config_aneg(phydev);
|
|
||||||
if (err < 0)
|
|
||||||
goto out_unlock;
|
|
||||||
|
|
||||||
if (phy_is_started(phydev))
|
|
||||||
err = phy_check_link_status(phydev);
|
|
||||||
out_unlock:
|
|
||||||
mutex_unlock(&phydev->lock);
|
mutex_unlock(&phydev->lock);
|
||||||
|
|
||||||
return err;
|
return err;
|
||||||
@ -800,6 +767,61 @@ static int phy_poll_aneg_done(struct phy_device *phydev)
|
|||||||
return ret < 0 ? ret : 0;
|
return ret < 0 ? ret : 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int phy_ethtool_ksettings_set(struct phy_device *phydev,
|
||||||
|
const struct ethtool_link_ksettings *cmd)
|
||||||
|
{
|
||||||
|
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
|
||||||
|
u8 autoneg = cmd->base.autoneg;
|
||||||
|
u8 duplex = cmd->base.duplex;
|
||||||
|
u32 speed = cmd->base.speed;
|
||||||
|
|
||||||
|
if (cmd->base.phy_address != phydev->mdio.addr)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
linkmode_copy(advertising, cmd->link_modes.advertising);
|
||||||
|
|
||||||
|
/* We make sure that we don't pass unsupported values in to the PHY */
|
||||||
|
linkmode_and(advertising, advertising, phydev->supported);
|
||||||
|
|
||||||
|
/* Verify the settings we care about. */
|
||||||
|
if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
if (autoneg == AUTONEG_DISABLE &&
|
||||||
|
((speed != SPEED_1000 &&
|
||||||
|
speed != SPEED_100 &&
|
||||||
|
speed != SPEED_10) ||
|
||||||
|
(duplex != DUPLEX_HALF &&
|
||||||
|
duplex != DUPLEX_FULL)))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
mutex_lock(&phydev->lock);
|
||||||
|
phydev->autoneg = autoneg;
|
||||||
|
|
||||||
|
if (autoneg == AUTONEG_DISABLE) {
|
||||||
|
phydev->speed = speed;
|
||||||
|
phydev->duplex = duplex;
|
||||||
|
}
|
||||||
|
|
||||||
|
linkmode_copy(phydev->advertising, advertising);
|
||||||
|
|
||||||
|
linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
|
||||||
|
phydev->advertising, autoneg == AUTONEG_ENABLE);
|
||||||
|
|
||||||
|
phydev->master_slave_set = cmd->base.master_slave_cfg;
|
||||||
|
phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
|
||||||
|
|
||||||
|
/* Restart the PHY */
|
||||||
|
_phy_start_aneg(phydev);
|
||||||
|
|
||||||
|
mutex_unlock(&phydev->lock);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(phy_ethtool_ksettings_set);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* phy_speed_down - set speed to lowest speed supported by both link partners
|
* phy_speed_down - set speed to lowest speed supported by both link partners
|
||||||
* @phydev: the phy_device struct
|
* @phydev: the phy_device struct
|
||||||
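The phy.c rework above is an instance of a common kernel locking pattern: the exported entry point takes the mutex, while an underscore-prefixed helper assumes the lock is held and documents that with lockdep_assert_held(), so other lock-holding callers (here phy_ethtool_ksettings_set()) can reuse the logic without self-deadlocking. A minimal sketch of the pattern, with hypothetical names:

    #include <linux/mutex.h>
    #include <linux/lockdep.h>

    struct foo_device {
    	struct mutex lock;
    	int state;
    };

    /* Caller must hold dev->lock; lockdep verifies this on debug kernels. */
    static int _foo_update(struct foo_device *dev, int state)
    {
    	lockdep_assert_held(&dev->lock);
    	dev->state = state;
    	return 0;
    }

    /* Public entry point: takes the lock, then calls the locked helper. */
    int foo_update(struct foo_device *dev, int state)
    {
    	int err;

    	mutex_lock(&dev->lock);
    	err = _foo_update(dev, state);
    	mutex_unlock(&dev->lock);

    	return err;
    }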
@@ -4122,6 +4122,12 @@ static int lan78xx_probe(struct usb_interface *intf,
 
 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
 
+	/* Reject broken descriptors. */
+	if (dev->maxpacket == 0) {
+		ret = -ENODEV;
+		goto out4;
+	}
+
 	/* driver requires remote-wakeup capability during autosuspend. */
 	intf->needs_remote_wakeup = 1;
 
@@ -1790,6 +1790,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
 	if (dev->maxpacket == 0) {
 		/* that is a broken device */
+		status = -ENODEV;
 		goto out4;
 	}
 
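The two probe hunks above (lan78xx and usbnet) reject devices whose endpoint descriptor reports a zero maximum packet size before the value is ever consumed; usbnet later uses dev->maxpacket as the divisor of a modulo test on the transmit path, so a malformed or malicious descriptor could otherwise trigger a division by zero. The shape of the check, reduced to a sketch with a hypothetical helper:

    #include <linux/errno.h>

    /* Validate a device-supplied size before using it as a divisor. */
    static int tx_needs_zlp(unsigned int maxpacket, unsigned int length)
    {
    	if (maxpacket == 0)
    		return -ENODEV;	/* broken descriptor: refuse to bind */

    	/* a transfer filling the last packet needs a zero-length packet */
    	return (length % maxpacket) == 0;
    }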
@@ -3833,7 +3833,6 @@ vmxnet3_suspend(struct device *device)
 	vmxnet3_free_intr_resources(adapter);
 
 	netif_device_detach(netdev);
-	netif_tx_stop_all_queues(netdev);
 
 	/* Create wake-up filters. */
 	pmConf = adapter->pm_conf;
@@ -1730,6 +1730,10 @@ static int netfront_resume(struct xenbus_device *dev)
 
 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
+	netif_tx_lock_bh(info->netdev);
+	netif_device_detach(info->netdev);
+	netif_tx_unlock_bh(info->netdev);
+
 	xennet_disconnect_backend(info);
 	return 0;
 }
@@ -2349,6 +2353,10 @@ static int xennet_connect(struct net_device *dev)
 	 * domain a kick because we've probably just requeued some
 	 * packets.
 	 */
+	netif_tx_lock_bh(np->netdev);
+	netif_device_attach(np->netdev);
+	netif_tx_unlock_bh(np->netdev);
+
 	netif_carrier_on(np->netdev);
 	for (j = 0; j < num_queues; ++j) {
 		queue = &np->queues[j];
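Both xen-netfront hunks take netif_tx_lock_bh() around the detach and attach so the transmit path, which tests the device's present bit under the same lock, cannot race with the backend being disconnected or reconnected. The quiesce half of the pattern in isolation (a sketch, hypothetical driver name):

    #include <linux/netdevice.h>

    /* Stop racing transmitters before tearing down shared state. */
    static void mydev_quiesce_tx(struct net_device *ndev)
    {
    	netif_tx_lock_bh(ndev);		/* excludes concurrent ndo_start_xmit */
    	netif_device_detach(ndev);	/* marks the device not present */
    	netif_tx_unlock_bh(ndev);
    }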
@@ -1006,11 +1006,11 @@ static u64 port100_get_command_type_mask(struct port100 *dev)
 
 	skb = port100_alloc_skb(dev, 0);
 	if (!skb)
-		return -ENOMEM;
+		return 0;
 
 	resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb);
 	if (IS_ERR(resp))
-		return PTR_ERR(resp);
+		return 0;
 
 	if (resp->len < 8)
 		mask = 0;
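port100_get_command_type_mask() returns a u64 bitmask, so the old error returns were silently converted into enormous "valid" masks; after the fix, 0 doubles as the failure value and the caller's existing empty-mask check handles it. The bug class in miniature:

    #include <linux/types.h>
    #include <linux/errno.h>

    u64 get_mask_buggy(void)
    {
    	return -ENOMEM;	/* implicit conversion: 0xfffffffffffffff4 */
    }

    u64 get_mask_fixed(void)
    {
    	return 0;	/* the empty mask doubles as the error value */
    }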
@@ -333,26 +333,6 @@ static const struct attribute_group *pmem_attribute_groups[] = {
 	NULL,
 };
 
-static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
-{
-	struct pmem_device *pmem = pgmap->owner;
-
-	blk_cleanup_disk(pmem->disk);
-}
-
-static void pmem_release_queue(void *pgmap)
-{
-	pmem_pagemap_cleanup(pgmap);
-}
-
-static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
-{
-	struct request_queue *q =
-		container_of(pgmap->ref, struct request_queue, q_usage_counter);
-
-	blk_freeze_queue_start(q);
-}
-
 static void pmem_release_disk(void *__pmem)
 {
 	struct pmem_device *pmem = __pmem;
@@ -360,12 +340,9 @@ static void pmem_release_disk(void *__pmem)
 	kill_dax(pmem->dax_dev);
 	put_dax(pmem->dax_dev);
 	del_gendisk(pmem->disk);
-}
 
-static const struct dev_pagemap_ops fsdax_pagemap_ops = {
-	.kill = pmem_pagemap_kill,
-	.cleanup = pmem_pagemap_cleanup,
-};
+	blk_cleanup_disk(pmem->disk);
+}
 
 static int pmem_attach_disk(struct device *dev,
 		struct nd_namespace_common *ndns)
@@ -427,10 +404,8 @@ static int pmem_attach_disk(struct device *dev,
 	pmem->disk = disk;
 	pmem->pgmap.owner = pmem;
 	pmem->pfn_flags = PFN_DEV;
-	pmem->pgmap.ref = &q->q_usage_counter;
 	if (is_nd_pfn(dev)) {
 		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
-		pmem->pgmap.ops = &fsdax_pagemap_ops;
 		addr = devm_memremap_pages(dev, &pmem->pgmap);
 		pfn_sb = nd_pfn->pfn_sb;
 		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -444,16 +419,12 @@ static int pmem_attach_disk(struct device *dev,
 		pmem->pgmap.range.end = res->end;
 		pmem->pgmap.nr_range = 1;
 		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
-		pmem->pgmap.ops = &fsdax_pagemap_ops;
 		addr = devm_memremap_pages(dev, &pmem->pgmap);
 		pmem->pfn_flags |= PFN_MAP;
 		bb_range = pmem->pgmap.range;
 	} else {
 		addr = devm_memremap(dev, pmem->phys_addr,
 				pmem->size, ARCH_MEMREMAP_PMEM);
-		if (devm_add_action_or_reset(dev, pmem_release_queue,
-					&pmem->pgmap))
-			return -ENOMEM;
 		bb_range.start = res->start;
 		bb_range.end = res->end;
 	}
@@ -926,12 +926,14 @@ static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
+	int req_data_len = req->data_len;
 
 	while (true) {
 		struct page *page = nvme_tcp_req_cur_page(req);
 		size_t offset = nvme_tcp_req_cur_offset(req);
 		size_t len = nvme_tcp_req_cur_length(req);
 		bool last = nvme_tcp_pdu_last_send(req, len);
+		int req_data_sent = req->data_sent;
 		int ret, flags = MSG_DONTWAIT;
 
 		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
@@ -958,7 +960,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 		 * in the request where we don't want to modify it as we may
 		 * compete with the RX path completing the request.
 		 */
-		if (req->data_sent + ret < req->data_len)
+		if (req_data_sent + ret < req_data_len)
 			nvme_tcp_advance_req(req, ret);
 
 		/* fully successful last send in current PDU */
@@ -1048,10 +1050,11 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
+	size_t offset = req->offset;
 	int ret;
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 	struct kvec iov = {
-		.iov_base = &req->ddgst + req->offset,
+		.iov_base = (u8 *)&req->ddgst + req->offset,
 		.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
 	};
 
@@ -1064,7 +1067,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 	if (unlikely(ret <= 0))
 		return ret;
 
-	if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
+	if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
 		nvme_tcp_done_send_req(queue);
 		return 1;
 	}
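The (u8 *) cast here, and in the matching nvmet-tcp hunk below, fixes a pointer-arithmetic scaling bug: &req->ddgst points at a 32-bit digest, so adding the partial-send byte offset to it advanced in 4-byte strides and overran the field whenever a send resumed mid-digest. In miniature:

    #include <linux/types.h>

    /* Resume offsets are in bytes, so apply them to a byte pointer. */
    static void *digest_resume_ptr(__le32 *ddgst, size_t sent)
    {
    	return (u8 *)ddgst + sent;	/* ddgst + sent would skip sent * 4 bytes */
    }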
@@ -702,7 +702,7 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 	struct nvmet_tcp_queue *queue = cmd->queue;
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 	struct kvec iov = {
-		.iov_base = &cmd->exp_ddgst + cmd->offset,
+		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
 		.iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
 	};
 	int ret;
@@ -1096,7 +1096,7 @@ recv:
 	}
 
 	if (queue->hdr_digest &&
-	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
+	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
 		nvmet_tcp_fatal_error(queue); /* fatal */
 		return -EPROTO;
 	}
@@ -1428,6 +1428,7 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
 
 static void nvmet_tcp_release_queue_work(struct work_struct *w)
 {
+	struct page *page;
 	struct nvmet_tcp_queue *queue =
 		container_of(w, struct nvmet_tcp_queue, release_work);
 
@@ -1447,6 +1448,8 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
 	nvmet_tcp_free_crypto(queue);
 	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
 
+	page = virt_to_head_page(queue->pf_cache.va);
+	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
 	kfree(queue);
 }
 
@@ -147,8 +147,8 @@ config RESET_OXNAS
 	bool
 
 config RESET_PISTACHIO
-	bool "Pistachio Reset Driver" if COMPILE_TEST
-	default MACH_PISTACHIO
+	bool "Pistachio Reset Driver"
+	depends on MIPS || COMPILE_TEST
 	help
 	  This enables the reset driver for ImgTec Pistachio SoCs.
 
@@ -38,7 +38,7 @@ static int brcm_rescal_reset_set(struct reset_controller_dev *rcdev,
 	}
 
 	ret = readl_poll_timeout(base + BRCM_RESCAL_STATUS, reg,
-				 !(reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
+				 (reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
 	if (ret) {
 		dev_err(data->dev, "time out on SATA/PCIe rescal\n");
 		return ret;
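readl_poll_timeout() spins until its condition argument evaluates true, so the polarity of that expression is the entire fix: the hardware sets BRCM_RESCAL_STATUS_BIT on completion, and the negated test made the poll "succeed" immediately and then time out once calibration actually finished. Usage in miniature, with a hypothetical register layout:

    #include <linux/iopoll.h>
    #include <linux/bits.h>

    #define CAL_DONE	BIT(0)	/* hypothetical completion bit */

    /* The third argument is the *success* condition, re-evaluated
     * after every read until it holds or the timeout expires. */
    static int wait_calibration_done(void __iomem *status)
    {
    	u32 reg;

    	return readl_poll_timeout(status, reg, reg & CAL_DONE,
    				  100, 1000);	/* 100 us poll, 1 ms budget */
    }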
@@ -92,3 +92,29 @@ void __init socfpga_reset_init(void)
 	for_each_matching_node(np, socfpga_early_reset_dt_ids)
 		a10_reset_init(np);
 }
+
+/*
+ * The early driver is problematic, because it doesn't register
+ * itself as a driver. This causes certain device links to prevent
+ * consumer devices from probing. The hacky solution is to register
+ * an empty driver, whose only job is to attach itself to the reset
+ * manager and call probe.
+ */
+static const struct of_device_id socfpga_reset_dt_ids[] = {
+	{ .compatible = "altr,rst-mgr", },
+	{ /* sentinel */ },
+};
+
+static int reset_simple_probe(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static struct platform_driver reset_socfpga_driver = {
+	.probe	= reset_simple_probe,
+	.driver = {
+		.name		= "socfpga-reset",
+		.of_match_table	= socfpga_reset_dt_ids,
+	},
+};
+builtin_platform_driver(reset_socfpga_driver);
@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
 	struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
 	struct mrq_reset_request request;
 	struct tegra_bpmp_message msg;
+	int err;
 
 	memset(&request, 0, sizeof(request));
 	request.cmd = command;
@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
 	msg.tx.data = &request;
 	msg.tx.size = sizeof(request);
 
-	return tegra_bpmp_transfer(bpmp, &msg);
+	err = tegra_bpmp_transfer(bpmp, &msg);
+	if (err)
+		return err;
+	if (msg.rx.ret)
+		return -EINVAL;
+
+	return 0;
 }
 
 static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
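The tegra-bpmp hunk separates two failure domains that a single return value used to conflate: the transfer call only reports whether the message reached the firmware, while the firmware's own verdict comes back inside the reply and previously went unchecked. The general shape of such an RPC wrapper, as a sketch with hypothetical names and types:

    #include <linux/errno.h>

    struct rpc_reply { int ret; };
    struct rpc_msg { struct rpc_reply rx; };
    struct rpc_chan;

    int rpc_transfer(struct rpc_chan *chan, struct rpc_msg *msg); /* hypothetical */

    /* Surface both transport failures and the peer's own status,
     * which travels inside the reply payload. */
    static int rpc_call_checked(struct rpc_chan *chan, struct rpc_msg *msg)
    {
    	int err = rpc_transfer(chan, msg);

    	if (err)		/* message never got through */
    		return err;
    	if (msg->rx.ret)	/* delivered, but the peer refused it */
    		return -EINVAL;

    	return 0;
    }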
@@ -1696,6 +1696,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 
 	spin_lock_irqsave(&evt->queue->l_lock, flags);
 	list_add_tail(&evt->queue_list, &evt->queue->sent);
+	atomic_set(&evt->active, 1);
 
 	mb();
 
@@ -1710,6 +1711,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 				     be64_to_cpu(crq_as_u64[1]));
 
 	if (rc) {
+		atomic_set(&evt->active, 0);
 		list_del(&evt->queue_list);
 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
 		del_timer(&evt->timer);
@@ -1737,7 +1739,6 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
 
 		evt->done(evt);
 	} else {
-		atomic_set(&evt->active, 1);
 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
 		ibmvfc_trc_start(evt);
 	}
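The ibmvfc change moves the active flag in front of the send: once the CRQ doorbell fires, the interrupt handler may complete the event immediately, and marking it active only afterwards (as the removed else-branch line did) left a window where the completion path saw an inactive event. The publish-before-submit shape, reduced to a sketch with hypothetical types:

    #include <linux/atomic.h>

    struct my_event {
    	atomic_t active;
    	/* ... hardware descriptor, list linkage ... */
    };

    int hw_send(struct my_event *evt);	/* hypothetical doorbell/hcall */

    static int submit_event(struct my_event *evt)
    {
    	int rc;

    	atomic_set(&evt->active, 1);	/* completer may fire immediately */
    	mb();				/* order the flag before the send */

    	rc = hw_send(evt);
    	if (rc)
    		atomic_set(&evt->active, 0);	/* submission never went out */

    	return rc;
    }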
@@ -5065,9 +5065,12 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
 	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
 
-	if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
-		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
-			MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
+	if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
+		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
+
+	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
+		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
+
 		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
 			cpu_to_be32(scsi_prot_ref_tag(scmd));
 	}
@@ -642,9 +642,9 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
 	}
 
 	/* setting for three timeout values for traffic class #0 */
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 8064);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 28224);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 20160);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160);
 
 	return 0;
 out:
@@ -2737,12 +2737,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
 	lrbp->req_abort_skip = false;
 
-	err = ufshpb_prep(hba, lrbp);
-	if (err == -EAGAIN) {
-		lrbp->cmd = NULL;
-		ufshcd_release(hba);
-		goto out;
-	}
+	ufshpb_prep(hba, lrbp);
 
 	ufshcd_comp_scsi_upiu(hba, lrbp);
 
@@ -84,16 +84,6 @@ static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
 	return transfer_len <= hpb->pre_req_max_tr_len;
 }
 
-/*
- * In this driver, WRITE_BUFFER CMD support 36KB (len=9) ~ 1MB (len=256) as
- * default. It is possible to change range of transfer_len through sysfs.
- */
-static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
-{
-	return len > hpb->pre_req_min_tr_len &&
-	       len <= hpb->pre_req_max_tr_len;
-}
-
 static bool ufshpb_is_general_lun(int lun)
 {
 	return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
@@ -334,7 +324,7 @@ ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
 
 static void
 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
-			    __be64 ppn, u8 transfer_len, int read_id)
+			    __be64 ppn, u8 transfer_len)
 {
 	unsigned char *cdb = lrbp->cmd->cmnd;
 	__be64 ppn_tmp = ppn;
@@ -346,256 +336,11 @@ ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
 	/* ppn value is stored as big-endian in the host memory */
 	memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
 	cdb[14] = transfer_len;
-	cdb[15] = read_id;
+	cdb[15] = 0;
 
 	lrbp->cmd->cmd_len = UFS_CDB_SIZE;
 }
 
-static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
-					    unsigned long lpn, unsigned int len,
-					    int read_id)
-{
-	cdb[0] = UFSHPB_WRITE_BUFFER;
-	cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;
-
-	put_unaligned_be32(lpn, &cdb[2]);
-	cdb[6] = read_id;
-	put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);
-
-	cdb[9] = 0x00;	/* Control = 0x00 */
-}
-
-static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
-{
-	struct ufshpb_req *pre_req;
-
-	if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
-		dev_info(&hpb->sdev_ufs_lu->sdev_dev,
-			 "pre_req throttle. inflight %d throttle %d",
-			 hpb->num_inflight_pre_req, hpb->throttle_pre_req);
-		return NULL;
-	}
-
-	pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
-					   struct ufshpb_req, list_req);
-	if (!pre_req) {
-		dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
-		return NULL;
-	}
-
-	list_del_init(&pre_req->list_req);
-	hpb->num_inflight_pre_req++;
-
-	return pre_req;
-}
-
-static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
-				      struct ufshpb_req *pre_req)
-{
-	pre_req->req = NULL;
-	bio_reset(pre_req->bio);
-	list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
-	hpb->num_inflight_pre_req--;
-}
-
-static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
-{
-	struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
-	struct ufshpb_lu *hpb = pre_req->hpb;
-	unsigned long flags;
-
-	if (error) {
-		struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-		struct scsi_sense_hdr sshdr;
-
-		dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
-		scsi_command_normalize_sense(cmd, &sshdr);
-		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
-			"code %x sense_key %x asc %x ascq %x",
-			sshdr.response_code,
-			sshdr.sense_key, sshdr.asc, sshdr.ascq);
-		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
-			"byte4 %x byte5 %x byte6 %x additional_len %x",
-			sshdr.byte4, sshdr.byte5,
-			sshdr.byte6, sshdr.additional_length);
-	}
-
-	blk_mq_free_request(req);
-	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-	ufshpb_put_pre_req(pre_req->hpb, pre_req);
-	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-}
-
-static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
-{
-	struct ufshpb_lu *hpb = pre_req->hpb;
-	struct ufshpb_region *rgn;
-	struct ufshpb_subregion *srgn;
-	__be64 *addr;
-	int offset = 0;
-	int copied;
-	unsigned long lpn = pre_req->wb.lpn;
-	int rgn_idx, srgn_idx, srgn_offset;
-	unsigned long flags;
-
-	addr = page_address(page);
-	ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
-
-	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-
-next_offset:
-	rgn = hpb->rgn_tbl + rgn_idx;
-	srgn = rgn->srgn_tbl + srgn_idx;
-
-	if (!ufshpb_is_valid_srgn(rgn, srgn))
-		goto mctx_error;
-
-	if (!srgn->mctx)
-		goto mctx_error;
-
-	copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
-					   pre_req->wb.len - offset,
-					   &addr[offset]);
-
-	if (copied < 0)
-		goto mctx_error;
-
-	offset += copied;
-	srgn_offset += copied;
-
-	if (srgn_offset == hpb->entries_per_srgn) {
-		srgn_offset = 0;
-
-		if (++srgn_idx == hpb->srgns_per_rgn) {
-			srgn_idx = 0;
-			rgn_idx++;
-		}
-	}
-
-	if (offset < pre_req->wb.len)
-		goto next_offset;
-
-	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-	return 0;
-mctx_error:
-	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-	return -ENOMEM;
-}
-
-static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
-				       struct request_queue *q,
-				       struct ufshpb_req *pre_req)
-{
-	struct page *page = pre_req->wb.m_page;
-	struct bio *bio = pre_req->bio;
-	int entries_bytes, ret;
-
-	if (!page)
-		return -ENOMEM;
-
-	if (ufshpb_prep_entry(pre_req, page))
-		return -ENOMEM;
-
-	entries_bytes = pre_req->wb.len * sizeof(__be64);
-
-	ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
-	if (ret != entries_bytes) {
-		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
-			"bio_add_pc_page fail: %d", ret);
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
-{
-	if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
-		hpb->cur_read_id = 1;
-	return hpb->cur_read_id;
-}
-
-static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
-				  struct ufshpb_req *pre_req, int read_id)
-{
-	struct scsi_device *sdev = cmd->device;
-	struct request_queue *q = sdev->request_queue;
-	struct request *req;
-	struct scsi_request *rq;
-	struct bio *bio = pre_req->bio;
-
-	pre_req->hpb = hpb;
-	pre_req->wb.lpn = sectors_to_logical(cmd->device,
-					     blk_rq_pos(scsi_cmd_to_rq(cmd)));
-	pre_req->wb.len = sectors_to_logical(cmd->device,
-					     blk_rq_sectors(scsi_cmd_to_rq(cmd)));
-	if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
-		return -ENOMEM;
-
-	req = pre_req->req;
-
-	/* 1. request setup */
-	blk_rq_append_bio(req, bio);
-	req->rq_disk = NULL;
-	req->end_io_data = (void *)pre_req;
-	req->end_io = ufshpb_pre_req_compl_fn;
-
-	/* 2. scsi_request setup */
-	rq = scsi_req(req);
-	rq->retries = 1;
-
-	ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
-				 read_id);
-	rq->cmd_len = scsi_command_size(rq->cmd);
-
-	if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
-		return -EAGAIN;
-
-	hpb->stats.pre_req_cnt++;
-
-	return 0;
-}
-
-static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
-				int *read_id)
-{
-	struct ufshpb_req *pre_req;
-	struct request *req = NULL;
-	unsigned long flags;
-	int _read_id;
-	int ret = 0;
-
-	req = blk_get_request(cmd->device->request_queue,
-			      REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
-	if (IS_ERR(req))
-		return -EAGAIN;
-
-	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-	pre_req = ufshpb_get_pre_req(hpb);
-	if (!pre_req) {
-		ret = -EAGAIN;
-		goto unlock_out;
-	}
-	_read_id = ufshpb_get_read_id(hpb);
-	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
-	pre_req->req = req;
-
-	ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
-	if (ret)
-		goto free_pre_req;
-
-	*read_id = _read_id;
-
-	return ret;
-free_pre_req:
-	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-	ufshpb_put_pre_req(hpb, pre_req);
-unlock_out:
-	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-	blk_put_request(req);
-	return ret;
-}
-
 /*
  * This function will set up HPB read command using host-side L2P map data.
  */
@@ -609,7 +354,6 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 	__be64 ppn;
 	unsigned long flags;
 	int transfer_len, rgn_idx, srgn_idx, srgn_offset;
-	int read_id = 0;
 	int err = 0;
 
 	hpb = ufshpb_get_hpb_data(cmd->device);
@@ -685,24 +429,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 		dev_err(hba->dev, "get ppn failed. err %d\n", err);
 		return err;
 	}
-	if (!ufshpb_is_legacy(hba) &&
-	    ufshpb_is_required_wb(hpb, transfer_len)) {
-		err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
-		if (err) {
-			unsigned long timeout;
-
-			timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
-				  hpb->params.requeue_timeout_ms);
-
-			if (time_before(jiffies, timeout))
-				return -EAGAIN;
-
-			hpb->stats.miss_cnt++;
-			return 0;
-		}
-	}
 
-	ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id);
+	ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
 
 	hpb->stats.hit_cnt++;
 	return 0;
@@ -1841,16 +1569,11 @@ static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
 	u32 entries_per_rgn;
 	u64 rgn_mem_size, tmp;
 
-	/* for pre_req */
-	hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1;
-
 	if (ufshpb_is_legacy(hba))
 		hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
 	else
 		hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
 
-	hpb->cur_read_id = 0;
-
 	hpb->lu_pinned_start = hpb_lu_info->pinned_start;
 	hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
 		(hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
@@ -241,8 +241,6 @@ struct ufshpb_lu {
 	spinlock_t param_lock;
 
 	struct list_head lh_pre_req_free;
-	int cur_read_id;
-	int pre_req_min_tr_len;
 	int pre_req_max_tr_len;
 
 	/* cached L2P map management worker */
@@ -134,7 +134,7 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
 	if (!master)
 		return -ENOMEM;
 
-	master->bus_num = dfl_dev->id;
+	master->bus_num = -1;
 
 	hw = spi_master_get_devdata(master);
 
@@ -48,7 +48,7 @@ static int altera_spi_probe(struct platform_device *pdev)
 		return err;
 
 	/* setup the master state. */
-	master->bus_num = pdev->id;
+	master->bus_num = -1;
 
 	if (pdata) {
 		if (pdata->num_chipselect > ALTERA_SPI_MAX_CS) {
@@ -1716,12 +1716,13 @@ static int verify_controller_parameters(struct pl022 *pl022,
 			return -EINVAL;
 		}
 	} else {
-		if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
+		if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
 			dev_err(&pl022->adev->dev,
 				"Microwire half duplex mode requested,"
 				" but this is only available in the"
 				" ST version of PL022\n");
 			return -EINVAL;
+		}
 	}
 	return 0;
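The pl022 hunk is the classic missing-braces bug: without braces only the dev_err() was governed by the if, so the branch returned -EINVAL even for the supported full-duplex setting, despite what the indentation suggested. Reduced to a sketch:

    #include <linux/types.h>
    #include <linux/printk.h>
    #include <linux/errno.h>

    static int check_duplex_buggy(bool half_duplex)
    {
    	if (half_duplex)
    		pr_err("half duplex not available\n");
    		return -EINVAL;	/* misleading indent: always executed */
    }

    static int check_duplex_fixed(bool half_duplex)
    {
    	if (half_duplex) {
    		pr_err("half duplex not available\n");
    		return -EINVAL;
    	}

    	return 0;
    }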
@@ -80,6 +80,7 @@ struct vduse_dev {
 	struct vdpa_callback config_cb;
 	struct work_struct inject;
 	spinlock_t irq_lock;
+	struct rw_semaphore rwsem;
 	int minor;
 	bool broken;
 	bool connected;
@@ -410,6 +411,8 @@ static void vduse_dev_reset(struct vduse_dev *dev)
 	if (domain->bounce_map)
 		vduse_domain_reset_bounce_map(domain);
 
+	down_write(&dev->rwsem);
+
 	dev->status = 0;
 	dev->driver_features = 0;
 	dev->generation++;
@@ -443,6 +446,8 @@ static void vduse_dev_reset(struct vduse_dev *dev)
 		flush_work(&vq->inject);
 		flush_work(&vq->kick);
 	}
+
+	up_write(&dev->rwsem);
 }
 
 static int vduse_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 idx,
@@ -885,6 +890,23 @@ static void vduse_vq_irq_inject(struct work_struct *work)
 	spin_unlock_irq(&vq->irq_lock);
 }
 
+static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
+				    struct work_struct *irq_work)
+{
+	int ret = -EINVAL;
+
+	down_read(&dev->rwsem);
+	if (!(dev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+		goto unlock;
+
+	ret = 0;
+	queue_work(vduse_irq_wq, irq_work);
+unlock:
+	up_read(&dev->rwsem);
+
+	return ret;
+}
+
 static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
 			    unsigned long arg)
 {
@@ -966,8 +988,7 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
 		break;
 	}
 	case VDUSE_DEV_INJECT_CONFIG_IRQ:
-		ret = 0;
-		queue_work(vduse_irq_wq, &dev->inject);
+		ret = vduse_dev_queue_irq_work(dev, &dev->inject);
 		break;
 	case VDUSE_VQ_SETUP: {
 		struct vduse_vq_config config;
@@ -1053,9 +1074,8 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
 		if (index >= dev->vq_num)
 			break;
 
-		ret = 0;
 		index = array_index_nospec(index, dev->vq_num);
-		queue_work(vduse_irq_wq, &dev->vqs[index].inject);
+		ret = vduse_dev_queue_irq_work(dev, &dev->vqs[index].inject);
 		break;
 	}
 	default:
@@ -1136,6 +1156,7 @@ static struct vduse_dev *vduse_dev_create(void)
 	INIT_LIST_HEAD(&dev->send_list);
 	INIT_LIST_HEAD(&dev->recv_list);
 	spin_lock_init(&dev->irq_lock);
+	init_rwsem(&dev->rwsem);
 
 	INIT_WORK(&dev->inject, vduse_dev_irq_inject);
 	init_waitqueue_head(&dev->waitq);
@@ -576,7 +576,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 	/* Last one doesn't continue. */
 	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
 	if (!indirect && vq->use_dma_api)
-		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags =
+		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
 			~VRING_DESC_F_NEXT;
 
 	if (indirect) {
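The virtio_ring fix turns an accidental plain assignment into the intended read-modify-write: "= ~VRING_DESC_F_NEXT" stored the complement mask itself, setting every flag bit except NEXT, while "&= ~VRING_DESC_F_NEXT" clears just that one bit and preserves the rest. The idiom in isolation:

    #include <linux/types.h>

    #define F_NEXT	0x1	/* stand-ins for the real flag bits */
    #define F_WRITE	0x2

    static u16 clear_next(u16 flags)
    {
    	flags &= ~F_NEXT;	/* clears F_NEXT, keeps F_WRITE etc. */
    	return flags;
    	/* "flags = ~F_NEXT" would instead yield 0xfffe, turning on
    	 * every bit other than F_NEXT. */
    }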
@@ -71,8 +71,6 @@
 #define TCOBASE(p)	((p)->tco_res->start)
 /* SMI Control and Enable Register */
 #define SMI_EN(p)	((p)->smi_res->start)
-#define TCO_EN		(1 << 13)
-#define GBL_SMI_EN	(1 << 0)
 
 #define TCO_RLD(p)	(TCOBASE(p) + 0x00) /* TCO Timer Reload/Curr. Value */
 #define TCOv1_TMR(p)	(TCOBASE(p) + 0x01) /* TCOv1 Timer Initial Value*/
@@ -357,12 +355,8 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
 
 	tmrval = seconds_to_ticks(p, t);
 
-	/*
-	 * If TCO SMIs are off, the timer counts down twice before rebooting.
-	 * Otherwise, the BIOS generally reboots when the SMI triggers.
-	 */
-	if (p->smi_res &&
-	    (inl(SMI_EN(p)) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
+	/* For TCO v1 the timer counts down twice before rebooting */
+	if (p->iTCO_version == 1)
 		tmrval /= 2;
 
 	/* from the specs: */
@@ -527,7 +521,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
 		 * Disables TCO logic generating an SMI#
 		 */
 		val32 = inl(SMI_EN(p));
-		val32 &= ~TCO_EN;	/* Turn off SMI clearing watchdog */
+		val32 &= 0xffffdfff;	/* Turn off SMI clearing watchdog */
 		outl(val32, SMI_EN(p));
 	}
 
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user