Merge tag 'v5.2' into perf/core, to pick up fixes

Linux 5.2

-----BEGIN PGP SIGNATURE-----

 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAl0idTweHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGZesIAJDKicw2Voyx8K8m
 3pXSK+71RuO/d3Y9M51mdfTMKRP4PHR9/4wVZ9wHPwC4dV6wxgsmIYCF69a1Wety
 LD1MpDCP1DK5wVfPNKVX2xmj7ua6iutPtSsJHzdzM2TlscgsrFKjmUccqJ5JLwL5
 c34nqwXWnzzRyI5Ga9cQSlwzAXq0vDHXyML3AnCosSsLX0lKFrHlK1zttdOPNkfj
 dXRN62g3q+9kVQozzhDXb8atZZ7IkBk8Q0lujpNXW83Ci1VjaVNv3SB8GZTXIlLj
 U15VdyuwfJDfpBgFBN6/unzVaAB6FFrEKy0jT1aeTyKarMKDKgOnJjn10aKjDNno
 /bXsKKc=
 =TVqV
 -----END PGP SIGNATURE-----

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Commit 552a031ba1 by Ingo Molnar, 2019-07-08 18:04:41 +02:00
234 changed files with 1832 additions and 829 deletions


@@ -152,17 +152,19 @@ examples:
   - |
     // Example 2: Spike ISA Simulator with 1 Hart
     cpus {
+        #address-cells = <1>;
+        #size-cells = <0>;
         cpu@0 {
             device_type = "cpu";
             reg = <0>;
             compatible = "riscv";
             riscv,isa = "rv64imafdc";
             mmu-type = "riscv,sv48";
             interrupt-controller {
                 #interrupt-cells = <1>;
                 interrupt-controller;
                 compatible = "riscv,cpu-intc";
             };
         };
     };
     ...


@@ -3122,6 +3122,7 @@ F: arch/arm/mach-bcm/
 BROADCOM BCM2835 ARM ARCHITECTURE
 M: Eric Anholt <eric@anholt.net>
 M: Stefan Wahren <wahrenst@gmx.net>
+L: bcm-kernel-feedback-list@broadcom.com
 L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T: git git://github.com/anholt/linux
@@ -3151,6 +3152,7 @@ F: arch/arm/boot/dts/bcm953012*
 
 BROADCOM BCM53573 ARM ARCHITECTURE
 M: Rafał Miłecki <rafal@milecki.pl>
+L: bcm-kernel-feedback-list@broadcom.com
 L: linux-arm-kernel@lists.infradead.org
 S: Maintained
 F: arch/arm/boot/dts/bcm53573*
@@ -3940,6 +3942,14 @@ M: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
 S: Maintained
 F: .clang-format
 
+CLANG/LLVM BUILD SUPPORT
+L: clang-built-linux@googlegroups.com
+W: https://clangbuiltlinux.github.io/
+B: https://github.com/ClangBuiltLinux/linux/issues
+C: irc://chat.freenode.net/clangbuiltlinux
+S: Supported
+K: \b(?i:clang|llvm)\b
+
 CLEANCACHE API
 M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L: linux-kernel@vger.kernel.org
@@ -7800,7 +7810,7 @@ INGENIC JZ4780 NAND DRIVER
 M: Harvey Hunt <harveyhuntnexus@gmail.com>
 L: linux-mtd@lists.infradead.org
 S: Maintained
-F: drivers/mtd/nand/raw/jz4780_*
+F: drivers/mtd/nand/raw/ingenic/
 
 INOTIFY
 M: Jan Kara <jack@suse.cz>
@@ -15493,6 +15503,7 @@ F: drivers/dma/tegra*
 
 TEGRA I2C DRIVER
 M: Laxman Dewangan <ldewangan@nvidia.com>
+R: Dmitry Osipenko <digetx@gmail.com>
 S: Supported
 F: drivers/i2c/busses/i2c-tegra.c


@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
-NAME = Golden Lions
+EXTRAVERSION =
+NAME = Bobtail Squid
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"


@@ -5,6 +5,10 @@
 KBUILD_DEFCONFIG := nsim_hs_defconfig
 
+ifeq ($(CROSS_COMPILE),)
+CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-)
+endif
+
 cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
 cflags-$(CONFIG_ISA_ARCV2) += -mcpu=hs38


@ -32,8 +32,6 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
#define ARC_PERIPHERAL_BASE 0xf0000000 #define ARC_PERIPHERAL_BASE 0xf0000000
#define CREG_BASE (ARC_PERIPHERAL_BASE + 0x1000) #define CREG_BASE (ARC_PERIPHERAL_BASE + 0x1000)
#define CREG_PAE (CREG_BASE + 0x180)
#define CREG_PAE_UPDATE (CREG_BASE + 0x194)
#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000) #define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000)
#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108) #define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108)
@ -99,20 +97,167 @@ static void __init hsdk_enable_gpio_intc_wire(void)
iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN); iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
} }
static void __init hsdk_init_early(void) enum hsdk_axi_masters {
M_HS_CORE = 0,
M_HS_RTT,
M_AXI_TUN,
M_HDMI_VIDEO,
M_HDMI_AUDIO,
M_USB_HOST,
M_ETHERNET,
M_SDIO,
M_GPU,
M_DMAC_0,
M_DMAC_1,
M_DVFS
};
#define UPDATE_VAL 1
/*
* This is modified configuration of AXI bridge. Default settings
* are specified in "Table 111 CREG Address Decoder register reset values".
*
* AXI_M_m_SLV{0|1} - Slave Select register for master 'm'.
* Possible slaves are:
* - 0 => no slave selected
* - 1 => DDR controller port #1
* - 2 => SRAM controller
* - 3 => AXI tunnel
* - 4 => EBI controller
* - 5 => ROM controller
* - 6 => AXI2APB bridge
* - 7 => DDR controller port #2
* - 8 => DDR controller port #3
* - 9 => HS38x4 IOC
* - 10 => HS38x4 DMI
* AXI_M_m_OFFSET{0|1} - Addr Offset register for master 'm'
*
* Please read ARC HS Development IC Specification, section 17.2 for more
* information about apertures configuration.
*
* m master AXI_M_m_SLV0 AXI_M_m_SLV1 AXI_M_m_OFFSET0 AXI_M_m_OFFSET1
* 0 HS (CBU) 0x11111111 0x63111111 0xFEDCBA98 0x0E543210
* 1 HS (RTT) 0x77777777 0x77777777 0xFEDCBA98 0x76543210
* 2 AXI Tunnel 0x88888888 0x88888888 0xFEDCBA98 0x76543210
* 3 HDMI-VIDEO 0x77777777 0x77777777 0xFEDCBA98 0x76543210
* 4 HDMI-ADUIO 0x77777777 0x77777777 0xFEDCBA98 0x76543210
* 5 USB-HOST 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98
* 6 ETHERNET 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98
* 7 SDIO 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98
* 8 GPU 0x77777777 0x77777777 0xFEDCBA98 0x76543210
* 9 DMAC (port #1) 0x77777777 0x77777777 0xFEDCBA98 0x76543210
* 10 DMAC (port #2) 0x77777777 0x77777777 0xFEDCBA98 0x76543210
* 11 DVFS 0x00000000 0x60000000 0x00000000 0x00000000
*/
#define CREG_AXI_M_SLV0(m) ((void __iomem *)(CREG_BASE + 0x20 * (m)))
#define CREG_AXI_M_SLV1(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x04))
#define CREG_AXI_M_OFT0(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x08))
#define CREG_AXI_M_OFT1(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x0C))
#define CREG_AXI_M_UPDT(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x14))
#define CREG_AXI_M_HS_CORE_BOOT ((void __iomem *)(CREG_BASE + 0x010))
#define CREG_PAE ((void __iomem *)(CREG_BASE + 0x180))
#define CREG_PAE_UPDT ((void __iomem *)(CREG_BASE + 0x194))
static void __init hsdk_init_memory_bridge(void)
{ {
u32 reg;
/*
* M_HS_CORE has one unique register - BOOT.
* We need to clean boot mirror (BOOT[1:0]) bits in them to avoid first
* aperture to be masked by 'boot mirror'.
*/
reg = readl(CREG_AXI_M_HS_CORE_BOOT) & (~0x3);
writel(reg, CREG_AXI_M_HS_CORE_BOOT);
writel(0x11111111, CREG_AXI_M_SLV0(M_HS_CORE));
writel(0x63111111, CREG_AXI_M_SLV1(M_HS_CORE));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HS_CORE));
writel(0x0E543210, CREG_AXI_M_OFT1(M_HS_CORE));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HS_CORE));
writel(0x77777777, CREG_AXI_M_SLV0(M_HS_RTT));
writel(0x77777777, CREG_AXI_M_SLV1(M_HS_RTT));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HS_RTT));
writel(0x76543210, CREG_AXI_M_OFT1(M_HS_RTT));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HS_RTT));
writel(0x88888888, CREG_AXI_M_SLV0(M_AXI_TUN));
writel(0x88888888, CREG_AXI_M_SLV1(M_AXI_TUN));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_AXI_TUN));
writel(0x76543210, CREG_AXI_M_OFT1(M_AXI_TUN));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_AXI_TUN));
writel(0x77777777, CREG_AXI_M_SLV0(M_HDMI_VIDEO));
writel(0x77777777, CREG_AXI_M_SLV1(M_HDMI_VIDEO));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HDMI_VIDEO));
writel(0x76543210, CREG_AXI_M_OFT1(M_HDMI_VIDEO));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HDMI_VIDEO));
writel(0x77777777, CREG_AXI_M_SLV0(M_HDMI_AUDIO));
writel(0x77777777, CREG_AXI_M_SLV1(M_HDMI_AUDIO));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HDMI_AUDIO));
writel(0x76543210, CREG_AXI_M_OFT1(M_HDMI_AUDIO));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HDMI_AUDIO));
writel(0x77777777, CREG_AXI_M_SLV0(M_USB_HOST));
writel(0x77999999, CREG_AXI_M_SLV1(M_USB_HOST));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_USB_HOST));
writel(0x76DCBA98, CREG_AXI_M_OFT1(M_USB_HOST));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_USB_HOST));
writel(0x77777777, CREG_AXI_M_SLV0(M_ETHERNET));
writel(0x77999999, CREG_AXI_M_SLV1(M_ETHERNET));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_ETHERNET));
writel(0x76DCBA98, CREG_AXI_M_OFT1(M_ETHERNET));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_ETHERNET));
writel(0x77777777, CREG_AXI_M_SLV0(M_SDIO));
writel(0x77999999, CREG_AXI_M_SLV1(M_SDIO));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_SDIO));
writel(0x76DCBA98, CREG_AXI_M_OFT1(M_SDIO));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_SDIO));
writel(0x77777777, CREG_AXI_M_SLV0(M_GPU));
writel(0x77777777, CREG_AXI_M_SLV1(M_GPU));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_GPU));
writel(0x76543210, CREG_AXI_M_OFT1(M_GPU));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU));
writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_0));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_0));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_1));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_1));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS));
writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS));
writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS));
writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS));
/* /*
* PAE remapping for DMA clients does not work due to an RTL bug, so * PAE remapping for DMA clients does not work due to an RTL bug, so
* CREG_PAE register must be programmed to all zeroes, otherwise it * CREG_PAE register must be programmed to all zeroes, otherwise it
* will cause problems with DMA to/from peripherals even if PAE40 is * will cause problems with DMA to/from peripherals even if PAE40 is
* not used. * not used.
*/ */
writel(0x00000000, CREG_PAE);
writel(UPDATE_VAL, CREG_PAE_UPDT);
}
/* Default is 1, which means "PAE offset = 4GByte" */ static void __init hsdk_init_early(void)
writel_relaxed(0, (void __iomem *) CREG_PAE); {
hsdk_init_memory_bridge();
/* Really apply settings made above */
writel(1, (void __iomem *) CREG_PAE_UPDATE);
/* /*
* Switch SDIO external ciu clock divider from default div-by-8 to * Switch SDIO external ciu clock divider from default div-by-8 to


@@ -336,3 +336,11 @@
         status = "disabled";
     };
 };
+
+&uart0 {
+    compatible = "marvell,armada-38x-uart";
+};
+
+&uart1 {
+    compatible = "marvell,armada-38x-uart";
+};


@@ -20,7 +20,7 @@
     };
 
     chosen {
-        bootargs = "console=ttyS0,19200n8 root=/dev/sda1 rw rootwait";
+        bootargs = "console=ttyS0,19200n8 root=/dev/sda1 rw rootwait consoleblank=300";
         stdout-path = "uart0:19200n8";
     };


@@ -11,7 +11,7 @@
 / {
     model = "D-Link DNS-313 1-Bay Network Storage Enclosure";
-    compatible = "dlink,dir-313", "cortina,gemini";
+    compatible = "dlink,dns-313", "cortina,gemini";
     #address-cells = <1>;
     #size-cells = <1>;


@@ -358,7 +358,7 @@
         pwm1: pwm@2080000 {
             compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
             reg = <0x02080000 0x4000>;
-            interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+            interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
             clocks = <&clks IMX6UL_CLK_PWM1>,
                      <&clks IMX6UL_CLK_PWM1>;
             clock-names = "ipg", "per";
@@ -369,7 +369,7 @@
         pwm2: pwm@2084000 {
             compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
             reg = <0x02084000 0x4000>;
-            interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+            interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
             clocks = <&clks IMX6UL_CLK_PWM2>,
                      <&clks IMX6UL_CLK_PWM2>;
             clock-names = "ipg", "per";
@@ -380,7 +380,7 @@
         pwm3: pwm@2088000 {
            compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
            reg = <0x02088000 0x4000>;
-           interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+           interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
            clocks = <&clks IMX6UL_CLK_PWM3>,
                     <&clks IMX6UL_CLK_PWM3>;
            clock-names = "ipg", "per";
@@ -391,7 +391,7 @@
         pwm4: pwm@208c000 {
            compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
            reg = <0x0208c000 0x4000>;
-           interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+           interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
            clocks = <&clks IMX6UL_CLK_PWM4>,
                     <&clks IMX6UL_CLK_PWM4>;
            clock-names = "ipg", "per";


@@ -248,8 +248,8 @@
                 <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
                 <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
                 <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
-                <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
-                <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
+                <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+                <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
                 <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
                 <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
                 <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
@@ -264,7 +264,6 @@
             clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
             clock-names = "bus", "core";
             operating-points-v2 = <&gpu_opp_table>;
-            switch-delay = <0xffff>;
         };
     };
 }; /* end of / */


@@ -163,23 +163,23 @@
         opp-255000000 {
             opp-hz = /bits/ 64 <255000000>;
-            opp-microvolt = <1150000>;
+            opp-microvolt = <1100000>;
         };
         opp-364300000 {
             opp-hz = /bits/ 64 <364300000>;
-            opp-microvolt = <1150000>;
+            opp-microvolt = <1100000>;
         };
         opp-425000000 {
             opp-hz = /bits/ 64 <425000000>;
-            opp-microvolt = <1150000>;
+            opp-microvolt = <1100000>;
         };
         opp-510000000 {
             opp-hz = /bits/ 64 <510000000>;
-            opp-microvolt = <1150000>;
+            opp-microvolt = <1100000>;
         };
         opp-637500000 {
             opp-hz = /bits/ 64 <637500000>;
-            opp-microvolt = <1150000>;
+            opp-microvolt = <1100000>;
             turbo-mode;
         };
     };
@@ -229,7 +229,6 @@
             clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
             clock-names = "bus", "core";
             operating-points-v2 = <&gpu_opp_table>;
-            switch-delay = <0xffff>;
         };
     };
 }; /* end of / */


@@ -61,6 +61,9 @@ static struct regulator_consumer_supply da830_evm_usb_supplies[] = {
 static struct regulator_init_data da830_evm_usb_vbus_data = {
     .consumer_supplies = da830_evm_usb_supplies,
     .num_consumer_supplies = ARRAY_SIZE(da830_evm_usb_supplies),
+    .constraints = {
+        .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+    },
 };
 
 static struct fixed_voltage_config da830_evm_usb_vbus = {
@@ -88,7 +91,7 @@ static struct gpiod_lookup_table da830_evm_usb_oc_gpio_lookup = {
 static struct gpiod_lookup_table da830_evm_usb_vbus_gpio_lookup = {
     .dev_id = "reg-fixed-voltage.0",
     .table = {
-        GPIO_LOOKUP("davinci_gpio", ON_BD_USB_DRV, "vbus", 0),
+        GPIO_LOOKUP("davinci_gpio", ON_BD_USB_DRV, NULL, 0),
         { }
     },
 };


@@ -306,6 +306,9 @@ static struct regulator_consumer_supply hawk_usb_supplies[] = {
 static struct regulator_init_data hawk_usb_vbus_data = {
     .consumer_supplies = hawk_usb_supplies,
     .num_consumer_supplies = ARRAY_SIZE(hawk_usb_supplies),
+    .constraints = {
+        .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+    },
 };
 
 static struct fixed_voltage_config hawk_usb_vbus = {


@@ -430,7 +430,7 @@ static void omap3_prm_reconfigure_io_chain(void)
  * registers, and omap3xxx_prm_reconfigure_io_chain() must be called.
  * No return value.
  */
-static void __init omap3xxx_prm_enable_io_wakeup(void)
+static void omap3xxx_prm_enable_io_wakeup(void)
 {
     if (prm_features & PRM_HAS_IO_WAKEUP)
         omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,


@@ -28,7 +28,7 @@
             enable-method = "psci";
             clocks = <&clockgen 1 0>;
             next-level-cache = <&l2>;
-            cpu-idle-states = <&CPU_PH20>;
+            cpu-idle-states = <&CPU_PW20>;
         };
 
         cpu1: cpu@1 {
@@ -38,7 +38,7 @@
             enable-method = "psci";
             clocks = <&clockgen 1 0>;
             next-level-cache = <&l2>;
-            cpu-idle-states = <&CPU_PH20>;
+            cpu-idle-states = <&CPU_PW20>;
         };
 
         l2: l2-cache {
@@ -53,13 +53,13 @@
          */
         entry-method = "arm,psci";
 
-        CPU_PH20: cpu-ph20 {
+        CPU_PW20: cpu-pw20 {
             compatible = "arm,idle-state";
-            idle-state-name = "PH20";
-            arm,psci-suspend-param = <0x00010000>;
-            entry-latency-us = <1000>;
-            exit-latency-us = <1000>;
-            min-residency-us = <3000>;
+            idle-state-name = "PW20";
+            arm,psci-suspend-param = <0x0>;
+            entry-latency-us = <2000>;
+            exit-latency-us = <2000>;
+            min-residency-us = <6000>;
         };
     };


@@ -613,6 +613,7 @@ CONFIG_RTC_DRV_TEGRA=y
 CONFIG_RTC_DRV_IMX_SC=m
 CONFIG_RTC_DRV_XGENE=y
 CONFIG_DMADEVICES=y
+CONFIG_FSL_EDMA=y
 CONFIG_DMA_BCM2835=m
 CONFIG_K3_DMA=y
 CONFIG_MV_XOR=y


@@ -67,7 +67,11 @@
 
 #ifdef CONFIG_EFI
 
-__efistub_stext_offset = stext - _text;
+/*
+ * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol:
+ * https://github.com/ClangBuiltLinux/linux/issues/561
+ */
+__efistub_stext_offset = ABSOLUTE(stext - _text);
 
 /*
  * The EFI stub has its own symbol namespace prefixed by __efistub_, to


@@ -21,6 +21,7 @@
 
 void *module_alloc(unsigned long size)
 {
+    u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
     gfp_t gfp_mask = GFP_KERNEL;
     void *p;
 
@@ -28,9 +29,12 @@ void *module_alloc(unsigned long size)
     if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
         gfp_mask |= __GFP_NOWARN;
 
+    if (IS_ENABLED(CONFIG_KASAN))
+        /* don't exceed the static module region - see below */
+        module_alloc_end = MODULES_END;
+
     p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-                 module_alloc_base + MODULES_VSIZE,
-                 gfp_mask, PAGE_KERNEL_EXEC, 0,
+                 module_alloc_end, gfp_mask, PAGE_KERNEL_EXEC, 0,
                  NUMA_NO_NODE, __builtin_return_address(0));
 
     if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&


@@ -208,7 +208,7 @@ out:
 
 #define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
 #define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
-#define vq_present(vqs, vq) ((vqs)[vq_word(vq)] & vq_mask(vq))
+#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
 
 static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
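The !! added above normalizes the bit test to 0 or 1; a raw mask test yields the bit's numeric value, which breaks a direct comparison against a bool. A tiny standalone illustration, not taken from the KVM code (the values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t vqs = 1ULL << 5;   /* pretend vector length 5 is present */
    int available = 1;          /* a bool-style flag to compare against */

    /* Raw bit test: the expression evaluates to 32, so it never compares
     * equal to a 0/1 flag even though the bit is set. */
    printf("raw test: %d\n", (vqs & (1ULL << 5)) == available);   /* 0 */

    /* With !! the test collapses to 0 or 1 first, as in the fix. */
    printf("with !! : %d\n", !!(vqs & (1ULL << 5)) == available); /* 1 */
    return 0;
}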


@@ -39,6 +39,11 @@ static int save_fpu_state(struct sigcontext __user *sc)
 #endif
 
 struct rt_sigframe {
+    /*
+     * pad[3] is compatible with the same struct defined in
+     * gcc/libgcc/config/csky/linux-unwind.h
+     */
+    int pad[3];
     struct siginfo info;
     struct ucontext uc;
 };


@@ -17,6 +17,7 @@ archscripts: scripts_basic
 	$(Q)$(MAKE) $(build)=arch/mips/boot/tools relocs
 
 KBUILD_DEFCONFIG := 32r2el_defconfig
+KBUILD_DTBS := dtbs
 
 #
 # Select the object file format to substitute into the linker script.
@@ -384,7 +385,7 @@ quiet_cmd_64 = OBJCOPY $@
 vmlinux.64: vmlinux
 	$(call cmd,64)
 
-all: $(all-y)
+all: $(all-y) $(KBUILD_DTBS)
 
 # boot
 $(boot-y): $(vmlinux-32) FORCE


@@ -78,6 +78,8 @@ OBJCOPYFLAGS_piggy.o := --add-section=.image=$(obj)/vmlinux.bin.z \
 $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
 	$(call if_changed,objcopy)
 
+HOSTCFLAGS_calc_vmlinuz_load_addr.o += $(LINUXINCLUDE)
+
 # Calculate the load address of the compressed kernel image
 hostprogs-y := calc_vmlinuz_load_addr


@@ -9,7 +9,7 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include "../../../../include/linux/sizes.h"
+#include <linux/sizes.h>
 
 int main(int argc, char *argv[])
 {


@@ -24,8 +24,8 @@
 #define AR933X_UART_CS_PARITY_S      0
 #define AR933X_UART_CS_PARITY_M      0x3
 #define AR933X_UART_CS_PARITY_NONE   0
-#define AR933X_UART_CS_PARITY_ODD    1
-#define AR933X_UART_CS_PARITY_EVEN   2
+#define AR933X_UART_CS_PARITY_ODD    2
+#define AR933X_UART_CS_PARITY_EVEN   3
 #define AR933X_UART_CS_IF_MODE_S     2
 #define AR933X_UART_CS_IF_MODE_M     0x3
 #define AR933X_UART_CS_IF_MODE_NONE  0


@ -310,6 +310,36 @@ static inline bool mips_gic_present(void)
return IS_ENABLED(CONFIG_MIPS_GIC) && mips_gic_base; return IS_ENABLED(CONFIG_MIPS_GIC) && mips_gic_base;
} }
/**
* mips_gic_vx_map_reg() - Return GIC_Vx_<intr>_MAP register offset
* @intr: A GIC local interrupt
*
* Determine the index of the GIC_VL_<intr>_MAP or GIC_VO_<intr>_MAP register
* within the block of GIC map registers. This is almost the same as the order
* of interrupts in the pending & mask registers, as used by enum
* mips_gic_local_interrupt, but moves the FDC interrupt & thus offsets the
* interrupts after it...
*
* Return: The map register index corresponding to @intr.
*
* The return value is suitable for use with the (read|write)_gic_v[lo]_map
* accessor functions.
*/
static inline unsigned int
mips_gic_vx_map_reg(enum mips_gic_local_interrupt intr)
{
/* WD, Compare & Timer are 1:1 */
if (intr <= GIC_LOCAL_INT_TIMER)
return intr;
/* FDC moves to after Timer... */
if (intr == GIC_LOCAL_INT_FDC)
return GIC_LOCAL_INT_TIMER + 1;
/* As a result everything else is offset by 1 */
return intr + 1;
}
/** /**
* gic_get_c0_compare_int() - Return cp0 count/compare interrupt virq * gic_get_c0_compare_int() - Return cp0 count/compare interrupt virq
* *


@@ -203,7 +203,7 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 
 bool __virt_addr_valid(const volatile void *kaddr)
 {
-    unsigned long vaddr = (unsigned long)vaddr;
+    unsigned long vaddr = (unsigned long)kaddr;
 
     if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
         return false;
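The one-line fix above replaces a self-initialization: the old code set vaddr from its own uninitialized value instead of from kaddr. A standalone sketch of that pitfall, not kernel code (the function names are made up for the demo):

#include <stdio.h>

/* Buggy pattern: 'addr' appears in its own initializer, so the cast reads
 * an indeterminate value; GCC/Clang can flag this with -Winit-self or
 * -Wuninitialized. */
static unsigned long broken(const void *p)
{
    unsigned long addr = (unsigned long)addr;   /* BUG: self-initialization */
    return addr;
}

/* Fixed pattern, mirroring the change: convert the parameter instead. */
static unsigned long fixed(const void *p)
{
    unsigned long addr = (unsigned long)p;
    return addr;
}

int main(void)
{
    int x = 0;
    printf("%lx %lx\n", broken(&x), fixed(&x)); /* first value is garbage */
    return 0;
}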


@ -391,6 +391,7 @@ static struct work_registers build_get_work_registers(u32 **p)
static void build_restore_work_registers(u32 **p) static void build_restore_work_registers(u32 **p)
{ {
if (scratch_reg >= 0) { if (scratch_reg >= 0) {
uasm_i_ehb(p);
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
return; return;
} }
@ -668,10 +669,12 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
uasm_i_mtc0(p, 0, C0_PAGEMASK); uasm_i_mtc0(p, 0, C0_PAGEMASK);
uasm_il_b(p, r, lid); uasm_il_b(p, r, lid);
} }
if (scratch_reg >= 0) if (scratch_reg >= 0) {
uasm_i_ehb(p);
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
else } else {
UASM_i_LW(p, 1, scratchpad_offset(0), 0); UASM_i_LW(p, 1, scratchpad_offset(0), 0);
}
} else { } else {
/* Reset default page size */ /* Reset default page size */
if (PM_DEFAULT_MASK >> 16) { if (PM_DEFAULT_MASK >> 16) {
@ -938,10 +941,12 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
uasm_i_jr(p, ptr); uasm_i_jr(p, ptr);
if (mode == refill_scratch) { if (mode == refill_scratch) {
if (scratch_reg >= 0) if (scratch_reg >= 0) {
uasm_i_ehb(p);
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
else } else {
UASM_i_LW(p, 1, scratchpad_offset(0), 0); UASM_i_LW(p, 1, scratchpad_offset(0), 0);
}
} else { } else {
uasm_i_nop(p); uasm_i_nop(p);
} }
@ -1258,6 +1263,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
if (c0_scratch_reg >= 0) { if (c0_scratch_reg >= 0) {
uasm_i_ehb(p);
UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg); UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
build_tlb_write_entry(p, l, r, tlb_random); build_tlb_write_entry(p, l, r, tlb_random);
uasm_l_leave(l, *p); uasm_l_leave(l, *p);
@ -1603,15 +1609,17 @@ static void build_setup_pgd(void)
uasm_i_dinsm(&p, a0, 0, 29, 64 - 29); uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
uasm_l_tlbl_goaround1(&l, p); uasm_l_tlbl_goaround1(&l, p);
UASM_i_SLL(&p, a0, a0, 11); UASM_i_SLL(&p, a0, a0, 11);
uasm_i_jr(&p, 31);
UASM_i_MTC0(&p, a0, C0_CONTEXT); UASM_i_MTC0(&p, a0, C0_CONTEXT);
uasm_i_jr(&p, 31);
uasm_i_ehb(&p);
} else { } else {
/* PGD in c0_KScratch */ /* PGD in c0_KScratch */
uasm_i_jr(&p, 31);
if (cpu_has_ldpte) if (cpu_has_ldpte)
UASM_i_MTC0(&p, a0, C0_PWBASE); UASM_i_MTC0(&p, a0, C0_PWBASE);
else else
UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
uasm_i_jr(&p, 31);
uasm_i_ehb(&p);
} }
#else #else
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
@ -1625,13 +1633,16 @@ static void build_setup_pgd(void)
UASM_i_LA_mostly(&p, a2, pgdc); UASM_i_LA_mostly(&p, a2, pgdc);
UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2); UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
#endif /* SMP */ #endif /* SMP */
uasm_i_jr(&p, 31);
/* if pgd_reg is allocated, save PGD also to scratch register */ /* if pgd_reg is allocated, save PGD also to scratch register */
if (pgd_reg != -1) if (pgd_reg != -1) {
UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg); UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
else uasm_i_jr(&p, 31);
uasm_i_ehb(&p);
} else {
uasm_i_jr(&p, 31);
uasm_i_nop(&p); uasm_i_nop(&p);
}
#endif #endif
if (p >= (u32 *)tlbmiss_handler_setup_pgd_end) if (p >= (u32 *)tlbmiss_handler_setup_pgd_end)
panic("tlbmiss_handler_setup_pgd space exceeded"); panic("tlbmiss_handler_setup_pgd space exceeded");


@@ -786,6 +786,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
             /* 32-bit PC relative address */
             *loc = val - dot - 8 + addend;
             break;
+        case R_PARISC_PCREL64:
+            /* 64-bit PC relative address */
+            *loc64 = val - dot - 8 + addend;
+            break;
         case R_PARISC_DIR64:
             /* 64-bit effective address */
             *loc64 = val + addend;


@@ -315,7 +315,7 @@ TRAMP_REAL_BEGIN(machine_check_common_early)
     mfspr   r11,SPRN_DSISR      /* Save DSISR */
     std     r11,_DSISR(r1)
     std     r9,_CCR(r1)         /* Save CR in stackframe */
-    kuap_save_amr_and_lock r9, r10, cr1
+    /* We don't touch AMR here, we never go to virtual mode */
     /* Save r9 through r13 from EXMC save area to stack frame. */
     EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
     mfmsr   r11                 /* get MSR value */


@ -50,20 +50,52 @@ EXPORT_SYMBOL_GPL(hash__alloc_context_id);
void slb_setup_new_exec(void); void slb_setup_new_exec(void);
static int realloc_context_ids(mm_context_t *ctx)
{
int i, id;
/*
* id 0 (aka. ctx->id) is special, we always allocate a new one, even if
* there wasn't one allocated previously (which happens in the exec
* case where ctx is newly allocated).
*
* We have to be a bit careful here. We must keep the existing ids in
* the array, so that we can test if they're non-zero to decide if we
* need to allocate a new one. However in case of error we must free the
* ids we've allocated but *not* any of the existing ones (or risk a
* UAF). That's why we decrement i at the start of the error handling
* loop, to skip the id that we just tested but couldn't reallocate.
*/
for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
if (i == 0 || ctx->extended_id[i]) {
id = hash__alloc_context_id();
if (id < 0)
goto error;
ctx->extended_id[i] = id;
}
}
/* The caller expects us to return id */
return ctx->id;
error:
for (i--; i >= 0; i--) {
if (ctx->extended_id[i])
ida_free(&mmu_context_ida, ctx->extended_id[i]);
}
return id;
}
static int hash__init_new_context(struct mm_struct *mm) static int hash__init_new_context(struct mm_struct *mm)
{ {
int index; int index;
index = hash__alloc_context_id();
if (index < 0)
return index;
mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context), mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
GFP_KERNEL); GFP_KERNEL);
if (!mm->context.hash_context) { if (!mm->context.hash_context)
ida_free(&mmu_context_ida, index);
return -ENOMEM; return -ENOMEM;
}
/* /*
* The old code would re-promote on fork, we don't do that when using * The old code would re-promote on fork, we don't do that when using
@ -91,13 +123,20 @@ static int hash__init_new_context(struct mm_struct *mm)
mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table), mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
GFP_KERNEL); GFP_KERNEL);
if (!mm->context.hash_context->spt) { if (!mm->context.hash_context->spt) {
ida_free(&mmu_context_ida, index);
kfree(mm->context.hash_context); kfree(mm->context.hash_context);
return -ENOMEM; return -ENOMEM;
} }
} }
#endif #endif
}
index = realloc_context_ids(&mm->context);
if (index < 0) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
kfree(mm->context.hash_context->spt);
#endif
kfree(mm->context.hash_context);
return index;
} }
pkey_mm_init(mm); pkey_mm_init(mm);


@ -163,6 +163,7 @@
interrupt-parent = <&plic0>; interrupt-parent = <&plic0>;
interrupts = <4>; interrupts = <4>;
clocks = <&prci PRCI_CLK_TLCLK>; clocks = <&prci PRCI_CLK_TLCLK>;
status = "disabled";
}; };
uart1: serial@10011000 { uart1: serial@10011000 {
compatible = "sifive,fu540-c000-uart", "sifive,uart0"; compatible = "sifive,fu540-c000-uart", "sifive,uart0";
@ -170,6 +171,7 @@
interrupt-parent = <&plic0>; interrupt-parent = <&plic0>;
interrupts = <5>; interrupts = <5>;
clocks = <&prci PRCI_CLK_TLCLK>; clocks = <&prci PRCI_CLK_TLCLK>;
status = "disabled";
}; };
i2c0: i2c@10030000 { i2c0: i2c@10030000 {
compatible = "sifive,fu540-c000-i2c", "sifive,i2c0"; compatible = "sifive,fu540-c000-i2c", "sifive,i2c0";
@ -181,6 +183,7 @@
reg-io-width = <1>; reg-io-width = <1>;
#address-cells = <1>; #address-cells = <1>;
#size-cells = <0>; #size-cells = <0>;
status = "disabled";
}; };
qspi0: spi@10040000 { qspi0: spi@10040000 {
compatible = "sifive,fu540-c000-spi", "sifive,spi0"; compatible = "sifive,fu540-c000-spi", "sifive,spi0";
@ -191,6 +194,7 @@
clocks = <&prci PRCI_CLK_TLCLK>; clocks = <&prci PRCI_CLK_TLCLK>;
#address-cells = <1>; #address-cells = <1>;
#size-cells = <0>; #size-cells = <0>;
status = "disabled";
}; };
qspi1: spi@10041000 { qspi1: spi@10041000 {
compatible = "sifive,fu540-c000-spi", "sifive,spi0"; compatible = "sifive,fu540-c000-spi", "sifive,spi0";
@ -201,6 +205,7 @@
clocks = <&prci PRCI_CLK_TLCLK>; clocks = <&prci PRCI_CLK_TLCLK>;
#address-cells = <1>; #address-cells = <1>;
#size-cells = <0>; #size-cells = <0>;
status = "disabled";
}; };
qspi2: spi@10050000 { qspi2: spi@10050000 {
compatible = "sifive,fu540-c000-spi", "sifive,spi0"; compatible = "sifive,fu540-c000-spi", "sifive,spi0";
@ -210,6 +215,7 @@
clocks = <&prci PRCI_CLK_TLCLK>; clocks = <&prci PRCI_CLK_TLCLK>;
#address-cells = <1>; #address-cells = <1>;
#size-cells = <0>; #size-cells = <0>;
status = "disabled";
}; };
}; };
}; };


@@ -42,7 +42,20 @@
     };
 };
 
+&uart0 {
+    status = "okay";
+};
+
+&uart1 {
+    status = "okay";
+};
+
+&i2c0 {
+    status = "okay";
+};
+
 &qspi0 {
+    status = "okay";
     flash@0 {
         compatible = "issi,is25wp256", "jedec,spi-nor";
         reg = <0>;


@@ -69,6 +69,7 @@ CONFIG_VIRTIO_MMIO=y
 CONFIG_CLK_SIFIVE=y
 CONFIG_CLK_SIFIVE_FU540_PRCI=y
 CONFIG_SIFIVE_PLIC=y
+CONFIG_SPI_SIFIVE=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_AUTOFS4_FS=y
@@ -84,4 +85,8 @@ CONFIG_ROOT_NFS=y
 CONFIG_CRYPTO_USER_API_HASH=y
 CONFIG_CRYPTO_DEV_VIRTIO=y
 CONFIG_PRINTK_TIME=y
+CONFIG_SPI=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_RCU_TRACE is not set


@@ -272,9 +272,6 @@ vmalloc_fault:
          * entries, but in RISC-V, SFENCE.VMA specifies an
          * ordering constraint, not a cache flush; it is
          * necessary even after writing invalid entries.
-         * Relying on flush_tlb_fix_spurious_fault would
-         * suffice, but the extra traps reduce
-         * performance. So, eagerly SFENCE.VMA.
          */
         local_flush_tlb_page(addr);


@@ -561,14 +561,14 @@ int x86_pmu_hw_config(struct perf_event *event)
     }
 
     /* sample_regs_user never support XMM registers */
-    if (unlikely(event->attr.sample_regs_user & PEBS_XMM_REGS))
+    if (unlikely(event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK))
         return -EINVAL;
 
     /*
      * Besides the general purpose registers, XMM registers may
      * be collected in PEBS on some platforms, e.g. Icelake
      */
-    if (unlikely(event->attr.sample_regs_intr & PEBS_XMM_REGS)) {
-        if (x86_pmu.pebs_no_xmm_regs)
+    if (unlikely(event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK)) {
+        if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS))
             return -EINVAL;
 
         if (!event->attr.precise_ip)
@@ -2328,13 +2328,13 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
         return;
     }
 
-    if (perf_hw_regs(regs)) {
-        if (perf_callchain_store(entry, regs->ip))
-            return;
+    if (perf_callchain_store(entry, regs->ip))
+        return;
+
+    if (perf_hw_regs(regs))
         unwind_start(&state, current, regs, NULL);
-    } else {
+    else
         unwind_start(&state, current, NULL, (void *)regs->sp);
-    }
 
     for (; !unwind_done(&state); unwind_next_frame(&state)) {
         addr = unwind_get_return_address(&state);


@ -987,7 +987,7 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event)
pebs_data_cfg |= PEBS_DATACFG_GP; pebs_data_cfg |= PEBS_DATACFG_GP;
if ((sample_type & PERF_SAMPLE_REGS_INTR) && if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
(attr->sample_regs_intr & PEBS_XMM_REGS)) (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK))
pebs_data_cfg |= PEBS_DATACFG_XMMS; pebs_data_cfg |= PEBS_DATACFG_XMMS;
if (sample_type & PERF_SAMPLE_BRANCH_STACK) { if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
@ -1964,10 +1964,9 @@ void __init intel_ds_init(void)
x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS); x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS); x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
if (x86_pmu.version <= 4) { if (x86_pmu.version <= 4)
x86_pmu.pebs_no_isolation = 1; x86_pmu.pebs_no_isolation = 1;
x86_pmu.pebs_no_xmm_regs = 1;
}
if (x86_pmu.pebs) { if (x86_pmu.pebs) {
char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-'; char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
char *pebs_qual = ""; char *pebs_qual = "";
@ -2020,9 +2019,9 @@ void __init intel_ds_init(void)
PERF_SAMPLE_TIME; PERF_SAMPLE_TIME;
x86_pmu.flags |= PMU_FL_PEBS_ALL; x86_pmu.flags |= PMU_FL_PEBS_ALL;
pebs_qual = "-baseline"; pebs_qual = "-baseline";
x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
} else { } else {
/* Only basic record supported */ /* Only basic record supported */
x86_pmu.pebs_no_xmm_regs = 1;
x86_pmu.large_pebs_flags &= x86_pmu.large_pebs_flags &=
~(PERF_SAMPLE_ADDR | ~(PERF_SAMPLE_ADDR |
PERF_SAMPLE_TIME | PERF_SAMPLE_TIME |


@ -121,24 +121,6 @@ struct amd_nb {
(1ULL << PERF_REG_X86_R14) | \ (1ULL << PERF_REG_X86_R14) | \
(1ULL << PERF_REG_X86_R15)) (1ULL << PERF_REG_X86_R15))
#define PEBS_XMM_REGS \
((1ULL << PERF_REG_X86_XMM0) | \
(1ULL << PERF_REG_X86_XMM1) | \
(1ULL << PERF_REG_X86_XMM2) | \
(1ULL << PERF_REG_X86_XMM3) | \
(1ULL << PERF_REG_X86_XMM4) | \
(1ULL << PERF_REG_X86_XMM5) | \
(1ULL << PERF_REG_X86_XMM6) | \
(1ULL << PERF_REG_X86_XMM7) | \
(1ULL << PERF_REG_X86_XMM8) | \
(1ULL << PERF_REG_X86_XMM9) | \
(1ULL << PERF_REG_X86_XMM10) | \
(1ULL << PERF_REG_X86_XMM11) | \
(1ULL << PERF_REG_X86_XMM12) | \
(1ULL << PERF_REG_X86_XMM13) | \
(1ULL << PERF_REG_X86_XMM14) | \
(1ULL << PERF_REG_X86_XMM15))
/* /*
* Per register state. * Per register state.
*/ */
@ -665,8 +647,7 @@ struct x86_pmu {
pebs_broken :1, pebs_broken :1,
pebs_prec_dist :1, pebs_prec_dist :1,
pebs_no_tlb :1, pebs_no_tlb :1,
pebs_no_isolation :1, pebs_no_isolation :1;
pebs_no_xmm_regs :1;
int pebs_record_size; int pebs_record_size;
int pebs_buffer_size; int pebs_buffer_size;
int max_pebs_events; int max_pebs_events;


@@ -52,4 +52,7 @@ enum perf_event_x86_regs {
     /* These include both GPRs and XMMX registers */
     PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2,
 };
+
+#define PERF_REG_EXTENDED_MASK  (~((1ULL << PERF_REG_X86_XMM0) - 1))
+
 #endif /* _ASM_X86_PERF_REGS_H */

View File

@@ -1464,7 +1464,8 @@ static void apic_pending_intr_clear(void)
         if (queued) {
             if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
                 ntsc = rdtsc();
-                max_loops = (cpu_khz << 10) - (ntsc - tsc);
+                max_loops = (long long)cpu_khz << 10;
+                max_loops -= ntsc - tsc;
             } else {
                 max_loops--;
             }
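The split assignment above keeps the << 10 out of 32-bit arithmetic: cpu_khz is an unsigned int, so the old form computed the shift in 32 bits and could wrap before being widened into the 64-bit max_loops. A standalone sketch of the difference (the 4.5 GHz figure is made up for illustration):

#include <stdio.h>

int main(void)
{
    unsigned int cpu_khz = 4500000;     /* hypothetical 4.5 GHz clock, in kHz */
    long long wrong, right;

    /* Shift done in 32-bit unsigned arithmetic, then widened:
     * 4500000 << 10 = 4608000000, which wraps modulo 2^32. */
    wrong = cpu_khz << 10;

    /* Widen first, as the fix does, so the shift happens in 64 bits. */
    right = (long long)cpu_khz << 10;

    printf("wrong = %lld\n", wrong);    /* 313032704 after the wrap */
    printf("right = %lld\n", right);    /* 4608000000 */
    return 0;
}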


@ -835,6 +835,16 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
break; break;
} }
/*
* If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
* bit in the mask to allow guests to use the mitigation even in the
* case where the host does not enable it.
*/
if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
static_cpu_has(X86_FEATURE_AMD_SSBD)) {
x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
}
/* /*
* We have three CPU feature flags that are in play here: * We have three CPU feature flags that are in play here:
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
@ -852,7 +862,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
x86_amd_ssb_disable(); x86_amd_ssb_disable();
} else { } else {
x86_spec_ctrl_base |= SPEC_CTRL_SSBD; x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
} }
} }


@ -789,13 +789,16 @@ static struct syscore_ops mc_syscore_ops = {
.resume = mc_bp_resume, .resume = mc_bp_resume,
}; };
static int mc_cpu_online(unsigned int cpu) static int mc_cpu_starting(unsigned int cpu)
{ {
struct device *dev;
dev = get_cpu_device(cpu);
microcode_update_cpu(cpu); microcode_update_cpu(cpu);
pr_debug("CPU%d added\n", cpu); pr_debug("CPU%d added\n", cpu);
return 0;
}
static int mc_cpu_online(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
if (sysfs_create_group(&dev->kobj, &mc_attr_group)) if (sysfs_create_group(&dev->kobj, &mc_attr_group))
pr_err("Failed to create group for CPU%d\n", cpu); pr_err("Failed to create group for CPU%d\n", cpu);
@ -872,7 +875,9 @@ int __init microcode_init(void)
goto out_ucode_group; goto out_ucode_group;
register_syscore_ops(&mc_syscore_ops); register_syscore_ops(&mc_syscore_ops);
cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online", cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
mc_cpu_starting, NULL);
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
mc_cpu_online, mc_cpu_down_prep); mc_cpu_online, mc_cpu_down_prep);
pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION); pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);


@ -796,8 +796,12 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v) struct seq_file *seq, void *v)
{ {
struct rdt_resource *r = of->kn->parent->priv; struct rdt_resource *r = of->kn->parent->priv;
u32 sw_shareable = 0, hw_shareable = 0; /*
u32 exclusive = 0, pseudo_locked = 0; * Use unsigned long even though only 32 bits are used to ensure
* test_bit() is used safely.
*/
unsigned long sw_shareable = 0, hw_shareable = 0;
unsigned long exclusive = 0, pseudo_locked = 0;
struct rdt_domain *dom; struct rdt_domain *dom;
int i, hwb, swb, excl, psl; int i, hwb, swb, excl, psl;
enum rdtgrp_mode mode; enum rdtgrp_mode mode;
@ -842,10 +846,10 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
} }
for (i = r->cache.cbm_len - 1; i >= 0; i--) { for (i = r->cache.cbm_len - 1; i >= 0; i--) {
pseudo_locked = dom->plr ? dom->plr->cbm : 0; pseudo_locked = dom->plr ? dom->plr->cbm : 0;
hwb = test_bit(i, (unsigned long *)&hw_shareable); hwb = test_bit(i, &hw_shareable);
swb = test_bit(i, (unsigned long *)&sw_shareable); swb = test_bit(i, &sw_shareable);
excl = test_bit(i, (unsigned long *)&exclusive); excl = test_bit(i, &exclusive);
psl = test_bit(i, (unsigned long *)&pseudo_locked); psl = test_bit(i, &pseudo_locked);
if (hwb && swb) if (hwb && swb)
seq_putc(seq, 'X'); seq_putc(seq, 'X');
else if (hwb && !swb) else if (hwb && !swb)
@ -2486,26 +2490,19 @@ out_destroy:
*/ */
static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r) static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
{ {
/* unsigned long val = *_val;
* Convert the u32 _val to an unsigned long required by all the bit
* operations within this function. No more than 32 bits of this
* converted value can be accessed because all bit operations are
* additionally provided with cbm_len that is initialized during
* hardware enumeration using five bits from the EAX register and
* thus never can exceed 32 bits.
*/
unsigned long *val = (unsigned long *)_val;
unsigned int cbm_len = r->cache.cbm_len; unsigned int cbm_len = r->cache.cbm_len;
unsigned long first_bit, zero_bit; unsigned long first_bit, zero_bit;
if (*val == 0) if (val == 0)
return; return;
first_bit = find_first_bit(val, cbm_len); first_bit = find_first_bit(&val, cbm_len);
zero_bit = find_next_zero_bit(val, cbm_len, first_bit); zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
/* Clear any remaining bits to ensure contiguous region */ /* Clear any remaining bits to ensure contiguous region */
bitmap_clear(val, zero_bit, cbm_len - zero_bit); bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
*_val = (u32)val;
} }
/* /*
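The rdt_bit_usage_show()/cbm_ensure_valid() changes above stop casting a u32 pointer to unsigned long * for the bitmap helpers and operate on a real unsigned long instead; on 64-bit kernels such a cast lets test_bit()/find_first_bit() touch 4 bytes beyond the u32. A minimal standalone sketch of the hazard, not resctrl code (the struct is invented for the demo):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    struct {
        uint32_t val;       /* the "u32" the caller owns */
        uint32_t neighbour; /* whatever happens to sit after it */
    } s = { 0x0, 0xffffffff };

    /* Unsafe: on LP64, unsigned long is 64 bits, so this read pulls in
     * s.neighbour as the upper half of the word (and a write through the
     * same cast would clobber it). */
    unsigned long through_cast = *(unsigned long *)&s.val;

    /* Safe: copy the 32-bit value into a properly sized local, operate on
     * that, and copy the low 32 bits back when done. */
    unsigned long copy = s.val;

    printf("through cast: %#lx\n", through_cast); /* 0xffffffff00000000 on LP64/LE */
    printf("via copy:     %#lx\n", copy);         /* 0x0 */
    return 0;
}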


@ -22,6 +22,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/memory.h>
#include <trace/syscall.h> #include <trace/syscall.h>
@ -34,16 +35,25 @@
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
int ftrace_arch_code_modify_prepare(void) int ftrace_arch_code_modify_prepare(void)
__acquires(&text_mutex)
{ {
/*
* Need to grab text_mutex to prevent a race from module loading
* and live kernel patching from changing the text permissions while
* ftrace has it set to "read/write".
*/
mutex_lock(&text_mutex);
set_kernel_text_rw(); set_kernel_text_rw();
set_all_modules_text_rw(); set_all_modules_text_rw();
return 0; return 0;
} }
int ftrace_arch_code_modify_post_process(void) int ftrace_arch_code_modify_post_process(void)
__releases(&text_mutex)
{ {
set_all_modules_text_ro(); set_all_modules_text_ro();
set_kernel_text_ro(); set_kernel_text_ro();
mutex_unlock(&text_mutex);
return 0; return 0;
} }


@ -184,24 +184,25 @@ unsigned long __head __startup_64(unsigned long physaddr,
pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask(); pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
if (la57) { if (la57) {
p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr); p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++],
physaddr);
i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
pgd[i + 0] = (pgdval_t)p4d + pgtable_flags; pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
pgd[i + 1] = (pgdval_t)p4d + pgtable_flags; pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D; i = physaddr >> P4D_SHIFT;
p4d[i + 0] = (pgdval_t)pud + pgtable_flags; p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
p4d[i + 1] = (pgdval_t)pud + pgtable_flags; p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
} else { } else {
i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
pgd[i + 0] = (pgdval_t)pud + pgtable_flags; pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
pgd[i + 1] = (pgdval_t)pud + pgtable_flags; pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
} }
i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD; i = physaddr >> PUD_SHIFT;
pud[i + 0] = (pudval_t)pmd + pgtable_flags; pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
pud[i + 1] = (pudval_t)pmd + pgtable_flags; pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL; pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
/* Filter out unsupported __PAGE_KERNEL_* bits: */ /* Filter out unsupported __PAGE_KERNEL_* bits: */
@ -211,8 +212,9 @@ unsigned long __head __startup_64(unsigned long physaddr,
pmd_entry += physaddr; pmd_entry += physaddr;
for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) { for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD; int idx = i + (physaddr >> PMD_SHIFT);
pmd[idx] = pmd_entry + i * PMD_SIZE;
pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
} }
/* /*


@@ -74,6 +74,9 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
     return regs_get_register(regs, pt_regs_offset[idx]);
 }
 
+#define PERF_REG_X86_RESERVED   (((1ULL << PERF_REG_X86_XMM0) - 1) & \
+                                 ~((1ULL << PERF_REG_X86_MAX) - 1))
+
 #ifdef CONFIG_X86_32
 #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \
                        (1ULL << PERF_REG_X86_R9) | \
@@ -86,7 +89,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
 
 int perf_reg_validate(u64 mask)
 {
-    if (!mask || (mask & REG_NOSUPPORT))
+    if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
         return -EINVAL;
 
     return 0;
@@ -112,7 +115,7 @@ void perf_get_regs_user(struct perf_regs *regs_user,
 
 int perf_reg_validate(u64 mask)
 {
-    if (!mask || (mask & REG_NOSUPPORT))
+    if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
         return -EINVAL;
 
     return 0;


@ -82,9 +82,9 @@ static struct orc_entry *orc_find(unsigned long ip);
* But they are copies of the ftrace entries that are static and * But they are copies of the ftrace entries that are static and
* defined in ftrace_*.S, which do have orc entries. * defined in ftrace_*.S, which do have orc entries.
* *
* If the undwinder comes across a ftrace trampoline, then find the * If the unwinder comes across a ftrace trampoline, then find the
* ftrace function that was used to create it, and use that ftrace * ftrace function that was used to create it, and use that ftrace
* function's orc entrie, as the placement of the return code in * function's orc entry, as the placement of the return code in
* the stack will be identical. * the stack will be identical.
*/ */
static struct orc_entry *orc_ftrace_find(unsigned long ip) static struct orc_entry *orc_ftrace_find(unsigned long ip)
@ -128,6 +128,16 @@ static struct orc_entry null_orc_entry = {
.type = ORC_TYPE_CALL .type = ORC_TYPE_CALL
}; };
/* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = {
.type = ORC_TYPE_CALL,
.sp_reg = ORC_REG_BP,
.sp_offset = 16,
.bp_reg = ORC_REG_PREV_SP,
.bp_offset = -16,
.end = 0,
};
static struct orc_entry *orc_find(unsigned long ip) static struct orc_entry *orc_find(unsigned long ip)
{ {
static struct orc_entry *orc; static struct orc_entry *orc;
@ -392,8 +402,16 @@ bool unwind_next_frame(struct unwind_state *state)
* calls and calls to noreturn functions. * calls and calls to noreturn functions.
*/ */
orc = orc_find(state->signal ? state->ip : state->ip - 1); orc = orc_find(state->signal ? state->ip : state->ip - 1);
if (!orc) if (!orc) {
goto err; /*
* As a fallback, try to assume this code uses a frame pointer.
* This is useful for generated code, like BPF, which ORC
* doesn't know about. This is just a guess, so the rest of
* the unwind is no longer considered reliable.
*/
orc = &orc_fp_entry;
state->error = true;
}
/* End-of-stack check for kernel threads: */ /* End-of-stack check for kernel threads: */
if (orc->sp_reg == ORC_REG_UNDEFINED) { if (orc->sp_reg == ORC_REG_UNDEFINED) {


@@ -2339,7 +2339,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
     struct kvm_lapic *apic = vcpu->arch.apic;
     u32 ppr;
 
-    if (!apic_enabled(apic))
+    if (!kvm_apic_hw_enabled(apic))
         return -1;
 
     __apic_update_ppr(apic, &ppr);


@ -5240,9 +5240,6 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
vmx = to_vmx(vcpu); vmx = to_vmx(vcpu);
vmcs12 = get_vmcs12(vcpu); vmcs12 = get_vmcs12(vcpu);
if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
if (nested_vmx_allowed(vcpu) && if (nested_vmx_allowed(vcpu) &&
(vmx->nested.vmxon || vmx->nested.smm.vmxon)) { (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
@ -5251,6 +5248,9 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
if (vmx_has_valid_vmcs12(vcpu)) { if (vmx_has_valid_vmcs12(vcpu)) {
kvm_state.size += sizeof(user_vmx_nested_state->vmcs12); kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
if (vmx->nested.hv_evmcs)
kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
if (is_guest_mode(vcpu) && if (is_guest_mode(vcpu) &&
nested_cpu_has_shadow_vmcs(vmcs12) && nested_cpu_has_shadow_vmcs(vmcs12) &&
vmcs12->vmcs_link_pointer != -1ull) vmcs12->vmcs_link_pointer != -1ull)
@ -5350,6 +5350,15 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
return -EINVAL; return -EINVAL;
/*
* KVM_STATE_NESTED_EVMCS used to signal that KVM should
* enable eVMCS capability on vCPU. However, since then
* code was changed such that flag signals vmcs12 should
* be copied into eVMCS in guest memory.
*
* To preserve backwards compatability, allow user
* to set this flag even when there is no VMXON region.
*/
if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS) if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
return -EINVAL; return -EINVAL;
} else { } else {
@ -5358,7 +5367,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
return -EINVAL; return -EINVAL;
} }
if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
@ -5373,20 +5382,21 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
* nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
* must be zero. * must be zero.
*/ */
if (is_smm(vcpu) ? kvm_state->flags : kvm_state->hdr.vmx.smm.flags) if (is_smm(vcpu) ?
(kvm_state->flags &
(KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
: kvm_state->hdr.vmx.smm.flags)
return -EINVAL; return -EINVAL;
if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
!(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
return -EINVAL; return -EINVAL;
vmx_leave_nested(vcpu); if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
if (!nested_vmx_allowed(vcpu))
return -EINVAL; return -EINVAL;
nested_enable_evmcs(vcpu, NULL); vmx_leave_nested(vcpu);
}
if (kvm_state->hdr.vmx.vmxon_pa == -1ull) if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
return 0; return 0;


@@ -1554,7 +1554,7 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
         vcpu->arch.tsc_always_catchup = 1;
         return 0;
     } else {
-        WARN(1, "user requested TSC rate below hardware speed\n");
+        pr_warn_ratelimited("user requested TSC rate below hardware speed\n");
         return -1;
     }
 }
@@ -1564,8 +1564,8 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
                    user_tsc_khz, tsc_khz);
 
     if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
-        WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
-                  user_tsc_khz);
+        pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
+                            user_tsc_khz);
         return -1;
     }

View File

@@ -671,23 +671,25 @@ static unsigned long __meminit
 phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
 unsigned long page_size_mask, bool init)
 {
-unsigned long paddr_next, paddr_last = paddr_end;
-unsigned long vaddr = (unsigned long)__va(paddr);
-int i = p4d_index(vaddr);
+unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;
+
+paddr_last = paddr_end;
+vaddr = (unsigned long)__va(paddr);
+vaddr_end = (unsigned long)__va(paddr_end);
 if (!pgtable_l5_enabled())
 return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
 page_size_mask, init);
-for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
-p4d_t *p4d;
+for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+p4d_t *p4d = p4d_page + p4d_index(vaddr);
 pud_t *pud;
-vaddr = (unsigned long)__va(paddr);
-p4d = p4d_page + p4d_index(vaddr);
-paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
+vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
+paddr = __pa(vaddr);
 if (paddr >= paddr_end) {
+paddr_next = __pa(vaddr_next);
 if (!after_bootmem &&
 !e820__mapped_any(paddr & P4D_MASK, paddr_next,
 E820_TYPE_RAM) &&
@@ -699,13 +701,13 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
 if (!p4d_none(*p4d)) {
 pud = pud_offset(p4d, 0);
-paddr_last = phys_pud_init(pud, paddr, paddr_end,
+paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
 page_size_mask, init);
 continue;
 }
 pud = alloc_low_page();
-paddr_last = phys_pud_init(pud, paddr, paddr_end,
+paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
 page_size_mask, init);
 spin_lock(&init_mm.page_table_lock);

View File

@@ -728,7 +728,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr)
 * Address range 0x0000 - 0x0fff is always mapped in the efi_pgd, so
 * page faulting on these addresses isn't expected.
 */
-if (phys_addr >= 0x0000 && phys_addr <= 0x0fff)
+if (phys_addr <= 0x0fff)
 return;
 /*

View File

@@ -240,7 +240,7 @@ static struct kmem_cache *bfq_pool;
 * containing only random (seeky) I/O are prevented from being tagged
 * as soft real-time.
 */
-#define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history & -1)
+#define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history == -1)
 /* Min number of samples required to perform peak-rate update */
 #define BFQ_RATE_MIN_SAMPLES 32

View File

@@ -934,6 +934,13 @@ void blk_mq_debugfs_register_sched(struct request_queue *q)
 {
 struct elevator_type *e = q->elevator->type;
+/*
+ * If the parent directory has not been created yet, return, we will be
+ * called again later on and the directory/files will be created then.
+ */
+if (!q->debugfs_dir)
+return;
 if (!e->queue_debugfs_attrs)
 return;

View File

@@ -388,6 +388,7 @@ static void cryptd_skcipher_free(struct skcipher_instance *inst)
 struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
 crypto_drop_skcipher(&ctx->spawn);
+kfree(inst);
 }
 static int cryptd_create_skcipher(struct crypto_template *tmpl,

View File

@@ -44,6 +44,9 @@ struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
 list_for_each_entry(q, &crypto_alg_list, cra_list) {
 int match = 0;
+if (crypto_is_larval(q))
+continue;
 if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
 continue;

View File

@@ -52,8 +52,9 @@ static const struct fb_var_screeninfo cfag12864bfb_var = {
 static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
-return vm_insert_page(vma, vma->vm_start,
-virt_to_page(cfag12864b_buffer));
+struct page *pages = virt_to_page(cfag12864b_buffer);
+
+return vm_map_pages_zero(vma, &pages, 1);
 }
 static struct fb_ops cfag12864bfb_ops = {

View File

@@ -223,9 +223,9 @@ static const struct backlight_ops ht16k33_bl_ops = {
 static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
 struct ht16k33_priv *priv = info->par;
+struct page *pages = virt_to_page(priv->fbdev.buffer);
-return vm_insert_page(vma, vma->vm_start,
-virt_to_page(priv->fbdev.buffer));
+return vm_map_pages_zero(vma, &pages, 1);
 }
 static struct fb_ops ht16k33_fb_ops = {

View File

@@ -368,7 +368,7 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
 const char *dev_id = dev ? dev_name(dev) : NULL;
 struct device_node *np = core->of_node;
-if (np && index >= 0)
+if (np && (name || index >= 0))
 hw = of_clk_get_hw(np, index, name);
 /*

View File

@@ -2734,8 +2734,8 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = {
 [CLKID_MALI_1_DIV] = &g12a_mali_1_div.hw,
 [CLKID_MALI_1] = &g12a_mali_1.hw,
 [CLKID_MALI] = &g12a_mali.hw,
-[CLKID_MPLL_5OM_DIV] = &g12a_mpll_50m_div.hw,
-[CLKID_MPLL_5OM] = &g12a_mpll_50m.hw,
+[CLKID_MPLL_50M_DIV] = &g12a_mpll_50m_div.hw,
+[CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
 [CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
 [CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
 [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,

View File

@@ -166,7 +166,7 @@
 #define CLKID_HDMI_DIV 167
 #define CLKID_MALI_0_DIV 170
 #define CLKID_MALI_1_DIV 173
-#define CLKID_MPLL_5OM_DIV 176
+#define CLKID_MPLL_50M_DIV 176
 #define CLKID_SYS_PLL_DIV16_EN 178
 #define CLKID_SYS_PLL_DIV16 179
 #define CLKID_CPU_CLK_DYN0_SEL 180

View File

@@ -1761,7 +1761,7 @@ static struct clk_regmap meson8m2_gp_pll = {
 },
 };
-static const char * const mmeson8b_vpu_0_1_parent_names[] = {
+static const char * const meson8b_vpu_0_1_parent_names[] = {
 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
 };
@@ -1778,8 +1778,8 @@ static struct clk_regmap meson8b_vpu_0_sel = {
 .hw.init = &(struct clk_init_data){
 .name = "vpu_0_sel",
 .ops = &clk_regmap_mux_ops,
-.parent_names = mmeson8b_vpu_0_1_parent_names,
-.num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names),
+.parent_names = meson8b_vpu_0_1_parent_names,
+.num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names),
 .flags = CLK_SET_RATE_PARENT,
 },
 };
@@ -1837,8 +1837,8 @@ static struct clk_regmap meson8b_vpu_1_sel = {
 .hw.init = &(struct clk_init_data){
 .name = "vpu_1_sel",
 .ops = &clk_regmap_mux_ops,
-.parent_names = mmeson8b_vpu_0_1_parent_names,
-.num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names),
+.parent_names = meson8b_vpu_0_1_parent_names,
+.num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names),
 .flags = CLK_SET_RATE_PARENT,
 },
 };

View File

@@ -103,9 +103,9 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
 { STRATIX10_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
 0, 0, 0, 0x3C, 1},
 { STRATIX10_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
-0, 0, 4, 0xB0, 0},
+0, 0, 2, 0xB0, 0},
 { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
-0, 0, 4, 0xB0, 1},
+0, 0, 2, 0xB0, 1},
 { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
 ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2},
 { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,

View File

@@ -3366,6 +3366,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
 { TEGRA210_CLK_I2S3_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
 { TEGRA210_CLK_I2S4_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
 { TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+{ TEGRA210_CLK_HDA, TEGRA210_CLK_PLL_P, 51000000, 0 },
+{ TEGRA210_CLK_HDA2CODEC_2X, TEGRA210_CLK_PLL_P, 48000000, 0 },
 /* This MUST be the last entry. */
 { TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
 };

View File

@@ -229,6 +229,7 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
 {
 struct omap_clkctrl_provider *provider = data;
 struct omap_clkctrl_clk *entry;
+bool found = false;
 if (clkspec->args_count != 2)
 return ERR_PTR(-EINVAL);
@@ -238,11 +239,13 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
 list_for_each_entry(entry, &provider->clocks, node) {
 if (entry->reg_offset == clkspec->args[0] &&
-entry->bit_offset == clkspec->args[1])
+entry->bit_offset == clkspec->args[1]) {
+found = true;
 break;
+}
 }
-if (!entry)
+if (!found)
 return ERR_PTR(-EINVAL);
 return entry->clk;

View File

@@ -718,12 +718,13 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
 {
 struct jz4780_dma_dev *jzdma = data;
 unsigned int nb_channels = jzdma->soc_data->nb_channels;
-uint32_t pending, dmac;
+unsigned long pending;
+uint32_t dmac;
 int i;
 pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
-for_each_set_bit(i, (unsigned long *)&pending, nb_channels) {
+for_each_set_bit(i, &pending, nb_channels) {
 if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
 pending &= ~BIT(i);
 }

View File

@@ -703,7 +703,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 spin_lock_irqsave(&sdma->channel_0_lock, flags);
 bd0->mode.command = C0_SETPM;
-bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
 bd0->mode.count = size / 2;
 bd0->buffer_addr = buf_phys;
 bd0->ext_buffer_addr = address;
@@ -1025,7 +1025,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 context->gReg[7] = sdmac->watermark_level;
 bd0->mode.command = C0_SETDM;
-bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
 bd0->mode.count = sizeof(*context) / 4;
 bd0->buffer_addr = sdma->context_phys;
 bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
@@ -2096,27 +2096,6 @@ static int sdma_probe(struct platform_device *pdev)
 if (pdata && pdata->script_addrs)
 sdma_add_scripts(sdma, pdata->script_addrs);
-if (pdata) {
-ret = sdma_get_firmware(sdma, pdata->fw_name);
-if (ret)
-dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
-} else {
-/*
- * Because that device tree does not encode ROM script address,
- * the RAM script in firmware is mandatory for device tree
- * probe, otherwise it fails.
- */
-ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
-&fw_name);
-if (ret)
-dev_warn(&pdev->dev, "failed to get firmware name\n");
-else {
-ret = sdma_get_firmware(sdma, fw_name);
-if (ret)
-dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
-}
-}
 sdma->dma_device.dev = &pdev->dev;
 sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
@@ -2161,6 +2140,33 @@ static int sdma_probe(struct platform_device *pdev)
 of_node_put(spba_bus);
 }
+/*
+ * Kick off firmware loading as the very last step:
+ * attempt to load firmware only if we're not on the error path, because
+ * the firmware callback requires a fully functional and allocated sdma
+ * instance.
+ */
+if (pdata) {
+ret = sdma_get_firmware(sdma, pdata->fw_name);
+if (ret)
+dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
+} else {
+/*
+ * Because that device tree does not encode ROM script address,
+ * the RAM script in firmware is mandatory for device tree
+ * probe, otherwise it fails.
+ */
+ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
+&fw_name);
+if (ret) {
+dev_warn(&pdev->dev, "failed to get firmware name\n");
+} else {
+ret = sdma_get_firmware(sdma, fw_name);
+if (ret)
+dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
+}
+}
 return 0;
 err_register:

View File

@@ -799,6 +799,9 @@ static u32 process_channel_irqs(struct bam_device *bdev)
 /* Number of bytes available to read */
 avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
+if (offset < bchan->head)
+avail--;
 list_for_each_entry_safe(async_desc, tmp,
 &bchan->desc_list, desc_node) {
 /* Not enough data to read */

View File

@@ -47,11 +47,6 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
 bgrt->version);
 goto out;
 }
-if (bgrt->status & 0xfe) {
-pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
-bgrt->status);
-goto out;
-}
 if (bgrt->image_type != 0) {
 pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
 bgrt->image_type);

View File

@@ -1009,14 +1009,16 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
 /* first try to find a slot in an existing linked list entry */
 for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
-rsv = __va(prsv);
+rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
 index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
 if (index < rsv->size) {
 rsv->entry[index].base = addr;
 rsv->entry[index].size = size;
+memunmap(rsv);
 return 0;
 }
+memunmap(rsv);
 }
 /* no slot found - allocate a new linked list entry */
@@ -1024,7 +1026,13 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
 if (!rsv)
 return -ENOMEM;
-rsv->size = EFI_MEMRESERVE_COUNT(PAGE_SIZE);
+/*
+ * The memremap() call above assumes that a linux_efi_memreserve entry
+ * never crosses a page boundary, so let's ensure that this remains true
+ * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
+ * using SZ_4K explicitly in the size calculation below.
+ */
+rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
 atomic_set(&rsv->count, 1);
 rsv->entry[0].base = addr;
 rsv->entry[0].size = size;

View File

@@ -43,11 +43,13 @@ static int efibc_set_variable(const char *name, const char *value)
 efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data);
 memcpy(&entry->var.VendorGuid, &guid, sizeof(guid));
-ret = efivar_entry_set(entry,
-EFI_VARIABLE_NON_VOLATILE
-| EFI_VARIABLE_BOOTSERVICE_ACCESS
-| EFI_VARIABLE_RUNTIME_ACCESS,
-size, entry->var.Data, NULL);
+ret = efivar_entry_set_safe(entry->var.VariableName,
+entry->var.VendorGuid,
+EFI_VARIABLE_NON_VOLATILE
+| EFI_VARIABLE_BOOTSERVICE_ACCESS
+| EFI_VARIABLE_RUNTIME_ACCESS,
+false, size, entry->var.Data);
 if (ret)
 pr_err("failed to set %s EFI variable: 0x%x\n",
 name, ret);

View File

@@ -118,8 +118,15 @@ static void of_gpio_flags_quirks(struct device_node *np,
 * Legacy handling of SPI active high chip select. If we have a
 * property named "cs-gpios" we need to inspect the child node
 * to determine if the flags should have inverted semantics.
+ *
+ * This does not apply to an SPI device named "spi-gpio", because
+ * these have traditionally obtained their own GPIOs by parsing
+ * the device tree directly and did not respect any "spi-cs-high"
+ * property on the SPI bus children.
 */
-if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") &&
+if (IS_ENABLED(CONFIG_SPI_MASTER) &&
+!strcmp(propname, "cs-gpios") &&
+!of_device_is_compatible(np, "spi-gpio") &&
 of_property_read_bool(np, "cs-gpios")) {
 struct device_node *child;
 u32 cs;

View File

@@ -1959,25 +1959,6 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
 mutex_unlock(&adev->srbm_mutex);
 gfx_v9_0_init_compute_vmid(adev);
-mutex_lock(&adev->grbm_idx_mutex);
-/*
- * making sure that the following register writes will be broadcasted
- * to all the shaders
- */
-gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
-(adev->gfx.config.sc_prim_fifo_size_frontend <<
-PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
-(adev->gfx.config.sc_prim_fifo_size_backend <<
-PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
-(adev->gfx.config.sc_hiz_tile_fifo_size <<
-PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
-(adev->gfx.config.sc_earlyz_tile_fifo_size <<
-PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
-mutex_unlock(&adev->grbm_idx_mutex);
 }
 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)

View File

@@ -326,7 +326,7 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
 if (ret)
 return ret;
-ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
+ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
 return ret;
 }

View File

@@ -916,8 +916,10 @@ static int init_thermal_controller(
 PHM_PlatformCaps_ThermalController
 );
-if (0 == powerplay_table->usFanTableOffset)
+if (0 == powerplay_table->usFanTableOffset) {
+hwmgr->thermal_controller.use_hw_fan_control = 1;
 return 0;
+}
 fan_table = (const PPTable_Generic_SubTable_Header *)
 (((unsigned long)powerplay_table) +

View File

@@ -694,6 +694,7 @@ struct pp_thermal_controller_info {
 uint8_t ucType;
 uint8_t ucI2cLine;
 uint8_t ucI2cAddress;
+uint8_t use_hw_fan_control;
 struct pp_fan_info fanInfo;
 struct pp_advance_fan_control_parameters advanceFanControlParameters;
 };

View File

@@ -2092,6 +2092,10 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
 return 0;
 }
+/* use hardware fan control */
+if (hwmgr->thermal_controller.use_hw_fan_control)
+return 0;
 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
 usPWMMin * duty100;
 do_div(tmp64, 10000);

View File

@@ -760,7 +760,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 if (IS_ERR(gpu->cmdbuf_suballoc)) {
 dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
 ret = PTR_ERR(gpu->cmdbuf_suballoc);
-goto fail;
+goto destroy_iommu;
 }
 /* Create buffer: */
@@ -768,7 +768,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 PAGE_SIZE);
 if (ret) {
 dev_err(gpu->dev, "could not create command buffer\n");
-goto destroy_iommu;
+goto destroy_suballoc;
 }
 if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
@@ -800,6 +800,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 free_buffer:
 etnaviv_cmdbuf_free(&gpu->buffer);
 gpu->buffer.suballoc = NULL;
+destroy_suballoc:
+etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
+gpu->cmdbuf_suballoc = NULL;
 destroy_iommu:
 etnaviv_iommu_destroy(gpu->mmu);
 gpu->mmu = NULL;

View File

@@ -1888,12 +1888,12 @@ static int ring_request_alloc(struct i915_request *request)
 */
 request->reserved_space += LEGACY_REQUEST_SIZE;
-ret = switch_context(request);
+/* Unconditionally invalidate GPU caches and TLBs. */
+ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
 if (ret)
 return ret;
-/* Unconditionally invalidate GPU caches and TLBs. */
-ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ret = switch_context(request);
 if (ret)
 return ret;

View File

@@ -91,14 +91,14 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
 ipu_dc_disable(ipu);
 ipu_prg_disable(ipu);
+drm_crtc_vblank_off(crtc);
 spin_lock_irq(&crtc->dev->event_lock);
-if (crtc->state->event) {
+if (crtc->state->event && !crtc->state->active) {
 drm_crtc_send_vblank_event(crtc, crtc->state->event);
 crtc->state->event = NULL;
 }
 spin_unlock_irq(&crtc->dev->event_lock);
-drm_crtc_vblank_off(crtc);
 }
 static void imx_drm_crtc_reset(struct drm_crtc *crtc)

View File

@@ -63,7 +63,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
 return 0;
 err_free:
-drm_gem_object_put_unlocked(&shmem->base);
+drm_gem_handle_delete(file, args->handle);
 return ret;
 }

View File

@@ -619,11 +619,11 @@ static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
 output = vgdev->outputs + scanout;
 new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
+drm_connector_update_edid_property(&output->conn, new_edid);
 spin_lock(&vgdev->display_info_lock);
 old_edid = output->edid;
 output->edid = new_edid;
-drm_connector_update_edid_property(&output->conn, output->edid);
 spin_unlock(&vgdev->display_info_lock);
 kfree(old_edid);

View File

@@ -80,6 +80,7 @@
 #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220
 #define HID_DEVICE_ID_ALPS_U1 0x1215
 #define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C
+#define HID_DEVICE_ID_ALPS_1222 0x1222
 #define USB_VENDOR_ID_AMI 0x046b
@@ -269,6 +270,7 @@
 #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
 #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
 #define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053
+#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2 0x0939
 #define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
 #define USB_DEVICE_ID_ASUS_AK1D 0x1125
 #define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A 0x1408
@@ -569,6 +571,7 @@
 #define USB_VENDOR_ID_HUION 0x256c
 #define USB_DEVICE_ID_HUION_TABLET 0x006e
+#define USB_DEVICE_ID_HUION_HS64 0x006d
 #define USB_VENDOR_ID_IBM 0x04b3
 #define USB_DEVICE_ID_IBM_SCROLLPOINT_III 0x3100

View File

@@ -30,6 +30,7 @@
 #define REPORT_ID_HIDPP_SHORT 0x10
 #define REPORT_ID_HIDPP_LONG 0x11
+#define REPORT_ID_HIDPP_VERY_LONG 0x12
 #define HIDPP_REPORT_SHORT_LENGTH 7
 #define HIDPP_REPORT_LONG_LENGTH 20
@@ -1242,7 +1243,8 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
 int ret;
 if ((buf[0] == REPORT_ID_HIDPP_SHORT) ||
-(buf[0] == REPORT_ID_HIDPP_LONG)) {
+(buf[0] == REPORT_ID_HIDPP_LONG) ||
+(buf[0] == REPORT_ID_HIDPP_VERY_LONG)) {
 if (count < 2)
 return -EINVAL;

View File

@@ -1776,6 +1776,10 @@ static const struct hid_device_id mt_devices[] = {
 HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
 USB_VENDOR_ID_ALPS_JP,
 HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) },
+{ .driver_data = MT_CLS_WIN_8_DUAL,
+HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+USB_VENDOR_ID_ALPS_JP,
+HID_DEVICE_ID_ALPS_1222) },
 /* Lenovo X1 TAB Gen 2 */
 { .driver_data = MT_CLS_WIN_8_DUAL,

View File

@@ -42,6 +42,7 @@ static const struct hid_device_id hid_quirks[] = {
 { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM), HID_QUIRK_NOGET },
 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH), HID_QUIRK_MULTI_INPUT },
 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2), HID_QUIRK_ALWAYS_POLL },
 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS), HID_QUIRK_MULTI_INPUT },
 { HID_USB_DEVICE(USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD), HID_QUIRK_BADPAD },
 { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK), HID_QUIRK_NOGET },

View File

@@ -369,6 +369,8 @@ static const struct hid_device_id uclogic_devices[] = {
 USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
 { HID_USB_DEVICE(USB_VENDOR_ID_HUION,
 USB_DEVICE_ID_HUION_TABLET) },
+{ HID_USB_DEVICE(USB_VENDOR_ID_HUION,
+USB_DEVICE_ID_HUION_HS64) },
 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
 USB_DEVICE_ID_HUION_TABLET) },
 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,

View File

@@ -977,6 +977,8 @@ int uclogic_params_init(struct uclogic_params *params,
 /* FALL THROUGH */
 case VID_PID(USB_VENDOR_ID_HUION,
 USB_DEVICE_ID_HUION_TABLET):
+case VID_PID(USB_VENDOR_ID_HUION,
+USB_DEVICE_ID_HUION_HS64):
 case VID_PID(USB_VENDOR_ID_UCLOGIC,
 USB_DEVICE_ID_HUION_TABLET):
 case VID_PID(USB_VENDOR_ID_UCLOGIC,

View File

@@ -816,9 +816,9 @@ static int load_fw_from_host(struct ishtp_cl_data *client_data)
 goto end_err_fw_release;
 release_firmware(fw);
-kfree(filename);
 dev_info(cl_data_to_dev(client_data), "ISH firmware %s loaded\n",
 filename);
+kfree(filename);
 return 0;
 end_err_fw_release:

View File

@@ -891,7 +891,7 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
 */
 static int hid_ishtp_cl_suspend(struct device *device)
 {
-struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
+struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
 struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
 struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
@@ -912,7 +912,7 @@ static int hid_ishtp_cl_suspend(struct device *device)
 */
 static int hid_ishtp_cl_resume(struct device *device)
 {
-struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
+struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
 struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
 struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);

View File

@@ -471,7 +471,6 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
 }
 ishtp_device_ready = true;
-dev_set_drvdata(&device->dev, device);
 return device;
 }
@@ -639,6 +638,20 @@ void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device)
 }
 EXPORT_SYMBOL(ishtp_get_drvdata);
+/**
+ * ishtp_dev_to_cl_device() - get ishtp_cl_device instance from device instance
+ * @device: device instance
+ *
+ * Get ish_cl_device instance which embeds device instance in it.
+ *
+ * Return: pointer to ishtp_cl_device instance
+ */
+struct ishtp_cl_device *ishtp_dev_to_cl_device(struct device *device)
+{
+return to_ishtp_cl_device(device);
+}
+EXPORT_SYMBOL(ishtp_dev_to_cl_device);
 /**
 * ishtp_bus_new_client() - Create a new client
 * @dev: ISHTP device instance

View File

@@ -89,8 +89,19 @@ static int csky_irq_set_affinity(struct irq_data *d,
 if (cpu >= nr_cpu_ids)
 return -EINVAL;
-/* Enable interrupt destination */
-cpu |= BIT(31);
+/*
+ * The csky,mpintc could support auto irq deliver, but it only
+ * could deliver external irq to one cpu or all cpus. So it
+ * doesn't support deliver external irq to a group of cpus
+ * with cpu_mask.
+ * SO we only use auto deliver mode when affinity mask_val is
+ * equal to cpu_present_mask.
+ *
+ */
+if (cpumask_equal(mask_val, cpu_present_mask))
+cpu = 0;
+else
+cpu |= BIT(31);
 writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset);

View File

@@ -733,32 +733,43 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
 }
 static int its_wait_for_range_completion(struct its_node *its,
-struct its_cmd_block *from,
+u64 prev_idx,
 struct its_cmd_block *to)
 {
-u64 rd_idx, from_idx, to_idx;
+u64 rd_idx, to_idx, linear_idx;
 u32 count = 1000000; /* 1s! */
-from_idx = its_cmd_ptr_to_offset(its, from);
+/* Linearize to_idx if the command set has wrapped around */
 to_idx = its_cmd_ptr_to_offset(its, to);
+if (to_idx < prev_idx)
+to_idx += ITS_CMD_QUEUE_SZ;
+
+linear_idx = prev_idx;
 while (1) {
+s64 delta;
+
 rd_idx = readl_relaxed(its->base + GITS_CREADR);
-/* Direct case */
-if (from_idx < to_idx && rd_idx >= to_idx)
-break;
+/*
+ * Compute the read pointer progress, taking the
+ * potential wrap-around into account.
+ */
+delta = rd_idx - prev_idx;
+if (rd_idx < prev_idx)
+delta += ITS_CMD_QUEUE_SZ;
-/* Wrapped case */
-if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
+linear_idx += delta;
+if (linear_idx >= to_idx)
 break;
 count--;
 if (!count) {
-pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
-from_idx, to_idx, rd_idx);
+pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
+to_idx, linear_idx);
 return -1;
 }
+prev_idx = rd_idx;
 cpu_relax();
 udelay(1);
 }
@@ -775,6 +786,7 @@ void name(struct its_node *its, \
 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
 synctype *sync_obj; \
 unsigned long flags; \
+u64 rd_idx; \
 \
 raw_spin_lock_irqsave(&its->lock, flags); \
 \
@@ -796,10 +808,11 @@ void name(struct its_node *its, \
 } \
 \
 post: \
+rd_idx = readl_relaxed(its->base + GITS_CREADR); \
 next_cmd = its_post_commands(its); \
 raw_spin_unlock_irqrestore(&its->lock, flags); \
 \
-if (its_wait_for_range_completion(its, cmd, next_cmd)) \
+if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
 }

View File

@@ -388,7 +388,7 @@ static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
 intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
 cd = irq_data_get_irq_chip_data(d);
-write_gic_vl_map(intr, cd->map);
+write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
 if (cd->mask)
 write_gic_vl_smask(BIT(intr));
 }
@@ -517,7 +517,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
 spin_lock_irqsave(&gic_lock, flags);
 for_each_online_cpu(cpu) {
 write_gic_vl_other(mips_cm_vp_id(cpu));
-write_gic_vo_map(intr, map);
+write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
 }
 spin_unlock_irqrestore(&gic_lock, flags);

View File

@@ -159,9 +159,9 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
 parent_fwspec.param[1] = vint_desc->vint_id;
 parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
-if (parent_virq <= 0) {
+if (parent_virq == 0) {
 kfree(vint_desc);
-return ERR_PTR(parent_virq);
+return ERR_PTR(-EINVAL);
 }
 vint_desc->parent_virq = parent_virq;

View File

@@ -140,8 +140,8 @@ static char __init *dm_parse_table_entry(struct dm_device *dev, char *str)
 return ERR_PTR(-EINVAL);
 }
 /* target_args */
-dev->target_args_array[n] = kstrndup(field[3], GFP_KERNEL,
-DM_MAX_STR_SIZE);
+dev->target_args_array[n] = kstrndup(field[3], DM_MAX_STR_SIZE,
+GFP_KERNEL);
 if (!dev->target_args_array[n])
 return ERR_PTR(-ENOMEM);
@@ -272,10 +272,10 @@ static int __init dm_init_init(void)
 return 0;
 if (strlen(create) >= DM_MAX_STR_SIZE) {
-DMERR("Argument is too big. Limit is %d\n", DM_MAX_STR_SIZE);
+DMERR("Argument is too big. Limit is %d", DM_MAX_STR_SIZE);
 return -EINVAL;
 }
-str = kstrndup(create, GFP_KERNEL, DM_MAX_STR_SIZE);
+str = kstrndup(create, DM_MAX_STR_SIZE, GFP_KERNEL);
 if (!str)
 return -ENOMEM;
@@ -283,7 +283,7 @@ static int __init dm_init_init(void)
 if (r)
 goto out;
-DMINFO("waiting for all devices to be available before creating mapped devices\n");
+DMINFO("waiting for all devices to be available before creating mapped devices");
 wait_for_device_probe();
 list_for_each_entry(dev, &devices, list) {

View File

@@ -60,6 +60,7 @@
 #define WRITE_LOG_VERSION 1ULL
 #define WRITE_LOG_MAGIC 0x6a736677736872ULL
+#define WRITE_LOG_SUPER_SECTOR 0
 /*
 * The disk format for this is braindead simple.
@@ -115,6 +116,7 @@ struct log_writes_c {
 struct list_head logging_blocks;
 wait_queue_head_t wait;
 struct task_struct *log_kthread;
+struct completion super_done;
 };
 struct pending_block {
@@ -180,6 +182,14 @@ static void log_end_io(struct bio *bio)
 bio_put(bio);
 }
+static void log_end_super(struct bio *bio)
+{
+struct log_writes_c *lc = bio->bi_private;
+
+complete(&lc->super_done);
+log_end_io(bio);
+}
 /*
 * Meant to be called if there is an error, it will free all the pages
 * associated with the block.
@@ -215,7 +225,8 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
 bio->bi_iter.bi_size = 0;
 bio->bi_iter.bi_sector = sector;
 bio_set_dev(bio, lc->logdev->bdev);
-bio->bi_end_io = log_end_io;
+bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
+log_end_super : log_end_io;
 bio->bi_private = lc;
 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -418,11 +429,18 @@ static int log_super(struct log_writes_c *lc)
 super.nr_entries = cpu_to_le64(lc->logged_entries);
 super.sectorsize = cpu_to_le32(lc->sectorsize);
-if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) {
+if (write_metadata(lc, &super, sizeof(super), NULL, 0,
+WRITE_LOG_SUPER_SECTOR)) {
 DMERR("Couldn't write super");
 return -1;
 }
+/*
+ * Super sector should be writen in-order, otherwise the
+ * nr_entries could be rewritten incorrectly by an old bio.
+ */
+wait_for_completion_io(&lc->super_done);
 return 0;
 }
@@ -531,6 +549,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 INIT_LIST_HEAD(&lc->unflushed_blocks);
 INIT_LIST_HEAD(&lc->logging_blocks);
 init_waitqueue_head(&lc->wait);
+init_completion(&lc->super_done);
 atomic_set(&lc->io_blocks, 0);
 atomic_set(&lc->pending_blocks, 0);

View File

@@ -561,7 +561,7 @@ static char **realloc_argv(unsigned *size, char **old_argv)
 gfp = GFP_NOIO;
 }
 argv = kmalloc_array(new_size, sizeof(*argv), gfp);
-if (argv) {
+if (argv && old_argv) {
 memcpy(argv, old_argv, *size * sizeof(*argv));
 *size = new_size;
 }

View File

@@ -235,8 +235,8 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
 BUG();
 }
-DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
-block);
+DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
+type_str, block);
 if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
 DMERR("%s: reached maximum errors", v->data_dev->name);

View File

@@ -204,12 +204,11 @@ static struct irq_chip stmfx_irq_chip = {
 static irqreturn_t stmfx_irq_handler(int irq, void *data)
 {
 struct stmfx *stmfx = data;
-unsigned long n, pending;
-u32 ack;
-int ret;
+unsigned long bits;
+u32 pending, ack;
+int n, ret;
-ret = regmap_read(stmfx->map, STMFX_REG_IRQ_PENDING,
-(u32 *)&pending);
+ret = regmap_read(stmfx->map, STMFX_REG_IRQ_PENDING, &pending);
 if (ret)
 return IRQ_NONE;
@@ -224,7 +223,8 @@ static irqreturn_t stmfx_irq_handler(int irq, void *data)
 return IRQ_NONE;
 }
-for_each_set_bit(n, &pending, STMFX_REG_IRQ_SRC_MAX)
+bits = pending;
+for_each_set_bit(n, &bits, STMFX_REG_IRQ_SRC_MAX)
 handle_nested_irq(irq_find_mapping(stmfx->irq_domain, n));
 return IRQ_HANDLED;

View File

@@ -16,7 +16,7 @@ config MTD_NAND_JZ4780
 if MTD_NAND_JZ4780
 config MTD_NAND_INGENIC_ECC
-	tristate
+	bool
 config MTD_NAND_JZ4740_ECC
 	tristate "Hardware BCH support for JZ4740 SoC"

Some files were not shown because too many files have changed in this diff.