Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Synced 2025-01-17 02:36:21 +00:00

Commit ef858b6194: ASoC: Intel: Soundwire related board and match updates

Merge series from Peter Ujfalusi <peter.ujfalusi@linux.intel.com>:

A small update for SDW machine support:
- small fixes for the sof_sdw machine driver
- support for rt722
- new TGL/MTL and LNL matches for new configurations
@@ -375,9 +375,9 @@ Developer web site of Loongson and LoongArch (Software and Documentation):

 Documentation of LoongArch ISA:

-  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.02-CN.pdf (in Chinese)
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.10-CN.pdf (in Chinese)

-  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.02-EN.pdf (in English)
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.10-EN.pdf (in English)

 Documentation of LoongArch ELF psABI:
@@ -77,7 +77,7 @@ Protocol 2.14	BURNT BY INCORRECT COMMIT
 Protocol 2.15	(Kernel 5.5) Added the kernel_info and kernel_info.setup_type_max.
 =============	============================================================

 .. note::
     The protocol version number should be changed only if the setup header
     is changed. There is no need to update the version number if boot_params
     or kernel_info are changed. Additionally, it is recommended to use
@@ -275,12 +275,12 @@ allOf:
     properties:
       rx-internal-delay-ps:
         description:
-          RGMII Receive Clock Delay defined in pico seconds.This is used for
+          RGMII Receive Clock Delay defined in pico seconds. This is used for
           controllers that have configurable RX internal delays. If this
           property is present then the MAC applies the RX delay.
       tx-internal-delay-ps:
         description:
-          RGMII Transmit Clock Delay defined in pico seconds.This is used for
+          RGMII Transmit Clock Delay defined in pico seconds. This is used for
           controllers that have configurable TX internal delays. If this
           property is present then the MAC applies the TX delay.
@@ -36,6 +36,7 @@ properties:
           - qcom,sm8350-ufshc
           - qcom,sm8450-ufshc
           - qcom,sm8550-ufshc
+          - qcom,sm8650-ufshc
       - const: qcom,ufshc
       - const: jedec,ufs-2.0

@@ -122,6 +123,7 @@ allOf:
               - qcom,sm8350-ufshc
               - qcom,sm8450-ufshc
               - qcom,sm8550-ufshc
+              - qcom,sm8650-ufshc
     then:
       properties:
         clocks:
@@ -36,7 +36,11 @@ properties:

   vdd-supply:
     description:
-      VDD power supply to the hub
+      3V3 power supply to the hub
+
+  vdd2-supply:
+    description:
+      1V2 power supply to the hub

   peer-hub:
     $ref: /schemas/types.yaml#/definitions/phandle

@@ -62,6 +66,7 @@ allOf:
         properties:
           reset-gpios: false
           vdd-supply: false
+          vdd2-supply: false
           peer-hub: false
           i2c-bus: false
     else:
@@ -521,8 +521,8 @@ examples:

         interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
                      <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
-                     <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
-                     <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
+                     <GIC_SPI 488 IRQ_TYPE_EDGE_BOTH>,
+                     <GIC_SPI 489 IRQ_TYPE_EDGE_BOTH>;
         interrupt-names = "hs_phy_irq", "ss_phy_irq",
                           "dm_hs_phy_irq", "dp_hs_phy_irq";
@@ -41,7 +41,7 @@ examples:
  - |
    usb {
        phys = <&usb2_phy1>, <&usb3_phy1>;
-       phy-names = "usb";
+       phy-names = "usb2", "usb3";
        #address-cells = <1>;
        #size-cells = <0>;
@@ -91,6 +91,10 @@ compatibility checking tool (fsck.erofs), and a debugging tool (dump.erofs):

 - git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git

+For more information, please also refer to the documentation site:
+
+- https://erofs.docs.kernel.org
+
 Bugs and patches are welcome, please kindly help us and send to the following
 linux-erofs mailing list:
@@ -193,9 +193,23 @@ Review timelines
 Generally speaking, the patches get triaged quickly (in less than
 48h). But be patient, if your patch is active in patchwork (i.e. it's
 listed on the project's patch list) the chances it was missed are close to zero.
 Asking the maintainer for status updates on your
 patch is a good way to ensure your patch is ignored or pushed to the
 bottom of the priority list.

+The high volume of development on netdev makes reviewers move on
+from discussions relatively quickly. New comments and replies
+are very unlikely to arrive after a week of silence. If a patch
+is no longer active in patchwork and the thread went idle for more
+than a week - clarify the next steps and/or post the next version.
+
+For RFC postings specifically, if nobody responded in a week - reviewers
+either missed the posting or have no strong opinions. If the code is ready,
+repost as a PATCH.
+
+Emails saying just "ping" or "bump" are considered rude. If you can't figure
+out the status of the patch from patchwork or where the discussion has
+landed - describe your best guess and ask if it's correct. For example::
+
+  I don't understand what the next steps are. Person X seems to be unhappy
+  with A, should I do B and repost the patches?

 .. _Changes requested:
@@ -338,9 +338,9 @@ Developer web site of Loongson and LoongArch (software and documentation resources):

 Documentation of the LoongArch instruction set architecture:

-  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.02-CN.pdf (Chinese edition)
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.10-CN.pdf (Chinese edition)

-  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.02-EN.pdf (English edition)
+  https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-Vol1-v1.10-EN.pdf (English edition)

 Documentation of the LoongArch ELF psABI:
MAINTAINERS (40 lines changed)

@@ -7855,6 +7855,7 @@ R: Yue Hu <huyue2@coolpad.com>
 R:	Jeffle Xu <jefflexu@linux.alibaba.com>
 L:	linux-erofs@lists.ozlabs.org
 S:	Maintained
+W:	https://erofs.docs.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git
 F:	Documentation/ABI/testing/sysfs-fs-erofs
 F:	Documentation/filesystems/erofs.rst

@@ -8949,7 +8950,6 @@ S: Maintained
 F:	scripts/get_maintainer.pl

 GFS2 FILE SYSTEM
-M:	Bob Peterson <rpeterso@redhat.com>
 M:	Andreas Gruenbacher <agruenba@redhat.com>
 L:	gfs2@lists.linux.dev
 S:	Supported

@@ -11024,7 +11024,6 @@ F: drivers/net/wireless/intel/iwlwifi/

 INTEL WMI SLIM BOOTLOADER (SBL) FIRMWARE UPDATE DRIVER
 M:	Jithu Joseph <jithu.joseph@intel.com>
-R:	Maurice Ma <maurice.ma@intel.com>
 S:	Maintained
 W:	https://slimbootloader.github.io/security/firmware-update.html
 F:	drivers/platform/x86/intel/wmi/sbl-fw-update.c

@@ -13778,7 +13777,6 @@ F: drivers/net/ethernet/mellanox/mlxfw/
 MELLANOX HARDWARE PLATFORM SUPPORT
 M:	Hans de Goede <hdegoede@redhat.com>
 M:	Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
-M:	Mark Gross <markgross@kernel.org>
 M:	Vadim Pasternak <vadimp@nvidia.com>
 L:	platform-driver-x86@vger.kernel.org
 S:	Supported

@@ -14387,7 +14385,6 @@ F: drivers/platform/surface/surface_gpe.c
 MICROSOFT SURFACE HARDWARE PLATFORM SUPPORT
 M:	Hans de Goede <hdegoede@redhat.com>
 M:	Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
-M:	Mark Gross <markgross@kernel.org>
 M:	Maximilian Luz <luzmaximilian@gmail.com>
 L:	platform-driver-x86@vger.kernel.org
 S:	Maintained

@@ -14994,6 +14991,7 @@ M: Jakub Kicinski <kuba@kernel.org>
 M:	Paolo Abeni <pabeni@redhat.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 P:	Documentation/process/maintainer-netdev.rst
 Q:	https://patchwork.kernel.org/project/netdevbpf/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git

@@ -15045,6 +15043,7 @@ M: Jakub Kicinski <kuba@kernel.org>
 M:	Paolo Abeni <pabeni@redhat.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 P:	Documentation/process/maintainer-netdev.rst
 Q:	https://patchwork.kernel.org/project/netdevbpf/list/
+B:	mailto:netdev@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git

@@ -15055,6 +15054,7 @@ F: Documentation/networking/
 F:	Documentation/process/maintainer-netdev.rst
 F:	Documentation/userspace-api/netlink/
 F:	include/linux/in.h
 F:	include/linux/indirect_call_wrapper.h
 F:	include/linux/net.h
 F:	include/linux/netdevice.h
 F:	include/net/

@@ -21768,7 +21768,9 @@ F: Documentation/devicetree/bindings/counter/ti-eqep.yaml
 F:	drivers/counter/ti-eqep.c

 TI ETHERNET SWITCH DRIVER (CPSW)
 R:	Grygorii Strashko <grygorii.strashko@ti.com>
+R:	Siddharth Vadapalli <s-vadapalli@ti.com>
+R:	Ravi Gunasekaran <r-gunasekaran@ti.com>
 R:	Roger Quadros <rogerq@kernel.org>
 L:	linux-omap@vger.kernel.org
 L:	netdev@vger.kernel.org
 S:	Maintained

@@ -21792,6 +21794,15 @@ F: Documentation/devicetree/bindings/media/i2c/ti,ds90*
 F:	drivers/media/i2c/ds90*
 F:	include/media/i2c/ds90*

+TI ICSSG ETHERNET DRIVER (ICSSG)
+R:	MD Danish Anwar <danishanwar@ti.com>
+R:	Roger Quadros <rogerq@kernel.org>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/net/ti,icss*.yaml
+F:	drivers/net/ethernet/ti/icssg/*
+
 TI J721E CSI2RX DRIVER
 M:	Jai Luthra <j-luthra@ti.com>
 L:	linux-media@vger.kernel.org

@@ -22067,6 +22078,7 @@ F: drivers/watchdog/tqmx86_wdt.c
 TRACING
 M:	Steven Rostedt <rostedt@goodmis.org>
 M:	Masami Hiramatsu <mhiramat@kernel.org>
+R:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 L:	linux-kernel@vger.kernel.org
 L:	linux-trace-kernel@vger.kernel.org
 S:	Maintained

@@ -23653,7 +23665,6 @@ F: drivers/platform/x86/x86-android-tablets/
 X86 PLATFORM DRIVERS
 M:	Hans de Goede <hdegoede@redhat.com>
 M:	Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
-M:	Mark Gross <markgross@kernel.org>
 L:	platform-driver-x86@vger.kernel.org
 S:	Maintained
 Q:	https://patchwork.kernel.org/project/platform-driver-x86/list/

@@ -23691,6 +23702,20 @@ F: arch/x86/kernel/dumpstack.c
 F:	arch/x86/kernel/stacktrace.c
 F:	arch/x86/kernel/unwind_*.c

+X86 TRUST DOMAIN EXTENSIONS (TDX)
+M:	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+R:	Dave Hansen <dave.hansen@linux.intel.com>
+L:	x86@kernel.org
+L:	linux-coco@lists.linux.dev
+S:	Supported
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/tdx
+F:	arch/x86/boot/compressed/tdx*
+F:	arch/x86/coco/tdx/
+F:	arch/x86/include/asm/shared/tdx.h
+F:	arch/x86/include/asm/tdx.h
+F:	arch/x86/virt/vmx/tdx/
+F:	drivers/virt/coco/tdx-guest
+
 X86 VDSO
 M:	Andy Lutomirski <luto@kernel.org>
 L:	linux-kernel@vger.kernel.org

@@ -23871,8 +23896,7 @@ T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
 P:	Documentation/filesystems/xfs-maintainer-entry-profile.rst
 F:	Documentation/ABI/testing/sysfs-fs-xfs
 F:	Documentation/admin-guide/xfs.rst
-F:	Documentation/filesystems/xfs-delayed-logging-design.rst
-F:	Documentation/filesystems/xfs-self-describing-metadata.rst
+F:	Documentation/filesystems/xfs-*
 F:	fs/xfs/
 F:	include/uapi/linux/dqblk_xfs.h
 F:	include/uapi/linux/fsmap.h
Makefile (2 lines changed)

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Hurr durr I'ma ninja sloth

 # *DOCUMENTATION*
@@ -484,7 +484,8 @@ static int __init xen_guest_init(void)
	 * for secondary CPUs as they are brought up.
	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
	 */
-	xen_vcpu_info = alloc_percpu(struct vcpu_info);
+	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
+				       1 << fls(sizeof(struct vcpu_info) - 1));
	if (xen_vcpu_info == NULL)
		return -ENOMEM;
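Note: the replacement requests an explicit alignment of `1 << fls(sizeof(struct vcpu_info) - 1)`, i.e. the structure size rounded up to the next power of two. A minimal sketch of that rounding, assuming the kernel's `fls()` (find-last-set, 1-based bit index) semantics:

```c
#include <linux/bitops.h>	/* fls() */

/*
 * Sketch only: rounds n up to the next power of two, matching the
 * alignment expression in the hunk above. For n = 24, fls(23) = 5,
 * so the result is 32; powers of two map to themselves.
 */
static inline size_t vcpu_info_align(size_t n)
{
	return 1UL << fls(n - 1);
}
```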
@@ -158,7 +158,7 @@ endif

 all:	$(notdir $(KBUILD_IMAGE))

-vmlinuz.efi: Image
+Image vmlinuz.efi: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
@@ -21,9 +21,22 @@ static inline bool arch_parse_debug_rodata(char *arg)
	extern bool rodata_enabled;
	extern bool rodata_full;

-	if (arg && !strcmp(arg, "full")) {
+	if (!arg)
+		return false;
+
+	if (!strcmp(arg, "full")) {
		rodata_enabled = rodata_full = true;
		return true;
	}

+	if (!strcmp(arg, "off")) {
+		rodata_enabled = rodata_full = false;
+		return true;
+	}
+
	if (!strcmp(arg, "on")) {
		rodata_enabled = true;
-		rodata_full = true;
+		rodata_full = false;
		return true;
	}
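A condensed sketch of how the three recognised `rodata=` values now map onto the two flags (same logic as the hunk above, restated as a table):

```c
/* Sketch only: mapping of the rodata= kernel parameter values. */
static const struct {
	const char *arg;
	bool enabled;
	bool full;
} rodata_map[] = {
	{ "full", true,  true  },	/* rodata, full linear-map granularity */
	{ "on",   true,  false },	/* rodata, but no full granularity */
	{ "off",  false, false },	/* no rodata at all */
};
```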
@@ -29,8 +29,8 @@ bool can_set_direct_map(void)
	 *
	 * KFENCE pool requires page-granular mapping if initialized late.
	 */
-	return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
-		arm64_kfence_can_set_direct_map();
+	return rodata_full || debug_pagealloc_enabled() ||
+	       arm64_kfence_can_set_direct_map();
 }

 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)

@@ -105,8 +105,7 @@ static int change_memory_common(unsigned long addr, int numpages,
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
-	if (rodata_enabled &&
-	    rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
+	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
@@ -68,6 +68,7 @@ LDFLAGS_vmlinux += -static -n -nostdlib
 ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS
 cflags-y			+= $(call cc-option,-mexplicit-relocs)
 KBUILD_CFLAGS_KERNEL		+= $(call cc-option,-mdirect-extern-access)
+KBUILD_CFLAGS_KERNEL		+= $(call cc-option,-fdirect-access-external-data)
 KBUILD_AFLAGS_MODULE		+= $(call cc-option,-fno-direct-access-external-data)
 KBUILD_CFLAGS_MODULE		+= $(call cc-option,-fno-direct-access-external-data)
 KBUILD_AFLAGS_MODULE		+= $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)

@@ -142,6 +143,8 @@ vdso-install-y += arch/loongarch/vdso/vdso.so.dbg

 all:	$(notdir $(KBUILD_IMAGE))

+vmlinuz.efi: vmlinux.efi
+
 vmlinux.elf vmlinux.efi vmlinuz.efi: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $(boot)/$@
@@ -609,8 +609,7 @@
	lu32i.d	\reg, 0
	lu52i.d	\reg, \reg, 0
	.pushsection ".la_abs", "aw", %progbits
-768:
-	.dword	768b-766b
+	.dword	766b
	.dword	\sym
	.popsection
 #endif
@@ -40,13 +40,13 @@ static __always_inline unsigned long __percpu_##op(void *ptr,		\
	switch (size) {							\
	case 4:								\
		__asm__ __volatile__(					\
		"am"#asm_op".w"	" %[ret], %[val], %[ptr]	\n"	\
		: [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr)		\
		: [val] "r" (val));					\
		break;							\
	case 8:								\
		__asm__ __volatile__(					\
		"am"#asm_op".d"	" %[ret], %[val], %[ptr]	\n"	\
		: [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr)		\
		: [val] "r" (val));					\
		break;							\

@@ -63,7 +63,7 @@ PERCPU_OP(and, and, &)
 PERCPU_OP(or, or, |)
 #undef PERCPU_OP

-static __always_inline unsigned long __percpu_read(void *ptr, int size)
+static __always_inline unsigned long __percpu_read(void __percpu *ptr, int size)
 {
	unsigned long ret;

@@ -100,7 +100,7 @@ static __always_inline unsigned long __percpu_read(void *ptr, int size)
	return ret;
 }

-static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
+static __always_inline void __percpu_write(void __percpu *ptr, unsigned long val, int size)
 {
	switch (size) {
	case 1:

@@ -132,8 +132,7 @@ static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
	}
 }

-static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
-						   int size)
+static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size)
 {
	switch (size) {
	case 1:
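The signature changes add the `__percpu` address-space annotation so sparse can flag direct dereferences of per-cpu pointers. A minimal sketch of the kind of typed access this enables (generic kernel API; the variable name is hypothetical, not part of this patch):

```c
#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, demo_counter);	/* hypothetical per-cpu variable */

/*
 * Sketch only: per-cpu data is reached through accessors that take
 * __percpu-annotated pointers; a raw dereference would now be flagged
 * by sparse.
 */
static void demo_bump(void)
{
	this_cpu_inc(demo_counter);
}
```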
@@ -25,7 +25,7 @@ extern void set_merr_handler(unsigned long offset, void *addr, unsigned long len
 #ifdef CONFIG_RELOCATABLE

 struct rela_la_abs {
-	long offset;
+	long pc;
	long symvalue;
 };

@@ -52,7 +52,7 @@ static inline void __init relocate_absolute(long random_offset)
	for (p = begin; (void *)p < end; p++) {
		long v = p->symvalue;
		uint32_t lu12iw, ori, lu32id, lu52id;
-		union loongarch_instruction *insn = (void *)p - p->offset;
+		union loongarch_instruction *insn = (void *)p->pc;

		lu12iw = (v >> 12) & 0xfffff;
		ori    = v & 0xfff;
@@ -102,6 +102,14 @@ static inline __init unsigned long get_random_boot(void)
	return hash;
 }

+static int __init nokaslr(char *p)
+{
+	pr_info("KASLR is disabled.\n");
+
+	return 0; /* Print a notice and silence the boot warning */
+}
+early_param("nokaslr", nokaslr);
+
 static inline __init bool kaslr_disabled(void)
 {
	char *str;
@@ -58,21 +58,6 @@ static int constant_set_state_oneshot(struct clock_event_device *evt)
	return 0;
 }

-static int constant_set_state_oneshot_stopped(struct clock_event_device *evt)
-{
-	unsigned long timer_config;
-
-	raw_spin_lock(&state_lock);
-
-	timer_config = csr_read64(LOONGARCH_CSR_TCFG);
-	timer_config &= ~CSR_TCFG_EN;
-	csr_write64(timer_config, LOONGARCH_CSR_TCFG);
-
-	raw_spin_unlock(&state_lock);
-
-	return 0;
-}
-
 static int constant_set_state_periodic(struct clock_event_device *evt)
 {
	unsigned long period;

@@ -92,6 +77,16 @@ static int constant_set_state_periodic(struct clock_event_device *evt)

 static int constant_set_state_shutdown(struct clock_event_device *evt)
 {
+	unsigned long timer_config;
+
+	raw_spin_lock(&state_lock);
+
+	timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+	timer_config &= ~CSR_TCFG_EN;
+	csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+	raw_spin_unlock(&state_lock);
+
	return 0;
 }

@@ -161,7 +156,7 @@ int constant_clockevent_init(void)
	cd->rating = 320;
	cd->cpumask = cpumask_of(cpu);
	cd->set_state_oneshot = constant_set_state_oneshot;
-	cd->set_state_oneshot_stopped = constant_set_state_oneshot_stopped;
+	cd->set_state_oneshot_stopped = constant_set_state_shutdown;
	cd->set_state_periodic = constant_set_state_periodic;
	cd->set_state_shutdown = constant_set_state_shutdown;
	cd->set_next_event = constant_timer_next_event;
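Stopping a oneshot timer and shutting the timer down both reduce to clearing the enable bit in LOONGARCH_CSR_TCFG, so the dedicated handler is dropped and both clockevents hooks point at the same function. A sketch of the resulting wiring (same assignments as the hunk above, shown in isolation):

```c
/* Sketch only: one callback now serves both state transitions. */
cd->set_state_oneshot_stopped = constant_set_state_shutdown;
cd->set_state_shutdown        = constant_set_state_shutdown;
```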
@@ -13,13 +13,13 @@ struct page *dmw_virt_to_page(unsigned long kaddr)
 {
	return pfn_to_page(virt_to_pfn(kaddr));
 }
-EXPORT_SYMBOL_GPL(dmw_virt_to_page);
+EXPORT_SYMBOL(dmw_virt_to_page);

 struct page *tlb_virt_to_page(unsigned long kaddr)
 {
	return pfn_to_page(pte_pfn(*virt_to_kpte(kaddr)));
 }
-EXPORT_SYMBOL_GPL(tlb_virt_to_page);
+EXPORT_SYMBOL(tlb_virt_to_page);

 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
@@ -115,9 +115,12 @@ config ARCH_HAS_ILOG2_U64
	default n

 config GENERIC_BUG
-	bool
-	default y
+	def_bool y
	depends on BUG
+	select GENERIC_BUG_RELATIVE_POINTERS if 64BIT
+
+config GENERIC_BUG_RELATIVE_POINTERS
+	bool

 config GENERIC_HWEIGHT
	bool

@@ -140,11 +143,11 @@ config ARCH_MMAP_RND_COMPAT_BITS_MIN
	default 8

 config ARCH_MMAP_RND_BITS_MAX
-	default 24 if 64BIT
-	default 17
+	default 18 if 64BIT
+	default 13

 config ARCH_MMAP_RND_COMPAT_BITS_MAX
-	default 17
+	default 13

 # unless you want to implement ACPI on PA-RISC ... ;-)
 config PM
@@ -34,7 +34,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,

 /* Alternative SMP implementation. */
 #define ALTERNATIVE(cond, replacement)		"!0:"	\
-	".section .altinstructions, \"aw\"	!"	\
+	".section .altinstructions, \"a\"	!"	\
+	".align 4				!"	\
	".word (0b-4-.)				!"	\
	".hword 1, " __stringify(cond) "	!"	\
	".word " __stringify(replacement) "	!"	\

@@ -44,7 +45,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,

 /* to replace one single instructions by a new instruction */
 #define ALTERNATIVE(from, to, cond, replacement)	\
-	.section .altinstructions, "aw"	!	\
+	.section .altinstructions, "a"	!	\
+	.align 4			!	\
	.word (from - .)		!	\
	.hword (to - from)/4, cond	!	\
	.word replacement		!	\

@@ -52,7 +54,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,

 /* to replace multiple instructions by new code */
 #define ALTERNATIVE_CODE(from, num_instructions, cond, new_instr_ptr)\
-	.section .altinstructions, "aw"	!	\
+	.section .altinstructions, "a"	!	\
+	.align 4			!	\
	.word (from - .)		!	\
	.hword -num_instructions, cond	!	\
	.word (new_instr_ptr - .)	!	\
@@ -574,6 +574,7 @@
	 */
 #define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr)	\
	.section __ex_table,"aw"			!	\
+	.align 4					!	\
	.word (fault_addr - .), (except_addr - .)	!	\
	.previous
@@ -17,24 +17,27 @@
 #define PARISC_BUG_BREAK_ASM	"break 0x1f, 0x1fff"
 #define PARISC_BUG_BREAK_INSN	0x03ffe01f  /* PARISC_BUG_BREAK_ASM */

-#if defined(CONFIG_64BIT)
-#define ASM_WORD_INSN		".dword\t"
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+# define __BUG_REL(val) ".word " __stringify(val) " - ."
 #else
-#define ASM_WORD_INSN		".word\t"
+# define __BUG_REL(val) ".word " __stringify(val)
 #endif

 #ifdef CONFIG_DEBUG_BUGVERBOSE
 #define BUG()							\
	do {							\
		asm volatile("\n"				\
			     "1:\t" PARISC_BUG_BREAK_ASM "\n"	\
-			     "\t.pushsection __bug_table,\"aw\"\n"	\
-			     "2:\t" ASM_WORD_INSN "1b, %c0\n"	\
-			     "\t.short %c1, %c2\n"		\
-			     "\t.org 2b+%c3\n"			\
+			     "\t.pushsection __bug_table,\"a\"\n"	\
+			     "\t.align 4\n"			\
+			     "2:\t" __BUG_REL(1b) "\n"		\
+			     "\t" __BUG_REL(%c0) "\n"		\
+			     "\t.short %1, %2\n"		\
+			     "\t.blockz %3-2*4-2*2\n"		\
			     "\t.popsection"			\
			     : : "i" (__FILE__), "i" (__LINE__),	\
			     "i" (0), "i" (sizeof(struct bug_entry)) );	\
		unreachable();					\
	} while(0)

@@ -51,10 +54,12 @@
	do {							\
		asm volatile("\n"				\
			     "1:\t" PARISC_BUG_BREAK_ASM "\n"	\
-			     "\t.pushsection __bug_table,\"aw\"\n"	\
-			     "2:\t" ASM_WORD_INSN "1b, %c0\n"	\
-			     "\t.short %c1, %c2\n"		\
-			     "\t.org 2b+%c3\n"			\
+			     "\t.pushsection __bug_table,\"a\"\n"	\
+			     "\t.align 4\n"			\
+			     "2:\t" __BUG_REL(1b) "\n"		\
+			     "\t" __BUG_REL(%c0) "\n"		\
+			     "\t.short %1, %2\n"		\
+			     "\t.blockz %3-2*4-2*2\n"		\
			     "\t.popsection"			\
			     : : "i" (__FILE__), "i" (__LINE__),	\
			     "i" (BUGFLAG_WARNING|(flags)),	\

@@ -65,10 +70,11 @@
	do {							\
		asm volatile("\n"				\
			     "1:\t" PARISC_BUG_BREAK_ASM "\n"	\
-			     "\t.pushsection __bug_table,\"aw\"\n"	\
-			     "2:\t" ASM_WORD_INSN "1b\n"	\
-			     "\t.short %c0\n"			\
-			     "\t.org 2b+%c1\n"			\
+			     "\t.pushsection __bug_table,\"a\"\n"	\
+			     "\t.align %2\n"			\
+			     "2:\t" __BUG_REL(1b) "\n"		\
+			     "\t.short %0\n"			\
+			     "\t.blockz %1-4-2\n"		\
			     "\t.popsection"			\
			     : : "i" (BUGFLAG_WARNING|(flags)),	\
			     "i" (sizeof(struct bug_entry)) );	\
@@ -349,15 +349,7 @@ struct pt_regs;	/* forward declaration... */

 #define ELF_HWCAP	0

-/* Masks for stack and mmap randomization */
-#define BRK_RND_MASK	(is_32bit_task() ? 0x07ffUL : 0x3ffffUL)
-#define MMAP_RND_MASK	(is_32bit_task() ? 0x1fffUL : 0x3ffffUL)
-#define STACK_RND_MASK	MMAP_RND_MASK
-
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *);
-#define arch_randomize_brk arch_randomize_brk
+#define STACK_RND_MASK	0x7ff	/* 8MB of VA */

 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
@@ -15,10 +15,12 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
	asm_volatile_goto("1:\n\t"
		 "nop\n\t"
		 ".pushsection __jump_table, \"aw\"\n\t"
+		 ".align %1\n\t"
		 ".word 1b - ., %l[l_yes] - .\n\t"
		 __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
		 ".popsection\n\t"
-		 : : "i" (&((char *)key)[branch]) : : l_yes);
+		 : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
+		 : : l_yes);

	return false;
 l_yes:

@@ -30,10 +32,12 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
	asm_volatile_goto("1:\n\t"
		 "b,n %l[l_yes]\n\t"
		 ".pushsection __jump_table, \"aw\"\n\t"
+		 ".align %1\n\t"
		 ".word 1b - ., %l[l_yes] - .\n\t"
		 __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
		 ".popsection\n\t"
-		 : : "i" (&((char *)key)[branch]) : : l_yes);
+		 : : "i" (&((char *)key)[branch]), "i" (sizeof(long))
+		 : : l_yes);

	return false;
 l_yes:
@@ -55,7 +55,7 @@
 })

 #ifdef CONFIG_SMP
-# define __lock_aligned __section(".data..lock_aligned")
+# define __lock_aligned __section(".data..lock_aligned") __aligned(16)
 #endif

 #endif /* __PARISC_LDCW_H */
@@ -47,6 +47,8 @@

 #ifndef __ASSEMBLY__

+struct rlimit;
+unsigned long mmap_upper_limit(struct rlimit *rlim_stack);
 unsigned long calc_max_stack_size(unsigned long stack_max);

 /*
@@ -41,6 +41,7 @@ struct exception_table_entry {

 #define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
+	".align 4\n"					   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"
@@ -75,7 +75,6 @@

 /* We now return you to your regularly scheduled HPUX. */

-#define ENOSYM		215	/* symbol does not exist in executable */
 #define ENOTSOCK	216	/* Socket operation on non-socket */
 #define EDESTADDRREQ	217	/* Destination address required */
 #define EMSGSIZE	218	/* Message too long */

@@ -101,7 +100,6 @@
 #define ETIMEDOUT	238	/* Connection timed out */
 #define ECONNREFUSED	239	/* Connection refused */
 #define EREFUSED	ECONNREFUSED	/* for HP's NFS apparently */
-#define EREMOTERELEASE	240	/* Remote peer released connection */
 #define EHOSTDOWN	241	/* Host is down */
 #define EHOSTUNREACH	242	/* No route to host */
@@ -383,7 +383,7 @@ show_cpuinfo (struct seq_file *m, void *v)
		char cpu_name[60], *p;

		/* strip PA path from CPU name to not confuse lscpu */
-		strlcpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name));
+		strscpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name));
		p = strrchr(cpu_name, '[');
		if (p)
			*(--p) = 0;
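strscpy() is the preferred replacement for the deprecated strlcpy(): it never reads past the end of the source buffer and reports truncation instead of the source length. A minimal sketch of the semantics (values in the comments are illustrative):

```c
#include <linux/string.h>

/*
 * Sketch only: strscpy() returns the number of bytes copied (excluding
 * the terminating NUL) or -E2BIG when the destination was too small;
 * the destination is always NUL-terminated.
 */
static void demo_strscpy(void)
{
	char buf[8];
	ssize_t n;

	n = strscpy(buf, "short", sizeof(buf));		/* n == 5 */
	n = strscpy(buf, "far too long", sizeof(buf));	/* n == -E2BIG */
}
```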
@@ -77,7 +77,7 @@ unsigned long calc_max_stack_size(unsigned long stack_max)
 * indicating that "current" should be used instead of a passed-in
 * value from the exec bprm as done with arch_pick_mmap_layout().
 */
-static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
+unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
 {
	unsigned long stack_base;
@@ -130,6 +130,7 @@ SECTIONS
	RO_DATA(8)

	/* unwind info */
+	. = ALIGN(4);
	.PARISC.unwind : {
		__start___unwind = .;
		*(.PARISC.unwind)
@@ -228,7 +228,6 @@ typedef struct thread_struct thread_struct;
	execve_tail();						\
 } while (0)

-/* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
 struct seq_file;
@@ -666,6 +666,7 @@ static int __init ipl_init(void)
				       &ipl_ccw_attr_group_lpar);
		break;
	case IPL_TYPE_ECKD:
+	case IPL_TYPE_ECKD_DUMP:
		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_eckd_attr_group);
		break;
	case IPL_TYPE_FCP:
@@ -279,12 +279,6 @@ static int paicrypt_event_init(struct perf_event *event)
	if (IS_ERR(cpump))
		return PTR_ERR(cpump);

-	/* Event initialization sets last_tag to 0. When later on the events
-	 * are deleted and re-added, do not reset the event count value to zero.
-	 * Events are added, deleted and re-added when 2 or more events
-	 * are active at the same time.
-	 */
-	event->hw.last_tag = 0;
	event->destroy = paicrypt_event_destroy;

	if (a->sample_period) {

@@ -318,6 +312,11 @@ static void paicrypt_start(struct perf_event *event, int flags)
 {
	u64 sum;

+	/* Event initialization sets last_tag to 0. When later on the events
+	 * are deleted and re-added, do not reset the event count value to zero.
+	 * Events are added, deleted and re-added when 2 or more events
+	 * are active at the same time.
+	 */
	if (!event->hw.last_tag) {
		event->hw.last_tag = 1;
		sum = paicrypt_getall(event);	/* Get current value */

@@ -260,7 +260,6 @@ static int paiext_event_init(struct perf_event *event)
	rc = paiext_alloc(a, event);
	if (rc)
		return rc;
-	event->hw.last_tag = 0;
	event->destroy = paiext_event_destroy;

	if (a->sample_period) {
@@ -4660,7 +4660,7 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
	if (pmu->intel_cap.pebs_output_pt_available)
		pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
	else
-		pmu->pmu.capabilities |= ~PERF_PMU_CAP_AUX_OUTPUT;
+		pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT;

	intel_pmu_check_event_constraints(pmu->event_constraints,
					  pmu->num_counters,
@@ -15,6 +15,7 @@
 #include <linux/io.h>
 #include <asm/apic.h>
 #include <asm/desc.h>
+#include <asm/e820/api.h>
 #include <asm/sev.h>
 #include <asm/ibt.h>
 #include <asm/hypervisor.h>

@@ -286,15 +287,31 @@ static int hv_cpu_die(unsigned int cpu)

 static int __init hv_pci_init(void)
 {
-	int gen2vm = efi_enabled(EFI_BOOT);
+	bool gen2vm = efi_enabled(EFI_BOOT);

	/*
-	 * For Generation-2 VM, we exit from pci_arch_init() by returning 0.
-	 * The purpose is to suppress the harmless warning:
+	 * A Generation-2 VM doesn't support legacy PCI/PCIe, so both
+	 * raw_pci_ops and raw_pci_ext_ops are NULL, and pci_subsys_init() ->
+	 * pcibios_init() doesn't call pcibios_resource_survey() ->
+	 * e820__reserve_resources_late(); as a result, any emulated persistent
+	 * memory of E820_TYPE_PRAM (12) via the kernel parameter
+	 * memmap=nn[KMG]!ss is not added into iomem_resource and hence can't be
+	 * detected by register_e820_pmem(). Fix this by directly calling
+	 * e820__reserve_resources_late() here: e820__reserve_resources_late()
+	 * depends on e820__reserve_resources(), which has been called earlier
+	 * from setup_arch(). Note: e820__reserve_resources_late() also adds
+	 * any memory of E820_TYPE_PMEM (7) into iomem_resource, and
+	 * acpi_nfit_register_region() -> acpi_nfit_insert_resource() ->
+	 * region_intersects() returns REGION_INTERSECTS, so the memory of
+	 * E820_TYPE_PMEM won't get added twice.
+	 *
+	 * We return 0 here so that pci_arch_init() won't print the warning:
	 * "PCI: Fatal: No config space access function found"
	 */
-	if (gen2vm)
+	if (gen2vm) {
+		e820__reserve_resources_late();
		return 0;
+	}

	/* For Generation-1 VM, we'll proceed in pci_arch_init(). */
	return 1;
@@ -16,6 +16,9 @@
 #include <asm/x86_init.h>
 #include <asm/cpufeature.h>
 #include <asm/irq_vectors.h>
+#include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>

 #ifdef CONFIG_ACPI_APEI
 # include <asm/pgtable_types.h>

@@ -127,6 +130,17 @@ static inline void arch_acpi_set_proc_cap_bits(u32 *cap)
	if (!cpu_has(c, X86_FEATURE_MWAIT) ||
	    boot_option_idle_override == IDLE_NOMWAIT)
		*cap &= ~(ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH);
+
+	if (xen_initial_domain()) {
+		/*
+		 * When Linux is running as Xen dom0, the hypervisor is the
+		 * entity in charge of the processor power management, and so
+		 * Xen needs to check the OS capabilities reported in the
+		 * processor capabilities buffer matches what the hypervisor
+		 * driver supports.
+		 */
+		xen_sanitize_proc_cap_bits(cap);
+	}
 }

 static inline bool acpi_has_cpu_in_madt(void)
@@ -100,4 +100,13 @@ static inline void leave_lazy(enum xen_lazy_mode mode)

 enum xen_lazy_mode xen_get_lazy_mode(void);

+#if defined(CONFIG_XEN_DOM0) && defined(CONFIG_ACPI)
+void xen_sanitize_proc_cap_bits(uint32_t *buf);
+#else
+static inline void xen_sanitize_proc_cap_bits(uint32_t *buf)
+{
+	BUG();
+}
+#endif
+
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
@@ -63,6 +63,7 @@ int acpi_fix_pin2_polarity __initdata;

 #ifdef CONFIG_X86_LOCAL_APIC
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
+static bool has_lapic_cpus __initdata;
 static bool acpi_support_online_capable;
 #endif

@@ -232,6 +233,14 @@ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
	if (!acpi_is_processor_usable(processor->lapic_flags))
		return 0;

+	/*
+	 * According to https://uefi.org/specs/ACPI/6.5/05_ACPI_Software_Programming_Model.html#processor-local-x2apic-structure
+	 * when MADT provides both valid LAPIC and x2APIC entries, the APIC ID
+	 * in x2APIC must be equal or greater than 0xff.
+	 */
+	if (has_lapic_cpus && apic_id < 0xff)
+		return 0;
+
	/*
	 * We need to register disabled CPU as well to permit
	 * counting disabled CPUs. This allows us to size

@@ -1114,10 +1123,7 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void)

 static int __init acpi_parse_madt_lapic_entries(void)
 {
-	int count;
-	int x2count = 0;
-	int ret;
-	struct acpi_subtable_proc madt_proc[2];
+	int count, x2count = 0;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return -ENODEV;

@@ -1126,21 +1132,11 @@ static int __init acpi_parse_madt_lapic_entries(void)
			acpi_parse_sapic, MAX_LOCAL_APIC);

	if (!count) {
-		memset(madt_proc, 0, sizeof(madt_proc));
-		madt_proc[0].id = ACPI_MADT_TYPE_LOCAL_APIC;
-		madt_proc[0].handler = acpi_parse_lapic;
-		madt_proc[1].id = ACPI_MADT_TYPE_LOCAL_X2APIC;
-		madt_proc[1].handler = acpi_parse_x2apic;
-		ret = acpi_table_parse_entries_array(ACPI_SIG_MADT,
-				sizeof(struct acpi_table_madt),
-				madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC);
-		if (ret < 0) {
-			pr_err("Error parsing LAPIC/X2APIC entries\n");
-			return ret;
-		}
-
-		count = madt_proc[0].count;
-		x2count = madt_proc[1].count;
+		count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
+					acpi_parse_lapic, MAX_LOCAL_APIC);
+		has_lapic_cpus = count > 0;
+		x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
+					acpi_parse_x2apic, MAX_LOCAL_APIC);
	}
	if (!count && !x2count) {
		pr_err("No LAPIC entries present\n");
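The new has_lapic_cpus flag encodes the ACPI 6.5 rule the comment cites: when valid LAPIC entries exist, x2APIC entries carrying an APIC ID below 0xff duplicate them and must be skipped. A condensed sketch of that check (simplified from the hunk above):

```c
/*
 * Sketch only: an x2APIC MADT entry with an APIC ID below 0xff
 * duplicates a LAPIC entry whenever valid LAPIC entries were
 * already parsed, so it is ignored.
 */
static bool x2apic_entry_is_duplicate(bool has_lapic_cpus, u32 apic_id)
{
	return has_lapic_cpus && apic_id < 0xff;
}
```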
@@ -104,8 +104,6 @@ struct cont_desc {
	size_t		     size;
 };

-static u32 ucode_new_rev;
-
 /*
 * Microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/arch/x86/microcode.rst

@@ -442,12 +440,11 @@ static int __apply_microcode_amd(struct microcode_amd *mc)
 *
 * Returns true if container found (sets @desc), false otherwise.
 */
-static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
+static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, size_t size)
 {
	struct cont_desc desc = { 0 };
	struct microcode_amd *mc;
	bool ret = false;
-	u32 rev, dummy;

	desc.cpuid_1_eax = cpuid_1_eax;

@@ -457,22 +454,15 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
	if (!mc)
		return ret;

-	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
	/*
	 * Allow application of the same revision to pick up SMT-specific
	 * changes even if the revision of the other SMT thread is already
	 * up-to-date.
	 */
-	if (rev > mc->hdr.patch_id)
+	if (old_rev > mc->hdr.patch_id)
		return ret;

-	if (!__apply_microcode_amd(mc)) {
-		ucode_new_rev = mc->hdr.patch_id;
-		ret = true;
-	}
-
-	return ret;
+	return !__apply_microcode_amd(mc);
 }

 static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)

@@ -506,9 +496,12 @@ static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
	*ret = cp;
 }

-void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
+void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
 {
	struct cpio_data cp = { };
+	u32 dummy;
+
+	native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);

	/* Needed in load_microcode_amd() */
	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;

@@ -517,7 +510,8 @@ void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
	if (!(cp.data && cp.size))
		return;

-	early_apply_microcode(cpuid_1_eax, cp.data, cp.size);
+	if (early_apply_microcode(cpuid_1_eax, ed->old_rev, cp.data, cp.size))
+		native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
 }

 static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);

@@ -625,10 +619,8 @@ void reload_ucode_amd(unsigned int cpu)
	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	if (rev < mc->hdr.patch_id) {
-		if (!__apply_microcode_amd(mc)) {
-			ucode_new_rev = mc->hdr.patch_id;
-			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
-		}
+		if (!__apply_microcode_amd(mc))
+			pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
	}
 }

@@ -649,8 +641,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

-	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
-
	return 0;
 }

@@ -691,8 +681,6 @@ static enum ucode_state apply_microcode_amd(int cpu)
	rev = mc_amd->hdr.patch_id;
	ret = UCODE_UPDATED;

-	pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
-
 out:
	uci->cpu_sig.rev = rev;
	c->microcode = rev;

@@ -935,11 +923,6 @@ struct microcode_ops * __init init_amd_microcode(void)
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}
-
-	if (ucode_new_rev)
-		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
-			     ucode_new_rev);
-
	return &microcode_amd_ops;
 }
|
||||
|
@ -41,8 +41,6 @@
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
#define DRIVER_VERSION "2.2"
|
||||
|
||||
static struct microcode_ops *microcode_ops;
|
||||
bool dis_ucode_ldr = true;
|
||||
|
||||
@ -77,6 +75,8 @@ static u32 final_levels[] = {
|
||||
0, /* T-101 terminator */
|
||||
};
|
||||
|
||||
struct early_load_data early_data;
|
||||
|
||||
/*
|
||||
* Check the current patch level on this CPU.
|
||||
*
|
||||
@ -155,9 +155,9 @@ void __init load_ucode_bsp(void)
|
||||
return;
|
||||
|
||||
if (intel)
|
||||
load_ucode_intel_bsp();
|
||||
load_ucode_intel_bsp(&early_data);
|
||||
else
|
||||
load_ucode_amd_bsp(cpuid_1_eax);
|
||||
load_ucode_amd_bsp(&early_data, cpuid_1_eax);
|
||||
}
|
||||
|
||||
void load_ucode_ap(void)
|
||||
@ -828,6 +828,11 @@ static int __init microcode_init(void)
|
||||
if (!microcode_ops)
|
||||
return -ENODEV;
|
||||
|
||||
pr_info_once("Current revision: 0x%08x\n", (early_data.new_rev ?: early_data.old_rev));
|
||||
|
||||
if (early_data.new_rev)
|
||||
pr_info_once("Updated early from: 0x%08x\n", early_data.old_rev);
|
||||
|
||||
microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0);
|
||||
if (IS_ERR(microcode_pdev))
|
||||
return PTR_ERR(microcode_pdev);
|
||||
@ -846,8 +851,6 @@ static int __init microcode_init(void)
|
||||
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
|
||||
mc_cpu_online, mc_cpu_down_prep);
|
||||
|
||||
pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
|
||||
|
||||
return 0;
|
||||
|
||||
out_pdev:
|
||||
|
@ -339,16 +339,9 @@ static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci,
|
||||
static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci)
|
||||
{
|
||||
struct microcode_intel *mc = uci->mc;
|
||||
enum ucode_state ret;
|
||||
u32 cur_rev, date;
|
||||
u32 cur_rev;
|
||||
|
||||
ret = __apply_microcode(uci, mc, &cur_rev);
|
||||
if (ret == UCODE_UPDATED) {
|
||||
date = mc->hdr.date;
|
||||
pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n",
|
||||
cur_rev, mc->hdr.rev, date & 0xffff, date >> 24, (date >> 16) & 0xff);
|
||||
}
|
||||
return ret;
|
||||
return __apply_microcode(uci, mc, &cur_rev);
|
||||
}
|
||||
|
||||
static __init bool load_builtin_intel_microcode(struct cpio_data *cp)
|
||||
@ -413,13 +406,17 @@ static int __init save_builtin_microcode(void)
|
||||
early_initcall(save_builtin_microcode);
|
||||
|
||||
/* Load microcode on BSP from initrd or builtin blobs */
|
||||
void __init load_ucode_intel_bsp(void)
|
||||
void __init load_ucode_intel_bsp(struct early_load_data *ed)
|
||||
{
|
||||
struct ucode_cpu_info uci;
|
||||
|
||||
ed->old_rev = intel_get_microcode_revision();
|
||||
|
||||
uci.mc = get_microcode_blob(&uci, false);
|
||||
if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED)
|
||||
ucode_patch_va = UCODE_BSP_LOADED;
|
||||
|
||||
ed->new_rev = uci.cpu_sig.rev;
|
||||
}
|
||||
|
||||
void load_ucode_intel_ap(void)
|
||||
|
@ -37,6 +37,12 @@ struct microcode_ops {
|
||||
use_nmi : 1;
|
||||
};
|
||||
|
||||
struct early_load_data {
|
||||
u32 old_rev;
|
||||
u32 new_rev;
|
||||
};
|
||||
|
||||
extern struct early_load_data early_data;
|
||||
extern struct ucode_cpu_info ucode_cpu_info[];
|
||||
struct cpio_data find_microcode_in_initrd(const char *path);
|
||||
|
||||
@ -92,14 +98,14 @@ extern bool dis_ucode_ldr;
|
||||
extern bool force_minrev;
|
||||
|
||||
#ifdef CONFIG_CPU_SUP_AMD
|
||||
void load_ucode_amd_bsp(unsigned int family);
|
||||
void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
|
||||
void load_ucode_amd_ap(unsigned int family);
|
||||
int save_microcode_in_initrd_amd(unsigned int family);
|
||||
void reload_ucode_amd(unsigned int cpu);
|
||||
struct microcode_ops *init_amd_microcode(void);
|
||||
void exit_amd_microcode(void);
|
||||
#else /* CONFIG_CPU_SUP_AMD */
|
||||
static inline void load_ucode_amd_bsp(unsigned int family) { }
|
||||
static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
|
||||
static inline void load_ucode_amd_ap(unsigned int family) { }
|
||||
static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
|
||||
static inline void reload_ucode_amd(unsigned int cpu) { }
|
||||
@ -108,12 +114,12 @@ static inline void exit_amd_microcode(void) { }
|
||||
#endif /* !CONFIG_CPU_SUP_AMD */
|
||||
|
||||
#ifdef CONFIG_CPU_SUP_INTEL
|
||||
void load_ucode_intel_bsp(void);
|
||||
void load_ucode_intel_bsp(struct early_load_data *ed);
|
||||
void load_ucode_intel_ap(void);
|
||||
void reload_ucode_intel(void);
|
||||
struct microcode_ops *init_intel_microcode(void);
|
||||
#else /* CONFIG_CPU_SUP_INTEL */
|
||||
static inline void load_ucode_intel_bsp(void) { }
|
||||
static inline void load_ucode_intel_bsp(struct early_load_data *ed) { }
|
||||
static inline void load_ucode_intel_ap(void) { }
|
||||
static inline void reload_ucode_intel(void) { }
|
||||
static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }
|
||||
|
@@ -262,11 +262,14 @@ static uint32_t __init ms_hyperv_platform(void)
 static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
 {
	static atomic_t nmi_cpu = ATOMIC_INIT(-1);
+	unsigned int old_cpu, this_cpu;

	if (!unknown_nmi_panic)
		return NMI_DONE;

-	if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
+	old_cpu = -1;
+	this_cpu = raw_smp_processor_id();
+	if (!atomic_try_cmpxchg(&nmi_cpu, &old_cpu, this_cpu))
		return NMI_HANDLED;

	return NMI_DONE;
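atomic_try_cmpxchg() returns a bool and writes the observed value back into the expected-value argument on failure, which removes the manual comparison that atomic_cmpxchg() required and usually generates better code on x86. A minimal sketch of the idiom (the claim_slot() helper is hypothetical):

```c
#include <linux/atomic.h>

/*
 * Sketch only: returns true when this caller transitioned the owner
 * field from "free" (-1) to its own id, false if another CPU already
 * claimed it. On failure, 'expected' holds the value actually seen.
 */
static bool claim_slot(atomic_t *owner, int me)
{
	int expected = -1;

	return atomic_try_cmpxchg(owner, &expected, me);
}
```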
@@ -175,9 +175,6 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
	frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe), &fp);
	uc_flags = frame_uc_flags(regs);

-	if (setup_signal_shadow_stack(ksig))
-		return -EFAULT;
-
	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

@@ -198,6 +195,9 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
		return -EFAULT;
	}

+	if (setup_signal_shadow_stack(ksig))
+		return -EFAULT;
+
	/* Set up registers for signal handler */
	regs->di = ksig->sig;
	/* In case the signal handler was declared without prototypes */
@@ -425,6 +425,8 @@ void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)

 void bdev_add(struct block_device *bdev, dev_t dev)
 {
+	if (bdev_stable_writes(bdev))
+		mapping_set_stable_writes(bdev->bd_inode->i_mapping);
	bdev->bd_dev = dev;
	bdev->bd_inode->i_rdev = dev;
	bdev->bd_inode->i_ino = dev;
@@ -577,6 +577,7 @@ static void blkg_destroy_all(struct gendisk *disk)
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg, *n;
	int count = BLKG_DESTROY_BATCH_SIZE;
+	int i;

 restart:
	spin_lock_irq(&q->queue_lock);

@@ -602,6 +603,18 @@ restart:
		}
	}

+	/*
+	 * Mark policy deactivated since policy offline has been done, and
+	 * the free is scheduled, so future blkcg_deactivate_policy() can
+	 * be bypassed
+	 */
+	for (i = 0; i < BLKCG_MAX_POLS; i++) {
+		struct blkcg_policy *pol = blkcg_policy[i];
+
+		if (pol)
+			__clear_bit(pol->plid, q->blkcg_pols);
+	}
+
	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);
 }

@@ -249,8 +249,6 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
 {
	struct blkcg_gq *blkg;

-	WARN_ON_ONCE(!rcu_read_lock_held());
-
	if (blkcg == &blkcg_root)
		return q->root_blkg;
@@ -2858,11 +2858,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
	};
	struct request *rq;

-	if (unlikely(bio_queue_enter(bio)))
-		return NULL;
-
	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-		goto queue_exit;
+		return NULL;

	rq_qos_throttle(q, bio);

@@ -2878,35 +2875,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
	rq_qos_cleanup(q, bio);
	if (bio->bi_opf & REQ_NOWAIT)
		bio_wouldblock_error(bio);
-queue_exit:
-	blk_queue_exit(q);
	return NULL;
 }

-static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
+/* return true if this @rq can be used for @bio */
+static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
+		struct bio *bio)
 {
-	struct request *rq;
-	enum hctx_type type, hctx_type;
+	enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
+	enum hctx_type hctx_type = rq->mq_hctx->type;

-	if (!plug)
-		return NULL;
-	rq = rq_list_peek(&plug->cached_rq);
-	if (!rq || rq->q != q)
-		return NULL;
+	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);

-	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
-		*bio = NULL;
-		return NULL;
-	}
-
-	type = blk_mq_get_hctx_type((*bio)->bi_opf);
-	hctx_type = rq->mq_hctx->type;
	if (type != hctx_type &&
	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
-		return NULL;
-	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
-		return NULL;
+		return false;
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+		return false;

	/*
	 * If any qos ->throttle() end up blocking, we will have flushed the

@@ -2914,12 +2899,12 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
	 * before we throttle.
	 */
	plug->cached_rq = rq_list_next(rq);
-	rq_qos_throttle(q, *bio);
+	rq_qos_throttle(rq->q, bio);

	blk_mq_rq_time_init(rq, 0);
-	rq->cmd_flags = (*bio)->bi_opf;
+	rq->cmd_flags = bio->bi_opf;
	INIT_LIST_HEAD(&rq->queuelist);
-	return rq;
+	return true;
 }

 static void bio_set_ioprio(struct bio *bio)

@@ -2949,7 +2934,7 @@ void blk_mq_submit_bio(struct bio *bio)
	struct blk_plug *plug = blk_mq_plug(bio);
	const int is_sync = op_is_sync(bio->bi_opf);
	struct blk_mq_hw_ctx *hctx;
-	struct request *rq;
+	struct request *rq = NULL;
	unsigned int nr_segs = 1;
	blk_status_t ret;

@@ -2960,20 +2945,36 @@ void blk_mq_submit_bio(struct bio *bio)
		return;
	}

-	if (!bio_integrity_prep(bio))
-		return;
-
	bio_set_ioprio(bio);

-	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
-	if (!rq) {
-		if (!bio)
+	if (plug) {
+		rq = rq_list_peek(&plug->cached_rq);
+		if (rq && rq->q != q)
+			rq = NULL;
+	}
+	if (rq) {
+		if (!bio_integrity_prep(bio))
			return;
-		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
-		if (unlikely(!rq))
+		if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
			return;
+		if (blk_mq_can_use_cached_rq(rq, plug, bio))
+			goto done;
+		percpu_ref_get(&q->q_usage_counter);
+	} else {
+		if (unlikely(bio_queue_enter(bio)))
+			return;
+		if (!bio_integrity_prep(bio))
+			goto fail;
+	}
+
+	rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+	if (unlikely(!rq)) {
+fail:
+		blk_queue_exit(q);
+		return;
	}

+done:
	trace_block_getrq(bio);

	rq_qos_track(q, rq, bio);
@@ -163,38 +163,15 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
 * @q: the queue of the device
 *
 * Description:
- *    For historical reasons, this routine merely calls blk_set_runtime_active()
- *    to do the real work of restarting the queue. It does this regardless of
- *    whether the device's runtime-resume succeeded; even if it failed the
+ *    Restart the queue of a runtime suspended device. It does this regardless
+ *    of whether the device's runtime-resume succeeded; even if it failed the
 *    driver or error handler will need to communicate with the device.
 *
 *    This function should be called near the end of the device's
- *    runtime_resume callback.
+ *    runtime_resume callback to correct queue runtime PM status and re-enable
+ *    peeking requests from the queue.
 */
 void blk_post_runtime_resume(struct request_queue *q)
 {
-	blk_set_runtime_active(q);
-}
-EXPORT_SYMBOL(blk_post_runtime_resume);
-
-/**
- * blk_set_runtime_active - Force runtime status of the queue to be active
- * @q: the queue of the device
- *
- * If the device is left runtime suspended during system suspend the resume
- * hook typically resumes the device and corrects runtime status
- * accordingly. However, that does not affect the queue runtime PM status
- * which is still "suspended". This prevents processing requests from the
- * queue.
- *
- * This function can be used in driver's resume hook to correct queue
- * runtime PM status and re-enable peeking requests from the queue. It
- * should be called before first request is added to the queue.
- *
- * This function is also called by blk_post_runtime_resume() for
- * runtime resumes. It does everything necessary to restart the queue.
- */
-void blk_set_runtime_active(struct request_queue *q)
-{
	int old_status;

@@ -211,4 +188,4 @@ void blk_set_runtime_active(struct request_queue *q)
	if (old_status != RPM_ACTIVE)
		blk_clear_pm_only(q);
 }
-EXPORT_SYMBOL(blk_set_runtime_active);
+EXPORT_SYMBOL(blk_post_runtime_resume);
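With blk_set_runtime_active() folded into blk_post_runtime_resume(), a driver's runtime-resume path only needs the single call. A sketch of where it now sits (all demo_* names are hypothetical, not from this patch):

```c
/* Sketch only: hypothetical block driver runtime-resume callback. */
static int demo_runtime_resume(struct device *dev)
{
	struct demo_dev *ddev = dev_get_drvdata(dev);	/* hypothetical type */
	int err;

	err = demo_hw_power_on(ddev);			/* hypothetical helper */

	/* Restart the queue even on failure, so error handling can talk
	 * to the device.
	 */
	blk_post_runtime_resume(ddev->queue);
	return err;
}
```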
@@ -1320,6 +1320,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));

+	rcu_read_lock();
	/*
	 * Update has_rules[] flags for the updated tg's subtree. A tg is
	 * considered to have rules if either the tg itself or any of its

@@ -1347,6 +1348,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
		this_tg->latency_target = max(this_tg->latency_target,
				parent_tg->latency_target);
	}
+	rcu_read_unlock();

	/*
	 * We're already holding queue_lock and know @tg is valid. Let's
@@ -502,6 +502,16 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
	return ret;
 }

+static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
+{
+	ivpu_boot_dpu_active_drive(vdev, false);
+	ivpu_boot_pwr_island_isolation_drive(vdev, true);
+	ivpu_boot_pwr_island_trickle_drive(vdev, false);
+	ivpu_boot_pwr_island_drive(vdev, false);
+
+	return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
+}
+
 static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
 {
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);

@@ -600,25 +610,17 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)

 static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
 {
-	int ret;
-	u32 val;
+	int ret = 0;

	if (IVPU_WA(punit_disabled))
		return 0;

-	ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
-	if (ret) {
-		ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
-		return ret;
+	if (ivpu_boot_pwr_domain_disable(vdev)) {
+		ivpu_err(vdev, "Failed to disable power domain\n");
+		ret = -EIO;
	}

-	val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
-	val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
-	REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
-
-	ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
-	if (ret)
-		ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+	if (ivpu_pll_disable(vdev)) {
+		ivpu_err(vdev, "Failed to disable PLL\n");
+		ret = -EIO;
+	}

	return ret;
 }

@@ -651,10 +653,6 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
 {
	int ret;

-	ret = ivpu_hw_37xx_reset(vdev);
-	if (ret)
-		ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
-
	ret = ivpu_hw_37xx_d0i3_disable(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

@@ -722,11 +720,11 @@ static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
 {
	int ret = 0;

-	if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev))
-		ivpu_err(vdev, "Failed to reset the VPU\n");
+	if (!ivpu_hw_37xx_is_idle(vdev))
+		ivpu_warn(vdev, "VPU not idle during power down\n");

-	if (ivpu_pll_disable(vdev)) {
-		ivpu_err(vdev, "Failed to disable PLL\n");
+	if (ivpu_hw_37xx_reset(vdev)) {
+		ivpu_err(vdev, "Failed to reset VPU\n");
		ret = -EIO;
	}

@@ -250,9 +250,6 @@ int ivpu_rpm_get_if_active(struct ivpu_device *vdev)
 {
	int ret;

-	ivpu_dbg(vdev, RPM, "rpm_get_if_active count %d\n",
-		 atomic_read(&vdev->drm.dev->power.usage_count));
-
	ret = pm_runtime_get_if_active(vdev->drm.dev, false);
	drm_WARN_ON(&vdev->drm, ret < 0);
@ -2031,7 +2031,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
|
||||
* HP ZBook Fury 16 G10 requires ACPI video's child devices have _PS0
|
||||
* evaluated to have functional panel brightness control.
|
||||
*/
|
||||
acpi_device_fix_up_power_extended(device);
|
||||
acpi_device_fix_up_power_children(device);
|
||||
|
||||
pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
|
||||
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
|
||||
|
@ -397,6 +397,19 @@ void acpi_device_fix_up_power_extended(struct acpi_device *adev)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended);
|
||||
|
||||
/**
|
||||
* acpi_device_fix_up_power_children - Force a device's children into D0.
|
||||
* @adev: Parent device object whose children's power state is to be fixed up.
|
||||
*
|
||||
* Call acpi_device_fix_up_power() for @adev's children so long as they
|
||||
* are reported as present and enabled.
|
||||
*/
|
||||
void acpi_device_fix_up_power_children(struct acpi_device *adev)
|
||||
{
|
||||
acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_children);
|
||||
|
||||
int acpi_device_update_power(struct acpi_device *device, int *state_p)
|
||||
{
|
||||
int state;
|
||||
|
@ -592,7 +592,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
|
||||
while (1) {
|
||||
|
||||
if (cx->entry_method == ACPI_CSTATE_HALT)
|
||||
safe_halt();
|
||||
raw_safe_halt();
|
||||
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
|
||||
io_idle(cx->address);
|
||||
} else
|
||||
|
@ -447,6 +447,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
|
||||
DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Asus ExpertBook B1402CVA */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* Asus ExpertBook B1502CBA */
|
||||
.matches = {
|
||||
|
@ -82,6 +82,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
|
||||
if (pnp_port_valid(idev, 1)) {
|
||||
ctl_addr = devm_ioport_map(&idev->dev,
|
||||
pnp_port_start(idev, 1), 1);
|
||||
if (!ctl_addr)
|
||||
return -ENOMEM;
|
||||
|
||||
ap->ioaddr.altstatus_addr = ctl_addr;
|
||||
ap->ioaddr.ctl_addr = ctl_addr;
|
||||
ap->ops = &isapnp_port_ops;
|
||||
|
@ -67,6 +67,7 @@ struct nbd_sock {
|
||||
struct recv_thread_args {
|
||||
struct work_struct work;
|
||||
struct nbd_device *nbd;
|
||||
struct nbd_sock *nsock;
|
||||
int index;
|
||||
};
|
||||
|
||||
@ -395,6 +396,22 @@ static u32 req_to_nbd_cmd_type(struct request *req)
|
||||
}
|
||||
}
|
||||
|
||||
static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
|
||||
{
|
||||
if (refcount_inc_not_zero(&nbd->config_refs)) {
|
||||
/*
|
||||
* Add smp_mb__after_atomic to ensure that reading nbd->config_refs
|
||||
* and reading nbd->config is ordered. The pair is the barrier in
|
||||
* nbd_alloc_and_init_config(), avoid nbd->config_refs is set
|
||||
* before nbd->config.
|
||||
*/
|
||||
smp_mb__after_atomic();
|
||||
return nbd->config;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
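
Editor's note: the barrier comment above is the acquire half of a
publish/consume pattern; a standalone kernel-style sketch of both halves,
with generic names (not nbd code):

#include <linux/refcount.h>

/* Writer publishes the payload and then makes the refcount non-zero;
 * the reader observes the non-zero refcount and then dereferences the
 * payload. The two barriers pair up to keep those accesses ordered.
 */
struct thing {
    refcount_t refs;
    void *payload;
};

static void publish(struct thing *t, void *payload)
{
    t->payload = payload;           /* A: store the data */
    smp_mb__before_atomic();        /* order A before B */
    refcount_set(&t->refs, 1);      /* B: publish */
}

static void *consume(struct thing *t)
{
    if (!refcount_inc_not_zero(&t->refs))   /* B': observe the publish */
        return NULL;
    smp_mb__after_atomic();                 /* order B' before A' */
    return t->payload;                      /* A': read the data */
}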

static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
{
    struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@ -409,13 +426,13 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
        return BLK_EH_DONE;
    }

    if (!refcount_inc_not_zero(&nbd->config_refs)) {
    config = nbd_get_config_unlocked(nbd);
    if (!config) {
        cmd->status = BLK_STS_TIMEOUT;
        __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
        mutex_unlock(&cmd->lock);
        goto done;
    }
    config = nbd->config;

    if (config->num_connections > 1 ||
        (config->num_connections == 1 && nbd->tag_set.timeout)) {
@ -489,15 +506,9 @@ done:
    return BLK_EH_DONE;
}

/*
 * Send or receive packet. Return a positive value on success and
 * negative value on failure, and never return 0.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
                     struct iov_iter *iter, int msg_flags, int *sent)
static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
                       struct iov_iter *iter, int msg_flags, int *sent)
{
    struct nbd_config *config = nbd->config;
    struct socket *sock = config->socks[index]->sock;
    int result;
    struct msghdr msg;
    unsigned int noreclaim_flag;
@ -540,6 +551,19 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
    return result;
}

/*
 * Send or receive packet. Return a positive value on success and
 * negative value on failure, and never return 0.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
                     struct iov_iter *iter, int msg_flags, int *sent)
{
    struct nbd_config *config = nbd->config;
    struct socket *sock = config->socks[index]->sock;

    return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
}

/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
@ -696,7 +720,7 @@ out:
    return 0;
}

static int nbd_read_reply(struct nbd_device *nbd, int index,
static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
                          struct nbd_reply *reply)
{
    struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
@ -705,7 +729,7 @@ static int nbd_read_reply(struct nbd_device *nbd, int index,

    reply->magic = 0;
    iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
    result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
    result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
    if (result < 0) {
        if (!nbd_disconnected(nbd->config))
            dev_err(disk_to_dev(nbd->disk),
@ -829,14 +853,14 @@ static void recv_work(struct work_struct *work)
    struct nbd_device *nbd = args->nbd;
    struct nbd_config *config = nbd->config;
    struct request_queue *q = nbd->disk->queue;
    struct nbd_sock *nsock;
    struct nbd_sock *nsock = args->nsock;
    struct nbd_cmd *cmd;
    struct request *rq;

    while (1) {
        struct nbd_reply reply;

        if (nbd_read_reply(nbd, args->index, &reply))
        if (nbd_read_reply(nbd, nsock->sock, &reply))
            break;

        /*
@ -871,7 +895,6 @@ static void recv_work(struct work_struct *work)
        percpu_ref_put(&q->q_usage_counter);
    }

    nsock = config->socks[args->index];
    mutex_lock(&nsock->tx_lock);
    nbd_mark_nsock_dead(nbd, nsock, 1);
    mutex_unlock(&nsock->tx_lock);
@ -977,12 +1000,12 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
    struct nbd_sock *nsock;
    int ret;

    if (!refcount_inc_not_zero(&nbd->config_refs)) {
    config = nbd_get_config_unlocked(nbd);
    if (!config) {
        dev_err_ratelimited(disk_to_dev(nbd->disk),
                            "Socks array is empty\n");
        return -EINVAL;
    }
    config = nbd->config;

    if (index >= config->num_connections) {
        dev_err_ratelimited(disk_to_dev(nbd->disk),
@ -1215,6 +1238,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
        INIT_WORK(&args->work, recv_work);
        args->index = i;
        args->nbd = nbd;
        args->nsock = nsock;
        nsock->cookie++;
        mutex_unlock(&nsock->tx_lock);
        sockfd_put(old);
@ -1397,6 +1421,7 @@ static int nbd_start_device(struct nbd_device *nbd)
        refcount_inc(&nbd->config_refs);
        INIT_WORK(&args->work, recv_work);
        args->nbd = nbd;
        args->nsock = config->socks[i];
        args->index = i;
        queue_work(nbd->recv_workq, &args->work);
    }
@ -1530,17 +1555,20 @@ static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
    return error;
}

static struct nbd_config *nbd_alloc_config(void)
static int nbd_alloc_and_init_config(struct nbd_device *nbd)
{
    struct nbd_config *config;

    if (WARN_ON(nbd->config))
        return -EINVAL;

    if (!try_module_get(THIS_MODULE))
        return ERR_PTR(-ENODEV);
        return -ENODEV;

    config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
    if (!config) {
        module_put(THIS_MODULE);
        return ERR_PTR(-ENOMEM);
        return -ENOMEM;
    }

    atomic_set(&config->recv_threads, 0);
@ -1548,12 +1576,24 @@ static struct nbd_config *nbd_alloc_config(void)
    init_waitqueue_head(&config->conn_wait);
    config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
    atomic_set(&config->live_connections, 0);
    return config;

    nbd->config = config;
    /*
     * Order refcount_set(&nbd->config_refs, 1) and nbd->config assignment,
     * its pair is the barrier in nbd_get_config_unlocked().
     * So nbd_get_config_unlocked() won't see nbd->config as null after
     * refcount_inc_not_zero() succeeds.
     */
    smp_mb__before_atomic();
    refcount_set(&nbd->config_refs, 1);

    return 0;
}

static int nbd_open(struct gendisk *disk, blk_mode_t mode)
{
    struct nbd_device *nbd;
    struct nbd_config *config;
    int ret = 0;

    mutex_lock(&nbd_index_mutex);
@ -1566,27 +1606,25 @@ static int nbd_open(struct gendisk *disk, blk_mode_t mode)
        ret = -ENXIO;
        goto out;
    }
    if (!refcount_inc_not_zero(&nbd->config_refs)) {
        struct nbd_config *config;

    config = nbd_get_config_unlocked(nbd);
    if (!config) {
        mutex_lock(&nbd->config_lock);
        if (refcount_inc_not_zero(&nbd->config_refs)) {
            mutex_unlock(&nbd->config_lock);
            goto out;
        }
        config = nbd_alloc_config();
        if (IS_ERR(config)) {
            ret = PTR_ERR(config);
        ret = nbd_alloc_and_init_config(nbd);
        if (ret) {
            mutex_unlock(&nbd->config_lock);
            goto out;
        }
        nbd->config = config;
        refcount_set(&nbd->config_refs, 1);

        refcount_inc(&nbd->refs);
        mutex_unlock(&nbd->config_lock);
        if (max_part)
            set_bit(GD_NEED_PART_SCAN, &disk->state);
    } else if (nbd_disconnected(nbd->config)) {
    } else if (nbd_disconnected(config)) {
        if (max_part)
            set_bit(GD_NEED_PART_SCAN, &disk->state);
    }
@ -1990,22 +2028,17 @@ again:
        pr_err("nbd%d already in use\n", index);
        return -EBUSY;
    }
    if (WARN_ON(nbd->config)) {
        mutex_unlock(&nbd->config_lock);
        nbd_put(nbd);
        return -EINVAL;
    }
    config = nbd_alloc_config();
    if (IS_ERR(config)) {

    ret = nbd_alloc_and_init_config(nbd);
    if (ret) {
        mutex_unlock(&nbd->config_lock);
        nbd_put(nbd);
        pr_err("couldn't allocate config\n");
        return PTR_ERR(config);
        return ret;
    }
    nbd->config = config;
    refcount_set(&nbd->config_refs, 1);
    set_bit(NBD_RT_BOUND, &config->runtime_flags);

    config = nbd->config;
    set_bit(NBD_RT_BOUND, &config->runtime_flags);
    ret = nbd_genl_size_set(info, nbd);
    if (ret)
        goto out;
@ -2208,7 +2241,8 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
    }
    mutex_unlock(&nbd_index_mutex);

    if (!refcount_inc_not_zero(&nbd->config_refs)) {
    config = nbd_get_config_unlocked(nbd);
    if (!config) {
        dev_err(nbd_to_dev(nbd),
                "not configured, cannot reconfigure\n");
        nbd_put(nbd);
@ -2216,7 +2250,6 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
    }

    mutex_lock(&nbd->config_lock);
    config = nbd->config;
    if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
        !nbd->pid) {
        dev_err(nbd_to_dev(nbd),
@ -1464,19 +1464,13 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
    return BLK_STS_OK;
}

static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
                                    sector_t nr_sectors, enum req_op op)
static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
                            sector_t nr_sectors, enum req_op op)
{
    struct nullb_device *dev = cmd->nq->dev;
    struct nullb *nullb = dev->nullb;
    blk_status_t sts;

    if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
        sts = null_handle_throttled(cmd);
        if (sts != BLK_STS_OK)
            return sts;
    }

    if (op == REQ_OP_FLUSH) {
        cmd->error = errno_to_blk_status(null_handle_flush(nullb));
        goto out;
@ -1493,7 +1487,6 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,

out:
    nullb_complete_cmd(cmd);
    return BLK_STS_OK;
}

static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
@ -1724,8 +1717,6 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
    cmd->fake_timeout = should_timeout_request(rq) ||
                        blk_should_fake_timeout(rq->q);

    blk_mq_start_request(rq);

    if (should_requeue_request(rq)) {
        /*
         * Alternate between hitting the core BUSY path, and the
@ -1738,6 +1729,15 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
        return BLK_STS_OK;
    }

    if (test_bit(NULLB_DEV_FL_THROTTLED, &nq->dev->flags)) {
        blk_status_t sts = null_handle_throttled(cmd);

        if (sts != BLK_STS_OK)
            return sts;
    }

    blk_mq_start_request(rq);

    if (is_poll) {
        spin_lock(&nq->poll_lock);
        list_add_tail(&rq->queuelist, &nq->poll_list);
@ -1747,7 +1747,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
    if (cmd->fake_timeout)
        return BLK_STS_OK;

    return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
    null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
    return BLK_STS_OK;
}

static void null_queue_rqs(struct request **rqlist)
@ -1093,9 +1093,10 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
        return -ENOMEM;
    hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
                            DPLL_CMD_PIN_ID_GET);
    if (!hdr)
    if (!hdr) {
        nlmsg_free(msg);
        return -EMSGSIZE;
    }
    pin = dpll_pin_find_from_nlattr(info);
    if (!IS_ERR(pin)) {
        ret = dpll_msg_add_pin_handle(msg, pin);
@ -1123,8 +1124,10 @@ int dpll_nl_pin_get_doit(struct sk_buff *skb, struct genl_info *info)
        return -ENOMEM;
    hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
                            DPLL_CMD_PIN_GET);
    if (!hdr)
    if (!hdr) {
        nlmsg_free(msg);
        return -EMSGSIZE;
    }
    ret = dpll_cmd_pin_get_one(msg, pin, info->extack);
    if (ret) {
        nlmsg_free(msg);
@ -1256,8 +1259,10 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)
        return -ENOMEM;
    hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
                            DPLL_CMD_DEVICE_ID_GET);
    if (!hdr)
    if (!hdr) {
        nlmsg_free(msg);
        return -EMSGSIZE;
    }

    dpll = dpll_device_find_from_nlattr(info);
    if (!IS_ERR(dpll)) {
@ -1284,8 +1289,10 @@ int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info)
        return -ENOMEM;
    hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
                            DPLL_CMD_DEVICE_GET);
    if (!hdr)
    if (!hdr) {
        nlmsg_free(msg);
        return -EMSGSIZE;
    }

    ret = dpll_device_get_one(dpll, msg, info->extack);
    if (ret) {
@ -131,7 +131,7 @@ config RASPBERRYPI_FIRMWARE

config FW_CFG_SYSFS
    tristate "QEMU fw_cfg device support in sysfs"
    depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86)
    depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || RISCV || SPARC || X86)
    depends on HAS_IOPORT_MAP
    default n
    help

@ -211,7 +211,7 @@ static void fw_cfg_io_cleanup(void)

/* arch-specific ctrl & data register offsets are not available in ACPI, DT */
#if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF))
# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV))
#  define FW_CFG_CTRL_OFF 0x08
#  define FW_CFG_DATA_OFF 0x00
#  define FW_CFG_DMA_OFF 0x10
@ -248,6 +248,7 @@ extern int amdgpu_umsch_mm;
extern int amdgpu_seamless;

extern int amdgpu_user_partt_mode;
extern int amdgpu_agp;

#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)

@ -207,7 +207,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
    }

    for (i = 0; i < p->nchunks; i++) {
        struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
        struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
        struct drm_amdgpu_cs_chunk user_chunk;
        uint32_t __user *cdata;

@ -207,6 +207,7 @@ int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE;
int amdgpu_umsch_mm;
int amdgpu_seamless = -1; /* auto */
uint amdgpu_debug_mask;
int amdgpu_agp = -1; /* auto */

static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);

@ -961,6 +962,15 @@ module_param_named(seamless, amdgpu_seamless, int, 0444);
MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default");
module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);

/**
 * DOC: agp (int)
 * Enable the AGP aperture. This provides an aperture in the GPU's internal
 * address space for direct access to system memory. Note that these accesses
 * are non-snooped, so they are only used for access to uncached memory.
 */
MODULE_PARM_DESC(agp, "AGP (-1 = auto (default), 0 = disable, 1 = enable)");
module_param_named(agp, amdgpu_agp, int, 0444);

/* These devices are not supported by amdgpu.
 * They are supported by the mach64, r128, radeon drivers
 */
@ -1473,6 +1473,11 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
            topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
                                           topology->nodes[i].num_links : node_num_links;
        }
        /* populate the connected port num info if supported and available */
        if (ta_port_num_support && topology->nodes[i].num_links) {
            memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
                   sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
        }

        /* reflect the topology information for bi-directionality */
        if (requires_reflection && topology->nodes[i].num_hops)
@ -150,6 +150,7 @@ struct psp_xgmi_node_info {
    uint8_t is_sharing_enabled;
    enum ta_xgmi_assigned_sdma_engine sdma_engine;
    uint8_t num_links;
    struct xgmi_connected_port_num port_num[TA_XGMI__MAX_PORT_NUM];
};

struct psp_xgmi_topology_info {

@ -1188,7 +1188,7 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
    }

    if (block_obj->hw_ops->query_ras_error_count)
        block_obj->hw_ops->query_ras_error_count(adev, &err_data);
        block_obj->hw_ops->query_ras_error_count(adev, err_data);

    if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
        (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||

@ -398,6 +398,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 * amdgpu_uvd_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring pointer to check
 *
 * Initialize the entity used for handle management in the kernel driver.
 */

@ -230,6 +230,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring pointer to check
 *
 * Initialize the entity used for handle management in the kernel driver.
 */

@ -675,7 +675,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
    amdgpu_gmc_set_agp_default(adev, mc);
    amdgpu_gmc_vram_location(adev, &adev->gmc, base);
    amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
    if (!amdgpu_sriov_vf(adev))
    if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
        amdgpu_gmc_agp_location(adev, mc);

    /* base offset of vram pages */

@ -640,8 +640,9 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
    amdgpu_gmc_set_agp_default(adev, mc);
    amdgpu_gmc_vram_location(adev, &adev->gmc, base);
    amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH);
    if (!amdgpu_sriov_vf(adev) ||
        (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)))
    if (!amdgpu_sriov_vf(adev) &&
        (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)) &&
        (amdgpu_agp == 1))
        amdgpu_gmc_agp_location(adev, mc);

    /* base offset of vram pages */

@ -1630,7 +1630,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
    } else {
        amdgpu_gmc_vram_location(adev, mc, base);
        amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
        if (!amdgpu_sriov_vf(adev))
        if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
            amdgpu_gmc_agp_location(adev, mc);
    }
    /* base offset of vram pages */
@ -2170,8 +2170,6 @@ static int gmc_v9_0_sw_fini(void *handle)

    if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
        amdgpu_gmc_sysfs_fini(adev);
    adev->gmc.num_mem_partitions = 0;
    kfree(adev->gmc.mem_partitions);

    amdgpu_gmc_ras_fini(adev);
    amdgpu_gem_force_release(adev);
@ -2185,6 +2183,9 @@ static int gmc_v9_0_sw_fini(void *handle)
    amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
    amdgpu_bo_fini(adev);

    adev->gmc.num_mem_partitions = 0;
    kfree(adev->gmc.mem_partitions);

    return 0;
}

@ -130,6 +130,9 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
    uint64_t value;
    int i;

    if (amdgpu_sriov_vf(adev))
        return;

    inst_mask = adev->aid_mask;
    for_each_inst(i, inst_mask) {
        /* Program the AGP BAR */
@ -139,9 +142,6 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
        WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP,
                     adev->gmc.agp_end >> 24);

        if (amdgpu_sriov_vf(adev))
            return;

        /* Program the system aperture low logical page number. */
        WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
@ -2079,7 +2079,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
    struct dmub_srv_create_params create_params;
    struct dmub_srv_region_params region_params;
    struct dmub_srv_region_info region_info;
    struct dmub_srv_fb_params fb_params;
    struct dmub_srv_memory_params memory_params;
    struct dmub_srv_fb_info *fb_info;
    struct dmub_srv *dmub_srv;
    const struct dmcub_firmware_header_v1_0 *hdr;
@ -2182,6 +2182,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        adev->dm.dmub_fw->data +
        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
        PSP_HEADER_BYTES;
    region_params.is_mailbox_in_inbox = false;

    status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                       &region_info);
@ -2205,10 +2206,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        return r;

    /* Rebase the regions on the framebuffer address. */
    memset(&fb_params, 0, sizeof(fb_params));
    fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
    fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
    fb_params.region_info = &region_info;
    memset(&memory_params, 0, sizeof(memory_params));
    memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
    memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
    memory_params.region_info = &region_info;

    adev->dm.dmub_fb_info =
        kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
@ -2220,7 +2221,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        return -ENOMEM;
    }

    status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
    status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
    if (status != DMUB_STATUS_OK) {
        DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
        return -EINVAL;
@ -7481,6 +7482,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
    int i;
    int result = -EIO;

    if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
        return result;

    cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

    if (!cmd.payloads)
@ -9603,14 +9607,14 @@ static bool should_reset_plane(struct drm_atomic_state *state,
    struct drm_plane *other;
    struct drm_plane_state *old_other_state, *new_other_state;
    struct drm_crtc_state *new_crtc_state;
    struct amdgpu_device *adev = drm_to_adev(plane->dev);
    int i;

    /*
     * TODO: Remove this hack once the checks below are sufficient
     * to determine when we need to reset all the planes on
     * the stream.
     * TODO: Remove this hack for all asics once it proves that
     * fast updates work fine on DCN3.2+.
     */
    if (state->allow_modeset)
    if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
        return true;

    /* Exit early if we know that we're adding or removing the plane. */
@ -536,11 +536,8 @@ bool dm_helpers_dp_read_dpcd(

    struct amdgpu_dm_connector *aconnector = link->priv;

    if (!aconnector) {
        drm_dbg_dp(aconnector->base.dev,
                   "Failed to find connector for link!\n");
    if (!aconnector)
        return false;
    }

    return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data,
                            size) == size;
@ -1604,31 +1604,31 @@ enum dc_status dm_dp_mst_is_port_support_mode(
    unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
    unsigned int max_compressed_bw_in_kbps = 0;
    struct dc_dsc_bw_range bw_range = {0};
    struct drm_dp_mst_topology_mgr *mst_mgr;
    uint16_t full_pbn = aconnector->mst_output_port->full_pbn;

    /*
     * check if the mode could be supported if DSC pass-through is supported
     * AND check if there is enough bandwidth available to support the mode
     * with DSC enabled.
     * Consider the case where the depth of the MST topology tree is equal to
     * or less than 2:
     * A. When the DSC bitstream can be transmitted along the entire path
     *    1. DSC is possible between source and branch/leaf device (common DSC params are possible), AND
     *    2. DSC passthrough supported at MST branch, or
     *    3. DSC decoding supported at leaf MST device
     *    Use maximum DSC compression as bw constraint
     * B. When the DSC bitstream cannot be transmitted along the entire path
     *    Use native bw as bw constraint
     */
    if (is_dsc_common_config_possible(stream, &bw_range) &&
        aconnector->mst_output_port->passthrough_aux) {
        mst_mgr = aconnector->mst_output_port->mgr;
        mutex_lock(&mst_mgr->lock);

        (aconnector->mst_output_port->passthrough_aux ||
         aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
        cur_link_settings = stream->link->verified_link_cap;

        upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
                                                       &cur_link_settings
                                                       );
        down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
                                                       &cur_link_settings);
        down_link_bw_in_kbps = kbps_from_pbn(full_pbn);

        /* pick the bottleneck */
        end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
                                    down_link_bw_in_kbps);

        mutex_unlock(&mst_mgr->lock);

        /*
         * use the maximum dsc compression bandwidth as the required
         * bandwidth for the mode
@ -1643,8 +1643,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
    /* check if mode could be supported within full_pbn */
    bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
    pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);

    if (pbn > aconnector->mst_output_port->full_pbn)
    if (pbn > full_pbn)
        return DC_FAIL_BANDWIDTH_VALIDATE;
}
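
Editor's note: a compact sketch of the bandwidth decision described in the
comment above (an editor's illustration with invented helper names, not code
from this series):

/* Pick the bandwidth constraint for an MST mode check: with end-to-end
 * DSC (case A) the bottleneck link under maximum compression is the
 * limit; otherwise (case B) fall back to the native bandwidth.
 */
static unsigned int pick_bw_constraint_kbps(bool dsc_possible,
                                            bool dsc_end_to_end,
                                            unsigned int upper_link_kbps,
                                            unsigned int down_link_kbps,
                                            unsigned int native_kbps)
{
    if (dsc_possible && dsc_end_to_end) {
        /* case A: the slower of the two links is the bottleneck */
        return upper_link_kbps < down_link_kbps ?
               upper_link_kbps : down_link_kbps;
    }

    /* case B: no end-to-end DSC, use native bandwidth */
    return native_kbps;
}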

@ -820,22 +820,22 @@ static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)

    if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
        dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
        val |= DMUB_IPS1_ALLOW_MASK;
        val |= DMUB_IPS2_ALLOW_MASK;
    } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
        val = val & ~DMUB_IPS1_ALLOW_MASK;
        val = val & ~DMUB_IPS2_ALLOW_MASK;
    } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
        val |= DMUB_IPS1_ALLOW_MASK;
        val = val & ~DMUB_IPS2_ALLOW_MASK;
    } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
    } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
        val |= DMUB_IPS1_ALLOW_MASK;
        val |= DMUB_IPS2_ALLOW_MASK;
    } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
        val = val & ~DMUB_IPS1_ALLOW_MASK;
        val |= DMUB_IPS2_ALLOW_MASK;
    } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
        val = val & ~DMUB_IPS1_ALLOW_MASK;
        val = val & ~DMUB_IPS2_ALLOW_MASK;
    }

    if (!allow_idle) {
        val = val & ~DMUB_IPS1_ALLOW_MASK;
        val = val & ~DMUB_IPS2_ALLOW_MASK;
        val |= DMUB_IPS1_ALLOW_MASK;
        val |= DMUB_IPS2_ALLOW_MASK;
    }

    dcn35_smu_write_ips_scratch(clk_mgr, val);

@ -3178,7 +3178,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
            struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
                                                                             context->streams[i]);

            if (otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
            if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
                resource_build_test_pattern_params(&context->res_ctx, otg_master);
        }
    }
@ -4934,8 +4934,8 @@ bool dc_dmub_is_ips_idle_state(struct dc *dc)
    if (dc->hwss.get_idle_state)
        idle_state = dc->hwss.get_idle_state(dc);

    if ((idle_state & DMUB_IPS1_ALLOW_MASK) ||
        (idle_state & DMUB_IPS2_ALLOW_MASK))
    if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
        !(idle_state & DMUB_IPS2_ALLOW_MASK))
        return true;

    return false;

@ -5190,6 +5190,9 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy(
    sec_next = sec_pipe->next_odm_pipe;
    sec_prev = sec_pipe->prev_odm_pipe;

    if (pri_pipe == NULL)
        return false;

    *sec_pipe = *pri_pipe;

    sec_pipe->top_pipe = sec_top;

@ -1202,11 +1202,11 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
    allow_state = dc->hwss.get_idle_state(dc);
    dc->hwss.set_idle_state(dc, false);

    if (allow_state & DMUB_IPS2_ALLOW_MASK) {
    if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
        // Wait for evaluation time
        udelay(dc->debug.ips2_eval_delay_us);
        commit_state = dc->hwss.get_idle_state(dc);
        if (commit_state & DMUB_IPS2_COMMIT_MASK) {
        if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
            // Tell PMFW to exit low power state
            dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);

@ -1216,7 +1216,7 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)

            for (i = 0; i < max_num_polls; ++i) {
                commit_state = dc->hwss.get_idle_state(dc);
                if (!(commit_state & DMUB_IPS2_COMMIT_MASK))
                if (commit_state & DMUB_IPS2_COMMIT_MASK)
                    break;

                udelay(1);
@ -1235,10 +1235,10 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
    }

    dc_dmub_srv_notify_idle(dc, false);
    if (allow_state & DMUB_IPS1_ALLOW_MASK) {
    if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
        for (i = 0; i < max_num_polls; ++i) {
            commit_state = dc->hwss.get_idle_state(dc);
            if (!(commit_state & DMUB_IPS1_COMMIT_MASK))
            if (commit_state & DMUB_IPS1_COMMIT_MASK)
                break;

            udelay(1);
@ -177,6 +177,7 @@ struct dc_panel_patch {
    unsigned int disable_fams;
    unsigned int skip_avmute;
    unsigned int mst_start_top_delay;
    unsigned int remove_sink_ext_caps;
};

struct dc_edid_caps {

@ -261,12 +261,6 @@ static void enc35_stream_encoder_enable(
            /* invalid mode ! */
            ASSERT_CRITICAL(false);
        }

        REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 1);
        REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 1);
    } else {
        REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 0);
        REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 0);
    }
}

@ -436,6 +430,8 @@ static void enc35_disable_fifo(struct stream_encoder *enc)
    struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

    REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
    REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 0);
    REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 0);
}

static void enc35_enable_fifo(struct stream_encoder *enc)
@ -443,6 +439,8 @@ static void enc35_enable_fifo(struct stream_encoder *enc)
    struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

    REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
    REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 1);
    REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 1);

    enc35_reset_fifo(enc, true);
    enc35_reset_fifo(enc, false);

@ -1088,6 +1088,9 @@ static bool detect_link_and_local_sink(struct dc_link *link,
            if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
                link->ctx->dc->debug.hdmi20_disable = true;

            if (sink->edid_caps.panel_patch.remove_sink_ext_caps)
                link->dpcd_sink_ext_caps.raw = 0;

            if (dc_is_hdmi_signal(link->connector_signal))
                read_scdc_caps(link->ddc, link->local_sink);
@ -195,6 +195,7 @@ struct dmub_srv_region_params {
    uint32_t vbios_size;
    const uint8_t *fw_inst_const;
    const uint8_t *fw_bss_data;
    bool is_mailbox_in_inbox;
};

/**
@ -214,20 +215,25 @@ struct dmub_srv_region_params {
 */
struct dmub_srv_region_info {
    uint32_t fb_size;
    uint32_t inbox_size;
    uint8_t num_regions;
    struct dmub_region regions[DMUB_WINDOW_TOTAL];
};

/**
 * struct dmub_srv_fb_params - parameters used for driver fb setup
 * struct dmub_srv_memory_params - parameters used for driver fb setup
 * @region_info: region info calculated by dmub service
 * @cpu_addr: base cpu address for the framebuffer
 * @gpu_addr: base gpu virtual address for the framebuffer
 * @cpu_fb_addr: base cpu address for the framebuffer
 * @cpu_inbox_addr: base cpu address for the gart
 * @gpu_fb_addr: base gpu virtual address for the framebuffer
 * @gpu_inbox_addr: base gpu virtual address for the gart
 */
struct dmub_srv_fb_params {
struct dmub_srv_memory_params {
    const struct dmub_srv_region_info *region_info;
    void *cpu_addr;
    uint64_t gpu_addr;
    void *cpu_fb_addr;
    void *cpu_inbox_addr;
    uint64_t gpu_fb_addr;
    uint64_t gpu_inbox_addr;
};

/**
@ -563,8 +569,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
 * DMUB_STATUS_OK - success
 * DMUB_STATUS_INVALID - unspecified error
 */
enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
                                       const struct dmub_srv_fb_params *params,
enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
                                        const struct dmub_srv_memory_params *params,
                                        struct dmub_srv_fb_info *out);

/**

@ -434,7 +434,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
    uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
    uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
    uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;

    uint32_t previous_top = 0;
    if (!dmub->sw_init)
        return DMUB_STATUS_INVALID;

@ -459,8 +459,15 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
    bios->base = dmub_align(stack->top, 256);
    bios->top = bios->base + params->vbios_size;

    mail->base = dmub_align(bios->top, 256);
    mail->top = mail->base + DMUB_MAILBOX_SIZE;
    if (params->is_mailbox_in_inbox) {
        mail->base = 0;
        mail->top = mail->base + DMUB_MAILBOX_SIZE;
        previous_top = bios->top;
    } else {
        mail->base = dmub_align(bios->top, 256);
        mail->top = mail->base + DMUB_MAILBOX_SIZE;
        previous_top = mail->top;
    }

    fw_info = dmub_get_fw_meta_info(params);

@ -479,7 +486,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
        dmub->fw_version = fw_info->fw_version;
    }

    trace_buff->base = dmub_align(mail->top, 256);
    trace_buff->base = dmub_align(previous_top, 256);
    trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);

    fw_state->base = dmub_align(trace_buff->top, 256);
@ -490,11 +497,14 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,

    out->fb_size = dmub_align(scratch_mem->top, 4096);

    if (params->is_mailbox_in_inbox)
        out->inbox_size = dmub_align(mail->top, 4096);

    return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
                                       const struct dmub_srv_fb_params *params,
enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
                                        const struct dmub_srv_memory_params *params,
                                        struct dmub_srv_fb_info *out)
{
    uint8_t *cpu_base;
@ -509,8 +519,8 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
    if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
        return DMUB_STATUS_INVALID;

    cpu_base = (uint8_t *)params->cpu_addr;
    gpu_base = params->gpu_addr;
    cpu_base = (uint8_t *)params->cpu_fb_addr;
    gpu_base = params->gpu_fb_addr;

    for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
        const struct dmub_region *reg =
@ -518,6 +528,12 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,

        out->fb[i].cpu_addr = cpu_base + reg->base;
        out->fb[i].gpu_addr = gpu_base + reg->base;

        if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
            out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
            out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
        }

        out->fb[i].size = reg->top - reg->base;
    }

@ -707,9 +723,16 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
        return DMUB_STATUS_INVALID;

    if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
        dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
        dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
        dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
        uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
        uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);

        if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
            return DMUB_STATUS_HW_FAILURE;
        } else {
            dmub->inbox1_rb.rptr = rptr;
            dmub->inbox1_rb.wrpt = wptr;
            dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
        }
    }

    return DMUB_STATUS_OK;
@ -743,6 +766,11 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
    if (!dmub->hw_init)
        return DMUB_STATUS_INVALID;

    if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
        dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
        return DMUB_STATUS_HW_FAILURE;
    }

    if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
        return DMUB_STATUS_OK;
|
@ -123,7 +123,7 @@ typedef enum {
|
||||
VOLTAGE_GUARDBAND_COUNT
|
||||
} GFX_GUARDBAND_e;
|
||||
|
||||
#define SMU_METRICS_TABLE_VERSION 0x8
|
||||
#define SMU_METRICS_TABLE_VERSION 0x9
|
||||
|
||||
typedef struct __attribute__((packed, aligned(4))) {
|
||||
uint32_t AccumulationCounter;
|
||||
@ -211,6 +211,14 @@ typedef struct __attribute__((packed, aligned(4))) {
|
||||
//XGMI Data tranfser size
|
||||
uint64_t XgmiReadDataSizeAcc[8];//in KByte
|
||||
uint64_t XgmiWriteDataSizeAcc[8];//in KByte
|
||||
|
||||
//PCIE BW Data and error count
|
||||
uint32_t PcieBandwidth[4];
|
||||
uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated
|
||||
uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated
|
||||
uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated
|
||||
uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated
|
||||
uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated
|
||||
} MetricsTable_t;
|
||||
|
||||
#define SMU_VF_METRICS_TABLE_VERSION 0x3
|
||||
|
@ -1454,7 +1454,7 @@ static int smu_v13_0_6_register_irq_handler(struct smu_context *smu)
|
||||
|
||||
static int smu_v13_0_6_notify_unload(struct smu_context *smu)
|
||||
{
|
||||
if (smu->smc_fw_version <= 0x553500)
|
||||
if (amdgpu_in_reset(smu->adev))
|
||||
return 0;
|
||||
|
||||
dev_dbg(smu->adev->dev, "Notify PMFW about driver unload");
|
||||
@ -2095,6 +2095,14 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
|
||||
smu_v13_0_6_get_current_pcie_link_speed(smu);
|
||||
gpu_metrics->pcie_bandwidth_acc =
|
||||
SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]);
|
||||
gpu_metrics->pcie_bandwidth_inst =
|
||||
SMUQ10_ROUND(metrics->PcieBandwidth[0]);
|
||||
gpu_metrics->pcie_l0_to_recov_count_acc =
|
||||
metrics->PCIeL0ToRecoveryCountAcc;
|
||||
gpu_metrics->pcie_replay_count_acc =
|
||||
metrics->PCIenReplayAAcc;
|
||||
gpu_metrics->pcie_replay_rover_count_acc =
|
||||
metrics->PCIenReplayARolloverCountAcc;
|
||||
}
|
||||
|
||||
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
|
||||
|
@ -174,6 +174,17 @@ to_ast_sil164_connector(struct drm_connector *connector)
    return container_of(connector, struct ast_sil164_connector, base);
}

struct ast_bmc_connector {
    struct drm_connector base;
    struct drm_connector *physical_connector;
};

static inline struct ast_bmc_connector *
to_ast_bmc_connector(struct drm_connector *connector)
{
    return container_of(connector, struct ast_bmc_connector, base);
}

/*
 * Device
 */
@ -218,7 +229,7 @@ struct ast_device {
        } astdp;
        struct {
            struct drm_encoder encoder;
            struct drm_connector connector;
            struct ast_bmc_connector bmc_connector;
        } bmc;
    } output;

@ -1767,6 +1767,30 @@ static const struct drm_encoder_funcs ast_bmc_encoder_funcs = {
    .destroy = drm_encoder_cleanup,
};

static int ast_bmc_connector_helper_detect_ctx(struct drm_connector *connector,
                                               struct drm_modeset_acquire_ctx *ctx,
                                               bool force)
{
    struct ast_bmc_connector *bmc_connector = to_ast_bmc_connector(connector);
    struct drm_connector *physical_connector = bmc_connector->physical_connector;

    /*
     * Most user-space compositors cannot handle more than one connected
     * connector per CRTC. Hence, we only mark the BMC as connected if the
     * physical connector is disconnected. If the physical connector's status
     * is connected or unknown, the BMC remains disconnected. This has no
     * effect on the output of the BMC.
     *
     * FIXME: Remove this logic once user-space compositors can handle more
     * than one connector per CRTC. The BMC should always be connected.
     */

    if (physical_connector && physical_connector->status == connector_status_disconnected)
        return connector_status_connected;

    return connector_status_disconnected;
}

static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)
{
    return drm_add_modes_noedid(connector, 4096, 4096);
@ -1774,6 +1798,7 @@ static int ast_bmc_connector_helper_get_modes(struct drm_connector *connector)

static const struct drm_connector_helper_funcs ast_bmc_connector_helper_funcs = {
    .get_modes = ast_bmc_connector_helper_get_modes,
    .detect_ctx = ast_bmc_connector_helper_detect_ctx,
};

static const struct drm_connector_funcs ast_bmc_connector_funcs = {
@ -1784,12 +1809,33 @@ static const struct drm_connector_funcs ast_bmc_connector_funcs = {
    .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int ast_bmc_output_init(struct ast_device *ast)
static int ast_bmc_connector_init(struct drm_device *dev,
                                  struct ast_bmc_connector *bmc_connector,
                                  struct drm_connector *physical_connector)
{
    struct drm_connector *connector = &bmc_connector->base;
    int ret;

    ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
                             DRM_MODE_CONNECTOR_VIRTUAL);
    if (ret)
        return ret;

    drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);

    bmc_connector->physical_connector = physical_connector;

    return 0;
}

static int ast_bmc_output_init(struct ast_device *ast,
                               struct drm_connector *physical_connector)
{
    struct drm_device *dev = &ast->base;
    struct drm_crtc *crtc = &ast->crtc;
    struct drm_encoder *encoder = &ast->output.bmc.encoder;
    struct drm_connector *connector = &ast->output.bmc.connector;
    struct ast_bmc_connector *bmc_connector = &ast->output.bmc.bmc_connector;
    struct drm_connector *connector = &bmc_connector->base;
    int ret;

    ret = drm_encoder_init(dev, encoder,
@ -1799,13 +1845,10 @@ static int ast_bmc_output_init(struct ast_device *ast)
        return ret;
    encoder->possible_crtcs = drm_crtc_mask(crtc);

    ret = drm_connector_init(dev, connector, &ast_bmc_connector_funcs,
                             DRM_MODE_CONNECTOR_VIRTUAL);
    ret = ast_bmc_connector_init(dev, bmc_connector, physical_connector);
    if (ret)
        return ret;

    drm_connector_helper_add(connector, &ast_bmc_connector_helper_funcs);

    ret = drm_connector_attach_encoder(connector, encoder);
    if (ret)
        return ret;
@ -1864,6 +1907,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
int ast_mode_config_init(struct ast_device *ast)
{
    struct drm_device *dev = &ast->base;
    struct drm_connector *physical_connector = NULL;
    int ret;

    ret = drmm_mode_config_init(dev);
@ -1904,23 +1948,27 @@ int ast_mode_config_init(struct ast_device *ast)
        ret = ast_vga_output_init(ast);
        if (ret)
            return ret;
        physical_connector = &ast->output.vga.vga_connector.base;
    }
    if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
        ret = ast_sil164_output_init(ast);
        if (ret)
            return ret;
        physical_connector = &ast->output.sil164.sil164_connector.base;
    }
    if (ast->tx_chip_types & AST_TX_DP501_BIT) {
        ret = ast_dp501_output_init(ast);
        if (ret)
            return ret;
        physical_connector = &ast->output.dp501.connector;
    }
    if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
        ret = ast_astdp_output_init(ast);
        if (ret)
            return ret;
        physical_connector = &ast->output.astdp.connector;
    }
    ret = ast_bmc_output_init(ast);
    ret = ast_bmc_output_init(ast, physical_connector);
    if (ret)
        return ret;
@ -5,7 +5,7 @@ termcolor==2.3.0
certifi==2023.7.22
charset-normalizer==3.2.0
idna==3.4
pip==23.2.1
pip==23.3
python-gitlab==3.15.0
requests==2.31.0
requests-toolbelt==1.0.0
@ -13,5 +13,5 @@ ruamel.yaml==0.17.32
ruamel.yaml.clib==0.2.7
setuptools==68.0.0
tenacity==8.2.3
urllib3==2.0.4
wheel==0.41.1
urllib3==2.0.7
wheel==0.41.1
@ -336,6 +336,12 @@ static const struct dmi_system_id orientation_data[] = {
          DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
        },
        .driver_data = (void *)&lcd1200x1920_rightside_up,
    }, { /* Lenovo Legion Go 8APU1 */
        .matches = {
          DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
          DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8APU1"),
        },
        .driver_data = (void *)&lcd1600x2560_leftside_up,
    }, { /* Lenovo Yoga Book X90F / X90L */
        .matches = {
          DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
@ -1161,6 +1161,14 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
    intel_connector->port = port;
    drm_dp_mst_get_port_malloc(port);

    /*
     * TODO: set the AUX for the actual MST port decompressing the stream.
     * At the moment the driver only supports enabling this globally in the
     * first downstream MST branch, via intel_dp's (root port) AUX.
     */
    intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;
    intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);

    connector = &intel_connector->base;
    ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
                             DRM_MODE_CONNECTOR_DisplayPort);
@ -1172,14 +1180,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo

    drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);

    /*
     * TODO: set the AUX for the actual MST port decompressing the stream.
     * At the moment the driver only supports enabling this globally in the
     * first downstream MST branch, via intel_dp's (root port) AUX.
     */
    intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;
    intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);

    for_each_pipe(dev_priv, pipe) {
        struct drm_encoder *enc =
            &intel_dp->mst_encoders[pipe]->base.base;