Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-16 02:14:58 +00:00)

This is the 6.12.5 stable release

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmdd11UACgkQONu9yGCS
aT5dPBAAsP8dl7AvHyza22AUZ84ydiL5ddw7tOVmz+4f3Iz2sIUZrUYR4KX+hWmy
M6wRzuOL73Zm3JrzcXzOx6BDvxM4W+wbAvEJEMRsW0lVEcNAr8VAADJQJE+ZRSu3
TEuoRBFJRQJh63Y0BIo+TpdVwi7xLQ2FxYM2bZVagwH1MSGH1zw0IA8kwe+XBsPm
2Wm1m+2Ge6xld+C+b85LVJ3NayQJ3nkhGZpdSyWIzqhmR6/nXhVC8t03M8EvyKKm
sIu45Qzp+TutZ5YY7DWK/5soqxFa6Tb+Bvh2bSaUweGjS0R6PQEKRRHWEWUv2ZwR
spntDebm7sz566Yf6LDHiaXlsYh3KAsXd0myVubDGVvOnE4MD3kwPFWYArMgnsTy
Ab0YIIAehhLed6519MKKxh56gxlDJrvvQk+rCeTNL2WcWVFZ54TVbXzAhiqKPqgy
ET7jPmEaHI0ncjAO7bymMo94TKVbsrROKiPyN6jpT4Ax5bhDe5iILZ6AhNm+R2hX
8d7eIy086K1Mn7wC9R4o+jdJj4EAuLBidnK5CDHAXZNjVmA3nFmgKRxEmLEk0AAI
QnXB/YP2C30XkSESOSdIsa7kiE9OEeFC7eQ/gcR3a4xCSUYyLvQy9+sWlQ+OBM49
zIN07K7Qox7sL6gFA/Hd8c4rjc3L2rs2+kWNBJNESYfCo9f6WBM=
=7ueo
-----END PGP SIGNATURE-----

Merge v6.12.5

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

This commit is contained in: commit 42c1a94379
@@ -163,6 +163,17 @@ Description:
		will be present in sysfs. Writing 1 to this file
		will perform reset.

What:		/sys/bus/pci/devices/.../reset_subordinate
Date:		October 2024
Contact:	linux-pci@vger.kernel.org
Description:
		This is visible only for bridge devices. If you want to reset
		all devices attached through the subordinate bus of a specific
		bridge device, writing 1 to this will try to do it. This will
		affect all devices attached to the system through this bridge
		similar to writing 1 to their individual "reset" file, so use
		with caution.
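A minimal userspace sketch of driving the new attribute; the bridge address 0000:00:1c.0 is an invented placeholder (pick a real bridge from lspci), not something this patch defines:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical bridge address; substitute a real one. */
	const char *path =
		"/sys/bus/pci/devices/0000:00:1c.0/reset_subordinate";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Writing "1" resets every device below this bridge. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}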
What:		/sys/bus/pci/devices/.../vpd
Date:		February 2008
Contact:	Ben Hutchings <bwh@kernel.org>
@@ -822,3 +822,9 @@ Description: It controls the valid block ratio threshold not to trigger excessive
		for zoned devices. The initial value of it is 95(%). F2FS will stop the
		background GC thread from initiating GC for sections having valid blocks
		exceeding the ratio.

What:		/sys/fs/f2fs/<disk>/max_read_extent_count
Date:		November 2024
Contact:	"Chao Yu" <chao@kernel.org>
Description:	It controls the max read extent count per inode; the
		default threshold is 10240.
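A small userspace sketch of reading and tuning this knob; the <disk> path component is a placeholder for a real device name, and the value 4096 is just an example:

#include <stdio.h>

int main(void)
{
	/* "<disk>" must be replaced with the actual device name. */
	const char *path = "/sys/fs/f2fs/<disk>/max_read_extent_count";
	unsigned int count;
	FILE *f = fopen(path, "r+");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%u", &count) == 1)
		printf("current threshold: %u\n", count); /* 10240 by default */
	rewind(f);
	fprintf(f, "4096\n");	/* lower the per-inode read extent cap */
	fclose(f);
	return 0;
}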
Documentation/accel/qaic/aic080.rst (new file, 14 lines)
@@ -0,0 +1,14 @@
.. SPDX-License-Identifier: GPL-2.0-only

===============================
 Qualcomm Cloud AI 80 (AIC080)
===============================

Overview
========

The Qualcomm Cloud AI 80/AIC080 family of products is a derivative of AIC100.
The number of NSPs and clock rates are reduced to fit within resource
constrained solutions. The PCIe Product ID is 0xa080.

As a derivative product, all AIC100 documentation applies.
@@ -10,4 +10,5 @@ accelerator cards.
.. toctree::

   qaic
   aic080
   aic100
@@ -258,6 +258,8 @@ stable kernels.
| Hisilicon      | Hip{08,09,10,10C| #162001900      | N/A                         |
|                | ,11} SMMU PMCG  |                 |                             |
+----------------+-----------------+-----------------+-----------------------------+
| Hisilicon      | Hip09           | #162100801      | HISILICON_ERRATUM_162100801 |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |
+----------------+-----------------+-----------------+-----------------------------+
@@ -49,6 +49,7 @@ Supported adapters:
 * Intel Meteor Lake (SOC and PCH)
 * Intel Birch Stream (SOC)
 * Intel Arrow Lake (SOC)
 * Intel Panther Lake (SOC)

Datasheets: Publicly available at the Intel website
@@ -96,7 +96,12 @@ attribute-sets:
        name: bits
        type: nest
        nested-attributes: bitset-bits

      -
        name: value
        type: binary
      -
        name: mask
        type: binary
  -
    name: string
    attributes:
Makefile (3 lines changed)
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 12
SUBLEVEL = 4
SUBLEVEL = 5
EXTRAVERSION =
NAME = Baby Opossum Posse

@@ -456,6 +456,7 @@ export rust_common_flags := --edition=2021 \
			-Wclippy::mut_mut \
			-Wclippy::needless_bitwise_bool \
			-Wclippy::needless_continue \
			-Aclippy::needless_lifetimes \
			-Wclippy::no_mangle_with_rust_abi \
			-Wclippy::dbg_macro
@@ -1232,6 +1232,17 @@ config HISILICON_ERRATUM_161600802

	  If unsure, say Y.

config HISILICON_ERRATUM_162100801
	bool "Hip09 162100801 erratum support"
	default y
	help
	  When enabling GICv4.1 in hip09, VMAPP will fail to clear some caches
	  during the unmapping operation, which will cause some vSGIs to be
	  lost. To fix the issue, invalidate the related vPE cache through
	  GICR_INVALLR after VMOVP.

	  If unsure, say Y.

config QCOM_FALKOR_ERRATUM_1003
	bool "Falkor E1003: Incorrect translation due to ASID change"
	default y
@@ -719,6 +719,8 @@ static int fpmr_set(struct task_struct *target, const struct user_regset *regset
	if (!system_supports_fpmr())
		return -EINVAL;

	fpmr = target->thread.uw.fpmr;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count);
	if (ret)
		return ret;
@@ -1418,7 +1420,7 @@ static int tagged_addr_ctrl_get(struct task_struct *target,
{
	long ctrl = get_tagged_addr_ctrl(target);

	if (IS_ERR_VALUE(ctrl))
	if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
		return ctrl;

	return membuf_write(&to, &ctrl, sizeof(ctrl));
@@ -1432,6 +1434,10 @@ static int tagged_addr_ctrl_set(struct task_struct *target, const struct
	int ret;
	long ctrl;

	ctrl = get_tagged_addr_ctrl(target);
	if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
		return ctrl;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;
@@ -1463,6 +1469,8 @@ static int poe_set(struct task_struct *target, const struct
	if (!system_supports_poe())
		return -EINVAL;

	ctrl = target->thread.por_el0;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;
@@ -32,9 +32,9 @@ static unsigned long nr_pinned_asids;
static unsigned long *pinned_asid_map;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)
#define ASID_FIRST_VERSION	(1UL << 16)

#define NUM_USER_ASIDS		ASID_FIRST_VERSION
#define NUM_USER_ASIDS		(1UL << asid_bits)
#define ctxid2asid(asid)	((asid) & ~ASID_MASK)
#define asid2ctxid(asid, genid)	((asid) | (genid))
@@ -116,15 +116,6 @@ static void __init arch_reserve_crashkernel(void)

static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
{
	/**
	 * Information we get from firmware (e.g. DT dma-ranges) describe DMA
	 * bus constraints. Devices using DMA might have their own limitations.
	 * Some of them rely on DMA zone in low 32-bit memory. Keep low RAM
	 * DMA zone on platforms that have RAM there.
	 */
	if (memblock_start_of_DRAM() < U32_MAX)
		zone_limit = min(zone_limit, U32_MAX);

	return min(zone_limit, memblock_end_of_DRAM() - 1) + 1;
}

@@ -140,6 +131,14 @@ static void __init zone_sizes_init(void)
	acpi_zone_dma_limit = acpi_iort_dma_get_max_cpu_address();
	dt_zone_dma_limit = of_dma_get_max_cpu_address(NULL);
	zone_dma_limit = min(dt_zone_dma_limit, acpi_zone_dma_limit);
	/*
	 * Information we get from firmware (e.g. DT dma-ranges) describe DMA
	 * bus constraints. Devices using DMA might have their own limitations.
	 * Some of them rely on DMA zone in low 32-bit memory. Keep low RAM
	 * DMA zone on platforms that have RAM there.
	 */
	if (memblock_start_of_DRAM() < U32_MAX)
		zone_dma_limit = min(zone_dma_limit, U32_MAX);
	arm64_dma_phys_limit = max_zone_phys(zone_dma_limit);
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
@@ -29,6 +29,16 @@ static inline int prepare_hugepage_range(struct file *file,
	return 0;
}

#define __HAVE_ARCH_HUGE_PTE_CLEAR
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, unsigned long sz)
{
	pte_t clear;

	pte_val(clear) = (unsigned long)invalid_pte_table;
	set_pte_at(mm, addr, ptep, clear);
}

#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
@@ -240,7 +240,7 @@ static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
 */
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
	int ret;
	int idx, ret;

	/*
	 * Check conditions before entering the guest
@@ -249,7 +249,9 @@ static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
	if (ret < 0)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_check_requests(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
@@ -289,7 +289,7 @@ static void setup_tlb_handler(int cpu)
	/* Avoid lockdep warning */
	rcutree_report_cpu_starting(cpu);

#ifdef CONFIG_NUMA
#if defined(CONFIG_NUMA) && !defined(CONFIG_PREEMPT_RT)
	vec_sz = sizeof(exception_handlers);

	if (pcpu_handlers[cpu])
@@ -70,7 +70,6 @@
		device_type = "pci";
		#address-cells = <3>;
		#size-cells = <2>;
		#interrupt-cells = <2>;
		msi-parent = <&msi>;

		reg = <0 0x1a000000 0 0x02000000>,
@@ -234,7 +233,7 @@
			};
		};

		pci_bridge@9,0 {
		pcie@9,0 {
			compatible = "pci0014,7a19.1",
				     "pci0014,7a19",
				     "pciclass060400",
@@ -244,12 +243,16 @@
			interrupts = <32 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-parent = <&pic>;

			#address-cells = <3>;
			#size-cells = <2>;
			device_type = "pci";
			#interrupt-cells = <1>;
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &pic 32 IRQ_TYPE_LEVEL_HIGH>;
			ranges;
		};

		pci_bridge@a,0 {
		pcie@a,0 {
			compatible = "pci0014,7a09.1",
				     "pci0014,7a09",
				     "pciclass060400",
@@ -259,12 +262,16 @@
			interrupts = <33 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-parent = <&pic>;

			#address-cells = <3>;
			#size-cells = <2>;
			device_type = "pci";
			#interrupt-cells = <1>;
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &pic 33 IRQ_TYPE_LEVEL_HIGH>;
			ranges;
		};

		pci_bridge@b,0 {
		pcie@b,0 {
			compatible = "pci0014,7a09.1",
				     "pci0014,7a09",
				     "pciclass060400",
@@ -274,12 +281,16 @@
			interrupts = <34 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-parent = <&pic>;

			#address-cells = <3>;
			#size-cells = <2>;
			device_type = "pci";
			#interrupt-cells = <1>;
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &pic 34 IRQ_TYPE_LEVEL_HIGH>;
			ranges;
		};

		pci_bridge@c,0 {
		pcie@c,0 {
			compatible = "pci0014,7a09.1",
				     "pci0014,7a09",
				     "pciclass060400",
@@ -289,12 +300,16 @@
			interrupts = <35 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-parent = <&pic>;

			#address-cells = <3>;
			#size-cells = <2>;
			device_type = "pci";
			#interrupt-cells = <1>;
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &pic 35 IRQ_TYPE_LEVEL_HIGH>;
			ranges;
		};

		pci_bridge@d,0 {
		pcie@d,0 {
			compatible = "pci0014,7a19.1",
				     "pci0014,7a19",
				     "pciclass060400",
@@ -304,12 +319,16 @@
			interrupts = <36 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-parent = <&pic>;

			#address-cells = <3>;
			#size-cells = <2>;
			device_type = "pci";
			#interrupt-cells = <1>;
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &pic 36 IRQ_TYPE_LEVEL_HIGH>;
			ranges;
		};

		pci_bridge@e,0 {
		pcie@e,0 {
			compatible = "pci0014,7a09.1",
				     "pci0014,7a09",
				     "pciclass060400",
@@ -319,12 +338,16 @@
			interrupts = <37 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-parent = <&pic>;

			#address-cells = <3>;
			#size-cells = <2>;
			device_type = "pci";
			#interrupt-cells = <1>;
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &pic 37 IRQ_TYPE_LEVEL_HIGH>;
			ranges;
		};

		pci_bridge@f,0 {
		pcie@f,0 {
			compatible = "pci0014,7a29.1",
				     "pci0014,7a29",
				     "pciclass060400",
@@ -334,12 +357,16 @@
			interrupts = <40 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-parent = <&pic>;

			#address-cells = <3>;
			#size-cells = <2>;
			device_type = "pci";
			#interrupt-cells = <1>;
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &pic 40 IRQ_TYPE_LEVEL_HIGH>;
			ranges;
		};

		pci_bridge@10,0 {
		pcie@10,0 {
			compatible = "pci0014,7a19.1",
				     "pci0014,7a19",
				     "pciclass060400",
@@ -349,12 +376,16 @@
			interrupts = <41 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-parent = <&pic>;

			#address-cells = <3>;
			#size-cells = <2>;
			device_type = "pci";
			#interrupt-cells = <1>;
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &pic 41 IRQ_TYPE_LEVEL_HIGH>;
			ranges;
		};

		pci_bridge@11,0 {
		pcie@11,0 {
			compatible = "pci0014,7a29.1",
				     "pci0014,7a29",
				     "pciclass060400",
@@ -364,12 +395,16 @@
			interrupts = <42 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-parent = <&pic>;

			#address-cells = <3>;
			#size-cells = <2>;
			device_type = "pci";
			#interrupt-cells = <1>;
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &pic 42 IRQ_TYPE_LEVEL_HIGH>;
			ranges;
		};

		pci_bridge@12,0 {
		pcie@12,0 {
			compatible = "pci0014,7a19.1",
				     "pci0014,7a19",
				     "pciclass060400",
@@ -379,12 +414,16 @@
			interrupts = <43 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-parent = <&pic>;

			#address-cells = <3>;
			#size-cells = <2>;
			device_type = "pci";
			#interrupt-cells = <1>;
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &pic 43 IRQ_TYPE_LEVEL_HIGH>;
			ranges;
		};

		pci_bridge@13,0 {
		pcie@13,0 {
			compatible = "pci0014,7a29.1",
				     "pci0014,7a29",
				     "pciclass060400",
@@ -394,12 +433,16 @@
			interrupts = <38 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-parent = <&pic>;

			#address-cells = <3>;
			#size-cells = <2>;
			device_type = "pci";
			#interrupt-cells = <1>;
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &pic 38 IRQ_TYPE_LEVEL_HIGH>;
			ranges;
		};

		pci_bridge@14,0 {
		pcie@14,0 {
			compatible = "pci0014,7a19.1",
				     "pci0014,7a19",
				     "pciclass060400",
@@ -409,9 +452,13 @@
			interrupts = <39 IRQ_TYPE_LEVEL_HIGH>;
			interrupt-parent = <&pic>;

			#address-cells = <3>;
			#size-cells = <2>;
			device_type = "pci";
			#interrupt-cells = <1>;
			interrupt-map-mask = <0 0 0 0>;
			interrupt-map = <0 0 0 0 &pic 39 IRQ_TYPE_LEVEL_HIGH>;
			ranges;
		};
	};
@@ -2932,7 +2932,7 @@ static void __init fixup_device_tree_chrp(void)
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
static void __init fixup_device_tree_pmac(void)
static void __init fixup_device_tree_pmac64(void)
{
	phandle u3, i2c, mpic;
	u32 u3_rev;
@@ -2972,7 +2972,31 @@ static void __init fixup_device_tree_pmac(void)
		     &parent, sizeof(parent));
}
#else
#define fixup_device_tree_pmac()
#define fixup_device_tree_pmac64()
#endif

#ifdef CONFIG_PPC_PMAC
static void __init fixup_device_tree_pmac(void)
{
	__be32 val = 1;
	char type[8];
	phandle node;

	// Some pmacs are missing #size-cells on escc nodes
	for (node = 0; prom_next_node(&node); ) {
		type[0] = '\0';
		prom_getprop(node, "device_type", type, sizeof(type));
		if (prom_strcmp(type, "escc"))
			continue;

		if (prom_getproplen(node, "#size-cells") != PROM_ERROR)
			continue;

		prom_setprop(node, NULL, "#size-cells", &val, sizeof(val));
	}
}
#else
static inline void fixup_device_tree_pmac(void) { }
#endif

#ifdef CONFIG_PPC_EFIKA
@@ -3197,6 +3221,7 @@ static void __init fixup_device_tree(void)
	fixup_device_tree_maple_memory_controller();
	fixup_device_tree_chrp();
	fixup_device_tree_pmac();
	fixup_device_tree_pmac64();
	fixup_device_tree_efika();
	fixup_device_tree_pasemi();
}
@@ -301,7 +301,6 @@ CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_WQ_WATCHDOG=y
CONFIG_DEBUG_TIMEKEEPING=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
@@ -106,9 +106,10 @@ struct zpci_bus {
	struct list_head	resources;
	struct list_head	bus_next;
	struct resource		bus_resource;
	int			pchid;
	int			topo;		/* TID if topo_is_tid, PCHID otherwise */
	int			domain_nr;
	bool			multifunction;
	u8			multifunction	: 1;
	u8			topo_is_tid	: 1;
	enum pci_bus_speed	max_bus_speed;
};

@@ -129,6 +130,8 @@ struct zpci_dev {
	u16		vfn;		/* virtual function number */
	u16		pchid;		/* physical channel ID */
	u16		maxstbl;	/* Maximum store block size */
	u16		rid;		/* RID as supplied by firmware */
	u16		tid;		/* Topology for which RID is valid */
	u8		pfgid;		/* function group ID */
	u8		pft;		/* pci function type */
	u8		port;
@@ -139,7 +142,8 @@ struct zpci_dev {
	u8		is_physfn	: 1;
	u8		util_str_avail	: 1;
	u8		irqs_registered	: 1;
	u8		reserved	: 2;
	u8		tid_avail	: 1;
	u8		reserved	: 1;
	unsigned int	devfn;		/* DEVFN part of the RID*/

	u8 pfip[CLP_PFIP_NR_SEGMENTS];	/* pci function internal path */
@@ -210,12 +214,14 @@ extern struct airq_iv *zpci_aif_sbv;
----------------------------------------------------------------------------- */
/* Base stuff */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
int zpci_add_device(struct zpci_dev *zdev);
int zpci_enable_device(struct zpci_dev *);
int zpci_disable_device(struct zpci_dev *);
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh);
int zpci_deconfigure_device(struct zpci_dev *zdev);
void zpci_device_reserved(struct zpci_dev *zdev);
bool zpci_is_device_configured(struct zpci_dev *zdev);
int zpci_scan_devices(void);

int zpci_hot_reset_device(struct zpci_dev *zdev);
int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64, u8 *);
@@ -225,7 +231,7 @@ void zpci_update_fh(struct zpci_dev *zdev, u32 fh);

/* CLP */
int clp_setup_writeback_mio(void);
int clp_scan_pci_devices(void);
int clp_scan_pci_devices(struct list_head *scan_list);
int clp_query_pci_fn(struct zpci_dev *zdev);
int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as);
int clp_disable_fh(struct zpci_dev *zdev, u32 *fh);
@@ -110,7 +110,8 @@ struct clp_req_query_pci {
struct clp_rsp_query_pci {
	struct clp_rsp_hdr hdr;
	u16 vfn;			/* virtual fn number */
	u16			: 3;
	u16			: 2;
	u16 tid_avail		: 1;
	u16 rid_avail		: 1;
	u16 is_physfn		: 1;
	u16 reserved1		: 1;
@@ -130,8 +131,9 @@ struct clp_rsp_query_pci {
	u64 edma;			/* end dma as */
#define ZPCI_RID_MASK_DEVFN 0x00ff
	u16 rid;			/* BUS/DEVFN PCI address */
	u16 reserved0;
	u32 reserved[10];
	u32 reserved0;
	u16 tid;
	u32 reserved[9];
	u32 uid;			/* user defined id */
	u8 util_str[CLP_UTIL_STR_LEN];	/* utility string */
	u32 reserved2[16];
@@ -1780,7 +1780,9 @@ static void cpumsf_pmu_stop(struct perf_event *event, int flags)
	event->hw.state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
		hw_perf_event_update(event, 1);
		/* CPU hotplug off removes SDBs. No samples to extract. */
		if (cpuhw->flags & PMU_F_RESERVED)
			hw_perf_event_update(event, 1);
		event->hw.state |= PERF_HES_UPTODATE;
	}
	perf_pmu_enable(event->pmu);
@@ -29,6 +29,7 @@
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/lockdep.h>
#include <linux/list_sort.h>

#include <asm/isc.h>
#include <asm/airq.h>
@@ -778,8 +779,9 @@ int zpci_hot_reset_device(struct zpci_dev *zdev)
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation either Standby or Configured
 *
 * Creates a new zpci device and adds it to its, possibly newly created, zbus
 * as well as zpci_list.
 * Allocates a new struct zpci_dev and queries the platform for its details.
 * If successful the device can subsequently be added to the zPCI subsystem
 * using zpci_add_device().
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
@@ -788,7 +790,6 @@ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);
@@ -803,11 +804,34 @@ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
		goto error;
	zdev->state = state;

	kref_init(&zdev->kref);
	mutex_init(&zdev->state_lock);
	mutex_init(&zdev->fmb_lock);
	mutex_init(&zdev->kzdev_lock);

	return zdev;

error:
	zpci_dbg(0, "crt fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return ERR_PTR(rc);
}

/**
 * zpci_add_device() - Add a previously created zPCI device to the zPCI subsystem
 * @zdev: The zPCI device to be added
 *
 * A struct zpci_dev is added to the zPCI subsystem and to a virtual PCI bus creating
 * a new one as necessary. A hotplug slot is created and events start to be handled.
 * If successful from this point on zpci_zdev_get() and zpci_zdev_put() must be used.
 * If adding the struct zpci_dev fails the device was not added and should be freed.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_add_device(struct zpci_dev *zdev)
{
	int rc;

	zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", zdev->fid, zdev->fh, zdev->state);
	rc = zpci_init_iommu(zdev);
	if (rc)
		goto error;
@@ -816,18 +840,17 @@ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
	if (rc)
		goto error_destroy_iommu;

	kref_init(&zdev->kref);
	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	return zdev;
	return 0;

error_destroy_iommu:
	zpci_destroy_iommu(zdev);
error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return ERR_PTR(rc);
	zpci_dbg(0, "add fid:%x, rc:%d\n", zdev->fid, rc);
	return rc;
}

bool zpci_is_device_configured(struct zpci_dev *zdev)
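The kernel-doc above implies an ownership contract: until zpci_add_device() succeeds, the caller still owns the unpublished object and must free it on failure. A standalone toy sketch of that two-phase pattern follows; every name in it is invented for illustration:

#include <errno.h>
#include <stdlib.h>

struct dev { int id; int published; };

static struct dev *dev_create(int id)
{
	struct dev *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	d->id = id;	/* object exists but is not yet visible to others */
	return d;
}

static int dev_add(struct dev *d)
{
	if (d->id < 0)
		return -EINVAL;	/* add failed: caller must free */
	d->published = 1;	/* now visible: switch to refcounted release */
	return 0;
}

int main(void)
{
	struct dev *d = dev_create(-1);

	if (d && dev_add(d))
		free(d);	/* mirrors kfree(zdev) after a failed add */
	return 0;
}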
@@ -1069,6 +1092,50 @@ bool zpci_is_enabled(void)
	return s390_pci_initialized;
}

static int zpci_cmp_rid(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct zpci_dev *za = container_of(a, struct zpci_dev, entry);
	struct zpci_dev *zb = container_of(b, struct zpci_dev, entry);

	/*
	 * PCI functions without RID available maintain original order
	 * between themselves but sort before those with RID.
	 */
	if (za->rid == zb->rid)
		return za->rid_available > zb->rid_available;
	/*
	 * PCI functions with RID sort by RID ascending.
	 */
	return za->rid > zb->rid;
}

static void zpci_add_devices(struct list_head *scan_list)
{
	struct zpci_dev *zdev, *tmp;

	list_sort(NULL, scan_list, &zpci_cmp_rid);
	list_for_each_entry_safe(zdev, tmp, scan_list, entry) {
		list_del_init(&zdev->entry);
		if (zpci_add_device(zdev))
			kfree(zdev);
	}
}

int zpci_scan_devices(void)
{
	LIST_HEAD(scan_list);
	int rc;

	rc = clp_scan_pci_devices(&scan_list);
	if (rc)
		return rc;

	zpci_add_devices(&scan_list);
	zpci_bus_scan_busses();
	return 0;
}

static int __init pci_base_init(void)
{
	int rc;
@@ -1098,10 +1165,9 @@ static int __init pci_base_init(void)
	if (rc)
		goto out_irq;

	rc = clp_scan_pci_devices();
	rc = zpci_scan_devices();
	if (rc)
		goto out_find;
	zpci_bus_scan_busses();

	s390_pci_initialized = 1;
	return 0;
@@ -168,9 +168,16 @@ void zpci_bus_scan_busses(void)
	mutex_unlock(&zbus_list_lock);
}

static bool zpci_bus_is_multifunction_root(struct zpci_dev *zdev)
{
	return !s390_pci_no_rid && zdev->rid_available &&
	       zpci_is_device_configured(zdev) &&
	       !zdev->vfn;
}

/* zpci_bus_create_pci_bus - Create the PCI bus associated with this zbus
 * @zbus: the zbus holding the zdevices
 * @fr: PCI root function that will determine the bus's domain, and bus speeed
 * @fr: PCI root function that will determine the bus's domain, and bus speed
 * @ops: the pci operations
 *
 * The PCI function @fr determines the domain (its UID), multifunction property
@@ -188,7 +195,7 @@ static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *fr, s
		return domain;

	zbus->domain_nr = domain;
	zbus->multifunction = fr->rid_available;
	zbus->multifunction = zpci_bus_is_multifunction_root(fr);
	zbus->max_bus_speed = fr->max_bus_speed;

	/*
@@ -232,13 +239,15 @@ static void zpci_bus_put(struct zpci_bus *zbus)
	kref_put(&zbus->kref, zpci_bus_release);
}

static struct zpci_bus *zpci_bus_get(int pchid)
static struct zpci_bus *zpci_bus_get(int topo, bool topo_is_tid)
{
	struct zpci_bus *zbus;

	mutex_lock(&zbus_list_lock);
	list_for_each_entry(zbus, &zbus_list, bus_next) {
		if (pchid == zbus->pchid) {
		if (!zbus->multifunction)
			continue;
		if (topo_is_tid == zbus->topo_is_tid && topo == zbus->topo) {
			kref_get(&zbus->kref);
			goto out_unlock;
		}
@@ -249,7 +258,7 @@ out_unlock:
	return zbus;
}

static struct zpci_bus *zpci_bus_alloc(int pchid)
static struct zpci_bus *zpci_bus_alloc(int topo, bool topo_is_tid)
{
	struct zpci_bus *zbus;

@@ -257,7 +266,8 @@ static struct zpci_bus *zpci_bus_alloc(int pchid)
	if (!zbus)
		return NULL;

	zbus->pchid = pchid;
	zbus->topo = topo;
	zbus->topo_is_tid = topo_is_tid;
	INIT_LIST_HEAD(&zbus->bus_next);
	mutex_lock(&zbus_list_lock);
	list_add_tail(&zbus->bus_next, &zbus_list);
@@ -292,19 +302,22 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
{
	int rc = -EINVAL;

	if (zbus->multifunction) {
		if (!zdev->rid_available) {
			WARN_ONCE(1, "rid_available not set for multifunction\n");
			return rc;
		}
		zdev->devfn = zdev->rid & ZPCI_RID_MASK_DEVFN;
	}

	if (zbus->function[zdev->devfn]) {
		pr_err("devfn %04x is already assigned\n", zdev->devfn);
		return rc;
	}

	zdev->zbus = zbus;
	zbus->function[zdev->devfn] = zdev;
	zpci_nb_devices++;

	if (zbus->multifunction && !zdev->rid_available) {
		WARN_ONCE(1, "rid_available not set for multifunction\n");
		goto error;
	}
	rc = zpci_init_slot(zdev);
	if (rc)
		goto error;
@@ -321,8 +334,9 @@ error:

int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
{
	bool topo_is_tid = zdev->tid_avail;
	struct zpci_bus *zbus = NULL;
	int rc = -EBADF;
	int topo, rc = -EBADF;

	if (zpci_nb_devices == ZPCI_NR_DEVICES) {
		pr_warn("Adding PCI function %08x failed because the configured limit of %d is reached\n",
@@ -330,14 +344,10 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
		return -ENOSPC;
	}

	if (zdev->devfn >= ZPCI_FUNCTIONS_PER_BUS)
		return -EINVAL;

	if (!s390_pci_no_rid && zdev->rid_available)
		zbus = zpci_bus_get(zdev->pchid);

	topo = topo_is_tid ? zdev->tid : zdev->pchid;
	zbus = zpci_bus_get(topo, topo_is_tid);
	if (!zbus) {
		zbus = zpci_bus_alloc(zdev->pchid);
		zbus = zpci_bus_alloc(topo, topo_is_tid);
		if (!zbus)
			return -ENOMEM;
	}
@@ -164,10 +164,13 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
	zdev->port = response->port;
	zdev->uid = response->uid;
	zdev->fmb_length = sizeof(u32) * response->fmb_len;
	zdev->rid_available = response->rid_avail;
	zdev->is_physfn = response->is_physfn;
	if (!s390_pci_no_rid && zdev->rid_available)
		zdev->devfn = response->rid & ZPCI_RID_MASK_DEVFN;
	zdev->rid_available = response->rid_avail;
	if (zdev->rid_available)
		zdev->rid = response->rid;
	zdev->tid_avail = response->tid_avail;
	if (zdev->tid_avail)
		zdev->tid = response->tid;

	memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
	if (response->util_str_avail) {
@@ -407,6 +410,7 @@ static int clp_find_pci(struct clp_req_rsp_list_pci *rrb, u32 fid,

static void __clp_add(struct clp_fh_list_entry *entry, void *data)
{
	struct list_head *scan_list = data;
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
@@ -417,10 +421,11 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
		zpci_zdev_put(zdev);
		return;
	}
	zpci_create_device(entry->fid, entry->fh, entry->config_state);
	zdev = zpci_create_device(entry->fid, entry->fh, entry->config_state);
	list_add_tail(&zdev->entry, scan_list);
}

int clp_scan_pci_devices(void)
int clp_scan_pci_devices(struct list_head *scan_list)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;
@@ -429,7 +434,7 @@ int clp_scan_pci_devices(void)
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, NULL, __clp_add);
	rc = clp_list_pci(rrb, scan_list, __clp_add);

	clp_free_block(rrb);
	return rc;
@@ -340,6 +340,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
			zdev = zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED);
			if (IS_ERR(zdev))
				break;
			if (zpci_add_device(zdev)) {
				kfree(zdev);
				break;
			}
		} else {
			/* the configuration request may be stale */
			if (zdev->state != ZPCI_FN_STATE_STANDBY)
@@ -349,10 +353,17 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
		zpci_scan_configured_device(zdev, ccdf->fh);
		break;
	case 0x0302: /* Reserved -> Standby */
		if (!zdev)
			zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY);
		else
		if (!zdev) {
			zdev = zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY);
			if (IS_ERR(zdev))
				break;
			if (zpci_add_device(zdev)) {
				kfree(zdev);
				break;
			}
		} else {
			zpci_update_fh(zdev, ccdf->fh);
		}
		break;
	case 0x0303: /* Deconfiguration requested */
		if (zdev) {
@@ -381,7 +392,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
		break;
	case 0x0306: /* 0x308 or 0x302 for multiple devices */
		zpci_remove_reserved_devices();
		clp_scan_pci_devices();
		zpci_scan_devices();
		break;
	case 0x0308: /* Standby -> Reserved */
		if (!zdev)
@@ -145,7 +145,6 @@ config X86
	select ARCH_HAS_PARANOID_L1D_FLUSH
	select BUILDTIME_TABLE_SORT
	select CLKEVT_I8253
	select CLOCKSOURCE_VALIDATE_LAST_CYCLE
	select CLOCKSOURCE_WATCHDOG
	# Word-size accesses may read uninitialized data past the trailing \0
	# in strings and cause false KMSAN reports.
@@ -943,11 +943,12 @@ static int amd_pmu_v2_snapshot_branch_stack(struct perf_branch_entry *entries, u
static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	static atomic64_t status_warned = ATOMIC64_INIT(0);
	u64 reserved, status, mask, new_bits, prev_bits;
	struct perf_sample_data data;
	struct hw_perf_event *hwc;
	struct perf_event *event;
	int handled = 0, idx;
	u64 reserved, status, mask;
	bool pmu_enabled;

	/*
@@ -1012,7 +1013,12 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
	 * the corresponding PMCs are expected to be inactive according to the
	 * active_mask
	 */
	WARN_ON(status > 0);
	if (status > 0) {
		prev_bits = atomic64_fetch_or(status, &status_warned);
		// A new bit was set for the very first time.
		new_bits = status & ~prev_bits;
		WARN(new_bits, "New overflows for inactive PMCs: %llx\n", new_bits);
	}

	/* Clear overflow and freeze bits */
	amd_pmu_ack_global_status(~status);
|
||||
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit */
|
||||
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit (leaf) */
|
||||
#define _PAGE_BIT_NOPTISHADOW _PAGE_BIT_SOFTW5 /* No PTI shadow (root PGD) */
|
||||
#else
|
||||
/* Shared with _PAGE_BIT_UFFD_WP which is not supported on 32 bit */
|
||||
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW2 /* Saved Dirty bit */
|
||||
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW2 /* Saved Dirty bit (leaf) */
|
||||
#define _PAGE_BIT_NOPTISHADOW _PAGE_BIT_SOFTW2 /* No PTI shadow (root PGD) */
|
||||
#endif
|
||||
|
||||
/* If _PAGE_BIT_PRESENT is clear, we use these: */
|
||||
@ -139,6 +141,8 @@
|
||||
|
||||
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
|
||||
|
||||
#define _PAGE_NOPTISHADOW (_AT(pteval_t, 1) << _PAGE_BIT_NOPTISHADOW)
|
||||
|
||||
/*
|
||||
* Set of bits not changed in pte_modify. The pte's
|
||||
* protection key is treated like _PAGE_RW, for
|
||||
|
@@ -1065,7 +1065,7 @@ static void init_amd(struct cpuinfo_x86 *c)
	 */
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    cpu_has(c, X86_FEATURE_AUTOIBRS))
		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS) < 0);

	/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
@@ -178,8 +178,6 @@ struct _cpuid4_info_regs {
	struct amd_northbridge *nb;
};

static unsigned short num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.
@@ -717,20 +715,23 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c)

void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		num_cache_leaves = find_num_cache_leaves(c);
		ci->num_leaves = find_num_cache_leaves(c);
	} else if (c->extended_cpuid_level >= 0x80000006) {
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
			ci->num_leaves = 4;
		else
			num_cache_leaves = 3;
			ci->num_leaves = 3;
	}
}

void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
{
	num_cache_leaves = find_num_cache_leaves(c);
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);

	ci->num_leaves = find_num_cache_leaves(c);
}

void init_intel_cacheinfo(struct cpuinfo_x86 *c)
@@ -740,21 +741,21 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves(c);
			is_initialized++;
		}
		/*
		 * There should be at least one leaf. A non-zero value means
		 * that the number of leaves has been initialized.
		 */
		if (!ci->num_leaves)
			ci->num_leaves = find_num_cache_leaves(c);

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
		for (i = 0; i < ci->num_leaves; i++) {
			struct _cpuid4_info_regs this_leaf = {};
			int retval;

@@ -790,14 +791,14 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
	if ((!ci->num_leaves || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
		if (ci->num_leaves && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
@@ -991,14 +992,12 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,

int init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);

	if (!num_cache_leaves)
	/* There should be at least one leaf. */
	if (!ci->num_leaves)
		return -ENOENT;
	if (!this_cpu_ci)
		return -EINVAL;
	this_cpu_ci->num_levels = 3;
	this_cpu_ci->num_leaves = num_cache_leaves;

	return 0;
}
@@ -586,7 +586,9 @@ static void init_intel(struct cpuinfo_x86 *c)
	     c->x86_vfm == INTEL_WESTMERE_EX))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (boot_cpu_has(X86_FEATURE_MWAIT) && c->x86_vfm == INTEL_ATOM_GOLDMONT)
	if (boot_cpu_has(X86_FEATURE_MWAIT) &&
	    (c->x86_vfm == INTEL_ATOM_GOLDMONT ||
	     c->x86_vfm == INTEL_LUNARLAKE_M))
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
@@ -428,8 +428,8 @@ void __init topology_apply_cmdline_limits_early(void)
{
	unsigned int possible = nr_cpu_ids;

	/* 'maxcpus=0' 'nosmp' 'nolapic' 'disableapic' 'noapic' */
	if (!setup_max_cpus || ioapic_is_disabled || apic_is_disabled)
	/* 'maxcpus=0' 'nosmp' 'nolapic' 'disableapic' */
	if (!setup_max_cpus || apic_is_disabled)
		possible = 1;

	/* 'possible_cpus=N' */
@@ -443,7 +443,7 @@ void __init topology_apply_cmdline_limits_early(void)

static __init bool restrict_to_up(void)
{
	if (!smp_found_config || ioapic_is_disabled)
	if (!smp_found_config)
		return true;
	/*
	 * XEN PV is special as it does not advertise the local APIC
@@ -63,16 +63,6 @@ setfx:
	return true;
}

/*
 * Update the value of PKRU register that was already pushed onto the signal frame.
 */
static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
{
	if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
		return 0;
	return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
}

/*
 * Signal frame handlers.
 */
@@ -168,14 +158,8 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,

static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pkru)
{
	int err = 0;

	if (use_xsave()) {
		err = xsave_to_user_sigframe(buf);
		if (!err)
			err = update_pkru_in_sigframe(buf, pkru);
		return err;
	}
	if (use_xsave())
		return xsave_to_user_sigframe(buf, pkru);

	if (use_fxsr())
		return fxsave_to_user_sigframe((struct fxregs_state __user *) buf);
@@ -69,6 +69,28 @@ static inline u64 xfeatures_mask_independent(void)
	return fpu_kernel_cfg.independent_features;
}

/*
 * Update the value of PKRU register that was already pushed onto the signal frame.
 */
static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 mask, u32 pkru)
{
	u64 xstate_bv;
	int err;

	if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
		return 0;

	/* Mark PKRU as in-use so that it is restored correctly. */
	xstate_bv = (mask & xfeatures_in_use()) | XFEATURE_MASK_PKRU;

	err = __put_user(xstate_bv, &buf->header.xfeatures);
	if (err)
		return err;

	/* Update PKRU value in the userspace xsave buffer. */
	return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
}

/* XSAVE/XRSTOR wrapper functions */

#ifdef CONFIG_X86_64
@@ -256,7 +278,7 @@ static inline u64 xfeatures_need_sigframe_write(void)
 * The caller has to zero buf::header before calling this because XSAVE*
 * does not touch the reserved fields in the header.
 */
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkru)
{
	/*
	 * Include the features which are not xsaved/rstored by the kernel
@@ -281,6 +303,9 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	if (!err)
		err = update_pkru_in_sigframe(buf, mask, pkru);

	return err;
}
@@ -13,6 +13,7 @@
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/asm-offsets.h>

/*
 * Must be relocatable PIC code callable as a C function, in particular
@@ -242,6 +243,13 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
	movq	CR0(%r8), %r8
	movq	%rax, %cr3
	movq	%r8, %cr0

#ifdef CONFIG_KEXEC_JUMP
	/* Saved in save_processor_state. */
	movq	$saved_context, %rax
	lgdt	saved_context_gdt_desc(%rax)
#endif

	movq	%rbp, %rax

	popf
@@ -4580,6 +4580,7 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,

static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	kvm_pfn_t orig_pfn;
	int r;

	/* Dummy roots are used only for shadowing bad guest roots. */
@@ -4601,6 +4602,8 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
	if (r != RET_PF_CONTINUE)
		return r;

	orig_pfn = fault->pfn;

	r = RET_PF_RETRY;
	write_lock(&vcpu->kvm->mmu_lock);

@@ -4615,7 +4618,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault

out_unlock:
	write_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(fault->pfn);
	kvm_release_pfn_clean(orig_pfn);
	return r;
}

@@ -4675,6 +4678,7 @@ EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
				  struct kvm_page_fault *fault)
{
	kvm_pfn_t orig_pfn;
	int r;

	if (page_fault_handle_page_track(vcpu, fault))
@@ -4692,6 +4696,8 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
	if (r != RET_PF_CONTINUE)
		return r;

	orig_pfn = fault->pfn;

	r = RET_PF_RETRY;
	read_lock(&vcpu->kvm->mmu_lock);

@@ -4702,7 +4708,7 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,

out_unlock:
	read_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(fault->pfn);
	kvm_release_pfn_clean(orig_pfn);
	return r;
}
#endif
@@ -778,6 +778,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	struct guest_walker walker;
	kvm_pfn_t orig_pfn;
	int r;

	WARN_ON_ONCE(fault->is_tdp);
@@ -836,6 +837,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
		walker.pte_access &= ~ACC_EXEC_MASK;
	}

	orig_pfn = fault->pfn;

	r = RET_PF_RETRY;
	write_lock(&vcpu->kvm->mmu_lock);

@@ -849,7 +852,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault

out_unlock:
	write_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(fault->pfn);
	kvm_release_pfn_clean(orig_pfn);
	return r;
}
@@ -174,7 +174,7 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
		if (result)
			return result;

		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
	}

	return 0;
@@ -218,14 +218,14 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
		if (result)
			return result;
		if (pgtable_l5_enabled()) {
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag | _PAGE_NOPTISHADOW));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);
			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
		}
	}
@@ -132,7 +132,7 @@ pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
	if (!pgdp_maps_userspace(pgdp) || (pgd.pgd & _PAGE_NOPTISHADOW))
		return pgd;

	/*
@@ -250,6 +250,125 @@ void __init pci_acpi_crs_quirks(void)
	pr_info("Please notify linux-pci@vger.kernel.org so future kernels can do this automatically\n");
}

/*
 * Check if pdev is part of a PCIe switch that is directly below the
 * specified bridge.
 */
static bool pcie_switch_directly_under(struct pci_dev *bridge,
				       struct pci_dev *pdev)
{
	struct pci_dev *parent = pci_upstream_bridge(pdev);

	/* If the device doesn't have a parent, it's not under anything */
	if (!parent)
		return false;

	/*
	 * If the device has a PCIe type, check if it is below the
	 * corresponding PCIe switch components (if applicable). Then check
	 * if its upstream port is directly beneath the specified bridge.
	 */
	switch (pci_pcie_type(pdev)) {
	case PCI_EXP_TYPE_UPSTREAM:
		return parent == bridge;

	case PCI_EXP_TYPE_DOWNSTREAM:
		if (pci_pcie_type(parent) != PCI_EXP_TYPE_UPSTREAM)
			return false;
		parent = pci_upstream_bridge(parent);
		return parent == bridge;

	case PCI_EXP_TYPE_ENDPOINT:
		if (pci_pcie_type(parent) != PCI_EXP_TYPE_DOWNSTREAM)
			return false;
		parent = pci_upstream_bridge(parent);
		if (!parent || pci_pcie_type(parent) != PCI_EXP_TYPE_UPSTREAM)
			return false;
		parent = pci_upstream_bridge(parent);
		return parent == bridge;
	}

	return false;
}

static bool pcie_has_usb4_host_interface(struct pci_dev *pdev)
{
	struct fwnode_handle *fwnode;

	/*
	 * For USB4, the tunneled PCIe Root or Downstream Ports are marked
	 * with the "usb4-host-interface" ACPI property, so we look for
	 * that first. This should cover most cases.
	 */
	fwnode = fwnode_find_reference(dev_fwnode(&pdev->dev),
				       "usb4-host-interface", 0);
	if (!IS_ERR(fwnode)) {
		fwnode_handle_put(fwnode);
		return true;
	}

	/*
	 * Any integrated Thunderbolt 3/4 PCIe Root Ports from Intel
	 * before Alder Lake do not have the "usb4-host-interface"
	 * property so we use their PCI IDs instead. All these are
	 * tunneled. This list is not expected to grow.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
		switch (pdev->device) {
		/* Ice Lake Thunderbolt 3 PCIe Root Ports */
		case 0x8a1d:
		case 0x8a1f:
		case 0x8a21:
		case 0x8a23:
		/* Tiger Lake-LP Thunderbolt 4 PCIe Root Ports */
		case 0x9a23:
		case 0x9a25:
		case 0x9a27:
		case 0x9a29:
		/* Tiger Lake-H Thunderbolt 4 PCIe Root Ports */
		case 0x9a2b:
		case 0x9a2d:
		case 0x9a2f:
		case 0x9a31:
			return true;
		}
	}

	return false;
}

bool arch_pci_dev_is_removable(struct pci_dev *pdev)
{
	struct pci_dev *parent, *root;

	/* pdev without a parent or Root Port is never tunneled */
	parent = pci_upstream_bridge(pdev);
	if (!parent)
		return false;
	root = pcie_find_root_port(pdev);
	if (!root)
		return false;

	/* Internal PCIe devices are not tunneled */
	if (!root->external_facing)
		return false;

	/* Anything directly behind a "usb4-host-interface" is tunneled */
	if (pcie_has_usb4_host_interface(parent))
		return true;

	/*
	 * Check if this is a discrete Thunderbolt/USB4 controller that is
	 * directly behind the non-USB4 PCIe Root Port marked as
	 * "ExternalFacingPort". Those are not behind a PCIe tunnel.
	 */
	if (pcie_switch_directly_under(root, pdev))
		return false;

	/* PCIe devices after the discrete chip are tunneled */
	return true;
}

#ifdef CONFIG_PCI_MMCONFIG
static int check_segment(u16 seg, struct device *dev, char *estr)
{
@@ -350,9 +350,15 @@ fail:

static inline bool disk_zone_is_conv(struct gendisk *disk, sector_t sector)
{
	if (!disk->conv_zones_bitmap)
		return false;
	return test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
	unsigned long *bitmap;
	bool is_conv;

	rcu_read_lock();
	bitmap = rcu_dereference(disk->conv_zones_bitmap);
	is_conv = bitmap && test_bit(disk_zone_no(disk, sector), bitmap);
	rcu_read_unlock();

	return is_conv;
}

static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)
@@ -1455,6 +1461,24 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
	disk->zone_wplugs_hash_bits = 0;
}

static unsigned int disk_set_conv_zones_bitmap(struct gendisk *disk,
					       unsigned long *bitmap)
{
	unsigned int nr_conv_zones = 0;
	unsigned long flags;

	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
	if (bitmap)
		nr_conv_zones = bitmap_weight(bitmap, disk->nr_zones);
	bitmap = rcu_replace_pointer(disk->conv_zones_bitmap, bitmap,
				     lockdep_is_held(&disk->zone_wplugs_lock));
	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);

	kfree_rcu_mightsleep(bitmap);

	return nr_conv_zones;
}

void disk_free_zone_resources(struct gendisk *disk)
{
	if (!disk->zone_wplugs_pool)
@@ -1478,8 +1502,7 @@ void disk_free_zone_resources(struct gendisk *disk)
	mempool_destroy(disk->zone_wplugs_pool);
	disk->zone_wplugs_pool = NULL;

	bitmap_free(disk->conv_zones_bitmap);
	disk->conv_zones_bitmap = NULL;
	disk_set_conv_zones_bitmap(disk, NULL);
	disk->zone_capacity = 0;
	disk->last_zone_capacity = 0;
	disk->nr_zones = 0;
@@ -1538,7 +1561,7 @@ static int disk_update_zone_resources(struct gendisk *disk,
				      struct blk_revalidate_zone_args *args)
{
	struct request_queue *q = disk->queue;
	unsigned int nr_seq_zones, nr_conv_zones = 0;
	unsigned int nr_seq_zones, nr_conv_zones;
	unsigned int pool_size;
	struct queue_limits lim;
	int ret;
@@ -1546,10 +1569,8 @@ static int disk_update_zone_resources(struct gendisk *disk,
	disk->nr_zones = args->nr_zones;
	disk->zone_capacity = args->zone_capacity;
	disk->last_zone_capacity = args->last_zone_capacity;
	swap(disk->conv_zones_bitmap, args->conv_zones_bitmap);
	if (disk->conv_zones_bitmap)
		nr_conv_zones = bitmap_weight(disk->conv_zones_bitmap,
					      disk->nr_zones);
	nr_conv_zones =
		disk_set_conv_zones_bitmap(disk, args->conv_zones_bitmap);
	if (nr_conv_zones >= disk->nr_zones) {
		pr_warn("%s: Invalid number of conventional zones %u / %u\n",
			disk->disk_name, nr_conv_zones, disk->nr_zones);
@@ -1829,8 +1850,6 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
		blk_mq_unfreeze_queue(q);
	}

	kfree(args.conv_zones_bitmap);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
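The disk_zone_is_conv()/disk_set_conv_zones_bitmap() pair follows the standard RCU publish-and-reclaim pattern: readers dereference the pointer lock-free, and the updater publishes the new bitmap before freeing the old one after a grace period. A rough userspace analogue, assuming liburcu's default flavor (urcu.h, link with -lurcu); the kernel variant uses rcu_replace_pointer() under a spinlock and kfree_rcu_mightsleep() instead of a blocking synchronize:

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

static unsigned long *conv_bitmap;	/* RCU-protected pointer */

static int zone_is_conv(unsigned int zno)
{
	unsigned long *b;
	int conv;

	rcu_read_lock();
	b = rcu_dereference(conv_bitmap);
	conv = b && (b[zno / 64] >> (zno % 64)) & 1;
	rcu_read_unlock();
	return conv;
}

static void replace_bitmap(unsigned long *new_bitmap)
{
	unsigned long *old = conv_bitmap;	/* single updater assumed */

	rcu_assign_pointer(conv_bitmap, new_bitmap);
	synchronize_rcu();	/* wait for readers before freeing */
	free(old);
}

int main(void)
{
	unsigned long *b = calloc(1, sizeof(*b));

	rcu_register_thread();
	b[0] = 1UL << 3;
	replace_bitmap(b);
	printf("zone 3 conventional: %d\n", zone_is_conv(3));
	replace_bitmap(NULL);
	rcu_unregister_thread();
	return 0;
}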
@@ -36,29 +36,24 @@ static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
			 const void *value, size_t vlen, unsigned int ndigits)
{
	size_t bufsize = ndigits * sizeof(u64);
	ssize_t diff = vlen - bufsize;
	const char *d = value;

	if (!value || !vlen)
	if (!value || !vlen || vlen > bufsize + 1)
		return -EINVAL;

	/* diff = 0: 'value' has exactly the right size
	 * diff > 0: 'value' has too many bytes; one leading zero is allowed that
	 *           makes the value a positive integer; error on more
	 * diff < 0: 'value' is missing leading zeros
	/*
	 * vlen may be 1 byte larger than bufsize due to a leading zero byte
	 * (necessary if the most significant bit of the integer is set).
	 */
	if (diff > 0) {
	if (vlen > bufsize) {
		/* skip over leading zeros that make 'value' a positive int */
		if (*d == 0) {
			vlen -= 1;
			diff--;
			d++;
		}
		if (diff)
		} else {
			return -EINVAL;
		}
	}
	if (-diff >= bufsize)
		return -EINVAL;

	ecc_digits_from_bytes(d, vlen, dest, ndigits);

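Restated outside the kernel, the length rule the new ecdsa check enforces is small enough to test in isolation. A hypothetical helper — not the kernel function — with the same accept/reject behaviour:

```c
#include <stddef.h>
#include <stdint.h>

/*
 * An unsigned big-endian integer of at most bufsize bytes may carry one
 * extra leading zero octet (the BER sign byte added when the top bit of
 * the magnitude is set); anything longer must be rejected up front.
 * Returns 0 if 'value' is acceptable, -1 otherwise.
 */
static int rs_len_ok(const uint8_t *value, size_t vlen, size_t bufsize)
{
	if (!value || !vlen || vlen > bufsize + 1)
		return -1;
	if (vlen > bufsize) {		/* vlen == bufsize + 1 */
		if (value[0] != 0)
			return -1;	/* magnitude really is too wide */
		/* caller would now skip the sign byte: value++, vlen-- */
	}
	return 0;			/* short values get zero-extended later */
}
```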
@@ -34,6 +34,7 @@

MODULE_IMPORT_NS(DMA_BUF);

#define PCI_DEV_AIC080 0xa080
#define PCI_DEV_AIC100 0xa100
#define QAIC_NAME "qaic"
#define QAIC_DESC "Qualcomm Cloud AI Accelerators"

@@ -365,7 +366,7 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
		return NULL;

	qdev->dev_state = QAIC_OFFLINE;
	if (id->device == PCI_DEV_AIC100) {
	if (id->device == PCI_DEV_AIC080 || id->device == PCI_DEV_AIC100) {
		qdev->num_dbc = 16;
		qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
		if (!qdev->dbc)

@@ -607,6 +608,7 @@ static struct mhi_driver qaic_mhi_driver = {
};

static const struct pci_device_id qaic_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC080), },
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
	{ }
};
@@ -549,6 +549,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
		DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,2"),
		},
	},
	{
	 .callback = video_detect_force_native,
	 /* Apple MacBook Air 7,2 */
	 .matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir7,2"),
		},
	},
	{
	 .callback = video_detect_force_native,
	 /* Apple MacBook Air 9,1 */

@@ -565,6 +573,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
		DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,2"),
		},
	},
	{
	 .callback = video_detect_force_native,
	 /* Apple MacBook Pro 11,2 */
	 .matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro11,2"),
		},
	},
	{
	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
	 .callback = video_detect_force_native,
@@ -12,6 +12,7 @@

#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

@@ -295,6 +296,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
	/*
	 * 2. Devices which also have the skip i2c/serdev quirks and which
	 *    need the x86-android-tablets module to properly work.
	 *    Sorted alphabetically.
	 */
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
	{

@@ -308,6 +310,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
	},
	{
		/* Acer Iconia One 8 A1-840 (non FHD version) */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
			DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
			/* Above strings are too generic, also match on BIOS date */
			DMI_MATCH(DMI_BIOS_DATE, "04/01/2014"),
		},
		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
	},
	{
		/* Asus ME176C tablet */
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),

@@ -317,6 +332,16 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
	},
	{
		/* Asus TF103C transformer 2-in-1 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
		},
		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
	},
	{
		/* Lenovo Yoga Book X90F/L */
		.matches = {

@@ -329,15 +354,6 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
		},
		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
	},
	{
		/* Lenovo Yoga Tablet 2 1050F/L */
		.matches = {

@@ -391,6 +407,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
	},
	{
		/* Vexia Edu Atla 10 tablet 9V version */
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
			/* Above strings are too generic, also match on BIOS date */
			DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
		},
		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
					ACPI_QUIRK_UART1_SKIP |
					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
	},
	{
		/* Whitelabel (sold as various brands) TM800A550L */
		.matches = {

@@ -411,6 +440,7 @@ static const struct acpi_device_id i2c_acpi_known_good_ids[] = {
	{ "10EC5640", 0 }, /* RealTek ALC5640 audio codec */
	{ "10EC5651", 0 }, /* RealTek ALC5651 audio codec */
	{ "INT33F4", 0 },  /* X-Powers AXP288 PMIC */
	{ "INT33F5", 0 },  /* TI Dollar Cove PMIC */
	{ "INT33FD", 0 },  /* Intel Crystal Cove PMIC */
	{ "INT34D3", 0 },  /* Intel Whiskey Cove PMIC */
	{ "NPCE69A", 0 },  /* Asus Transformer keyboard dock */

@@ -439,18 +469,35 @@ static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bo
	struct acpi_device *adev = ACPI_COMPANION(controller_parent);
	const struct dmi_system_id *dmi_id;
	long quirks = 0;
	u64 uid;
	int ret;

	ret = acpi_dev_uid_to_integer(adev, &uid);
	if (ret)
		return 0;
	u64 uid = 0;

	dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
	if (dmi_id)
		quirks = (unsigned long)dmi_id->driver_data;
	if (!dmi_id)
		return 0;

	if (!dev_is_platform(controller_parent)) {
	quirks = (unsigned long)dmi_id->driver_data;

	/* uid is left at 0 on errors and 0 is not a valid UART UID */
	acpi_dev_uid_to_integer(adev, &uid);

	/* For PCI UARTs without a UID */
	if (!uid && dev_is_pci(controller_parent)) {
		struct pci_dev *pdev = to_pci_dev(controller_parent);

		/*
		 * Devfn values for PCI UARTs on Bay Trail SoCs, which are
		 * the only devices where this fallback is necessary.
		 */
		if (pdev->devfn == PCI_DEVFN(0x1e, 3))
			uid = 1;
		else if (pdev->devfn == PCI_DEVFN(0x1e, 4))
			uid = 2;
	}

	if (!uid)
		return 0;

	if (!dev_is_platform(controller_parent) && !dev_is_pci(controller_parent)) {
		/* PNP enumerated UARTs */
		if ((quirks & ACPI_QUIRK_PNP_UART1_SKIP) && uid == 1)
			*skip = true;

@@ -505,7 +552,7 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
	 * Set skip to true so that the tty core creates a serdev ctrl device.
	 * The backlight driver will manually create the serdev client device.
	 */
	if (acpi_dev_hid_match(adev, "DELL0501")) {
	if (adev && acpi_dev_hid_match(adev, "DELL0501")) {
		*skip = true;
		/*
		 * Create a platform dev for dell-uart-backlight to bind to.
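The devfn values in the fallback come straight from PCI device/function encoding; the sketch below just shows the arithmetic (PCI_DEVFN is the real kernel macro, reproduced here so the snippet builds standalone):

```c
#include <stdio.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))

int main(void)
{
	/* Bay Trail HS-UARTs sit on device 0x1e, functions 3 and 4 */
	printf("UART1 devfn = %#04x\n", PCI_DEVFN(0x1e, 3));	/* 0xf3 -> UID 1 */
	printf("UART2 devfn = %#04x\n", PCI_DEVFN(0x1e, 4));	/* 0xf4 -> UID 2 */
	return 0;
}
```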
@@ -208,6 +208,10 @@ static int __init numa_register_nodes(void)
{
	int nid;

	/* Check the validity of the memblock/node mapping */
	if (!memblock_validate_numa_coverage(0))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, numa_nodes_parsed) {
		unsigned long start_pfn, end_pfn;
@@ -58,7 +58,7 @@ bool last_level_cache_is_valid(unsigned int cpu)
{
	struct cacheinfo *llc;

	if (!cache_leaves(cpu))
	if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
		return false;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

@@ -463,11 +463,9 @@ int __weak populate_cache_leaves(unsigned int cpu)
	return -ENOENT;
}

static inline
int allocate_cache_info(int cpu)
static inline int allocate_cache_info(int cpu)
{
	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_ATOMIC);
	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_ATOMIC);
	if (!per_cpu_cacheinfo(cpu)) {
		cache_leaves(cpu) = 0;
		return -ENOMEM;

@@ -539,7 +537,11 @@ static inline int init_level_allocate_ci(unsigned int cpu)
	 */
	ci_cacheinfo(cpu)->early_ci_levels = false;

	if (cache_leaves(cpu) <= early_leaves)
	/*
	 * Some architectures (e.g., x86) do not use early initialization.
	 * Allocate memory now in such case.
	 */
	if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
		return 0;

	kfree(per_cpu_cacheinfo(cpu));
@@ -59,6 +59,7 @@ struct regmap {
			unsigned long raw_spinlock_flags;
		};
	};
	struct lock_class_key *lock_key;
	regmap_lock lock;
	regmap_unlock unlock;
	void *lock_arg; /* This is passed to lock/unlock functions */

@@ -355,6 +355,9 @@ static int regcache_maple_init(struct regmap *map)

	mt_init(mt);

	if (!mt_external_lock(mt) && map->lock_key)
		lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);

	if (!map->num_reg_defaults)
		return 0;

@@ -598,6 +598,17 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static int dev_get_regmap_match(struct device *dev, void *res, void *data);

static int regmap_detach_dev(struct device *dev, struct regmap *map)
{
	if (!dev)
		return 0;

	return devres_release(dev, dev_get_regmap_release,
			      dev_get_regmap_match, (void *)map->name);
}

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{

@@ -745,6 +756,7 @@ struct regmap *__regmap_init(struct device *dev,
				   lock_key, lock_name);
	}
	map->lock_arg = map;
	map->lock_key = lock_key;
}

/*

@@ -1444,6 +1456,7 @@ void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regmap_detach_dev(map->dev, map);
	regcache_exit(map);

	regmap_debugfs_exit(map);
@@ -298,17 +298,30 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
		/*
		 * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
		 * See the comment in writeback_store.
		 *
		 * Also do not mark ZRAM_SAME slots as ZRAM_IDLE, because no
		 * post-processing (recompress, writeback) happens to the
		 * ZRAM_SAME slot.
		 *
		 * And ZRAM_WB slots simply cannot be ZRAM_IDLE.
		 */
		zram_slot_lock(zram, index);
		if (zram_allocated(zram, index) &&
		    !zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
			is_idle = !cutoff || ktime_after(cutoff,
							 zram->table[index].ac_time);
#endif
			if (is_idle)
				zram_set_flag(zram, index, ZRAM_IDLE);
		if (!zram_allocated(zram, index) ||
		    zram_test_flag(zram, index, ZRAM_WB) ||
		    zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
		    zram_test_flag(zram, index, ZRAM_SAME)) {
			zram_slot_unlock(zram, index);
			continue;
		}

#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
		is_idle = !cutoff ||
			ktime_after(cutoff, zram->table[index].ac_time);
#endif
		if (is_idle)
			zram_set_flag(zram, index, ZRAM_IDLE);
		else
			zram_clear_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
	}
}
@@ -524,6 +524,8 @@ static const struct usb_device_id quirks_table[] = {
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x0489, 0xe123), .driver_info = BTUSB_REALTEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
						     BTUSB_WIDEBAND_SPEECH },

@@ -563,6 +565,16 @@ static const struct usb_device_id quirks_table[] = {
	{ USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },

	/* Additional MediaTek MT7920 Bluetooth devices */
	{ USB_DEVICE(0x0489, 0xe134), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x13d3, 0x3620), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x13d3, 0x3621), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x13d3, 0x3622), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },

	/* Additional MediaTek MT7921 Bluetooth devices */
	{ USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },

@@ -630,12 +642,24 @@ static const struct usb_device_id quirks_table[] = {
						     BTUSB_WIDEBAND_SPEECH },

	/* Additional MediaTek MT7925 Bluetooth devices */
	{ USB_DEVICE(0x0489, 0xe111), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x0489, 0xe113), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x0489, 0xe118), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x0489, 0xe11e), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x0489, 0xe124), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x0489, 0xe139), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x0489, 0xe14f), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x0489, 0xe150), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x0489, 0xe151), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK |
						     BTUSB_WIDEBAND_SPEECH },
	{ USB_DEVICE(0x13d3, 0x3603), .driver_info = BTUSB_MEDIATEK |

@@ -3897,6 +3921,8 @@ static int btusb_probe(struct usb_interface *intf,
		set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
		set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
		set_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks);
		set_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &hdev->quirks);
		set_bit(HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT, &hdev->quirks);
	}

	if (!reset)
@@ -508,6 +508,8 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
	u32 rate;
	int i;

	clk_data->num = EN7523_NUM_CLOCKS;

	for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
		const struct en_clk_desc *desc = &en7523_base_clks[i];
		u32 reg = desc->div_reg ? desc->div_reg : desc->base_reg;

@@ -529,8 +531,6 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
	hw = en7523_register_pcie_clk(dev, np_base);
	clk_data->hws[EN7523_CLK_PCIE] = hw;

	clk_data->num = EN7523_NUM_CLOCKS;
}

static int en7523_clk_hw_init(struct platform_device *pdev,
@@ -959,10 +959,10 @@ config SM_DISPCC_8450

config SM_DISPCC_8550
	tristate "SM8550 Display Clock Controller"
	depends on ARM64 || COMPILE_TEST
	depends on SM_GCC_8550 || SM_GCC_8650
	depends on SM_GCC_8550 || SM_GCC_8650 || SAR_GCC_2130P
	help
	  Support for the display clock controller on Qualcomm Technologies, Inc
	  SM8550 or SM8650 devices.
	  SAR2130P, SM8550 or SM8650 devices.
	  Say Y if you want to support display devices and functionality such as
	  splash screen.

@@ -267,6 +267,17 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
		[PLL_OFF_OPMODE] = 0x30,
		[PLL_OFF_STATUS] = 0x3c,
	},
	[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] =  {
		[PLL_OFF_L_VAL] = 0x04,
		[PLL_OFF_ALPHA_VAL] = 0x08,
		[PLL_OFF_TEST_CTL] = 0x0c,
		[PLL_OFF_TEST_CTL_U] = 0x10,
		[PLL_OFF_USER_CTL] = 0x14,
		[PLL_OFF_CONFIG_CTL] = 0x18,
		[PLL_OFF_CONFIG_CTL_U] = 0x1c,
		[PLL_OFF_STATUS] = 0x20,
	},
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -32,6 +32,7 @@ enum {
	CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
	CLK_ALPHA_PLL_TYPE_STROMER,
	CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
	CLK_ALPHA_PLL_TYPE_NSS_HUAYRA,
	CLK_ALPHA_PLL_TYPE_MAX,
};

@@ -198,6 +198,7 @@ extern const struct clk_ops clk_byte2_ops;
extern const struct clk_ops clk_pixel_ops;
extern const struct clk_ops clk_gfx3d_ops;
extern const struct clk_ops clk_rcg2_shared_ops;
extern const struct clk_ops clk_rcg2_shared_floor_ops;
extern const struct clk_ops clk_rcg2_shared_no_init_park_ops;
extern const struct clk_ops clk_dp_ops;

@@ -1186,15 +1186,23 @@ clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
	return clk_rcg2_clear_force_enable(hw);
}

static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
static int __clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				      unsigned long parent_rate,
				      enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	/*
	 * In case clock is disabled, update the M, N and D registers, cache

@@ -1207,10 +1215,28 @@ static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
	return clk_rcg2_shared_force_enable_clear(hw, f);
}

static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
}

static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
}

static int clk_rcg2_shared_set_floor_rate(struct clk_hw *hw, unsigned long rate,
					  unsigned long parent_rate)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
}

static int clk_rcg2_shared_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
}

static int clk_rcg2_shared_enable(struct clk_hw *hw)

@@ -1348,6 +1374,18 @@ const struct clk_ops clk_rcg2_shared_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);

const struct clk_ops clk_rcg2_shared_floor_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_shared_get_parent,
	.set_parent = clk_rcg2_shared_set_parent,
	.recalc_rate = clk_rcg2_shared_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_shared_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_floor_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_floor_ops);

static int clk_rcg2_shared_no_init_park(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
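The only difference between the shared and shared-floor ops is the lookup policy, which behaves like this (a toy table standing in for rcg->freq_tbl; qcom_find_freq()/qcom_find_freq_floor() are modelled here, not called):

```c
#include <stddef.h>
#include <stdio.h>

enum freq_policy { FLOOR, CEIL };

static const unsigned long freq_tbl[] = { 200000000, 325000000, 514000000 };
#define TBL_LEN (sizeof(freq_tbl) / sizeof(freq_tbl[0]))

/* CEIL: first entry >= rate; FLOOR: last entry <= rate; 0 if none fits. */
static unsigned long pick_rate(unsigned long rate, enum freq_policy policy)
{
	if (policy == CEIL) {
		for (size_t i = 0; i < TBL_LEN; i++)
			if (freq_tbl[i] >= rate)
				return freq_tbl[i];
	} else {
		for (size_t i = TBL_LEN; i-- > 0;)
			if (freq_tbl[i] <= rate)
				return freq_tbl[i];
	}
	return 0;	/* mirrors the -EINVAL / !f failure path */
}

int main(void)
{
	printf("CEIL(300 MHz)  -> %lu\n", pick_rate(300000000, CEIL));	/* 325000000 */
	printf("FLOOR(300 MHz) -> %lu\n", pick_rate(300000000, FLOOR));	/* 200000000 */
	return 0;
}
```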
@ -389,6 +389,18 @@ DEFINE_CLK_RPMH_BCM(ipa, "IP0");
|
||||
DEFINE_CLK_RPMH_BCM(pka, "PKA0");
|
||||
DEFINE_CLK_RPMH_BCM(qpic_clk, "QP0");
|
||||
|
||||
static struct clk_hw *sar2130p_rpmh_clocks[] = {
|
||||
[RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div1.hw,
|
||||
[RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div1_ao.hw,
|
||||
[RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw,
|
||||
[RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
|
||||
};
|
||||
|
||||
static const struct clk_rpmh_desc clk_rpmh_sar2130p = {
|
||||
.clks = sar2130p_rpmh_clocks,
|
||||
.num_clks = ARRAY_SIZE(sar2130p_rpmh_clocks),
|
||||
};
|
||||
|
||||
static struct clk_hw *sdm845_rpmh_clocks[] = {
|
||||
[RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
|
||||
[RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
|
||||
@ -880,6 +892,7 @@ static int clk_rpmh_probe(struct platform_device *pdev)
|
||||
static const struct of_device_id clk_rpmh_match_table[] = {
|
||||
{ .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000},
|
||||
{ .compatible = "qcom,sa8775p-rpmh-clk", .data = &clk_rpmh_sa8775p},
|
||||
{ .compatible = "qcom,sar2130p-rpmh-clk", .data = &clk_rpmh_sar2130p},
|
||||
{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
|
||||
{ .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
|
||||
{ .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp},
|
||||
|
@@ -75,7 +75,7 @@ static struct pll_vco lucid_ole_vco[] = {
	{ 249600000, 2000000000, 0 },
};

static const struct alpha_pll_config disp_cc_pll0_config = {
static struct alpha_pll_config disp_cc_pll0_config = {
	.l = 0xd,
	.alpha = 0x6492,
	.config_ctl_val = 0x20485699,

@@ -106,7 +106,7 @@ static struct clk_alpha_pll disp_cc_pll0 = {
	},
};

static const struct alpha_pll_config disp_cc_pll1_config = {
static struct alpha_pll_config disp_cc_pll1_config = {
	.l = 0x1f,
	.alpha = 0x4000,
	.config_ctl_val = 0x20485699,

@@ -594,6 +594,13 @@ static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
	{ }
};

static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sar2130p[] = {
	F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
	F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
	F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
	{ }
};

static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sm8650[] = {
	F(19200000, P_BI_TCXO, 1, 0, 0),
	F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),

@@ -1750,6 +1757,7 @@ static struct qcom_cc_desc disp_cc_sm8550_desc = {
};

static const struct of_device_id disp_cc_sm8550_match_table[] = {
	{ .compatible = "qcom,sar2130p-dispcc" },
	{ .compatible = "qcom,sm8550-dispcc" },
	{ .compatible = "qcom,sm8650-dispcc" },
	{ }

@@ -1780,6 +1788,12 @@ static int disp_cc_sm8550_probe(struct platform_device *pdev)
		disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sm8650;
		disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr.hw.init->parent_hws[0] =
			&disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw;
	} else if (of_device_is_compatible(pdev->dev.of_node, "qcom,sar2130p-dispcc")) {
		disp_cc_pll0_config.l = 0x1f;
		disp_cc_pll0_config.alpha = 0x4000;
		disp_cc_pll0_config.user_ctl_val = 0x1;
		disp_cc_pll1_config.user_ctl_val = 0x1;
		disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sar2130p;
	}

	clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
@@ -129,6 +129,13 @@ static struct clk_branch tcsr_usb3_clkref_en = {
	},
};

static struct clk_regmap *tcsr_cc_sar2130p_clocks[] = {
	[TCSR_PCIE_0_CLKREF_EN]		= &tcsr_pcie_0_clkref_en.clkr,
	[TCSR_PCIE_1_CLKREF_EN]		= &tcsr_pcie_1_clkref_en.clkr,
	[TCSR_USB2_CLKREF_EN]		= &tcsr_usb2_clkref_en.clkr,
	[TCSR_USB3_CLKREF_EN]		= &tcsr_usb3_clkref_en.clkr,
};

static struct clk_regmap *tcsr_cc_sm8550_clocks[] = {
	[TCSR_PCIE_0_CLKREF_EN]		= &tcsr_pcie_0_clkref_en.clkr,
	[TCSR_PCIE_1_CLKREF_EN]		= &tcsr_pcie_1_clkref_en.clkr,

@@ -146,6 +153,12 @@ static const struct regmap_config tcsr_cc_sm8550_regmap_config = {
	.fast_io = true,
};

static const struct qcom_cc_desc tcsr_cc_sar2130p_desc = {
	.config = &tcsr_cc_sm8550_regmap_config,
	.clks = tcsr_cc_sar2130p_clocks,
	.num_clks = ARRAY_SIZE(tcsr_cc_sar2130p_clocks),
};

static const struct qcom_cc_desc tcsr_cc_sm8550_desc = {
	.config = &tcsr_cc_sm8550_regmap_config,
	.clks = tcsr_cc_sm8550_clocks,

@@ -153,7 +166,8 @@ static const struct qcom_cc_desc tcsr_cc_sm8550_desc = {
};

static const struct of_device_id tcsr_cc_sm8550_match_table[] = {
	{ .compatible = "qcom,sm8550-tcsr" },
	{ .compatible = "qcom,sar2130p-tcsr", .data = &tcsr_cc_sar2130p_desc },
	{ .compatible = "qcom,sm8550-tcsr", .data = &tcsr_cc_sm8550_desc },
	{ }
};
MODULE_DEVICE_TABLE(of, tcsr_cc_sm8550_match_table);

@@ -162,7 +176,7 @@ static int tcsr_cc_sm8550_probe(struct platform_device *pdev)
{
	struct regmap *regmap;

	regmap = qcom_cc_map(pdev, &tcsr_cc_sm8550_desc);
	regmap = qcom_cc_map(pdev, of_device_get_match_data(&pdev->dev));
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

@@ -103,10 +103,36 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
static bool dma_fence_array_signaled(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	int num_pending;
	unsigned int i;

	if (atomic_read(&array->num_pending) > 0)
	/*
	 * We need to read num_pending before checking the enable_signal bit
	 * to avoid racing with the enable_signaling() implementation, which
	 * might decrement the counter, and cause a partial check.
	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
	 * dma_fence_array_enable_signaling()
	 *
	 * The !--num_pending check is here to account for the any_signaled case
	 * if we race with enable_signaling(), that means the !num_pending check
	 * in the is_signalling_enabled branch might be outdated (num_pending
	 * might have been decremented), but that's fine. The user will get the
	 * right value when testing again later.
	 */
	num_pending = atomic_read_acquire(&array->num_pending);
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &array->base.flags)) {
		if (num_pending <= 0)
			goto signal;
		return false;
	}

	for (i = 0; i < array->num_fences; ++i) {
		if (dma_fence_is_signaled(array->fences[i]) && !--num_pending)
			goto signal;
	}
	return false;

signal:
	dma_fence_array_clear_pending_error(array);
	return true;
}
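The ordering the new comment describes can be modelled with C11 atomics: the release decrement in the signaling path pairs with the acquire read in the check, so the checker can never observe the signaling-enabled state without also observing the decrements that preceded it. A sketch with stand-in names, not the dma-fence API:

```c
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int num_pending;
static atomic_bool signaling_enabled;

/* enable_signaling() side: release pairs with the acquire load below */
static bool one_fence_signaled(void)
{
	return atomic_fetch_sub_explicit(&num_pending, 1,
					 memory_order_release) == 1;
}

/* signaled() side: read the counter first, with acquire semantics */
static bool array_signaled(void)
{
	int pending = atomic_load_explicit(&num_pending, memory_order_acquire);

	if (atomic_load(&signaling_enabled))
		return pending <= 0;
	return false;	/* would fall back to polling each fence, as above */
}
```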
@@ -12,6 +12,7 @@

#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>
#include <linux/sort.h>

/* Internal helper to start new array iteration, don't use directly */
static struct dma_fence *

@@ -59,6 +60,25 @@ struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);


static int fence_cmp(const void *_a, const void *_b)
{
	struct dma_fence *a = *(struct dma_fence **)_a;
	struct dma_fence *b = *(struct dma_fence **)_b;

	if (a->context < b->context)
		return -1;
	else if (a->context > b->context)
		return 1;

	if (dma_fence_is_later(b, a))
		return 1;
	else if (dma_fence_is_later(a, b))
		return -1;

	return 0;
}

/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
					   struct dma_fence **fences,

@@ -67,8 +87,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
	struct dma_fence_array *result;
	struct dma_fence *tmp, **array;
	ktime_t timestamp;
	unsigned int i;
	size_t count;
	int i, j, count;

	count = 0;
	timestamp = ns_to_ktime(0);

@@ -96,78 +115,55 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
	if (!array)
		return NULL;

	/*
	 * This trashes the input fence array and uses it as position for the
	 * following merge loop. This works because the dma_fence_merge()
	 * wrapper macro is creating this temporary array on the stack together
	 * with the iterators.
	 */
	for (i = 0; i < num_fences; ++i)
		fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);

	count = 0;
	do {
		unsigned int sel;

restart:
		tmp = NULL;
		for (i = 0; i < num_fences; ++i) {
			struct dma_fence *next;

			while (fences[i] && dma_fence_is_signaled(fences[i]))
				fences[i] = dma_fence_unwrap_next(&iter[i]);

			next = fences[i];
			if (!next)
				continue;

			/*
			 * We can't guarantee that input fences are ordered by
			 * context, but it is still quite likely when this
			 * function is used multiple times. So attempt to order
			 * the fences by context as we pass over them and merge
			 * fences with the same context.
			 */
			if (!tmp || tmp->context > next->context) {
				tmp = next;
				sel = i;

			} else if (tmp->context < next->context) {
				continue;

			} else if (dma_fence_is_later(tmp, next)) {
				fences[i] = dma_fence_unwrap_next(&iter[i]);
				goto restart;
	for (i = 0; i < num_fences; ++i) {
		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
			if (!dma_fence_is_signaled(tmp)) {
				array[count++] = dma_fence_get(tmp);
			} else {
				fences[sel] = dma_fence_unwrap_next(&iter[sel]);
				goto restart;
				ktime_t t = dma_fence_timestamp(tmp);

				if (ktime_after(t, timestamp))
					timestamp = t;
			}
		}
	}

	if (tmp) {
		array[count++] = dma_fence_get(tmp);
		fences[sel] = dma_fence_unwrap_next(&iter[sel]);
	if (count == 0 || count == 1)
		goto return_fastpath;

	sort(array, count, sizeof(*array), fence_cmp, NULL);

	/*
	 * Only keep the most recent fence for each context.
	 */
	j = 0;
	for (i = 1; i < count; i++) {
		if (array[i]->context == array[j]->context)
			dma_fence_put(array[i]);
		else
			array[++j] = array[i];
	}
	count = ++j;

	if (count > 1) {
		result = dma_fence_array_create(count, array,
						dma_fence_context_alloc(1),
						1, false);
		if (!result) {
			for (i = 0; i < count; i++)
				dma_fence_put(array[i]);
			tmp = NULL;
			goto return_tmp;
		}
	} while (tmp);

	if (count == 0) {
		tmp = dma_fence_allocate_private_stub(ktime_get());
		goto return_tmp;
		return &result->base;
	}

	if (count == 1) {
return_fastpath:
	if (count == 0)
		tmp = dma_fence_allocate_private_stub(timestamp);
	else
		tmp = array[0];
	goto return_tmp;
	}

	result = dma_fence_array_create(count, array,
					dma_fence_context_alloc(1),
					1, false);
	if (!result) {
		tmp = NULL;
		goto return_tmp;
	}
	return &result->base;

return_tmp:
	kfree(array);
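The rewritten merge path boils down to "collect, sort by (context, recency), keep one fence per context". The same two-pass shape on plain structs, with qsort() standing in for the kernel's sort() and no refcounting:

```c
#include <stdio.h>
#include <stdlib.h>

struct fake_fence { unsigned long context, seqno; };

/* Order by context, newest first within a context (cf. fence_cmp()). */
static int cmp(const void *pa, const void *pb)
{
	const struct fake_fence *a = pa, *b = pb;

	if (a->context != b->context)
		return a->context < b->context ? -1 : 1;
	return (b->seqno > a->seqno) - (b->seqno < a->seqno);
}

int main(void)
{
	struct fake_fence f[] = { {2, 7}, {1, 3}, {2, 9}, {1, 1} };
	size_t count = 4, j = 0;

	qsort(f, count, sizeof(*f), cmp);
	for (size_t i = 1; i < count; i++)	/* keep first (newest) per context */
		if (f[i].context != f[j].context)
			f[++j] = f[i];
	count = j + 1;				/* {1,3} and {2,9} survive */

	for (size_t i = 0; i < count; i++)
		printf("ctx %lu seq %lu\n", f[i].context, f[i].seqno);
	return 0;
}
```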
@@ -1742,9 +1742,11 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
 * any potential issues with this, only allow validated machines for now.
 */
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
	{ .compatible = "dell,xps13-9345" },
	{ .compatible = "lenovo,flex-5g" },
	{ .compatible = "lenovo,thinkpad-t14s" },
	{ .compatible = "lenovo,thinkpad-x13s", },
	{ .compatible = "lenovo,yoga-slim7x" },
	{ .compatible = "microsoft,romulus13", },
	{ .compatible = "microsoft,romulus15", },
	{ .compatible = "qcom,sc8180x-primus" },
@@ -328,6 +328,7 @@ static const struct irq_domain_ops grgpio_irq_domain_ops = {
static int grgpio_probe(struct platform_device *ofdev)
{
	struct device_node *np = ofdev->dev.of_node;
	struct device *dev = &ofdev->dev;
	void __iomem *regs;
	struct gpio_chip *gc;
	struct grgpio_priv *priv;

@@ -337,7 +338,7 @@ static int grgpio_probe(struct platform_device *ofdev)
	int size;
	int i;

	priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL);
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

@@ -346,28 +347,31 @@ static int grgpio_probe(struct platform_device *ofdev)
		return PTR_ERR(regs);

	gc = &priv->gc;
	err = bgpio_init(gc, &ofdev->dev, 4, regs + GRGPIO_DATA,
	err = bgpio_init(gc, dev, 4, regs + GRGPIO_DATA,
			 regs + GRGPIO_OUTPUT, NULL, regs + GRGPIO_DIR, NULL,
			 BGPIOF_BIG_ENDIAN_BYTE_ORDER);
	if (err) {
		dev_err(&ofdev->dev, "bgpio_init() failed\n");
		dev_err(dev, "bgpio_init() failed\n");
		return err;
	}

	priv->regs = regs;
	priv->imask = gc->read_reg(regs + GRGPIO_IMASK);
	priv->dev = &ofdev->dev;
	priv->dev = dev;

	gc->owner = THIS_MODULE;
	gc->to_irq = grgpio_to_irq;
	gc->label = devm_kasprintf(&ofdev->dev, GFP_KERNEL, "%pOF", np);
	gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
	if (!gc->label)
		return -ENOMEM;

	gc->base = -1;

	err = of_property_read_u32(np, "nbits", &prop);
	if (err || prop <= 0 || prop > GRGPIO_MAX_NGPIO) {
		gc->ngpio = GRGPIO_MAX_NGPIO;
		dev_dbg(&ofdev->dev,
			"No or invalid nbits property: assume %d\n", gc->ngpio);
		dev_dbg(dev, "No or invalid nbits property: assume %d\n",
			gc->ngpio);
	} else {
		gc->ngpio = prop;
	}

@@ -379,7 +383,7 @@ static int grgpio_probe(struct platform_device *ofdev)
	irqmap = (s32 *)of_get_property(np, "irqmap", &size);
	if (irqmap) {
		if (size < gc->ngpio) {
			dev_err(&ofdev->dev,
			dev_err(dev,
				"irqmap shorter than ngpio (%d < %d)\n",
				size, gc->ngpio);
			return -EINVAL;

@@ -389,7 +393,7 @@ static int grgpio_probe(struct platform_device *ofdev)
					     &grgpio_irq_domain_ops,
					     priv);
		if (!priv->domain) {
			dev_err(&ofdev->dev, "Could not add irq domain\n");
			dev_err(dev, "Could not add irq domain\n");
			return -EINVAL;
		}

@@ -419,13 +423,13 @@ static int grgpio_probe(struct platform_device *ofdev)

	err = gpiochip_add_data(gc, priv);
	if (err) {
		dev_err(&ofdev->dev, "Could not add gpiochip\n");
		dev_err(dev, "Could not add gpiochip\n");
		if (priv->domain)
			irq_domain_remove(priv->domain);
		return err;
	}

	dev_info(&ofdev->dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
	dev_info(dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
		 priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off");

	return 0;
@@ -14,6 +14,7 @@
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>

@@ -713,6 +714,45 @@ bool gpiochip_line_is_valid(const struct gpio_chip *gc,
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);

static void gpiod_free_irqs(struct gpio_desc *desc)
{
	int irq = gpiod_to_irq(desc);
	struct irq_desc *irqd = irq_to_desc(irq);
	void *cookie;

	for (;;) {
		/*
		 * Make sure the action doesn't go away while we're
		 * dereferencing it. Retrieve and store the cookie value.
		 * If the irq is freed after we release the lock, that's
		 * alright - the underlying maple tree lookup will return NULL
		 * and nothing will happen in free_irq().
		 */
		scoped_guard(mutex, &irqd->request_mutex) {
			if (!irq_desc_has_action(irqd))
				return;

			cookie = irqd->action->dev_id;
		}

		free_irq(irq, cookie);
	}
}

/*
 * The chip is going away but there may be users who had requested interrupts
 * on its GPIO lines who have no idea about its removal and have no way of
 * being notified about it. We need to free any interrupts still in use here or
 * we'll leak memory and resources (like procfs files).
 */
static void gpiochip_free_remaining_irqs(struct gpio_chip *gc)
{
	struct gpio_desc *desc;

	for_each_gpio_desc_with_flag(gc, desc, FLAG_USED_AS_IRQ)
		gpiod_free_irqs(desc);
}

static void gpiodev_release(struct device *dev)
{
	struct gpio_device *gdev = to_gpio_device(dev);

@@ -1125,6 +1165,7 @@ void gpiochip_remove(struct gpio_chip *gc)
	/* FIXME: should the legacy sysfs handling be moved to gpio_device? */
	gpiochip_sysfs_unregister(gdev);
	gpiochip_free_hogs(gc);
	gpiochip_free_remaining_irqs(gc);

	scoped_guard(mutex, &gpio_devices_lock)
		list_del_rcu(&gdev->list);
@@ -800,6 +800,7 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
		return -EIO;
	}

	kfree(info);
	return 0;
}

@@ -3666,7 +3666,7 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
 *
 * @adev: amdgpu_device pointer
 *
 * First resume function for hardware IPs. The list of all the hardware
 * Second resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
 * functional state after a suspend and updates the software state as

@@ -3684,6 +3684,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);

@@ -3698,6 +3699,36 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
	return 0;
}

/**
 * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Third resume function for hardware IPs. The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * all DCE. resume puts the hardware into a functional state after a suspend
 * and updates the software state as necessary. This function is also used
 * for restoring the GPU after a GPU reset.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume - run resume for hardware IPs
 *

@@ -3727,6 +3758,13 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
	if (adev->mman.buffer_funcs_ring->sched.ready)
		amdgpu_ttm_set_buffer_funcs_status(adev, true);

	if (r)
		return r;

	amdgpu_fence_driver_hw_init(adev);

	r = amdgpu_device_ip_resume_phase3(adev);

	return r;
}

@@ -4809,7 +4847,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
		goto exit;
	}
	amdgpu_fence_driver_hw_init(adev);

	if (!adev->in_s0ix) {
		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);

@@ -5431,6 +5468,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
				if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
					amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);

				r = amdgpu_device_ip_resume_phase3(tmp_adev);
				if (r)
					goto out;

				if (vram_lost)
					amdgpu_device_fill_reset_magic(tmp_adev);

@@ -6344,6 +6385,9 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (amdgpu_sriov_vf(adev))
		return false;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);
@@ -812,7 +812,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
	/* Map SG to device */
	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
	if (r)
		goto release_sg;
		goto release_sg_table;

	/* convert SG to linear array of pages and dma addresses */
	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,

@@ -820,6 +820,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,

	return 0;

release_sg_table:
	sg_free_table(ttm->sg);
release_sg:
	kfree(ttm->sg);
	ttm->sg = NULL;

@@ -1849,6 +1851,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)

	mutex_init(&adev->mman.gtt_window_lock);

	dma_set_max_seg_size(adev->dev, UINT_MAX);
	/* No others user of address space so set it to 0 */
	r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
			    adev_to_drm(adev)->anon_inode->i_mapping,
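The release_sg_table fix is the standard goto-unwind idiom: every step that succeeded before the failure gets undone, in reverse order. A stand-alone sketch of the shape; the allocation steps are stand-ins, not the ttm/amdgpu calls:

```c
#include <stdlib.h>

/* On success the caller owns both allocations; on failure nothing leaks. */
static int pin_userptr_model(char **sg_out, char **table_out, int fail_at_map)
{
	char *table;
	char *sg = malloc(32);		/* step 1: wrapper object */

	if (!sg)
		return -1;
	table = malloc(64);		/* step 2: scatter-gather table */
	if (!table)
		goto release_sg;
	if (fail_at_map)		/* step 3: dma mapping */
		goto release_sg_table;	/* before the fix, 'table' leaked here */

	*sg_out = sg;
	*table_out = table;
	return 0;

release_sg_table:
	free(table);
release_sg:
	free(sg);
	return -1;
}
```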
@@ -2223,6 +2223,18 @@ static int gfx_v9_0_sw_init(void *handle)
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 2):
		adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
		if (adev->gfx.mec_fw_version >= 88) {
			adev->gfx.enable_cleaner_shader = true;
			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
			if (r) {
				adev->gfx.enable_cleaner_shader = false;
				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
			}
		}
		break;
	default:
		adev->gfx.enable_cleaner_shader = false;
		break;
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * Copyright 2024 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),

@@ -24,3 +24,45 @@
static const u32 __maybe_unused gfx_9_0_cleaner_shader_hex[] = {
	/* Add the cleaner shader code here */
};

/* Define the cleaner shader gfx_9_4_2 */
static const u32 gfx_9_4_2_cleaner_shader_hex[] = {
	0xbf068100, 0xbf84003b,
	0xbf8a0000, 0xb07c0000,
	0xbe8200ff, 0x00000078,
	0xbf110802, 0x7e000280,
	0x7e020280, 0x7e040280,
	0x7e060280, 0x7e080280,
	0x7e0a0280, 0x7e0c0280,
	0x7e0e0280, 0x80828802,
	0xbe803202, 0xbf84fff5,
	0xbf9c0000, 0xbe8200ff,
	0x80000000, 0x86020102,
	0xbf840011, 0xbefe00c1,
	0xbeff00c1, 0xd28c0001,
	0x0001007f, 0xd28d0001,
	0x0002027e, 0x10020288,
	0xbe8200bf, 0xbefc00c1,
	0xd89c2000, 0x00020201,
	0xd89c6040, 0x00040401,
	0x320202ff, 0x00000400,
	0x80828102, 0xbf84fff8,
	0xbefc00ff, 0x0000005c,
	0xbf800000, 0xbe802c80,
	0xbe812c80, 0xbe822c80,
	0xbe832c80, 0x80fc847c,
	0xbf84fffa, 0xbee60080,
	0xbee70080, 0xbeea0180,
	0xbeec0180, 0xbeee0180,
	0xbef00180, 0xbef20180,
	0xbef40180, 0xbef60180,
	0xbef80180, 0xbefa0180,
	0xbf810000, 0xbf8d0001,
	0xbefc00ff, 0x0000005c,
	0xbf800000, 0xbe802c80,
	0xbe812c80, 0xbe822c80,
	0xbe832c80, 0x80fc847c,
	0xbf84fffa, 0xbee60080,
	0xbee70080, 0xbeea01ff,
	0x000000ee, 0xbf810000,
};

153
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm
Normal file
@@ -0,0 +1,153 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2024 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

// This shader is to clean LDS, SGPRs and VGPRs. It is the first 64 dwords (256 bytes) of the 192-dword cleaner shader.
// To turn this shader program on for compilation, change this to main and the lower shader main to main_1

// MI200 : Clear SGPRs, VGPRs and LDS
//   Uses two kernels launched separately:
//   1. Clean VGPRs, LDS, and lower SGPRs
//        Launches one workgroup per CU, each workgroup with 4x wave64 per SIMD in the CU
//        Waves are "wave64" and have 128 VGPRs each, which uses all 512 VGPRs per SIMD
//        Waves in the workgroup share the 64KB of LDS
//        Each wave clears SGPRs 0 - 95. Because there are 4 waves/SIMD, this is physical SGPRs 0-383
//        Each wave clears 128 VGPRs, so all 512 in the SIMD
//        The first wave of the workgroup clears its 64KB of LDS
//        The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
//        before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
//   2. Clean remaining SGPRs
//        Launches a workgroup with 24 waves per workgroup, yielding 6 waves per SIMD in each CU
//        Waves are allocating 96 SGPRs
//        CP sets up SPI_RESOURCE_RESERVE_* registers to prevent these waves from allocating SGPRs 0-223.
//        As such, these 6 waves per SIMD are allocated physical SGPRs 224-799
//        Barriers do not work for >16 waves per workgroup, so we cannot start with S_BARRIER
//        Instead, the shader starts with an S_SETHALT 1. Once all waves are launched CP will send unhalt command
//        The shader then clears all SGPRs allocated to it, cleaning out physical SGPRs 224-799

shader main
  asic(MI200)
  type(CS)
  wave_size(64)
// Note: original source code from SQ team

// (theoretical fastest = ~512clks vgpr + 1536 lds + ~128 sgpr = 2176 clks)

  s_cmp_eq_u32 s0, 1                     // Bit0 is set, sgpr0 is set then clear VGPRS and LDS as FW set COMPUTE_USER_DATA_3
  s_cbranch_scc0 label_0023              // Clean VGPRs and LDS if sgpr0 of wave is set, scc = (s3 == 1)
  S_BARRIER

  s_movk_i32 m0, 0x0000
  s_mov_b32 s2, 0x00000078               // Loop 128/8=16 times (loop unrolled for performance)
  //
  // CLEAR VGPRs
  //
  s_set_gpr_idx_on s2, 0x8               // enable Dest VGPR indexing
label_0005:
  v_mov_b32 v0, 0
  v_mov_b32 v1, 0
  v_mov_b32 v2, 0
  v_mov_b32 v3, 0
  v_mov_b32 v4, 0
  v_mov_b32 v5, 0
  v_mov_b32 v6, 0
  v_mov_b32 v7, 0
  s_sub_u32 s2, s2, 8
  s_set_gpr_idx_idx s2
  s_cbranch_scc0 label_0005
  s_set_gpr_idx_off

  //
  //

  s_mov_b32 s2, 0x80000000               // Bit31 is first_wave
  s_and_b32 s2, s2, s1                   // sgpr0 has tg_size (first_wave) term as in ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
  s_cbranch_scc0 label_clean_sgpr_1      // Clean LDS if its first wave of ThreadGroup/WorkGroup
  // CLEAR LDS
  //
  s_mov_b32 exec_lo, 0xffffffff
  s_mov_b32 exec_hi, 0xffffffff
  v_mbcnt_lo_u32_b32 v1, exec_hi, 0      // Set V1 to thread-ID (0..63)
  v_mbcnt_hi_u32_b32 v1, exec_lo, v1     // Set V1 to thread-ID (0..63)
  v_mul_u32_u24 v1, 0x00000008, v1       // * 8, so each thread is a double-dword address (8byte)
  s_mov_b32 s2, 0x00000003f              // 64 loop iterations
  s_mov_b32 m0, 0xffffffff
  // Clear all of LDS space
  // Each FirstWave of WorkGroup clears 64kbyte block

label_001F:
  ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
  ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
  v_add_co_u32 v1, vcc, 0x00000400, v1
  s_sub_u32 s2, s2, 1
  s_cbranch_scc0 label_001F
  //
  // CLEAR SGPRs
  //
label_clean_sgpr_1:
  s_mov_b32 m0, 0x0000005c               // Loop 96/4=24 times (loop unrolled for performance)
  s_nop 0
label_sgpr_loop:
  s_movreld_b32 s0, 0
  s_movreld_b32 s1, 0
  s_movreld_b32 s2, 0
  s_movreld_b32 s3, 0
  s_sub_u32 m0, m0, 4
  s_cbranch_scc0 label_sgpr_loop

  //clear vcc, flat scratch
  s_mov_b32 flat_scratch_lo, 0           //clear flat scratch lo SGPR
  s_mov_b32 flat_scratch_hi, 0           //clear flat scratch hi SGPR
  s_mov_b64 vcc, 0                       //clear vcc
  s_mov_b64 ttmp0, 0                     //Clear ttmp0 and ttmp1
  s_mov_b64 ttmp2, 0                     //Clear ttmp2 and ttmp3
  s_mov_b64 ttmp4, 0                     //Clear ttmp4 and ttmp5
  s_mov_b64 ttmp6, 0                     //Clear ttmp6 and ttmp7
  s_mov_b64 ttmp8, 0                     //Clear ttmp8 and ttmp9
  s_mov_b64 ttmp10, 0                    //Clear ttmp10 and ttmp11
  s_mov_b64 ttmp12, 0                    //Clear ttmp12 and ttmp13
  s_mov_b64 ttmp14, 0                    //Clear ttmp14 and ttmp15
  s_endpgm

label_0023:

  s_sethalt 1

  s_mov_b32 m0, 0x0000005c               // Loop 96/4=24 times (loop unrolled for performance)
  s_nop 0
label_sgpr_loop1:

  s_movreld_b32 s0, 0
  s_movreld_b32 s1, 0
  s_movreld_b32 s2, 0
  s_movreld_b32 s3, 0
  s_sub_u32 m0, m0, 4
  s_cbranch_scc0 label_sgpr_loop1

  //clear vcc, flat scratch
  s_mov_b32 flat_scratch_lo, 0           //clear flat scratch lo SGPR
  s_mov_b32 flat_scratch_hi, 0           //clear flat scratch hi SGPR
  s_mov_b64 vcc, 0xee                    //clear vcc

  s_endpgm
end
@@ -40,10 +40,12 @@
static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
	else
		RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
	} else {
		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
	}
}

static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,

@@ -54,11 +56,13 @@ static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
	    amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 5))
		return;

	if (!ring || !ring->funcs->emit_wreg)
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	else
		RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static void hdp_v4_0_query_ras_error_count(struct amdgpu_device *adev,
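Why each of these hdp fixes adds a register read after the write: MMIO writes are posted, so they can still sit in a write buffer when the CPU moves on; reading the same register back forces the write to complete first. A purely illustrative sketch of the idiom (the pointer is a placeholder, not a real register mapping):

```c
#include <stdint.h>

static inline void flush_posted_write(volatile uint32_t *reg)
{
	*reg = 0;	/* posted write: may still be buffered */
	(void)*reg;	/* read-back forces completion before we return */
}
```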
@ -31,10 +31,12 @@
|
||||
static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
|
||||
struct amdgpu_ring *ring)
|
||||
{
|
||||
if (!ring || !ring->funcs->emit_wreg)
|
||||
if (!ring || !ring->funcs->emit_wreg) {
|
||||
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
|
||||
else
|
||||
RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
|
||||
} else {
|
||||
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
|
||||
@ -42,6 +44,7 @@ static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
|
||||
{
|
||||
if (!ring || !ring->funcs->emit_wreg) {
|
||||
WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
|
||||
RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
|
||||
} else {
|
||||
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
|
||||
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
|
||||
|
@ -31,13 +31,15 @@
static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
                struct amdgpu_ring *ring)
{
    if (!ring || !ring->funcs->emit_wreg)
    if (!ring || !ring->funcs->emit_wreg) {
        WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
                0);
    else
        RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
    } else {
        amdgpu_ring_emit_wreg(ring,
                (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
                0);
    }
}

static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
@ -34,10 +34,12 @@
static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
                struct amdgpu_ring *ring)
{
    if (!ring || !ring->funcs->emit_wreg)
    if (!ring || !ring->funcs->emit_wreg) {
        WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
    else
        RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
    } else {
        amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
    }
}

static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
@ -31,10 +31,12 @@
static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
                struct amdgpu_ring *ring)
{
    if (!ring || !ring->funcs->emit_wreg)
    if (!ring || !ring->funcs->emit_wreg) {
        WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
    else
        RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
    } else {
        amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
    }
}

static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev,
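All five hdp_v*_flush_hdp() fixes above share one idea: on the MMIO path, read the flush-control register back right after writing it, so the posted write is guaranteed to have reached the device before the CPU moves on. A minimal sketch with generic kernel accessors (the helper name is hypothetical):

#include <linux/io.h>

/* Hypothetical helper showing the write-then-read-back pattern used by
 * the hdp_v*_flush_hdp() fixes on the no-ring (MMIO) path. */
static void hdp_flush_mmio(void __iomem *flush_cntl)
{
        writel(0, flush_cntl);          /* request the HDP flush */
        (void)readl(flush_cntl);        /* read back: drains the posted write */
}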
@ -116,6 +116,20 @@ static int vcn_v4_0_3_early_init(void *handle)
    return amdgpu_vcn_early_init(adev);
}

static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
    struct amdgpu_vcn4_fw_shared *fw_shared;

    fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
    fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
    fw_shared->sq.is_enabled = 1;

    if (amdgpu_vcnfw_log)
        amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);

    return 0;
}

/**
 * vcn_v4_0_3_sw_init - sw init for VCN block
 *
@ -148,8 +162,6 @@ static int vcn_v4_0_3_sw_init(void *handle)
        return r;

    for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
        volatile struct amdgpu_vcn4_fw_shared *fw_shared;

        vcn_inst = GET_INST(VCN, i);

        ring = &adev->vcn.inst[i].ring_enc[0];
@ -172,12 +184,7 @@ static int vcn_v4_0_3_sw_init(void *handle)
        if (r)
            return r;

        fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
        fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
        fw_shared->sq.is_enabled = true;

        if (amdgpu_vcnfw_log)
            amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
        vcn_v4_0_3_fw_shared_init(adev, i);
    }

    if (amdgpu_sriov_vf(adev)) {
@ -273,6 +280,8 @@ static int vcn_v4_0_3_hw_init(void *handle)
        }
    } else {
        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
            struct amdgpu_vcn4_fw_shared *fw_shared;

            vcn_inst = GET_INST(VCN, i);
            ring = &adev->vcn.inst[i].ring_enc[0];

@ -296,6 +305,11 @@ static int vcn_v4_0_3_hw_init(void *handle)
                    regVCN_RB1_DB_CTRL);
            }

            /* Re-init fw_shared when RAS fatal error occurred */
            fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
            if (!fw_shared->sq.is_enabled)
                vcn_v4_0_3_fw_shared_init(adev, i);

            r = amdgpu_ring_test_helper(ring);
            if (r)
                return r;
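The point of factoring out vcn_v4_0_3_fw_shared_init() is visible in the hw_init hunk: a RAS fatal error wipes the firmware-shared area, so hw_init re-seeds it whenever sq.is_enabled has been cleared. A self-contained model of that recovery check (stand-in types, not driver code):

#include <stdbool.h>

struct fw_shared_model { bool sq_is_enabled; };  /* stand-in for amdgpu_vcn4_fw_shared */

static void fw_shared_init_model(struct fw_shared_model *fws)
{
        fws->sq_is_enabled = true;      /* mirrors vcn_v4_0_3_fw_shared_init() */
}

static void hw_init_model(struct fw_shared_model *fws)
{
        if (!fws->sq_is_enabled)        /* zeroed by a RAS fatal error */
                fw_shared_init_model(fws);
}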
@ -114,6 +114,33 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
    tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
    tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);

    if (enable) {
        /* Unset the CLEAR_OVERFLOW bit to make sure the next step
         * is switching the bit from 0 to 1
         */
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
        if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
            if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
                return -ETIMEDOUT;
        } else {
            WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
        }

        /* Clear RB_OVERFLOW bit */
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
        if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
            if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
                return -ETIMEDOUT;
        } else {
            WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
        }

        /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
         * can be detected.
         */
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
    }

    /* enable_intr field is only valid in ring0 */
    if (ih == &adev->irq.ih)
        tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
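The WPTR_OVERFLOW_CLEAR field latches on a 0-to-1 edge, which is why the hunk writes the register three times: force the bit low, raise it to clear RB_OVERFLOW, then drop it again so the next overflow can latch. A compact model of that sequence (the bit position is illustrative; write_reg() stands in for WREG32_NO_KIQ()/psp_reg_program()):

#include <stdint.h>

#define WPTR_OVERFLOW_CLEAR (1u << 31)  /* illustrative bit position */

static uint32_t clear_rb_overflow(uint32_t tmp, void (*write_reg)(uint32_t))
{
        tmp &= ~WPTR_OVERFLOW_CLEAR;    /* 1) make sure the bit starts at 0 */
        write_reg(tmp);
        tmp |= WPTR_OVERFLOW_CLEAR;     /* 2) the 0 -> 1 edge clears RB_OVERFLOW */
        write_reg(tmp);
        tmp &= ~WPTR_OVERFLOW_CLEAR;    /* 3) re-arm; written out with the rest */
        return tmp;
}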
@ -1509,6 +1509,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
    if (adev->gfx.config.gc_tcp_size_per_cu) {
        pcache_info[i].cache_size = adev->gfx.config.gc_tcp_size_per_cu;
        pcache_info[i].cache_level = 1;
        /* Cacheline size not available in IP discovery for gc943,gc944 */
        pcache_info[i].cache_line_size = 128;
        pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
                    CRAT_CACHE_FLAGS_DATA_CACHE |
                    CRAT_CACHE_FLAGS_SIMD_CACHE);
@ -1520,6 +1522,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
        pcache_info[i].cache_size =
            adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
        pcache_info[i].cache_level = 1;
        pcache_info[i].cache_line_size = 64;
        pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
                    CRAT_CACHE_FLAGS_INST_CACHE |
                    CRAT_CACHE_FLAGS_SIMD_CACHE);
@ -1530,6 +1533,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
    if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) {
        pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
        pcache_info[i].cache_level = 1;
        pcache_info[i].cache_line_size = 64;
        pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
                    CRAT_CACHE_FLAGS_DATA_CACHE |
                    CRAT_CACHE_FLAGS_SIMD_CACHE);
@ -1540,6 +1544,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
    if (adev->gfx.config.gc_tcc_size) {
        pcache_info[i].cache_size = adev->gfx.config.gc_tcc_size;
        pcache_info[i].cache_level = 2;
        pcache_info[i].cache_line_size = 128;
        pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
                    CRAT_CACHE_FLAGS_DATA_CACHE |
                    CRAT_CACHE_FLAGS_SIMD_CACHE);
@ -1550,6 +1555,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
    if (adev->gmc.mall_size) {
        pcache_info[i].cache_size = adev->gmc.mall_size / 1024;
        pcache_info[i].cache_level = 3;
        pcache_info[i].cache_line_size = 64;
        pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
                    CRAT_CACHE_FLAGS_DATA_CACHE |
                    CRAT_CACHE_FLAGS_SIMD_CACHE);
@ -235,6 +235,9 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
         */
        kfd->device_info.needs_pci_atomics = true;
        kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
    } else if (gc_version < IP_VERSION(13, 0, 0)) {
        kfd->device_info.needs_pci_atomics = true;
        kfd->device_info.no_atomic_fw_version = 2090;
    } else {
        kfd->device_info.needs_pci_atomics = true;
    }
@ -1910,7 +1910,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        else
            init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
    } else {
        init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
        if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3))
            init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1);
        else
            init_data.flags.gpu_vm_support =
                (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
    }

    adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
@ -7337,10 +7341,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
    const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
    int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
    enum dc_status dc_result = DC_OK;
    uint8_t bpc_limit = 6;

    if (!dm_state)
        return NULL;

    if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
        aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
        bpc_limit = 8;

    do {
        stream = create_stream_for_sink(connector, drm_mode,
                        dm_state, old_stream,
@ -7361,11 +7370,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);

        if (dc_result != DC_OK) {
            DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
            DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n",
                      drm_mode->hdisplay,
                      drm_mode->vdisplay,
                      drm_mode->clock,
                      dc_result,
                      dc_pixel_encoding_to_str(stream->timing.pixel_encoding),
                      dc_color_depth_to_str(stream->timing.display_color_depth),
                      dc_status_to_str(dc_result));

            dc_stream_release(stream);
@ -7373,10 +7383,13 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
            requested_bpc -= 2; /* lower bpc to retry validation */
        }

    } while (stream == NULL && requested_bpc >= 6);
    } while (stream == NULL && requested_bpc >= bpc_limit);

    if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
        DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
    if ((dc_result == DC_FAIL_ENC_VALIDATE ||
         dc_result == DC_EXCEED_DONGLE_CAP) &&
        !aconnector->force_yuv420_output) {
        DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n",
                  __func__, __LINE__);

        aconnector->force_yuv420_output = true;
        stream = create_validate_stream_for_sink(aconnector, drm_mode,
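The validation retry above now has a signal-dependent floor: HDMI sinks (and DP-to-HDMI dongles) may not drop below 8 bpc, while everything else can fall back to 6 bpc. Distilled control flow, with try_validate() as a stand-in for the DC validation calls:

#include <stdbool.h>

static bool pick_bpc(int requested_bpc, bool hdmi_signal,
                     bool (*try_validate)(int))
{
        int bpc_limit = hdmi_signal ? 8 : 6;    /* HDMI floor is 8 bpc */
        int bpc;

        for (bpc = requested_bpc; bpc >= bpc_limit; bpc -= 2)
                if (try_validate(bpc))
                        return true;    /* stream validated at this depth */

        return false;                   /* caller retries forcing YUV420 */
}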
@ -132,6 +132,8 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
    for (i = 0; i < dc->res_pool->pipe_count; ++i) {
        struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
        struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
        struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
        struct dccg *dccg = clk_mgr_internal->dccg;
        struct pipe_ctx *pipe = safe_to_lower
            ? &context->res_ctx.pipe_ctx[i]
            : &dc->current_state->res_ctx.pipe_ctx[i];
@ -148,8 +150,13 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
            new_pipe->stream_res.stream_enc &&
            new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled &&
            new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc);
        if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
            !pipe->stream->link_enc) && !stream_changed_otg_dig_on) {
        bool has_active_hpo = dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(old_pipe) && dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(new_pipe);

        if (!has_active_hpo && !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe) &&
            (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
            !pipe->stream->link_enc) && !stream_changed_otg_dig_on)) {

            /* This w/a should not trigger when we have a dig active */
            if (disable) {
                if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
@ -257,11 +264,11 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_
    struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
    uint32_t host_router_bw_kbps[MAX_HOST_ROUTERS_NUM] = { 0 };
    int i;

    for (i = 0; i < context->stream_count; ++i) {
        const struct dc_stream_state *stream = context->streams[i];
        const struct dc_link *link = stream->link;
        uint8_t lowest_dpia_index = 0, hr_index = 0;
        uint8_t lowest_dpia_index = 0;
        unsigned int hr_index = 0;

        if (!link)
            continue;
@ -271,6 +278,8 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_
            continue;

        hr_index = (link->link_index - lowest_dpia_index) / 2;
        if (hr_index >= MAX_HOST_ROUTERS_NUM)
            continue;
        host_router_bw_kbps[hr_index] += dc_bandwidth_in_kbps_from_timing(
            &stream->timing, dc_link_get_highest_encoding_format(link));
    }
@ -6006,3 +6006,21 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state

    return profile;
}

/*
 **********************************************************************************
 * dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state
 *
 * Called when DM wants to log detile buffer size from dc_state
 *
 **********************************************************************************
 */
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
{
    struct dc *dc = context->clk_mgr->ctx->dc;

    if (dc->res_pool->funcs->get_det_buffer_size)
        return dc->res_pool->funcs->get_det_buffer_size(context);
    else
        return 0;
}
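Note the contract: a return of 0 means the resource pool has no get_det_buffer_size hook, so callers should treat 0 as "unknown" rather than "zero". An illustrative (not in-tree) caller fragment:

/* Illustrative fragment: only log when the pool reported a size. */
unsigned int det_size = dc_get_det_buffer_size_from_state(context);

if (det_size)
        DC_LOG_DC("DET buffer size: %u\n", det_size);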
@ -434,3 +434,43 @@ char *dc_status_to_str(enum dc_status status)

    return "Unexpected status error";
}

char *dc_pixel_encoding_to_str(enum dc_pixel_encoding pixel_encoding)
{
    switch (pixel_encoding) {
    case PIXEL_ENCODING_RGB:
        return "RGB";
    case PIXEL_ENCODING_YCBCR422:
        return "YUV422";
    case PIXEL_ENCODING_YCBCR444:
        return "YUV444";
    case PIXEL_ENCODING_YCBCR420:
        return "YUV420";
    default:
        return "Unknown";
    }
}

char *dc_color_depth_to_str(enum dc_color_depth color_depth)
{
    switch (color_depth) {
    case COLOR_DEPTH_666:
        return "6-bpc";
    case COLOR_DEPTH_888:
        return "8-bpc";
    case COLOR_DEPTH_101010:
        return "10-bpc";
    case COLOR_DEPTH_121212:
        return "12-bpc";
    case COLOR_DEPTH_141414:
        return "14-bpc";
    case COLOR_DEPTH_161616:
        return "16-bpc";
    case COLOR_DEPTH_999:
        return "9-bpc";
    case COLOR_DEPTH_111111:
        return "11-bpc";
    default:
        return "Unknown";
    }
}
@ -765,25 +765,6 @@ static inline void get_vp_scan_direction(
        *flip_horz_scan_dir = !*flip_horz_scan_dir;
}

/*
 * This is a preliminary vp size calculation to allow us to check taps support.
 * The result is completely overridden afterwards.
 */
static void calculate_viewport_size(struct pipe_ctx *pipe_ctx)
{
    struct scaler_data *data = &pipe_ctx->plane_res.scl_data;

    data->viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz, data->recout.width));
    data->viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert, data->recout.height));
    data->viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz_c, data->recout.width));
    data->viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert_c, data->recout.height));
    if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
            pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
        swap(data->viewport.width, data->viewport.height);
        swap(data->viewport_c.width, data->viewport_c.height);
    }
}

static struct rect intersect_rec(const struct rect *r0, const struct rect *r1)
{
    struct rect rec;
@ -1468,6 +1449,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
    const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
    struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
    const struct rect odm_slice_src = resource_get_odm_slice_src_rect(pipe_ctx);
    struct scaling_taps temp = {0};
    bool res = false;

    DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@ -1519,14 +1501,16 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
        res = spl_calculate_scaler_params(spl_in, spl_out);
        // Convert respective out params from SPL to scaler data
        translate_SPL_out_params_to_pipe_ctx(pipe_ctx, spl_out);

        /* Ignore scaler failure if pipe context plane is phantom plane */
        if (!res && plane_state->is_phantom)
            res = true;
    } else {
#endif
    /* depends on h_active */
    calculate_recout(pipe_ctx);
    /* depends on pixel format */
    calculate_scaling_ratios(pipe_ctx);
    /* depends on scaling ratios and recout, does not calculate offset yet */
    calculate_viewport_size(pipe_ctx);

    /*
     * LB calculations depend on vp size, h/v_active and scaling ratios
@ -1547,6 +1531,24 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)

    pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;

    // get TAP value with 100x100 dummy data for max scaling qualify, override
    // if a new scaling quality required
    pipe_ctx->plane_res.scl_data.viewport.width = 100;
    pipe_ctx->plane_res.scl_data.viewport.height = 100;
    pipe_ctx->plane_res.scl_data.viewport_c.width = 100;
    pipe_ctx->plane_res.scl_data.viewport_c.height = 100;
    if (pipe_ctx->plane_res.xfm != NULL)
        res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
                pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);

    if (pipe_ctx->plane_res.dpp != NULL)
        res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
                pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);

    temp = pipe_ctx->plane_res.scl_data.taps;

    calculate_inits_and_viewports(pipe_ctx);

    if (pipe_ctx->plane_res.xfm != NULL)
        res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
                pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
@ -1573,11 +1575,14 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
                &plane_state->scaling_quality);
    }

    /*
     * Depends on recout, scaling ratios, h_active and taps
     * May need to re-check lb size after this in some obscure scenario
     */
    if (res)
    /* Ignore scaler failure if pipe context plane is phantom plane */
    if (!res && plane_state->is_phantom)
        res = true;

    if (res && (pipe_ctx->plane_res.scl_data.taps.v_taps != temp.v_taps ||
        pipe_ctx->plane_res.scl_data.taps.h_taps != temp.h_taps ||
        pipe_ctx->plane_res.scl_data.taps.v_taps_c != temp.v_taps_c ||
        pipe_ctx->plane_res.scl_data.taps.h_taps_c != temp.h_taps_c))
        calculate_inits_and_viewports(pipe_ctx);

    /*
@ -819,12 +819,12 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
            stream->dst.height,
            stream->output_color_space);
    DC_LOG_DC(
            "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
            "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixel_encoding:%s, color_depth:%s\n",
            stream->timing.pix_clk_100hz / 10,
            stream->timing.h_total,
            stream->timing.v_total,
            stream->timing.pixel_encoding,
            stream->timing.display_color_depth);
            dc_pixel_encoding_to_str(stream->timing.pixel_encoding),
            dc_color_depth_to_str(stream->timing.display_color_depth));
    DC_LOG_DC(
            "\tlink: %d\n",
            stream->link->link_index);
@ -285,6 +285,7 @@ struct dc_caps {
    uint16_t subvp_vertical_int_margin_us;
    bool seamless_odm;
    uint32_t max_v_total;
    bool vtotal_limited_by_fp2;
    uint32_t max_disp_clock_khz_at_vmin;
    uint8_t subvp_drr_vblank_start_margin_us;
    bool cursor_not_scaled;
@ -2543,6 +2544,8 @@ struct dc_power_profile {

struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context);

unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);

/* DSC Interfaces */
#include "dc_dsc.h"
@ -1294,6 +1294,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)

    memset(&new_signals, 0, sizeof(new_signals));

    new_signals.bits.allow_idle = 1; /* always set */

    if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
        dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
        new_signals.bits.allow_pg = 1;
@ -1389,7 +1391,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
         */
        dc_dmub_srv->needs_idle_wake = false;

        if (prev_driver_signals.bits.allow_ips2 &&
        if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
            (!dc->debug.optimize_ips_handshake ||
             ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
            DC_LOG_IPS(
@ -1450,7 +1452,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
        }

        dc_dmub_srv_notify_idle(dc, false);
        if (prev_driver_signals.bits.allow_ips1) {
        if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
            DC_LOG_IPS(
                "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
                ips_fw->signals.bits.ips1_commit,
@ -83,6 +83,15 @@ void enc314_disable_fifo(struct stream_encoder *enc)
    REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
}

static bool enc314_is_fifo_enabled(struct stream_encoder *enc)
{
    struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
    uint32_t reset_val;

    REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val);
    return (reset_val != 0);
}

void enc314_dp_set_odm_combine(
    struct stream_encoder *enc,
    bool odm_combine)
@ -468,6 +477,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {

    .enable_fifo = enc314_enable_fifo,
    .disable_fifo = enc314_disable_fifo,
    .is_fifo_enabled = enc314_is_fifo_enabled,
    .set_input_mode = enc314_set_dig_input_mode,
};
@ -1222,6 +1222,7 @@ static dml_bool_t CalculatePrefetchSchedule(struct display_mode_lib_scratch_st *
    s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;

    s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + dml_max(p->TWait + p->TCalc, *p->Tdmdl)) / s->LineTime - (*p->DSTYAfterScaler + (dml_float_t) *p->DSTXAfterScaler / (dml_float_t)p->myPipe->HTotal);
    s->dst_y_prefetch_equ = dml_min(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH

#ifdef __DML_VBA_DEBUG__
    dml_print("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
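The 63.75 cap is simply the saturation value of the U6.2 fixed-point register field named in the comment: six integer bits plus two fractional bits.

/* Max value of a U6.2 fixed-point field: all 8 bits set, scaled by 1/4. */
double u62_max = 0xFF / 4.0;            /* 63.75 = 2^6 - 2^-2 */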
@ -339,11 +339,22 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in
    // }
}

static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
{
    unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total;

    if (stream->ctx->dc->caps.vtotal_limited_by_fp2) {
        max_hw_v_total -= stream->timing.v_front_porch + 1;
    }

    return max_hw_v_total;
}

static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing,
        struct dc_stream_state *stream,
        struct dml2_context *dml_ctx)
{
    unsigned int hblank_start, vblank_start;
    unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz;

    timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
    timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
@ -371,11 +382,23 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
        - stream->timing.v_border_top - stream->timing.v_border_bottom;

    timing->drr_config.enabled = stream->ignore_msa_timing_param;
    timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
    timing->drr_config.drr_active_variable = stream->vrr_active_variable;
    timing->drr_config.drr_active_fixed = stream->vrr_active_fixed;
    timing->drr_config.disallowed = !stream->allow_freesync;

    /* limit min refresh rate to DC cap */
    min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz;
    if (stream->ctx->dc->caps.max_v_total != 0) {
        min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
            (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
    }

    if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) {
        timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
    } else {
        timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz;
    }

    if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase &&
            stream->ctx->dc->config.enable_fpo_flicker_detection == 1)
        timing->drr_config.max_instant_vtotal_delta = dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase(stream, false);
@ -859,7 +882,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
    plane->immediate_flip = plane_state->flip_immediate;

    plane->composition.rect_out_height_spans_vactive =
        plane_state->dst_rect.height >= stream->timing.v_addressable &&
        plane_state->dst_rect.height >= stream->src.height &&
        stream->dst.height >= stream->timing.v_addressable;
}
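A worked example of the new clamp, with assumed numbers: a 594 MHz pixel clock (pix_clk_100hz = 5940000), h_total = 4400 and a hardware v_total ceiling of 32767 put the lowest refresh the hardware can honour at roughly 4.12 Hz, so any requested minimum below that is raised to it.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t pix_clk_100hz = 5940000;       /* 594 MHz, in 100 Hz units */
        uint64_t h_total = 4400, max_v_total = 32767;
        uint64_t min_refresh_uhz = pix_clk_100hz * 100000000ULL /
                                   (h_total * max_v_total);

        printf("%llu uHz\n", (unsigned long long)min_refresh_uhz); /* ~4.12 Hz */
        return 0;
}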
@ -355,6 +355,20 @@ void dcn314_calculate_pix_rate_divider(
    }
}

static bool dcn314_is_pipe_dig_fifo_on(struct pipe_ctx *pipe)
{
    return pipe && pipe->stream
        // Check dig's otg instance.
        && pipe->stream_res.stream_enc
        && pipe->stream_res.stream_enc->funcs->dig_source_otg
        && pipe->stream_res.tg->inst == pipe->stream_res.stream_enc->funcs->dig_source_otg(pipe->stream_res.stream_enc)
        && pipe->stream->link && pipe->stream->link->link_enc
        && pipe->stream->link->link_enc->funcs->is_dig_enabled
        && pipe->stream->link->link_enc->funcs->is_dig_enabled(pipe->stream->link->link_enc)
        && pipe->stream_res.stream_enc->funcs->is_fifo_enabled
        && pipe->stream_res.stream_enc->funcs->is_fifo_enabled(pipe->stream_res.stream_enc);
}

void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx)
{
    unsigned int i;
@ -371,7 +385,11 @@ void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc
        if (pipe->top_pipe || pipe->prev_odm_pipe)
            continue;

        if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
        if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) &&
            !pipe->stream->apply_seamless_boot_optimization &&
            !pipe->stream->apply_edp_fast_boot_optimization) {
            if (dcn314_is_pipe_dig_fifo_on(pipe))
                continue;
            pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
            reset_sync_context_for_pipe(dc, context, i);
            otg_disabled[i] = true;
@ -60,5 +60,7 @@ enum dc_status {
};

char *dc_status_to_str(enum dc_status status);
char *dc_pixel_encoding_to_str(enum dc_pixel_encoding pixel_encoding);
char *dc_color_depth_to_str(enum dc_color_depth color_depth);

#endif /* _CORE_STATUS_H_ */
@ -215,6 +215,7 @@ struct resource_funcs {

    void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
    void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx);
    unsigned int (*get_det_buffer_size)(const struct dc_state *context);
};

struct audio_support{
@ -1511,6 +1511,7 @@ bool dcn20_split_stream_for_odm(

    if (prev_odm_pipe->plane_state) {
        struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
        struct output_pixel_processor *opp = next_odm_pipe->stream_res.opp;
        int new_width;

        /* HACTIVE halved for odm combine */
@ -1544,7 +1545,28 @@ bool dcn20_split_stream_for_odm(
        sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
                sd->ratios.horz_c, sd->h_active - sd->recout.x));
        sd->recout.x = 0;

        /*
         * When odm is used in YcbCr422 or 420 colour space, a split screen
         * will be seen with the previous calculations since the extra left
         * edge pixel is accounted for in fmt but not in viewport.
         *
         * Below are calculations which fix the split by fixing the calculations
         * if there is an extra left edge pixel.
         */
        if (opp && opp->funcs->opp_get_left_edge_extra_pixel_count
            && opp->funcs->opp_get_left_edge_extra_pixel_count(
                opp, next_odm_pipe->stream->timing.pixel_encoding,
                resource_is_pipe_type(next_odm_pipe, OTG_MASTER)) == 1) {
            sd->h_active += 1;
            sd->recout.width += 1;
            sd->viewport.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
            sd->viewport_c.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
            sd->viewport_c.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
            sd->viewport.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
        }
    }

    if (!next_odm_pipe->top_pipe)
        next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
    else
@ -2133,6 +2155,7 @@ bool dcn20_fast_validate_bw(
            ASSERT(0);
        }
    }

    /* Actual dsc count per stream dsc validation*/
    if (!dcn20_validate_dsc(dc, context)) {
        context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
@ -1298,7 +1298,7 @@ static struct link_encoder *dcn21_link_encoder_create(
        kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL);
    int link_regs_id;

    if (!enc21)
    if (!enc21 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
        return NULL;

    link_regs_id =
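The dcn21 change is a plain bounds guard: hpd_source indexes link_enc_hpd_regs[], so out-of-range sources must fail the constructor instead of reading past the table. Generic form of the idiom (stand-in table):

#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int hpd_regs[6];                   /* stand-in for link_enc_hpd_regs[] */

static const int *get_hpd_regs(size_t hpd_source)
{
        if (hpd_source >= ARRAY_SIZE(hpd_regs)) /* reject out-of-range source */
                return NULL;
        return &hpd_regs[hpd_source];
}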
@ -2354,6 +2354,7 @@ static bool dcn30_resource_construct(

    dc->caps.dp_hdmi21_pcon_support = true;
    dc->caps.max_v_total = (1 << 15) - 1;
    dc->caps.vtotal_limited_by_fp2 = true;

    /* read VBIOS LTTPR caps */
    {
@ -1234,6 +1234,7 @@ static bool dcn302_resource_construct(
    dc->caps.extended_aux_timeout_support = true;
    dc->caps.dmcub_support = true;
    dc->caps.max_v_total = (1 << 15) - 1;
    dc->caps.vtotal_limited_by_fp2 = true;

    /* Color pipeline capabilities */
    dc->caps.color.dpp.dcn_arch = 1;
@ -1179,6 +1179,7 @@ static bool dcn303_resource_construct(
    dc->caps.extended_aux_timeout_support = true;
    dc->caps.dmcub_support = true;
    dc->caps.max_v_total = (1 << 15) - 1;
    dc->caps.vtotal_limited_by_fp2 = true;

    /* Color pipeline capabilities */
    dc->caps.color.dpp.dcn_arch = 1;
Some files were not shown because too many files have changed in this diff.