Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
(synced 2024-12-29 09:13:38 +00:00)

ASoC: Fixes for v6.5

Merge tag 'asoc-fix-v6.5-rc3' of
https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus

A collection of device specific fixes, none particularly remarkable. There's
a set of repetitive fixes for the RealTek drivers fixing an issue with
suspend that was replicated in multiple drivers.

-----BEGIN PGP SIGNATURE-----

iQEzBAABCgAdFiEEreZoqmdXGLWf4p/qJNaLcl1Uh9AFAmTCYMAACgkQJNaLcl1U
h9Do+Af/USa8kLylJn0vzxbfkwpSu3rCbgQurw9KKCDa7lTB7jqZzpCAmPbs7txO
WEwKKz8YSka2YlmXm0rRzhqHIdTdkHlvJ3aircrolfpedeelRyqthhCjdgl6pJAj
3+Kpi7a2QaSqxc2Z45GX4vR86xOmlivWS4gOKZV4GuJt2FkmTIgbGYjtumU0GPla
DneK7yxQpNe68Z+AHxmGoAvKkXggqE49up1PGRiV2nlyioHeQLqDyUlvZsc4MP3Y
Qx/RKvvFoh20HVNKv+iXss7VxYebIzkHuAJLwRDFHkcQajFHcri+ZWEv9lVd/pak
Hiso2ryviIrUFIKfsCWKb9xHYbptCQ==
=HNYO
-----END PGP SIGNATURE-----

This commit is contained in commit 3b9adfbba5.

.mailmap (3 lines changed)

@@ -241,6 +241,7 @@ Jisheng Zhang <jszhang@kernel.org> <Jisheng.Zhang@synaptics.com>
 Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
 Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
 John Crispin <john@phrozen.org> <blogic@openwrt.org>
 John Fastabend <john.fastabend@gmail.com> <john.r.fastabend@intel.com>
 John Keeping <john@keeping.me.uk> <john@metanate.com>
+John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
@@ -454,6 +455,8 @@ Sebastian Reichel <sre@kernel.org> <sre@debian.org>
 Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
 Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
 Shannon Nelson <shannon.nelson@amd.com> <snelson@pensando.io>
+Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@intel.com>
+Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@oracle.com>
 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
 Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
 Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>

@@ -105,7 +105,7 @@ properties:
       G coefficient for temperature equation.
       Default for series 5 = 60000
       Default for series 6 = 57400
-    multipleOf: 1000
+    multipleOf: 100
     minimum: 1000
     $ref: /schemas/types.yaml#/definitions/uint32

@@ -114,7 +114,7 @@ properties:
       H coefficient for temperature equation.
       Default for series 5 = 200000
       Default for series 6 = 249400
-    multipleOf: 1000
+    multipleOf: 100
     minimum: 1000
     $ref: /schemas/types.yaml#/definitions/uint32

@@ -131,7 +131,7 @@ properties:
       J coefficient for temperature equation.
       Default for series 5 = -100
       Default for series 6 = 0
-    multipleOf: 1000
+    multipleOf: 100
     maximum: 0
     $ref: /schemas/types.yaml#/definitions/int32

@@ -1,19 +0,0 @@
-* Universal Asynchronous Receiver/Transmitter (UART)
-
-- compatible: "cavium,octeon-3860-uart"
-
-  Compatibility with all cn3XXX, cn5XXX and cn6XXX SOCs.
-
-- reg: The base address of the UART register bank.
-
-- interrupts: A single interrupt specifier.
-
-- current-speed: Optional, the current bit rate in bits per second.
-
-Example:
-    uart1: serial@1180000000c00 {
-        compatible = "cavium,octeon-3860-uart","ns16550";
-        reg = <0x11800 0x00000c00 0x0 0x400>;
-        current-speed = <115200>;
-        interrupts = <0 35>;
-    };

@@ -1,28 +0,0 @@
-* NXP LPC1850 UART
-
-Required properties:
-- compatible    : "nxp,lpc1850-uart", "ns16550a".
-- reg           : offset and length of the register set for the device.
-- interrupts    : should contain uart interrupt.
-- clocks        : phandle to the input clocks.
-- clock-names   : required elements: "uartclk", "reg".
-
-Optional properties:
-- dmas          : Two or more DMA channel specifiers following the
-                  convention outlined in bindings/dma/dma.txt
-- dma-names     : Names for the dma channels, if present. There must
-                  be at least one channel named "tx" for transmit
-                  and named "rx" for receive.
-
-Since it's also possible to also use the of_serial.c driver all
-parameters from 8250.txt also apply but are optional.
-
-Example:
-uart0: serial@40081000 {
-    compatible = "nxp,lpc1850-uart", "ns16550a";
-    reg = <0x40081000 0x1000>;
-    reg-shift = <2>;
-    interrupts = <24>;
-    clocks = <&ccu2 CLK_APB0_UART0>, <&ccu1 CLK_CPU_UART0>;
-    clock-names = "uartclk", "reg";
-};

Makefile (24 lines changed)

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Hurr durr I'ma ninja sloth

 # *DOCUMENTATION*
@@ -555,11 +555,23 @@ LINUXINCLUDE    := \
         $(USERINCLUDE)

 KBUILD_AFLAGS := -D__ASSEMBLY__ -fno-PIE
-KBUILD_CFLAGS := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \
-         -fno-strict-aliasing -fno-common -fshort-wchar -fno-PIE \
-         -Werror=implicit-function-declaration -Werror=implicit-int \
-         -Werror=return-type -Wno-format-security -funsigned-char \
-         -std=gnu11
+
+KBUILD_CFLAGS :=
+KBUILD_CFLAGS += -std=gnu11
+KBUILD_CFLAGS += -fshort-wchar
+KBUILD_CFLAGS += -funsigned-char
+KBUILD_CFLAGS += -fno-common
+KBUILD_CFLAGS += -fno-PIE
+KBUILD_CFLAGS += -fno-strict-aliasing
+KBUILD_CFLAGS += -Wall
+KBUILD_CFLAGS += -Wundef
+KBUILD_CFLAGS += -Werror=implicit-function-declaration
+KBUILD_CFLAGS += -Werror=implicit-int
+KBUILD_CFLAGS += -Werror=return-type
+KBUILD_CFLAGS += -Werror=strict-prototypes
+KBUILD_CFLAGS += -Wno-format-security
+KBUILD_CFLAGS += -Wno-trigraphs
+
 KBUILD_CPPFLAGS := -D__KERNEL__
 KBUILD_RUSTFLAGS := $(rust_common_flags) \
             --target=$(objtree)/scripts/target.json \

@@ -727,6 +727,8 @@ struct kvm_vcpu_arch {
 #define DBG_SS_ACTIVE_PENDING   __vcpu_single_flag(sflags, BIT(5))
 /* PMUSERENR for the guest EL0 is on physical CPU */
 #define PMUSERENR_ON_CPU        __vcpu_single_flag(sflags, BIT(6))
+/* WFI instruction trapped */
+#define IN_WFI                  __vcpu_single_flag(sflags, BIT(7))

 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */

@@ -608,22 +608,26 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);

 /**
- * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
+ * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
+ *                                         flag in a page-table entry.
  * @pgt:   Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:  Intermediate physical address to identify the page-table entry.
+ * @size:  Size of the address range to visit.
+ * @mkold: True if the access flag should be cleared.
  *
  * The offset of @addr within a page is ignored.
  *
- * If there is a valid, leaf page-table entry used to translate @addr, then
- * clear the access flag in that entry.
+ * Tests and conditionally clears the access flag for every valid, leaf
+ * page-table entry used to translate the range [@addr, @addr + @size).
  *
  * Note that it is the caller's responsibility to invalidate the TLB after
  * calling this function to ensure that the updated permissions are visible
  * to the CPUs.
  *
- * Return: The old page-table entry prior to clearing the flag, 0 on failure.
+ * Return: True if any of the visited PTEs had the access flag set.
  */
-kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
+bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
+                                         u64 size, bool mkold);

 /**
  * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
@@ -645,18 +649,6 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
                                    enum kvm_pgtable_prot prot);

-/**
- * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
- *                                 access flag set.
- * @pgt:   Page-table structure initialised by kvm_pgtable_stage2_init*().
- * @addr:  Intermediate physical address to identify the page-table entry.
- *
- * The offset of @addr within a page is ignored.
- *
- * Return: True if the page-table entry has the access flag set, false otherwise.
- */
-bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
-
 /**
  * kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point
  *                                    of Coherency for guest stage-2 address

@@ -78,6 +78,7 @@ extern u32 __boot_cpu_mode[2];

 void __hyp_set_vectors(phys_addr_t phys_vector_base);
 void __hyp_reset_vectors(void);
+bool is_kvm_arm_initialised(void);

 DECLARE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);

@@ -847,6 +847,8 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
 int vec_set_vector_length(struct task_struct *task, enum vec_type type,
                           unsigned long vl, unsigned long flags)
 {
+    bool free_sme = false;
+
     if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
                                  PR_SVE_SET_VL_ONEXEC))
         return -EINVAL;
@@ -897,21 +899,36 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
         task->thread.fp_type = FP_STATE_FPSIMD;
     }

-    if (system_supports_sme() && type == ARM64_VEC_SME) {
-        task->thread.svcr &= ~(SVCR_SM_MASK |
-                               SVCR_ZA_MASK);
-        clear_thread_flag(TIF_SME);
+    if (system_supports_sme()) {
+        if (type == ARM64_VEC_SME ||
+            !(task->thread.svcr & (SVCR_SM_MASK | SVCR_ZA_MASK))) {
+            /*
+             * We are changing the SME VL or weren't using
+             * SME anyway, discard the state and force a
+             * reallocation.
+             */
+            task->thread.svcr &= ~(SVCR_SM_MASK |
+                                   SVCR_ZA_MASK);
+            clear_thread_flag(TIF_SME);
+            free_sme = true;
+        }
     }

     if (task == current)
         put_cpu_fpsimd_context();

     /*
-     * Force reallocation of task SVE and SME state to the correct
-     * size on next use:
+     * Free the changed states if they are not in use, SME will be
+     * reallocated to the correct size on next use and we just
+     * allocate SVE now in case it is needed for use in streaming
+     * mode.
     */
-    sve_free(task);
-    if (system_supports_sme() && type == ARM64_VEC_SME)
+    if (system_supports_sve()) {
+        sve_free(task);
+        sve_alloc(task, true);
+    }
+
+    if (free_sme)
         sme_free(task);

     task_set_vl(task, type, vl);

@@ -6,6 +6,10 @@
 *
 */

+int __kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+int __kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res);
+
 int __kernel_clock_gettime(clockid_t clock,
                            struct __kernel_timespec *ts)
 {

@@ -827,8 +827,8 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
     assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr);
     assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr);

-    /* This only happens on VHE, so use the CNTKCTL_EL1 accessor */
-    sysreg_clear_set(cntkctl_el1, clr, set);
+    /* This only happens on VHE, so use the CNTHCTL_EL2 accessor. */
+    sysreg_clear_set(cnthctl_el2, clr, set);
 }

 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
@@ -1563,7 +1563,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 void kvm_timer_init_vhe(void)
 {
     if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
-        sysreg_clear_set(cntkctl_el1, 0, CNTHCTL_ECV);
+        sysreg_clear_set(cnthctl_el2, 0, CNTHCTL_ECV);
 }

 int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)

@@ -53,11 +53,16 @@ DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

 DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

-static bool vgic_present;
+static bool vgic_present, kvm_arm_initialised;

 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

+bool is_kvm_arm_initialised(void)
+{
+    return kvm_arm_initialised;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
     return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -713,13 +718,15 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
     */
     preempt_disable();
     kvm_vgic_vmcr_sync(vcpu);
-    vgic_v4_put(vcpu, true);
+    vcpu_set_flag(vcpu, IN_WFI);
+    vgic_v4_put(vcpu);
     preempt_enable();

     kvm_vcpu_halt(vcpu);
     vcpu_clear_flag(vcpu, IN_WFIT);

     preempt_disable();
+    vcpu_clear_flag(vcpu, IN_WFI);
     vgic_v4_load(vcpu);
     preempt_enable();
 }
@@ -787,7 +794,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
     if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
         /* The distributor enable bits were changed */
         preempt_disable();
-        vgic_v4_put(vcpu, false);
+        vgic_v4_put(vcpu);
         vgic_v4_load(vcpu);
         preempt_enable();
     }
@@ -1867,8 +1874,17 @@ static void _kvm_arch_hardware_enable(void *discard)

 int kvm_arch_hardware_enable(void)
 {
-    int was_enabled = __this_cpu_read(kvm_arm_hardware_enabled);
+    int was_enabled;

+    /*
+     * Most calls to this function are made with migration
+     * disabled, but not with preemption disabled. The former is
+     * enough to ensure correctness, but most of the helpers
+     * expect the later and will throw a tantrum otherwise.
+     */
+    preempt_disable();
+
+    was_enabled = __this_cpu_read(kvm_arm_hardware_enabled);
     _kvm_arch_hardware_enable(NULL);

     if (!was_enabled) {
@@ -1876,6 +1892,8 @@ int kvm_arch_hardware_enable(void)
         kvm_timer_cpu_up();
     }

+    preempt_enable();
+
     return 0;
 }

@@ -2482,6 +2500,8 @@ static __init int kvm_arm_init(void)
     if (err)
         goto out_subs;

+    kvm_arm_initialised = true;
+
     return 0;

 out_subs:

@@ -154,6 +154,12 @@ SYM_CODE_END(\label)
     esb
     stp x0, x1, [sp, #-16]!
662:
+    /*
+     * spectre vectors __bp_harden_hyp_vecs generate br instructions at runtime
+     * that jump at offset 8 at __kvm_hyp_vector.
+     * As hyp .text is guarded section, it needs bti j.
+     */
+    bti j
     b \target

check_preamble_length 661b, 662b
@@ -165,6 +171,8 @@ check_preamble_length 661b, 662b
     nop
     stp x0, x1, [sp, #-16]!
662:
+    /* Check valid_vect */
+    bti j
     b \target

check_preamble_length 661b, 662b

@@ -297,3 +297,13 @@ SYM_CODE_START(__kvm_hyp_host_forward_smc)

     ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)
+
+/*
+ * kvm_host_psci_cpu_entry is called through br instruction, which requires
+ * bti j instruction as compilers (gcc and llvm) doesn't insert bti j for external
+ * functions, but bti c instead.
+ */
+SYM_CODE_START(kvm_host_psci_cpu_entry)
+    bti j
+    b __kvm_host_psci_cpu_entry
+SYM_CODE_END(kvm_host_psci_cpu_entry)

@@ -200,7 +200,7 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
                      __hyp_pa(init_params), 0);
 }

-asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
+asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
 {
     struct psci_boot_args *boot_args;
     struct kvm_cpu_context *host_ctxt;

@@ -1195,25 +1195,54 @@ kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
     return pte;
 }

-kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
+struct stage2_age_data {
+    bool mkold;
+    bool young;
+};
+
+static int stage2_age_walker(const struct kvm_pgtable_visit_ctx *ctx,
+                             enum kvm_pgtable_walk_flags visit)
 {
-    kvm_pte_t pte = 0;
-    stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
-                             &pte, NULL, 0);
+    kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
+    struct stage2_age_data *data = ctx->arg;
+
+    if (!kvm_pte_valid(ctx->old) || new == ctx->old)
+        return 0;
+
+    data->young = true;
+
+    /*
+     * stage2_age_walker() is always called while holding the MMU lock for
+     * write, so this will always succeed. Nonetheless, this deliberately
+     * follows the race detection pattern of the other stage-2 walkers in
+     * case the locking mechanics of the MMU notifiers is ever changed.
+     */
+    if (data->mkold && !stage2_try_set_pte(ctx, new))
+        return -EAGAIN;
+
     /*
      * "But where's the TLBI?!", you scream.
      * "Over in the core code", I sigh.
      *
      * See the '->clear_flush_young()' callback on the KVM mmu notifier.
      */
-    return pte;
+    return 0;
 }

-bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
+bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
+                                         u64 size, bool mkold)
 {
-    kvm_pte_t pte = 0;
-    stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL, 0);
-    return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
+    struct stage2_age_data data = {
+        .mkold = mkold,
+    };
+    struct kvm_pgtable_walker walker = {
+        .cb    = stage2_age_walker,
+        .arg   = &data,
+        .flags = KVM_PGTABLE_WALK_LEAF,
+    };
+
+    WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
+    return data.young;
 }

 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,

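The new kvm_pgtable_stage2_test_clear_young() replaces two single-entry helpers with one range walk: a callback visits every leaf entry, records in a shared context struct whether any entry had the access flag, and clears the flag only when mkold is true. A minimal user-space analogue of that accumulate-through-a-context pattern (hypothetical names, a flat array standing in for the page table):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ATTR_AF (1u << 10)  /* stands in for KVM_PTE_LEAF_ATTR_LO_S2_AF */

struct age_data {
    bool mkold;  /* clear the flag rather than just test it */
    bool young;  /* set if any visited entry had the flag */
};

static void age_walker(uint64_t *pte, struct age_data *data)
{
    uint64_t new = *pte & ~(uint64_t)ATTR_AF;

    if (new == *pte)  /* flag already clear: nothing to report */
        return;

    data->young = true;
    if (data->mkold)
        *pte = new;
}

static bool test_clear_young(uint64_t *ptes, size_t n, bool mkold)
{
    struct age_data data = { .mkold = mkold };

    for (size_t i = 0; i < n; i++)
        age_walker(&ptes[i], &data);
    return data.young;
}

int main(void)
{
    uint64_t ptes[] = { 0x1000 | ATTR_AF, 0x2000, 0x3000 | ATTR_AF };

    printf("young (test only): %d\n", test_clear_young(ptes, 3, false));
    printf("young (clearing):  %d\n", test_clear_young(ptes, 3, true));
    printf("young after clear: %d\n", test_clear_young(ptes, 3, false));
    return 0;
}

The payoff is the same as in the diff below: both the "age" and the "test age" paths collapse into one walker, differing only in the mkold argument.
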
@@ -1756,27 +1756,25 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
     u64 size = (range->end - range->start) << PAGE_SHIFT;
-    kvm_pte_t kpte;
-    pte_t pte;

     if (!kvm->arch.mmu.pgt)
         return false;

-    WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
-
-    kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
-                                    range->start << PAGE_SHIFT);
-    pte = __pte(kpte);
-    return pte_valid(pte) && pte_young(pte);
+    return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
+                                               range->start << PAGE_SHIFT,
+                                               size, true);
 }

 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
     u64 size = (range->end - range->start) << PAGE_SHIFT;

     if (!kvm->arch.mmu.pgt)
         return false;

-    return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
-                                       range->start << PAGE_SHIFT);
+    return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
+                                               range->start << PAGE_SHIFT,
+                                               size, false);
 }

 phys_addr_t kvm_mmu_get_httbr(void)

@@ -244,7 +244,7 @@ static int __init finalize_pkvm(void)
 {
     int ret;

-    if (!is_protected_kvm_enabled())
+    if (!is_protected_kvm_enabled() || !is_kvm_arm_initialised())
         return 0;

     /*

@@ -986,7 +986,6 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,

     if (p->is_write) {
         kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
-        __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
         kvm_vcpu_pmu_restore_guest(vcpu);
     } else {
         p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
@@ -1115,18 +1114,19 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 { SYS_DESC(SYS_DBGWCRn_EL1(n)),                                   \
     trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

-#define PMU_SYS_REG(r)                                            \
-    SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility
+#define PMU_SYS_REG(name)                                         \
+    SYS_DESC(SYS_##name), .reset = reset_pmu_reg,                 \
+    .visibility = pmu_visibility

 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n)                                       \
-    { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),                          \
+    { PMU_SYS_REG(PMEVCNTRn_EL0(n)),                              \
       .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,        \
       .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

 /* Macro to expand the PMEVTYPERn_EL0 register */
 #define PMU_PMEVTYPER_EL0(n)                                      \
-    { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),                         \
+    { PMU_SYS_REG(PMEVTYPERn_EL0(n)),                             \
       .reset = reset_pmevtyper,                                   \
       .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

@@ -2115,9 +2115,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
     { SYS_DESC(SYS_PMBSR_EL1), undef_access },
     /* PMBIDR_EL1 is not trapped */

-    { PMU_SYS_REG(SYS_PMINTENSET_EL1),
+    { PMU_SYS_REG(PMINTENSET_EL1),
       .access = access_pminten, .reg = PMINTENSET_EL1 },
-    { PMU_SYS_REG(SYS_PMINTENCLR_EL1),
+    { PMU_SYS_REG(PMINTENCLR_EL1),
       .access = access_pminten, .reg = PMINTENSET_EL1 },
     { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

@@ -2164,41 +2164,41 @@ static const struct sys_reg_desc sys_reg_descs[] = {
     { SYS_DESC(SYS_CTR_EL0), access_ctr },
     { SYS_DESC(SYS_SVCR), undef_access },

-    { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
+    { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr,
       .reset = reset_pmcr, .reg = PMCR_EL0 },
-    { PMU_SYS_REG(SYS_PMCNTENSET_EL0),
+    { PMU_SYS_REG(PMCNTENSET_EL0),
       .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
-    { PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
+    { PMU_SYS_REG(PMCNTENCLR_EL0),
      .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
-    { PMU_SYS_REG(SYS_PMOVSCLR_EL0),
+    { PMU_SYS_REG(PMOVSCLR_EL0),
       .access = access_pmovs, .reg = PMOVSSET_EL0 },
     /*
     * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
     * previously (and pointlessly) advertised in the past...
     */
-    { PMU_SYS_REG(SYS_PMSWINC_EL0),
+    { PMU_SYS_REG(PMSWINC_EL0),
       .get_user = get_raz_reg, .set_user = set_wi_reg,
       .access = access_pmswinc, .reset = NULL },
-    { PMU_SYS_REG(SYS_PMSELR_EL0),
+    { PMU_SYS_REG(PMSELR_EL0),
       .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
-    { PMU_SYS_REG(SYS_PMCEID0_EL0),
+    { PMU_SYS_REG(PMCEID0_EL0),
       .access = access_pmceid, .reset = NULL },
-    { PMU_SYS_REG(SYS_PMCEID1_EL0),
+    { PMU_SYS_REG(PMCEID1_EL0),
       .access = access_pmceid, .reset = NULL },
-    { PMU_SYS_REG(SYS_PMCCNTR_EL0),
+    { PMU_SYS_REG(PMCCNTR_EL0),
       .access = access_pmu_evcntr, .reset = reset_unknown,
       .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
-    { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+    { PMU_SYS_REG(PMXEVTYPER_EL0),
       .access = access_pmu_evtyper, .reset = NULL },
-    { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+    { PMU_SYS_REG(PMXEVCNTR_EL0),
       .access = access_pmu_evcntr, .reset = NULL },
     /*
     * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
     * in 32bit mode. Here we choose to reset it as zero for consistency.
     */
-    { PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
+    { PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
       .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
-    { PMU_SYS_REG(SYS_PMOVSSET_EL0),
+    { PMU_SYS_REG(PMOVSSET_EL0),
       .access = access_pmovs, .reg = PMOVSSET_EL0 },

     { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
@@ -2354,7 +2354,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
     * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
     * in 32bit mode. Here we choose to reset it as zero for consistency.
     */
-    { PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
+    { PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
       .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },

     EL2_REG(VPIDR_EL2, access_rw, reset_unknown, 0),

@@ -749,7 +749,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
 {
     struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

-    WARN_ON(vgic_v4_put(vcpu, false));
+    WARN_ON(vgic_v4_put(vcpu));

     vgic_v3_vmcr_sync(vcpu);

@@ -336,14 +336,14 @@ void vgic_v4_teardown(struct kvm *kvm)
     its_vm->vpes = NULL;
 }

-int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
+int vgic_v4_put(struct kvm_vcpu *vcpu)
 {
     struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

     if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
         return 0;

-    return its_make_vpe_non_resident(vpe, need_db);
+    return its_make_vpe_non_resident(vpe, !!vcpu_get_flag(vcpu, IN_WFI));
 }

 int vgic_v4_load(struct kvm_vcpu *vcpu)
@@ -354,6 +354,9 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
     if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
         return 0;

+    if (vcpu_get_flag(vcpu, IN_WFI))
+        return 0;
+
     /*
      * Before making the VPE resident, make sure the redistributor
      * corresponding to our current CPU expects us here. See the

@@ -24,6 +24,7 @@
 #include <linux/bug.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
+#include <linux/kfence.h>

 static void *trans_alloc(struct trans_pgd_info *info)
 {
@@ -41,7 +42,8 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
         * the temporary mappings we use during restore.
         */
        set_pte(dst_ptep, pte_mkwrite(pte));
-    } else if (debug_pagealloc_enabled() && !pte_none(pte)) {
+    } else if ((debug_pagealloc_enabled() ||
+               is_kfence_address((void *)addr)) && !pte_none(pte)) {
        /*
         * debug_pagealloc will removed the PTE_VALID bit if
         * the page isn't in use by the resume kernel. It may have

@@ -322,7 +322,13 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
     *
     */

-    emit_bti(A64_BTI_C, ctx);
+    /* bpf function may be invoked by 3 instruction types:
+     * 1. bl, attached via freplace to bpf prog via short jump
+     * 2. br, attached via freplace to bpf prog via long jump
+     * 3. blr, working as a function pointer, used by emit_call.
+     * So BTI_JC should used here to support both br and blr.
+     */
+    emit_bti(A64_BTI_JC, ctx);

     emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
     emit(A64_NOP, ctx);

@@ -2017,7 +2017,7 @@ Field   0       SM
 EndSysreg

 SysregFields    HFGxTR_EL2
-Field   63      nAMIAIR2_EL1
+Field   63      nAMAIR2_EL1
 Field   62      nMAIR2_EL1
 Field   61      nS2POR_EL1
 Field   60      nPOR_EL1
@@ -2032,9 +2032,9 @@ Field   52      nGCS_EL0
 Res0    51
 Field   50      nACCDATA_EL1
 Field   49      ERXADDR_EL1
-Field   48      EXRPFGCDN_EL1
-Field   47      EXPFGCTL_EL1
-Field   46      EXPFGF_EL1
+Field   48      ERXPFGCDN_EL1
+Field   47      ERXPFGCTL_EL1
+Field   46      ERXPFGF_EL1
 Field   45      ERXMISCn_EL1
 Field   44      ERXSTATUS_EL1
 Field   43      ERXCTLR_EL1
@@ -2049,8 +2049,8 @@ Field   35      TPIDR_EL0
 Field   34      TPIDRRO_EL0
 Field   33      TPIDR_EL1
 Field   32      TCR_EL1
-Field   31      SCTXNUM_EL0
-Field   30      SCTXNUM_EL1
+Field   31      SCXTNUM_EL0
+Field   30      SCXTNUM_EL1
 Field   29      SCTLR_EL1
 Field   28      REVIDR_EL1
 Field   27      PAR_EL1

@@ -63,7 +63,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
     info.low_limit = addr;
     info.high_limit = TASK_SIZE;
     info.align_mask = align_mask;
-    info.align_offset = 0;
+    info.align_offset = pgoff << PAGE_SHIFT;
     return vm_unmapped_area(&info);
 }

@@ -27,12 +27,17 @@
 #include <linux/elf-randomize.h>

 /*
- * Construct an artificial page offset for the mapping based on the physical
+ * Construct an artificial page offset for the mapping based on the virtual
 * address of the kernel file mapping variable.
+ * If filp is zero the calculated pgoff value aliases the memory of the given
+ * address. This is useful for io_uring where the mapping shall alias a kernel
+ * address and a userspace adress where both the kernel and the userspace
+ * access the same memory region.
 */
-#define GET_FILP_PGOFF(filp)                              \
-    (filp ? (((unsigned long) filp->f_mapping) >> 8)      \
-     & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)
+#define GET_FILP_PGOFF(filp, addr)                        \
+    ((filp ? (((unsigned long) filp->f_mapping) >> 8)     \
+     & ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)              \
+      + (addr >> PAGE_SHIFT))

 static unsigned long shared_align_offset(unsigned long filp_pgoff,
                                          unsigned long pgoff)
@@ -112,7 +117,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
     do_color_align = 0;
     if (filp || (flags & MAP_SHARED))
         do_color_align = 1;
-    filp_pgoff = GET_FILP_PGOFF(filp);
+    filp_pgoff = GET_FILP_PGOFF(filp, addr);

     if (flags & MAP_FIXED) {
         /* Even MAP_FIXED mappings must reside within TASK_SIZE */

arch/powerpc/crypto/.gitignore (vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+aesp10-ppc.S
+ghashp10-ppc.S

@@ -4,14 +4,13 @@
 #ifdef __KERNEL__

 #include <asm/asm-compat.h>
-#include <asm/extable.h>

 #ifdef CONFIG_BUG

 #ifdef __ASSEMBLY__
 #include <asm/asm-offsets.h>
 #ifdef CONFIG_DEBUG_BUGVERBOSE
-.macro __EMIT_BUG_ENTRY addr,file,line,flags
+.macro EMIT_BUG_ENTRY addr,file,line,flags
     .section __bug_table,"aw"
5001:    .4byte \addr - .
     .4byte 5002f - .
@@ -23,7 +22,7 @@
     .previous
 .endm
 #else
-.macro __EMIT_BUG_ENTRY addr,file,line,flags
+.macro EMIT_BUG_ENTRY addr,file,line,flags
     .section __bug_table,"aw"
5001:    .4byte \addr - .
     .short \flags
@@ -32,18 +31,6 @@
 .endm
 #endif /* verbose */

-.macro EMIT_WARN_ENTRY addr,file,line,flags
-    EX_TABLE(\addr,\addr+4)
-    __EMIT_BUG_ENTRY \addr,\file,\line,\flags
-.endm
-
-.macro EMIT_BUG_ENTRY addr,file,line,flags
-    .if \flags & 1 /* BUGFLAG_WARNING */
-    .err /* Use EMIT_WARN_ENTRY for warnings */
-    .endif
-    __EMIT_BUG_ENTRY \addr,\file,\line,\flags
-.endm
-
 #else /* !__ASSEMBLY__ */
 /* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and
   sizeof(struct bug_entry), respectively */
@@ -73,16 +60,6 @@
       "i" (sizeof(struct bug_entry)),           \
       ##__VA_ARGS__)

-#define WARN_ENTRY(insn, flags, label, ...)     \
-    asm_volatile_goto(                          \
-        "1: " insn "\n"                         \
-        EX_TABLE(1b, %l[label])                 \
-        _EMIT_BUG_ENTRY                         \
-        : : "i" (__FILE__), "i" (__LINE__),     \
-          "i" (flags),                          \
-          "i" (sizeof(struct bug_entry)),       \
-          ##__VA_ARGS__ : : label)
-
 /*
 * BUG_ON() and WARN_ON() do their best to cooperate with compile-time
 * optimisations. However depending on the complexity of the condition
@@ -95,16 +72,7 @@
 } while (0)
 #define HAVE_ARCH_BUG

-#define __WARN_FLAGS(flags) do {                \
-    __label__ __label_warn_on;                  \
-                                                \
-    WARN_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags), __label_warn_on); \
-    barrier_before_unreachable();               \
-    __builtin_unreachable();                    \
-                                                \
-__label_warn_on:                                \
-    break;                                      \
-} while (0)
+#define __WARN_FLAGS(flags) BUG_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags))

 #ifdef CONFIG_PPC64
 #define BUG_ON(x) do {                          \
@@ -117,25 +85,15 @@
 } while (0)

 #define WARN_ON(x) ({                           \
-    bool __ret_warn_on = false;                 \
-    do {                                        \
-        if (__builtin_constant_p((x))) {        \
-            if (!(x))                           \
-                break;                          \
+    int __ret_warn_on = !!(x);                  \
+    if (__builtin_constant_p(__ret_warn_on)) {  \
+        if (__ret_warn_on)                      \
             __WARN();                           \
-            __ret_warn_on = true;               \
-        } else {                                \
-            __label__ __label_warn_on;          \
-                                                \
-            WARN_ENTRY(PPC_TLNEI " %4, 0",      \
-                       BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN), \
-                       __label_warn_on,         \
-                       "r" ((__force long)(x)));  \
-            break;                              \
-__label_warn_on:                                \
-            __ret_warn_on = true;               \
-        }                                       \
-    } while (0);                                \
+    } else {                                    \
+        BUG_ENTRY(PPC_TLNEI " %4, 0",           \
+                  BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN), \
+                  "r" (__ret_warn_on));         \
+    }                                           \
     unlikely(__ret_warn_on);                    \
 })

@@ -148,14 +106,13 @@
 #ifdef __ASSEMBLY__
 .macro EMIT_BUG_ENTRY addr,file,line,flags
 .endm
-.macro EMIT_WARN_ENTRY addr,file,line,flags
-.endm
 #else /* !__ASSEMBLY__ */
 #define _EMIT_BUG_ENTRY
-#define _EMIT_WARN_ENTRY
 #endif
 #endif /* CONFIG_BUG */

+#define EMIT_WARN_ENTRY EMIT_BUG_ENTRY
+
 #include <asm-generic/bug.h>

 #ifndef __ASSEMBLY__

@@ -12,14 +12,8 @@

 /*
 * This is used to ensure we don't load something for the wrong architecture.
- * 64le only supports ELFv2 64-bit binaries (64be supports v1 and v2).
 */
-#if defined(CONFIG_PPC64) && defined(CONFIG_CPU_LITTLE_ENDIAN)
-#define elf_check_arch(x) (((x)->e_machine == ELF_ARCH) && \
-                           (((x)->e_flags & 0x3) == 0x2))
-#else
 #define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
-#endif
 #define compat_elf_check_arch(x) ((x)->e_machine == EM_PPC)

 #define CORE_DUMP_USE_REGSET

@@ -183,13 +183,9 @@ static inline bool test_thread_local_flags(unsigned int flags)
 #define clear_tsk_compat_task(tsk) do { } while (0)
 #endif

-#ifdef CONFIG_PPC64
-#ifdef CONFIG_CPU_BIG_ENDIAN
+#if defined(CONFIG_PPC64)
 #define is_elf2_task() (test_thread_flag(TIF_ELF2ABI))
 #else
-#define is_elf2_task() (1)
-#endif
-#else
 #define is_elf2_task() (0)
 #endif

@@ -1508,13 +1508,8 @@ static void do_program_check(struct pt_regs *regs)

         if (!(regs->msr & MSR_PR) &&  /* not user-mode */
             report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
-            const struct exception_table_entry *entry;
-
-            entry = search_exception_tables(bugaddr);
-            if (entry) {
-                regs_set_return_ip(regs, extable_fixup(entry) + regs->nip - bugaddr);
-                return;
-            }
+            regs_add_return_ip(regs, 4);
+            return;
         }

         if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE) && user_mode(regs)) {

@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0

 KASAN_SANITIZE := n
+KCOV_INSTRUMENT := n

 obj-$(CONFIG_PPC32) += init_32.o
 obj-$(CONFIG_PPC_8xx) += 8xx.o

@@ -477,7 +477,7 @@ static int mpc512x_lpbfifo_probe(struct platform_device *pdev)
     return ret;
 }

-static int mpc512x_lpbfifo_remove(struct platform_device *pdev)
+static void mpc512x_lpbfifo_remove(struct platform_device *pdev)
 {
     unsigned long flags;
     struct dma_device *dma_dev = lpbfifo.chan->device;
@@ -494,8 +494,6 @@ static int mpc512x_lpbfifo_remove(struct platform_device *pdev)
     free_irq(lpbfifo.irq, &pdev->dev);
     irq_dispose_mapping(lpbfifo.irq);
     dma_release_channel(lpbfifo.chan);
-
-    return 0;
 }

 static const struct of_device_id mpc512x_lpbfifo_match[] = {
@@ -506,7 +504,7 @@ MODULE_DEVICE_TABLE(of, mpc512x_lpbfifo_match);

 static struct platform_driver mpc512x_lpbfifo_driver = {
     .probe = mpc512x_lpbfifo_probe,
-    .remove = mpc512x_lpbfifo_remove,
+    .remove_new = mpc512x_lpbfifo_remove,
     .driver = {
         .name = DRV_NAME,
         .of_match_table = mpc512x_lpbfifo_match,

@@ -744,6 +744,12 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
         }

         task_ref = &win->vas_win.task_ref;
+        /*
+         * VAS mmap (coproc_mmap()) and its fault handler
+         * (vas_mmap_fault()) are called after holding mmap lock.
+         * So hold mmap mutex after mmap_lock to avoid deadlock.
+         */
+        mmap_write_lock(task_ref->mm);
         mutex_lock(&task_ref->mmap_mutex);
         vma = task_ref->vma;
         /*
@@ -752,7 +758,6 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
         */
         win->vas_win.status |= flag;

-        mmap_write_lock(task_ref->mm);
         /*
         * vma is set in the original mapping. But this mapping
         * is done with mmap() after the window is opened with ioctl.
@@ -762,8 +767,8 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
         if (vma)
             zap_vma_pages(vma);

-        mmap_write_unlock(task_ref->mm);
         mutex_unlock(&task_ref->mmap_mutex);
+        mmap_write_unlock(task_ref->mm);
         /*
         * Close VAS window in the hypervisor, but do not
         * free vas_window struct since it may be reused

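The VAS fix above is a lock-ordering change: the fault path already takes mmap_lock before mmap_mutex, so the close path must do the same, and release in reverse. A small pthreads sketch of the rule being enforced (illustrative names only, not the kernel primitives):

#include <pthread.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;    /* outer, like mmap_lock */
static pthread_mutex_t inner_mutex = PTHREAD_MUTEX_INITIALIZER; /* inner, like mmap_mutex */

/* Every path takes map_lock first, then inner_mutex... */
static void close_window(void)
{
    pthread_mutex_lock(&map_lock);
    pthread_mutex_lock(&inner_mutex);
    /* ... zap pages, update window state ... */
    pthread_mutex_unlock(&inner_mutex);
    pthread_mutex_unlock(&map_lock);
}

/* ...because the fault path already holds map_lock when it takes
 * inner_mutex; acquiring them in the opposite order elsewhere can
 * deadlock two threads against each other. */
static void fault_path(void)
{
    pthread_mutex_lock(&map_lock);
    pthread_mutex_lock(&inner_mutex);
    /* ... handle fault ... */
    pthread_mutex_unlock(&inner_mutex);
    pthread_mutex_unlock(&map_lock);
}
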
@@ -103,7 +103,7 @@ static inline void _free_kb_keybuf(struct key_blob *kb)
 {
     if (kb->key && kb->key != kb->keybuf
         && kb->keylen > sizeof(kb->keybuf)) {
-        kfree(kb->key);
+        kfree_sensitive(kb->key);
         kb->key = NULL;
     }
 }

@@ -411,8 +411,12 @@ int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc)
     u16 _rc, _rrc;
     int cc = 0;

-    /* Make sure the counter does not reach 0 before calling s390_uv_destroy_range */
-    atomic_inc(&kvm->mm->context.protected_count);
+    /*
+     * Nothing to do if the counter was already 0. Otherwise make sure
+     * the counter does not reach 0 before calling s390_uv_destroy_range.
+     */
+    if (!atomic_inc_not_zero(&kvm->mm->context.protected_count))
+        return 0;

     *rc = 1;
     /* If the current VM is protected, destroy it */

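atomic_inc_not_zero() only takes a reference while the counter is still live, which is what lets the early-return path above skip the cleanup entirely instead of reviving a dead count. A C11 sketch of those semantics as a compare-and-swap loop (the kernel's implementation is arch-specific; this only illustrates the contract):

#include <stdatomic.h>
#include <stdbool.h>

/* Increment *v unless it is zero; return true if we incremented. */
static bool inc_not_zero(atomic_int *v)
{
    int old = atomic_load(v);

    while (old != 0) {
        /* On failure, 'old' is reloaded with the current value. */
        if (atomic_compare_exchange_weak(v, &old, old + 1))
            return true;
    }
    return false;
}
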
@@ -421,6 +421,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
     vma_end_read(vma);
     if (!(fault & VM_FAULT_RETRY)) {
         count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+        if (likely(!(fault & VM_FAULT_ERROR)))
+            fault = 0;
         goto out;
     }
     count_vm_vma_lock_event(VMA_LOCK_RETRY);

@@ -2853,6 +2853,7 @@ int s390_replace_asce(struct gmap *gmap)
     page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
     if (!page)
         return -ENOMEM;
+    page->index = 0;
     table = page_to_virt(page);
     memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));

@@ -1144,8 +1144,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 {
     if (!list_empty(&plug->cb_list))
         flush_plug_callbacks(plug, from_schedule);
-    if (!rq_list_empty(plug->mq_list))
-        blk_mq_flush_plug_list(plug, from_schedule);
+    blk_mq_flush_plug_list(plug, from_schedule);
     /*
      * Unconditionally flush out cached requests, even if the unplug
      * event came from schedule. Since we know hold references to the

@@ -2516,6 +2516,10 @@ static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
     u64 seek_pages = 0;
     u64 cost = 0;

+    /* Can't calculate cost for empty bio */
+    if (!bio->bi_iter.bi_size)
+        goto out;
+
     switch (bio_op(bio)) {
     case REQ_OP_READ:
         coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];

@@ -2754,7 +2754,14 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
     struct request *rq;

-    if (rq_list_empty(plug->mq_list))
+    /*
+     * We may have been called recursively midway through handling
+     * plug->mq_list via a schedule() in the driver's queue_rq() callback.
+     * To avoid mq_list changing under our feet, clear rq_count early and
+     * bail out specifically if rq_count is 0 rather than checking
+     * whether the mq_list is empty.
+     */
+    if (plug->rq_count == 0)
         return;
     plug->rq_count = 0;

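The guard works by consuming the shared state before processing it: rq_count is zeroed up front, so a recursive entry observes zero and bails instead of re-walking a list that is being modified underneath it. The shape of that idiom in a standalone sketch (hypothetical structures, not the block-layer types):

#include <stddef.h>

struct request {
    struct request *next;
};

struct plug {
    struct request *head;
    unsigned int rq_count;
};

static void dispatch(struct request *rq)
{
    (void)rq;  /* may re-enter flush_plug() via a blocking driver path */
}

static void flush_plug(struct plug *plug)
{
    struct request *rq;

    /* Claim the whole batch up front: a recursive caller now sees
     * rq_count == 0 and returns without touching the list. */
    if (plug->rq_count == 0)
        return;
    plug->rq_count = 0;

    while ((rq = plug->head)) {
        plug->head = rq->next;
        dispatch(rq);
    }
}
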
@@ -3980,6 +3980,15 @@ static inline void hl_debugfs_fini(void)
 {
 }

+static inline int hl_debugfs_device_init(struct hl_device *hdev)
+{
+    return 0;
+}
+
+static inline void hl_debugfs_device_fini(struct hl_device *hdev)
+{
+}
+
 static inline void hl_debugfs_add_device(struct hl_device *hdev)
 {
 }

@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/moduleparam.h>
 #include <linux/mutex.h>
+#include <linux/overflow.h>
 #include <linux/pci.h>
 #include <linux/scatterlist.h>
 #include <linux/types.h>
@@ -366,7 +367,7 @@ static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrap
     if (in_trans->hdr.len % 8 != 0)
         return -EINVAL;

-    if (msg_hdr_len + in_trans->hdr.len > QAIC_MANAGE_EXT_MSG_LENGTH)
+    if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_EXT_MSG_LENGTH)
         return -ENOSPC;

     trans_wrapper = add_wrapper(wrappers,
@@ -418,9 +419,12 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
     }

     ret = get_user_pages_fast(xfer_start_addr, nr_pages, 0, page_list);
-    if (ret < 0 || ret != nr_pages) {
-        ret = -EFAULT;
+    if (ret < 0)
         goto free_page_list;
+    if (ret != nr_pages) {
+        nr_pages = ret;
+        ret = -EFAULT;
+        goto put_pages;
     }

     sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
@@ -557,11 +561,8 @@ static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list
     msg = &wrapper->msg;
     msg_hdr_len = le32_to_cpu(msg->hdr.len);

-    if (msg_hdr_len > (UINT_MAX - QAIC_MANAGE_EXT_MSG_LENGTH))
-        return -EINVAL;
-
     /* There should be enough space to hold at least one ASP entry. */
-    if (msg_hdr_len + sizeof(*out_trans) + sizeof(struct wire_addr_size_pair) >
+    if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) >
         QAIC_MANAGE_EXT_MSG_LENGTH)
         return -ENOMEM;

@@ -634,7 +635,7 @@ static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper
     msg = &wrapper->msg;
     msg_hdr_len = le32_to_cpu(msg->hdr.len);

-    if (msg_hdr_len + sizeof(*out_trans) > QAIC_MANAGE_MAX_MSG_LENGTH)
+    if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_MAX_MSG_LENGTH)
         return -ENOSPC;

     if (!in_trans->queue_size)
@@ -718,7 +719,7 @@ static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_l
     msg = &wrapper->msg;
     msg_hdr_len = le32_to_cpu(msg->hdr.len);

-    if (msg_hdr_len + in_trans->hdr.len > QAIC_MANAGE_MAX_MSG_LENGTH)
+    if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_MAX_MSG_LENGTH)
         return -ENOSPC;

     trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
@@ -748,7 +749,8 @@ static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
     int ret;
     int i;

-    if (!user_msg->count) {
+    if (!user_msg->count ||
+        user_msg->len < sizeof(*trans_hdr)) {
         ret = -EINVAL;
         goto out;
     }
@@ -765,12 +767,13 @@ static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
     }

     for (i = 0; i < user_msg->count; ++i) {
-        if (user_len >= user_msg->len) {
+        if (user_len > user_msg->len - sizeof(*trans_hdr)) {
             ret = -EINVAL;
             break;
         }
         trans_hdr = (struct qaic_manage_trans_hdr *)(user_msg->data + user_len);
-        if (user_len + trans_hdr->len > user_msg->len) {
+        if (trans_hdr->len < sizeof(trans_hdr) ||
+            size_add(user_len, trans_hdr->len) > user_msg->len) {
             ret = -EINVAL;
             break;
         }
@@ -953,15 +956,23 @@ static int decode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
     int ret;
     int i;

-    if (msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH)
+    if (msg_hdr_len < sizeof(*trans_hdr) ||
+        msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH)
         return -EINVAL;

     user_msg->len = 0;
     user_msg->count = le32_to_cpu(msg->hdr.count);

     for (i = 0; i < user_msg->count; ++i) {
+        u32 hdr_len;
+
+        if (msg_len > msg_hdr_len - sizeof(*trans_hdr))
+            return -EINVAL;
+
         trans_hdr = (struct wire_trans_hdr *)(msg->data + msg_len);
-        if (msg_len + le32_to_cpu(trans_hdr->len) > msg_hdr_len)
+        hdr_len = le32_to_cpu(trans_hdr->len);
+        if (hdr_len < sizeof(*trans_hdr) ||
+            size_add(msg_len, hdr_len) > msg_hdr_len)
             return -EINVAL;

         switch (le32_to_cpu(trans_hdr->type)) {

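Each of these checks replaces a plain `a + b > LIMIT` with `size_add(a, b) > LIMIT` because the plain sum can wrap when one operand is attacker-controlled, sneaking under the limit. The kernel helper from <linux/overflow.h> saturates to SIZE_MAX instead of wrapping; a user-space sketch of the same semantics:

#include <stdint.h>
#include <stdio.h>

/* Saturating add: never wraps, so "sum > LIMIT" checks stay honest. */
static size_t size_add_sat(size_t a, size_t b)
{
    size_t sum = a + b;

    return sum < a ? SIZE_MAX : sum;  /* wrapped? saturate instead */
}

int main(void)
{
    const size_t limit = 16384;               /* e.g. a max message length */
    size_t hdr = 64, payload = SIZE_MAX - 8;  /* hostile length field */

    /* The naive check wraps (64 + (SIZE_MAX-8) == 55) and wrongly passes: */
    printf("naive:      %s\n", hdr + payload > limit ? "rejected" : "ACCEPTED!");
    /* The saturating check rejects as intended: */
    printf("saturating: %s\n", size_add_sat(hdr, payload) > limit ? "rejected" : "accepted");
    return 0;
}
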
@@ -139,4 +139,6 @@ static struct pi_protocol aten = {
 };

 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Grant R. Guenther <grant@torque.net>");
+MODULE_DESCRIPTION("ATEN EH-100 parallel port IDE adapter protocol driver");
 module_pata_parport_driver(aten);

@@ -502,4 +502,6 @@ static struct pi_protocol bpck = {
 };

 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Grant R. Guenther <grant@torque.net>");
+MODULE_DESCRIPTION("MicroSolutions BACKPACK parallel port IDE adapter protocol driver");
 module_pata_parport_driver(bpck);

@@ -459,5 +459,6 @@ static struct pi_protocol bpck6 = {

 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Micro Solutions Inc.");
-MODULE_DESCRIPTION("BACKPACK Protocol module, compatible with PARIDE");
+MODULE_DESCRIPTION("Micro Solutions BACKPACK parallel port IDE adapter "
+                   "(version 6 drives) protocol driver");
 module_pata_parport_driver(bpck6);

@@ -201,4 +201,6 @@ static struct pi_protocol comm = {
 };

 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Grant R. Guenther <grant@torque.net>");
+MODULE_DESCRIPTION("DataStor Commuter parallel port IDE adapter protocol driver");
 module_pata_parport_driver(comm);

@@ -230,4 +230,6 @@ static struct pi_protocol dstr = {
 };

 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Grant R. Guenther <grant@torque.net>");
+MODULE_DESCRIPTION("DataStor EP2000 parallel port IDE adapter protocol driver");
 module_pata_parport_driver(dstr);

@@ -358,5 +358,8 @@ static void __exit epat_exit(void)
 }

 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Grant R. Guenther <grant@torque.net>");
+MODULE_DESCRIPTION("Shuttle Technologies EPAT parallel port IDE adapter "
+                   "protocol driver");
 module_init(epat_init)
 module_exit(epat_exit)

@@ -306,4 +306,7 @@ static struct pi_protocol epia = {
 };

 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Grant R. Guenther <grant@torque.net>");
+MODULE_DESCRIPTION("Shuttle Technologies EPIA parallel port IDE adapter "
+                   "protocol driver");
 module_pata_parport_driver(epia);

@@ -132,4 +132,7 @@ static struct pi_protocol fit2 = {
 };

 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Grant R. Guenther <grant@torque.net>");
+MODULE_DESCRIPTION("Fidelity International Technology parallel port IDE adapter"
+                   "(older models) protocol driver");
 module_pata_parport_driver(fit2);

@@ -193,4 +193,7 @@ static struct pi_protocol fit3 = {
 };

 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Grant R. Guenther <grant@torque.net>");
+MODULE_DESCRIPTION("Fidelity International Technology parallel port IDE adapter"
+                   "(newer models) protocol driver");
 module_pata_parport_driver(fit3);

@@ -259,4 +259,6 @@ static struct pi_protocol friq = {
 };

 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Grant R. Guenther <grant@torque.net>");
+MODULE_DESCRIPTION("Freecom IQ parallel port IDE adapter protocol driver");
 module_pata_parport_driver(friq);

@@ -293,4 +293,6 @@ static struct pi_protocol frpw = {
 };

 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Grant R. Guenther <grant@torque.net>");
+MODULE_DESCRIPTION("Freecom Power parallel port IDE adapter protocol driver");
 module_pata_parport_driver(frpw);

@@ -301,5 +301,8 @@ static void __exit kbic_exit(void)
 }

 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Grant R. Guenther <grant@torque.net>");
+MODULE_DESCRIPTION("KingByte Information Systems KBIC-951A and KBIC-971A "
+                   "parallel port IDE adapter protocol driver");
 module_init(kbic_init)
 module_exit(kbic_exit)

@@ -106,4 +106,6 @@ static struct pi_protocol ktti = {
 };

 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Grant R. Guenther <grant@torque.net>");
+MODULE_DESCRIPTION("KT Technology parallel port IDE adapter protocol driver");
 module_pata_parport_driver(ktti);

@@ -142,4 +142,6 @@ static struct pi_protocol on20 = {
 };

 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Grant R. Guenther <grant@torque.net>");
+MODULE_DESCRIPTION("Onspec 90c20 parallel port IDE adapter protocol driver");
 module_pata_parport_driver(on20);

@@ -310,4 +310,6 @@ static struct pi_protocol on26 = {
 };

 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Grant R. Guenther <grant@torque.net>");
+MODULE_DESCRIPTION("Onspec 90c26 parallel port IDE adapter protocol driver");
 module_pata_parport_driver(on26);

@@ -471,6 +471,8 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
     unsigned int start, end;
     int ret;

+    map->async = true;
+
     rbtree_ctx = map->cache;
     for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
         rbnode = rb_entry(node, struct regcache_rbtree_node, node);
@@ -499,6 +501,8 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
             return ret;
     }

+    map->async = false;
+
     return regmap_async_complete(map);
 }

@@ -368,8 +368,6 @@ int regcache_sync(struct regmap *map)
     if (!map->cache_dirty)
         goto out;

-    map->async = true;
-
     /* Apply any patch first */
     map->cache_bypass = true;
     for (i = 0; i < map->patch_regs; i++) {
@@ -392,7 +390,6 @@ int regcache_sync(struct regmap *map)

 out:
     /* Restore the bypass state */
-    map->async = false;
     map->cache_bypass = bypass;
     map->no_sync_defaults = false;
     map->unlock(map->lock_arg);

@@ -242,8 +242,8 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
 static const struct regmap_bus regmap_i2c_smbus_i2c_block = {
     .write = regmap_i2c_smbus_i2c_write,
     .read = regmap_i2c_smbus_i2c_read,
-    .max_raw_read = I2C_SMBUS_BLOCK_MAX,
-    .max_raw_write = I2C_SMBUS_BLOCK_MAX,
+    .max_raw_read = I2C_SMBUS_BLOCK_MAX - 1,
+    .max_raw_write = I2C_SMBUS_BLOCK_MAX - 1,
 };

 static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data,
@@ -299,8 +299,8 @@ static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg,
 static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = {
     .write = regmap_i2c_smbus_i2c_write_reg16,
     .read = regmap_i2c_smbus_i2c_read_reg16,
-    .max_raw_read = I2C_SMBUS_BLOCK_MAX,
-    .max_raw_write = I2C_SMBUS_BLOCK_MAX,
+    .max_raw_read = I2C_SMBUS_BLOCK_MAX - 2,
+    .max_raw_write = I2C_SMBUS_BLOCK_MAX - 2,
 };

 static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,

@@ -58,6 +58,9 @@ static struct regmap *gen_regmap(struct regmap_config *config,
     int i;
     struct reg_default *defaults;

+    config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
+                              config->cache_type == REGCACHE_MAPLE;
+
     buf = kmalloc(size, GFP_KERNEL);
     if (!buf)
         return ERR_PTR(-ENOMEM);
@@ -889,6 +892,8 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config,

     config->cache_type = test_type->cache_type;
     config->val_format_endian = test_type->val_endian;
+    config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
+                              config->cache_type == REGCACHE_MAPLE;

     buf = kmalloc(size, GFP_KERNEL);
     if (!buf)

@@ -660,7 +660,7 @@ static const struct regmap_bus regmap_spi_avmm_bus = {
     .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
     .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
     .max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
-    .max_raw_write = SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
+    .max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
     .free_context = spi_avmm_bridge_ctx_free,
 };

@@ -2082,8 +2082,6 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
     size_t val_count = val_len / val_bytes;
     size_t chunk_count, chunk_bytes;
     size_t chunk_regs = val_count;
-    size_t max_data = map->max_raw_write - map->format.reg_bytes -
-                      map->format.pad_bytes;
     int ret, i;

     if (!val_count)
@@ -2091,8 +2089,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,

     if (map->use_single_write)
         chunk_regs = 1;
-    else if (map->max_raw_write && val_len > max_data)
-        chunk_regs = max_data / val_bytes;
+    else if (map->max_raw_write && val_len > map->max_raw_write)
+        chunk_regs = map->max_raw_write / val_bytes;

     chunk_count = val_count / chunk_regs;
     chunk_bytes = chunk_regs * val_bytes;

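The regmap fix stops subtracting the register and pad bytes a second time: with the bus limits above now expressed as usable payload, the chunk size is simply max_raw_write / val_bytes registers per transfer. A standalone sketch of that chunked-write split (hypothetical bus limit, print statements standing in for the actual bus writes):

#include <stddef.h>
#include <stdio.h>

/* Split a val_len-byte buffer into whole-register chunks no larger
 * than the bus's raw-write payload limit. */
static void chunked_write(size_t val_len, size_t val_bytes, size_t max_raw_write)
{
    size_t val_count = val_len / val_bytes;
    size_t chunk_regs = val_count;

    if (max_raw_write && val_len > max_raw_write)
        chunk_regs = max_raw_write / val_bytes;  /* registers per transfer */

    size_t chunk_count = val_count / chunk_regs;
    size_t chunk_bytes = chunk_regs * val_bytes;

    for (size_t i = 0; i < chunk_count; i++)
        printf("write %zu bytes\n", chunk_bytes);
    if (val_len % chunk_bytes)  /* trailing partial chunk */
        printf("write %zu bytes\n", val_len % chunk_bytes);
}

int main(void)
{
    chunked_write(100, 2, 32);  /* 50 regs, limit 32 bytes -> 16 regs/transfer */
    return 0;
}
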
@ -1775,14 +1775,43 @@ static const struct block_device_operations lo_fops = {
/*
* If max_loop is specified, create that many devices upfront.
* This also becomes a hard limit. If max_loop is not specified,
* the default isn't a hard limit (as before commit 85c50197716c
* changed the default value from 0 for max_loop=0 reasons), just
* create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
* init time. Loop devices can be requested on-demand with the
* /dev/loop-control interface, or be instantiated by accessing
* a 'dead' device node.
*/
static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
module_param(max_loop, int, 0444);

#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
static bool max_loop_specified;

static int max_loop_param_set_int(const char *val,
const struct kernel_param *kp)
{
int ret;

ret = param_set_int(val, kp);
if (ret < 0)
return ret;

max_loop_specified = true;
return 0;
}

static const struct kernel_param_ops max_loop_param_ops = {
.set = max_loop_param_set_int,
.get = param_get_int,
};

module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
#else
module_param(max_loop, int, 0444);
MODULE_PARM_DESC(max_loop, "Initial number of loop devices");
#endif

module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
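The loop change relies on a small, reusable pattern: wrap param_set_int in a custom kernel_param_ops so the module can distinguish "user passed the parameter" from "compiled-in default". A minimal sketch of the same pattern for a hypothetical parameter (the my_limit* names are illustrative, not from the tree):

#include <linux/module.h>
#include <linux/moduleparam.h>

static int my_limit;            /* hypothetical tunable */
static bool my_limit_specified; /* true only for an explicit value */

static int my_limit_set(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp); /* reuse stock int parsing */

	if (ret == 0)
		my_limit_specified = true; /* user supplied the parameter */
	return ret;
}

static const struct kernel_param_ops my_limit_ops = {
	.set = my_limit_set,
	.get = param_get_int,
};
module_param_cb(my_limit, &my_limit_ops, &my_limit, 0444);
MODULE_PARM_DESC(my_limit, "Hypothetical limit; default applies when unset");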
@ -2093,14 +2122,18 @@ static void loop_remove(struct loop_device *lo)
put_disk(lo->lo_disk);
}

#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
static void loop_probe(dev_t dev)
{
int idx = MINOR(dev) >> part_shift;

if (max_loop && idx >= max_loop)
if (max_loop_specified && max_loop && idx >= max_loop)
return;
loop_add(idx);
}
#else
#define loop_probe NULL
#endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */

static int loop_control_remove(int idx)
{
@ -2281,6 +2314,9 @@ module_exit(loop_exit);
static int __init max_loop_setup(char *str)
{
max_loop = simple_strtol(str, NULL, 0);
#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
max_loop_specified = true;
#endif
return 1;
}

@ -4104,6 +4104,7 @@ static int btusb_probe(struct usb_interface *intf,
BT_DBG("intf %p id %p", intf, id);

if ((id->driver_info & BTUSB_IFNUM_2) &&
(intf->cur_altsetting->desc.bInterfaceNumber != 0) &&
(intf->cur_altsetting->desc.bInterfaceNumber != 2))
return -ENODEV;
@ -518,6 +518,7 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
* 6.x.y.z series: 6.0.18.6 +
* 3.x.y.z series: 3.57.y.5 +
*/
#ifdef CONFIG_X86
static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
{
u32 val1, val2;
@ -566,6 +567,12 @@ static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)

return true;
}
#else
static inline bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
{
return false;
}
#endif /* CONFIG_X86 */

static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{

@ -563,15 +563,18 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
u32 rsp_size;
int ret;

INIT_LIST_HEAD(&acpi_resource_list);
ret = acpi_dev_get_resources(device, &acpi_resource_list,
crb_check_resource, iores_array);
if (ret < 0)
return ret;
acpi_dev_free_resource_list(&acpi_resource_list);

/* Pluton doesn't appear to define ACPI memory regions */
/*
* Pluton sometimes does not define ACPI memory regions.
* Mapping is then done in crb_map_pluton
*/
if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
INIT_LIST_HEAD(&acpi_resource_list);
ret = acpi_dev_get_resources(device, &acpi_resource_list,
crb_check_resource, iores_array);
if (ret < 0)
return ret;
acpi_dev_free_resource_list(&acpi_resource_list);

if (resource_type(iores_array) != IORESOURCE_MEM) {
dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
return -EINVAL;
@ -114,6 +114,22 @@ static int tpm_tis_disable_irq(const struct dmi_system_id *d)
}

static const struct dmi_system_id tpm_tis_dmi_table[] = {
{
.callback = tpm_tis_disable_irq,
.ident = "Framework Laptop (12th Gen Intel Core)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
DMI_MATCH(DMI_PRODUCT_NAME, "Laptop (12th Gen Intel Core)"),
},
},
{
.callback = tpm_tis_disable_irq,
.ident = "Framework Laptop (13th Gen Intel Core)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
DMI_MATCH(DMI_PRODUCT_NAME, "Laptop (13th Gen Intel Core)"),
},
},
{
.callback = tpm_tis_disable_irq,
.ident = "ThinkPad T490s",
@ -138,11 +154,20 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L490"),
},
},
{
.callback = tpm_tis_disable_irq,
.ident = "ThinkPad L590",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L590"),
},
},
{
.callback = tpm_tis_disable_irq,
.ident = "UPX-TGL",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
DMI_MATCH(DMI_PRODUCT_VERSION, "UPX-TGL"),
},
},
{}
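tpm_tis_dmi_table above is a standard DMI quirk list: each entry names a callback plus the vendor/product strings that must all match, and an empty entry terminates the table. A skeleton of the pattern with a hypothetical quirk (names are illustrative only):

#include <linux/dmi.h>

static int my_quirk(const struct dmi_system_id *d)
{
	/* Apply the workaround for the matched machine here. */
	return 1; /* non-zero stops scanning after the first match */
}

static const struct dmi_system_id my_quirk_table[] = {
	{
		.callback = my_quirk,
		.ident = "Example Laptop", /* hypothetical entry */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Example 1"),
		},
	},
	{}
};

/* Typically run once at init: dmi_check_system(my_quirk_table); */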
@ -24,9 +24,12 @@
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include <linux/dmi.h>
#include "tpm.h"
#include "tpm_tis_core.h"

#define TPM_TIS_MAX_UNHANDLED_IRQS 1000

static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value);

static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
@ -468,25 +471,29 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
return rc;
}

static void disable_interrupts(struct tpm_chip *chip)
static void __tpm_tis_disable_interrupts(struct tpm_chip *chip)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
u32 int_mask = 0;

tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &int_mask);
int_mask &= ~TPM_GLOBAL_INT_ENABLE;
tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), int_mask);

chip->flags &= ~TPM_CHIP_FLAG_IRQ;
}

static void tpm_tis_disable_interrupts(struct tpm_chip *chip)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
u32 intmask;
int rc;

if (priv->irq == 0)
return;

rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
if (rc < 0)
intmask = 0;

intmask &= ~TPM_GLOBAL_INT_ENABLE;
rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
__tpm_tis_disable_interrupts(chip);

devm_free_irq(chip->dev.parent, priv->irq, chip);
priv->irq = 0;
chip->flags &= ~TPM_CHIP_FLAG_IRQ;
}

/*
@ -552,7 +559,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
if (!test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
tpm_msleep(1);
if (!test_bit(TPM_TIS_IRQ_TESTED, &priv->flags))
disable_interrupts(chip);
tpm_tis_disable_interrupts(chip);
set_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
return rc;
}
@ -752,6 +759,57 @@ static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
return status == TPM_STS_COMMAND_READY;
}

static irqreturn_t tpm_tis_revert_interrupts(struct tpm_chip *chip)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
const char *product;
const char *vendor;

dev_warn(&chip->dev, FW_BUG
"TPM interrupt storm detected, polling instead\n");

vendor = dmi_get_system_info(DMI_SYS_VENDOR);
product = dmi_get_system_info(DMI_PRODUCT_VERSION);

if (vendor && product) {
dev_info(&chip->dev,
"Consider adding the following entry to tpm_tis_dmi_table:\n");
dev_info(&chip->dev, "\tDMI_SYS_VENDOR: %s\n", vendor);
dev_info(&chip->dev, "\tDMI_PRODUCT_VERSION: %s\n", product);
}

if (tpm_tis_request_locality(chip, 0) != 0)
return IRQ_NONE;

__tpm_tis_disable_interrupts(chip);
tpm_tis_relinquish_locality(chip, 0);

schedule_work(&priv->free_irq_work);

return IRQ_HANDLED;
}

static irqreturn_t tpm_tis_update_unhandled_irqs(struct tpm_chip *chip)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
irqreturn_t irqret = IRQ_HANDLED;

if (!(chip->flags & TPM_CHIP_FLAG_IRQ))
return IRQ_HANDLED;

if (time_after(jiffies, priv->last_unhandled_irq + HZ/10))
priv->unhandled_irqs = 1;
else
priv->unhandled_irqs++;

priv->last_unhandled_irq = jiffies;

if (priv->unhandled_irqs > TPM_TIS_MAX_UNHANDLED_IRQS)
irqret = tpm_tis_revert_interrupts(chip);

return irqret;
}
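tpm_tis_update_unhandled_irqs() above counts spurious interrupts within a sliding 100 ms window (HZ/10 jiffies) and only reverts to polling once the count passes TPM_TIS_MAX_UNHANDLED_IRQS. A minimal sketch of that windowed counter in isolation, with a hypothetical threshold:

#include <linux/jiffies.h>

#define MAX_BAD_EVENTS 1000 /* hypothetical threshold */

static unsigned long last_bad_event;
static unsigned int bad_events;

/* Returns true once more than MAX_BAD_EVENTS occur within ~100 ms. */
static bool bad_event_storm(void)
{
	if (time_after(jiffies, last_bad_event + HZ / 10))
		bad_events = 1; /* window expired: restart the count */
	else
		bad_events++;   /* still inside the window */

	last_bad_event = jiffies;
	return bad_events > MAX_BAD_EVENTS;
}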
static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
struct tpm_chip *chip = dev_id;
@ -761,10 +819,10 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)

rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt);
if (rc < 0)
return IRQ_NONE;
goto err;

if (interrupt == 0)
return IRQ_NONE;
goto err;

set_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
if (interrupt & TPM_INTF_DATA_AVAIL_INT)
@ -780,10 +838,13 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), interrupt);
tpm_tis_relinquish_locality(chip, 0);
if (rc < 0)
return IRQ_NONE;
goto err;

tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt);
return IRQ_HANDLED;

err:
return tpm_tis_update_unhandled_irqs(chip);
}

static void tpm_tis_gen_interrupt(struct tpm_chip *chip)
@ -804,6 +865,15 @@ static void tpm_tis_gen_interrupt(struct tpm_chip *chip)
chip->flags &= ~TPM_CHIP_FLAG_IRQ;
}

static void tpm_tis_free_irq_func(struct work_struct *work)
{
struct tpm_tis_data *priv = container_of(work, typeof(*priv), free_irq_work);
struct tpm_chip *chip = priv->chip;

devm_free_irq(chip->dev.parent, priv->irq, chip);
priv->irq = 0;
}

/* Register the IRQ and issue a command that will cause an interrupt. If an
* irq is seen then leave the chip setup for IRQ operation, otherwise reverse
* everything and leave in polling mode. Returns 0 on success.
@ -816,6 +886,7 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
int rc;
u32 int_status;

INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func);

rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL,
tis_int_handler, IRQF_ONESHOT | flags,
@ -918,6 +989,7 @@ void tpm_tis_remove(struct tpm_chip *chip)
interrupt = 0;

tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
flush_work(&priv->free_irq_work);

tpm_tis_clkrun_enable(chip, false);

@ -1021,6 +1093,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX);
chip->timeout_c = msecs_to_jiffies(TIS_TIMEOUT_C_MAX);
chip->timeout_d = msecs_to_jiffies(TIS_TIMEOUT_D_MAX);
priv->chip = chip;
priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
priv->phy_ops = phy_ops;
@ -1179,7 +1252,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
rc = tpm_tis_request_locality(chip, 0);
if (rc < 0)
goto out_err;
disable_interrupts(chip);
tpm_tis_disable_interrupts(chip);
tpm_tis_relinquish_locality(chip, 0);
}
}
@ -91,11 +91,15 @@ enum tpm_tis_flags {
};

struct tpm_tis_data {
struct tpm_chip *chip;
u16 manufacturer_id;
struct mutex locality_count_mutex;
unsigned int locality_count;
int locality;
int irq;
struct work_struct free_irq_work;
unsigned long last_unhandled_irq;
unsigned int unhandled_irqs;
unsigned int int_mask;
unsigned long flags;
void __iomem *ilb_base_addr;
@ -189,21 +189,28 @@ static int tpm_tis_i2c_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
int ret;

for (i = 0; i < TPM_RETRY; i++) {
/* write register */
msg.len = sizeof(reg);
msg.buf = &reg;
msg.flags = 0;
ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
if (ret < 0)
return ret;
u16 read = 0;

/* read data */
msg.buf = result;
msg.len = len;
msg.flags = I2C_M_RD;
ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
if (ret < 0)
return ret;
while (read < len) {
/* write register */
msg.len = sizeof(reg);
msg.buf = &reg;
msg.flags = 0;
ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
if (ret < 0)
return ret;

/* read data */
msg.buf = result + read;
msg.len = len - read;
msg.flags = I2C_M_RD;
if (msg.len > I2C_SMBUS_BLOCK_MAX)
msg.len = I2C_SMBUS_BLOCK_MAX;
ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
if (ret < 0)
return ret;
read += msg.len;
}

ret = tpm_tis_i2c_sanity_check_read(reg, len, result);
if (ret == 0)
@ -223,19 +230,27 @@ static int tpm_tis_i2c_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
struct i2c_msg msg = { .addr = phy->i2c_client->addr };
u8 reg = tpm_tis_i2c_address_to_register(addr);
int ret;
u16 wrote = 0;

if (len > TPM_BUFSIZE - 1)
return -EIO;

/* write register and data in one go */
phy->io_buf[0] = reg;
memcpy(phy->io_buf + sizeof(reg), value, len);

msg.len = sizeof(reg) + len;
msg.buf = phy->io_buf;
ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
if (ret < 0)
return ret;
while (wrote < len) {
/* write register and data in one go */
msg.len = sizeof(reg) + len - wrote;
if (msg.len > I2C_SMBUS_BLOCK_MAX)
msg.len = I2C_SMBUS_BLOCK_MAX;

memcpy(phy->io_buf + sizeof(reg), value + wrote,
msg.len - sizeof(reg));

ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
if (ret < 0)
return ret;
wrote += msg.len - sizeof(reg);
}

return 0;
}
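Both loops in the two hunks above share one shape: cap each i2c_msg at I2C_SMBUS_BLOCK_MAX bytes and advance a cursor until the full buffer has moved. A condensed sketch of the write side under that assumption (transfer() stands in for the driver's ack/retry helper; error handling trimmed):

#include <linux/i2c.h>
#include <linux/string.h>

/* Sketch: send `len` payload bytes, prefixing each chunk with `reg`. */
static int chunked_reg_write(struct i2c_msg *msg, u8 *io_buf, u8 reg,
			     const u8 *value, u16 len,
			     int (*transfer)(struct i2c_msg *))
{
	u16 wrote = 0;

	while (wrote < len) {
		int ret;

		/* One register byte plus as much payload as fits. */
		msg->len = sizeof(reg) + len - wrote;
		if (msg->len > I2C_SMBUS_BLOCK_MAX)
			msg->len = I2C_SMBUS_BLOCK_MAX;

		io_buf[0] = reg;
		memcpy(io_buf + sizeof(reg), value + wrote,
		       msg->len - sizeof(reg));
		msg->buf = io_buf;

		ret = transfer(msg);
		if (ret < 0)
			return ret;
		wrote += msg->len - sizeof(reg);
	}
	return 0;
}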
@ -136,6 +136,14 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
}

exit:
if (ret < 0) {
/* Deactivate chip select */
memset(&spi_xfer, 0, sizeof(spi_xfer));
spi_message_init(&m);
spi_message_add_tail(&spi_xfer, &m);
spi_sync_locked(phy->spi_device, &m);
}

spi_bus_unlock(phy->spi_device->master);
return ret;
}
@ -683,37 +683,21 @@ static struct miscdevice vtpmx_miscdev = {
.fops = &vtpmx_fops,
};

static int vtpmx_init(void)
{
return misc_register(&vtpmx_miscdev);
}

static void vtpmx_cleanup(void)
{
misc_deregister(&vtpmx_miscdev);
}

static int __init vtpm_module_init(void)
{
int rc;

rc = vtpmx_init();
if (rc) {
pr_err("couldn't create vtpmx device\n");
return rc;
}

workqueue = create_workqueue("tpm-vtpm");
if (!workqueue) {
pr_err("couldn't create workqueue\n");
rc = -ENOMEM;
goto err_vtpmx_cleanup;
return -ENOMEM;
}

return 0;

err_vtpmx_cleanup:
vtpmx_cleanup();
rc = misc_register(&vtpmx_miscdev);
if (rc) {
pr_err("couldn't create vtpmx device\n");
destroy_workqueue(workqueue);
}

return rc;
}
@ -721,7 +705,7 @@ static int __init vtpm_module_init(void)
static void __exit vtpm_module_exit(void)
{
destroy_workqueue(workqueue);
vtpmx_cleanup();
misc_deregister(&vtpmx_miscdev);
}

module_init(vtpm_module_init);
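The reordering above makes the workqueue exist before the user-visible device node that can hand work to it, so each error path only undoes what already succeeded. A generic sketch of that init/teardown shape under the same assumption (wq and dev stand in for the module's real objects; teardown here uses the conventional reverse order):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/miscdevice.h>

static struct workqueue_struct *wq;
static struct miscdevice dev; /* assume .minor/.name/.fops set elsewhere */

static int __init my_init(void)
{
	int rc;

	wq = create_workqueue("my-wq"); /* step 1: dependency first */
	if (!wq)
		return -ENOMEM;

	rc = misc_register(&dev);       /* step 2: user-visible node last */
	if (rc)
		destroy_workqueue(wq);  /* undo step 1 only */
	return rc;
}

static void __exit my_exit(void)
{
	misc_deregister(&dev);          /* reverse: step 2, then step 1 */
	destroy_workqueue(wq);
}
module_init(my_init);
module_exit(my_exit);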
@ -571,6 +571,7 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
dma_resv_for_each_fence_unlocked(&cursor, fence) {

if (dma_resv_iter_is_restarted(&cursor)) {
struct dma_fence **new_fences;
unsigned int count;

while (*num_fences)
@ -579,13 +580,17 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
count = cursor.num_fences + 1;

/* Eventually re-allocate the array */
*fences = krealloc_array(*fences, count,
sizeof(void *),
GFP_KERNEL);
if (count && !*fences) {
new_fences = krealloc_array(*fences, count,
sizeof(void *),
GFP_KERNEL);
if (count && !new_fences) {
kfree(*fences);
*fences = NULL;
*num_fences = 0;
dma_resv_iter_end(&cursor);
return -ENOMEM;
}
*fences = new_fences;
}

(*fences)[(*num_fences)++] = dma_fence_get(fence);
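The dma_resv change is the classic realloc-safety fix: assigning krealloc_array() straight back to *fences loses the old array when the allocation fails, because a failed call returns NULL without freeing its input. The shape of the fix, reduced to its essentials (userspace realloc used for illustration):

#include <stdlib.h>

/* Grow `*arr` to `count` elements without losing it on failure. */
static int grow_array(void ***arr, size_t count)
{
	void **tmp = realloc(*arr, count * sizeof(void *));

	if (count && !tmp) {
		free(*arr); /* caller's policy here: drop everything */
		*arr = NULL;
		return -1;  /* -ENOMEM equivalent */
	}
	*arr = tmp;         /* only commit on success */
	return 0;
}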
@ -874,7 +874,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,

spin_lock_init(&mvpwm->lock);

return pwmchip_add(&mvpwm->chip);
return devm_pwmchip_add(dev, &mvpwm->chip);
}

#ifdef CONFIG_DEBUG_FS
@ -1112,6 +1112,13 @@ static int mvebu_gpio_probe_syscon(struct platform_device *pdev,
return 0;
}

static void mvebu_gpio_remove_irq_domain(void *data)
{
struct irq_domain *domain = data;

irq_domain_remove(domain);
}

static int mvebu_gpio_probe(struct platform_device *pdev)
{
struct mvebu_gpio_chip *mvchip;
@ -1243,17 +1250,21 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
if (!mvchip->domain) {
dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
mvchip->chip.label);
err = -ENODEV;
goto err_pwm;
return -ENODEV;
}

err = devm_add_action_or_reset(&pdev->dev, mvebu_gpio_remove_irq_domain,
mvchip->domain);
if (err)
return err;

err = irq_alloc_domain_generic_chips(
mvchip->domain, ngpios, 2, np->name, handle_level_irq,
IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0);
if (err) {
dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n",
mvchip->chip.label);
goto err_domain;
return err;
}

/*
@ -1293,13 +1304,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
}

return 0;

err_domain:
irq_domain_remove(mvchip->domain);
err_pwm:
pwmchip_remove(&mvchip->mvpwm->chip);

return err;
}

static struct platform_driver mvebu_gpio_driver = {
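The mvebu rework replaces error-label unwinding with device-managed cleanup: devm_pwmchip_add() and devm_add_action_or_reset() make teardown run automatically, in reverse registration order, when probe fails or the device is unbound, which is why the err_domain/err_pwm labels can go away. The general pattern, sketched for a hypothetical resource:

#include <linux/device.h>
#include <linux/errno.h>

struct my_res; /* hypothetical resource type */
extern struct my_res *my_res_create(void);
extern void my_res_destroy(struct my_res *res);

static void my_res_release(void *data)
{
	my_res_destroy(data); /* runs automatically on unbind or failure */
}

static int my_probe_step(struct device *dev)
{
	struct my_res *res = my_res_create();

	if (!res)
		return -ENOMEM;
	/* On failure this destroys `res` immediately and returns the error. */
	return devm_add_action_or_reset(dev, my_res_release, res);
}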
@ -91,13 +91,13 @@ static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
struct regmap *regmap = tps68470_gpio->tps68470_regmap;

/* Set the initial value */
tps68470_gpio_set(gc, offset, value);

/* rest are always outputs */
if (offset >= TPS68470_N_REGULAR_GPIO)
return 0;

/* Set the initial value */
tps68470_gpio_set(gc, offset, value);

return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset),
TPS68470_GPIO_MODE_MASK,
TPS68470_GPIO_MODE_OUT_CMOS);
@ -1709,7 +1709,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
}
xcp_id = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id;
xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
0 : fpriv->xcp_id;
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;

@ -1229,13 +1229,13 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
pasid = 0;
}

r = amdgpu_vm_init(adev, &fpriv->vm);
r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
if (r)
goto error_pasid;

r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
if (r)
goto error_vm;
goto error_pasid;

r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
if (r)

@ -1382,7 +1382,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)
goto error_pasid;
}

r = amdgpu_vm_init(adev, vm);
r = amdgpu_vm_init(adev, vm, -1);
if (r) {
DRM_ERROR("failed to initialize vm\n");
goto error_pasid;
@ -55,8 +55,9 @@ static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
DRM_WARN("%s: vblank timer overrun\n", __func__);

ret = drm_crtc_handle_vblank(crtc);
/* Don't queue timer again when vblank is disabled. */
if (!ret)
DRM_ERROR("amdgpu_vkms failure on handling vblank");
return HRTIMER_NORESTART;

return HRTIMER_RESTART;
}
@ -81,7 +82,7 @@ static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

hrtimer_cancel(&amdgpu_crtc->vblank_timer);
hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);
}

static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
@ -2121,13 +2121,14 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
*
* @adev: amdgpu_device pointer
* @vm: requested vm
* @xcp_id: GPU partition selection id
*
* Init @vm fields.
*
* Returns:
* 0 for success, error for failure.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
{
struct amdgpu_bo *root_bo;
struct amdgpu_bo_vm *root;
@ -2177,7 +2178,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->evicting = false;

r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
false, &root);
false, &root, xcp_id);
if (r)
goto error_free_delayed;
root_bo = &root->bo;

@ -392,7 +392,7 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
u32 pasid);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
@ -475,7 +475,8 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_vm *vmbo, bool immediate);
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int level, bool immediate, struct amdgpu_bo_vm **vmbo);
int level, bool immediate, struct amdgpu_bo_vm **vmbo,
int32_t xcp_id);
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
struct amdgpu_vm *vm);

@ -498,11 +498,12 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* @level: the page table level
* @immediate: use a immediate update
* @vmbo: pointer to the buffer object pointer
* @xcp_id: GPU partition id
*/
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int level, bool immediate, struct amdgpu_bo_vm **vmbo)
int level, bool immediate, struct amdgpu_bo_vm **vmbo,
int32_t xcp_id)
{
struct amdgpu_fpriv *fpriv = container_of(vm, struct amdgpu_fpriv, vm);
struct amdgpu_bo_param bp;
struct amdgpu_bo *bo;
struct dma_resv *resv;
@ -535,7 +536,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,

bp.type = ttm_bo_type_kernel;
bp.no_wait_gpu = immediate;
bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1;
bp.xcp_id_plus1 = xcp_id + 1;

if (vm->root.bo)
bp.resv = vm->root.bo->tbo.base.resv;
@ -561,7 +562,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bp.type = ttm_bo_type_kernel;
bp.resv = bo->tbo.base.resv;
bp.bo_ptr_size = sizeof(struct amdgpu_bo);
bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1;
bp.xcp_id_plus1 = xcp_id + 1;

r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);

@ -606,7 +607,8 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
return 0;

amdgpu_vm_eviction_unlock(vm);
r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
vm->root.bo->xcp_id);
amdgpu_vm_eviction_lock(vm);
if (r)
return r;
@ -363,7 +363,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
if (!adev->xcp_mgr)
return 0;

fpriv->xcp_id = ~0;
fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
for (i = 0; i < MAX_XCP; ++i) {
if (!adev->xcp_mgr->xcp[i].ddev)
break;
@ -381,7 +381,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
}
}

fpriv->vm.mem_id = fpriv->xcp_id == ~0 ? -1 :
fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
return 0;
}

@ -37,6 +37,8 @@
#define AMDGPU_XCP_FL_NONE 0
#define AMDGPU_XCP_FL_LOCKED (1 << 0)

#define AMDGPU_XCP_NO_PARTITION (~0)

struct amdgpu_fpriv;

enum AMDGPU_XCP_IP_BLOCK {

@ -68,7 +68,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
enum AMDGPU_XCP_IP_BLOCK ip_blk;
uint32_t inst_mask;

ring->xcp_id = ~0;
ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
return;

@ -177,7 +177,7 @@ static int aqua_vanjaram_select_scheds(
u32 sel_xcp_id;
int i;

if (fpriv->xcp_id == ~0) {
if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
u32 least_ref_cnt = ~0;

fpriv->xcp_id = 0;
@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin");

/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@ -424,12 +424,12 @@ static void dm_pflip_high_irq(void *interrupt_params)

spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED,
amdgpu_crtc->crtc_id,
amdgpu_crtc);
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED,
amdgpu_crtc->crtc_id,
amdgpu_crtc);
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return;
}
@ -883,7 +883,7 @@ static int dm_set_powergating_state(void *handle,
}

/* Prototypes of private functions */
static int dm_early_init(void* handle);
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
@ -1282,7 +1282,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

@ -1347,6 +1347,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
if (amdgpu_in_reset(adev))
goto skip;

if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
goto skip;
}

mutex_lock(&adev->dm.dc_lock);
if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
dc_link_dp_handle_automated_test(dc_link);
@ -1365,8 +1374,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
DP_TEST_RESPONSE,
&test_response.raw,
sizeof(test_response));
}
else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
dc_link_dp_allow_hpd_rx_irq(dc_link)) {
/* offload_work->data is from handle_hpd_rx_irq->
@ -1554,7 +1562,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);

if(amdgpu_dm_irq_init(adev)) {
if (amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
goto error;
}
@ -1696,9 +1704,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
adev->dm.dc->debug.disable_stutter = true;

if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
adev->dm.dc->debug.disable_dsc = true;
}

if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
adev->dm.dc->debug.disable_clock_gate = true;
@ -1942,8 +1949,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
mutex_destroy(&adev->dm.audio_lock);
mutex_destroy(&adev->dm.dc_lock);
mutex_destroy(&adev->dm.dpia_aux_lock);

return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
@ -1952,7 +1957,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
int r;
const struct dmcu_firmware_header_v1_0 *hdr;

switch(adev->asic_type) {
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
@ -2709,7 +2714,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
struct dc_scaling_info scaling_infos[MAX_SURFACES];
struct dc_flip_addrs flip_addrs[MAX_SURFACES];
struct dc_stream_update stream_update;
} * bundle;
} *bundle;
int k, m;

bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
@ -2739,8 +2744,6 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,

cleanup:
kfree(bundle);

return;
}

static int dm_resume(void *handle)
@ -2954,8 +2957,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
const struct amdgpu_ip_block_version dm_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 1,
.minor = 0,
@ -3000,9 +3002,12 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;

if (caps->ext_caps->bits.oled == 1 /*||
caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
if (caps->ext_caps->bits.oled == 1
/*
* ||
* caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
* caps->ext_caps->bits.hdr_aux_backlight_control == 1
*/)
caps->aux_support = true;

if (amdgpu_backlight == 0)
@ -3236,86 +3241,6 @@ static void handle_hpd_irq(void *param)

}

static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
u8 dret;
bool new_irq_handled = false;
int dpcd_addr;
int dpcd_bytes_to_read;

const int max_process_count = 30;
int process_count = 0;

const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
/* DPCD 0x200 - 0x201 for downstream IRQ */
dpcd_addr = DP_SINK_COUNT;
} else {
dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
/* DPCD 0x2002 - 0x2005 for downstream IRQ */
dpcd_addr = DP_SINK_COUNT_ESI;
}

dret = drm_dp_dpcd_read(
&aconnector->dm_dp_aux.aux,
dpcd_addr,
esi,
dpcd_bytes_to_read);

while (dret == dpcd_bytes_to_read &&
process_count < max_process_count) {
u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
u8 retry;
dret = 0;

process_count++;

DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
/* handle HPD short pulse irq */
if (aconnector->mst_mgr.mst_state)
drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
esi,
ack,
&new_irq_handled);

if (new_irq_handled) {
/* ACK at DPCD to notify down stream */
for (retry = 0; retry < 3; retry++) {
ssize_t wret;

wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
dpcd_addr + 1,
ack[1]);
if (wret == 1)
break;
}

if (retry == 3) {
DRM_ERROR("Failed to ack MST event.\n");
return;
}

drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
/* check if there is new irq to be handled */
dret = drm_dp_dpcd_read(
&aconnector->dm_dp_aux.aux,
dpcd_addr,
esi,
dpcd_bytes_to_read);

new_irq_handled = false;
} else {
break;
}
}

if (process_count == max_process_count)
DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}

static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
@ -3377,7 +3302,23 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
dm_handle_mst_sideband_msg(aconnector);
bool skip = false;

/*
* DOWN_REP_MSG_RDY is also handled by polling method
* mgr->cbs->poll_hpd_irq()
*/
spin_lock(&offload_wq->offload_lock);
skip = offload_wq->is_handling_mst_msg_rdy_event;

if (!skip)
offload_wq->is_handling_mst_msg_rdy_event = true;

spin_unlock(&offload_wq->offload_lock);

if (!skip)
schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);

goto out;
}

@ -3468,7 +3409,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
aconnector = to_amdgpu_dm_connector(connector);
dc_link = aconnector->dc_link;

if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
int_params.irq_source = dc_link->irq_source_hpd;

@ -3477,7 +3418,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
(void *) aconnector);
}

if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {

/* Also register for DP short pulse (hpd_rx). */
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
@ -3486,11 +3427,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
amdgpu_dm_irq_register_interrupt(adev, &int_params,
handle_hpd_rx_irq,
(void *) aconnector);

if (adev->dm.hpd_rx_offload_wq)
adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
aconnector;
}

if (adev->dm.hpd_rx_offload_wq)
adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
aconnector;
}
}
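The skip logic in the handle_hpd_rx_irq hunk above is a test-and-set under a spinlock: the first context to see the flag clear claims the work, and everyone else backs off until the worker clears it again. Reduced to the bare pattern:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(claim_lock);
static bool claimed;

/* Returns true for exactly one caller until release_work() runs. */
static bool try_claim_work(void)
{
	bool mine;

	spin_lock(&claim_lock);
	mine = !claimed;
	if (mine)
		claimed = true;
	spin_unlock(&claim_lock);
	return mine;
}

static void release_work(void)
{
	spin_lock(&claim_lock);
	claimed = false;
	spin_unlock(&claim_lock);
}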
@ -3503,7 +3444,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
struct dc_interrupt_params int_params = {0};
int r;
int i;
unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@ -3517,11 +3458,12 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
* Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
* coming from DC hardware.
* amdgpu_dm_irq_handler() will re-direct the interrupt to DC
* for acknowledging and handling. */
* for acknowledging and handling.
*/

/* Use VBLANK interrupt */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
if (r) {
DRM_ERROR("Failed to add crtc irq id!\n");
return r;
@ -3529,7 +3471,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)

int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i+1 , 0);
dc_interrupt_to_irq_source(dc, i + 1, 0);

c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

@ -3585,7 +3527,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
struct dc_interrupt_params int_params = {0};
int r;
int i;
unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

if (adev->family >= AMDGPU_FAMILY_AI)
client_id = SOC15_IH_CLIENTID_DCE;
@ -3602,7 +3544,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
* Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
* coming from DC hardware.
* amdgpu_dm_irq_handler() will re-direct the interrupt to DC
* for acknowledging and handling. */
* for acknowledging and handling.
*/

/* Use VBLANK interrupt */
for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
@ -4049,7 +3992,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
}

static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
unsigned *min, unsigned *max)
unsigned int *min, unsigned int *max)
{
if (!caps)
return 0;
@ -4069,7 +4012,7 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
uint32_t brightness)
{
unsigned min, max;
unsigned int min, max;

if (!get_brightness_range(caps, &min, &max))
return brightness;
@ -4082,7 +4025,7 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
uint32_t brightness)
{
unsigned min, max;
unsigned int min, max;

if (!get_brightness_range(caps, &min, &max))
return brightness;
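get_brightness_range() and the two converters above implement a plain linear rescale between a user-visible brightness value and the panel's min..max capability. The underlying arithmetic in runnable form (the 0..255 user range and the panel values are assumptions for the example, not taken from the driver):

#include <stdio.h>

/* Map v from [0, 255] onto [min, max] and back, rounding down. */
static unsigned int to_panel(unsigned int v, unsigned int min, unsigned int max)
{
	return min + v * (max - min) / 255;
}

static unsigned int to_user(unsigned int v, unsigned int min, unsigned int max)
{
	return (v - min) * 255 / (max - min);
}

int main(void)
{
	unsigned int min = 12, max = 400; /* hypothetical panel range */
	unsigned int user = 128;
	unsigned int panel = to_panel(user, min, max);

	printf("user %u -> panel %u -> user %u\n",
	       user, panel, to_user(panel, min, max));
	return 0;
}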
@ -4562,7 +4505,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
drm_atomic_private_obj_fini(&dm->atomic_obj);
return;
}

/******************************************************************************
@ -5394,6 +5336,7 @@ static bool adjust_colour_depth_from_display_info(
{
enum dc_color_depth depth = timing_out->display_color_depth;
int normalized_clk;

do {
normalized_clk = timing_out->pix_clk_100hz / 10;
/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
@ -5609,6 +5552,7 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;

sink_init_data.link = aconnector->dc_link;
sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

@ -5732,7 +5676,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
return &aconnector->freesync_vid_base;

/* Find the preferred mode */
list_for_each_entry (m, list_head, head) {
list_for_each_entry(m, list_head, head) {
if (m->type & DRM_MODE_TYPE_PREFERRED) {
m_pref = m;
break;
@ -5756,7 +5700,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
* For some monitors, preferred mode is not the mode with highest
* supported refresh rate.
*/
list_for_each_entry (m, list_head, head) {
list_for_each_entry(m, list_head, head) {
current_refresh = drm_mode_vrefresh(m);

if (m->hdisplay == m_pref->hdisplay &&
@ -6028,7 +5972,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
* This may not be an error, the use case is when we have no
* usermode calls to reset and set mode upon hotplug. In this
* case, we call set mode ourselves to restore the previous mode
* and the modelist may not be filled in in time.
* and the modelist may not be filled in time.
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else {
@ -6051,9 +5995,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
drm_mode_set_crtcinfo(&mode, 0);

/*
* If scaling is enabled and refresh rate didn't change
* we copy the vic and polarities of the old timings
*/
* If scaling is enabled and refresh rate didn't change
* we copy the vic and polarities of the old timings
*/
if (!scale || mode_refresh != preferred_refresh)
fill_stream_properties_from_drm_display_mode(
stream, &mode, &aconnector->base, con_state, NULL,
@ -6817,6 +6761,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,

if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;

is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
aconnector->force_yuv420_output;
color_depth = convert_color_depth_from_display_info(connector,
@ -7135,7 +7080,7 @@ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
{
struct drm_display_mode *m;

list_for_each_entry (m, &aconnector->base.probed_modes, head) {
list_for_each_entry(m, &aconnector->base.probed_modes, head) {
if (drm_mode_equal(m, mode))
return true;
}
@ -7295,6 +7240,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
mutex_init(&aconnector->hpd_lock);
mutex_init(&aconnector->handle_mst_msg_ready);

/*
* configure support HPD hot plug connector_>polled default value is 0
@ -7454,7 +7400,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,

link->priv = aconnector;

DRM_DEBUG_DRIVER("%s()\n", __func__);

i2c = create_i2c(link->ddc, link->link_index, &res);
if (!i2c) {
@ -8125,7 +8070,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* Only allow immediate flips for fast updates that don't
* change memory domain, FB pitch, DCC state, rotation or
* mirroring.
*
* dm_crtc_helper_atomic_check() only accepts async flips with
* fast updates.
*/
if (crtc->state->async_flip &&
acrtc_state->update_type != UPDATE_TYPE_FAST)
drm_warn_once(state->dev,
"[PLANE:%d:%s] async flip with non-fast update\n",
plane->base.id, plane->name);
bundle->flip_addrs[planes_count].flip_immediate =
crtc->state->async_flip &&
acrtc_state->update_type == UPDATE_TYPE_FAST &&
@ -8168,8 +8121,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* DRI3/Present extension with defined target_msc.
*/
last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
}
else {
} else {
/* For variable refresh rate mode only:
* Get vblank of last completed flip to avoid > 1 vrr
* flips per video frame by use of throttling, but allow
@ -8502,8 +8454,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_resource_state_copy_construct_current(dm->dc, dc_state);
}

for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
new_crtc_state, i) {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@ -8526,9 +8478,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

drm_dbg_state(state->dev,
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d,active_changed:%d,"
"connectors_changed:%d\n",
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
acrtc->crtc_id,
new_crtc_state->enable,
new_crtc_state->active,
@ -9104,8 +9054,8 @@ static int do_aquire_global_lock(struct drm_device *dev,
&commit->flip_done, 10*HZ);

if (ret == 0)
DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
"timed out\n", crtc->base.id, crtc->name);
DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
crtc->base.id, crtc->name);

drm_crtc_commit_put(commit);
}
@ -9190,7 +9140,8 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
return false;
}

static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
u64 num, den, res;
struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;

@ -9312,9 +9263,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
goto skip_modeset;

drm_dbg_state(state->dev,
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d,active_changed:%d,"
"connectors_changed:%d\n",
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
acrtc->crtc_id,
new_crtc_state->enable,
new_crtc_state->active,
@ -9343,8 +9292,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
DRM_DEBUG_DRIVER(
"Mode change not required for front porch change, "
"setting mode_changed to %d",
"Mode change not required for front porch change, setting mode_changed to %d",
new_crtc_state->mode_changed);

set_freesync_fixed_config(dm_new_crtc_state);
@ -9356,9 +9304,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct drm_display_mode *high_mode;

high_mode = get_highest_refresh_rate_mode(aconnector, false);
if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
set_freesync_fixed_config(dm_new_crtc_state);
}
}

ret = dm_atomic_get_state(state, &dm_state);
@ -9526,6 +9473,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,
*/
for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
struct amdgpu_framebuffer *old_afb, *new_afb;

if (other->type == DRM_PLANE_TYPE_CURSOR)
continue;

@ -9624,11 +9572,12 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
}

/* Core DRM takes care of checking FB modifiers, so we only need to
* check tiling flags when the FB doesn't have a modifier. */
* check tiling flags when the FB doesn't have a modifier.
*/
if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
if (adev->family < AMDGPU_FAMILY_AI) {
linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
} else {
linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
@ -9850,12 +9799,12 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
* cursor per pipe but it's going to inherit the scaling and
* positioning from the underlying pipe. Check the cursor plane's
* blending properties match the underlying planes'. */
* blending properties match the underlying planes'.
*/

new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
if (!new_cursor_state || !new_cursor_state->fb) {
if (!new_cursor_state || !new_cursor_state->fb)
return 0;
}

dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
@ -9900,6 +9849,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
struct drm_connector_state *conn_state, *old_conn_state;
struct amdgpu_dm_connector *aconnector = NULL;
int i;

for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
if (!conn_state->crtc)
conn_state = old_conn_state;
@ -10334,7 +10284,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}

/* Store the overall update type for use later in atomic check. */
for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct dm_crtc_state *dm_new_crtc_state =
to_dm_crtc_state(new_crtc_state);

@ -10356,7 +10306,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
else
DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

trace_amdgpu_dm_atomic_check_finish(state, ret);
@ -194,6 +194,11 @@ struct hpd_rx_irq_offload_work_queue {
* we're handling link loss
*/
bool is_handling_link_loss;
/**
* @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
* ready event when we're already handling mst message ready event
*/
bool is_handling_mst_msg_rdy_event;
/**
* @aconnector: The aconnector that this work queue is attached to
*/
@ -638,6 +643,8 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *mst_output_port;
struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
struct mutex handle_mst_msg_ready;

/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;

@ -398,6 +398,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}

/*
* Only allow async flips for fast updates that don't change the FB
* pitch, the DCC state, rotation, etc.
*/
if (crtc_state->async_flip &&
dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);
return -EINVAL;
}

/* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream)
return 0;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -619,8 +619,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	return connector;
 }
 
+void dm_handle_mst_sideband_msg_ready_event(
+	struct drm_dp_mst_topology_mgr *mgr,
+	enum mst_msg_ready_type msg_rdy_type)
+{
+	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+	uint8_t dret;
+	bool new_irq_handled = false;
+	int dpcd_addr;
+	uint8_t dpcd_bytes_to_read;
+	const uint8_t max_process_count = 30;
+	uint8_t process_count = 0;
+	u8 retry;
+	struct amdgpu_dm_connector *aconnector =
+			container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+
+	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+		/* DPCD 0x200 - 0x201 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT;
+	} else {
+		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT_ESI;
+	}
+
+	mutex_lock(&aconnector->handle_mst_msg_ready);
+
+	while (process_count < max_process_count) {
+		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
+
+		process_count++;
+
+		dret = drm_dp_dpcd_read(
+			&aconnector->dm_dp_aux.aux,
+			dpcd_addr,
+			esi,
+			dpcd_bytes_to_read);
+
+		if (dret != dpcd_bytes_to_read) {
+			DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
+			break;
+		}
+
+		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+
+		switch (msg_rdy_type) {
+		case DOWN_REP_MSG_RDY_EVENT:
+			/* Only handle DOWN_REP_MSG_RDY case */
+			esi[1] &= DP_DOWN_REP_MSG_RDY;
+			break;
+		case UP_REQ_MSG_RDY_EVENT:
+			/* Only handle UP_REQ_MSG_RDY case */
+			esi[1] &= DP_UP_REQ_MSG_RDY;
+			break;
+		default:
+			/* Handle both cases */
+			esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+			break;
+		}
+
+		if (!esi[1])
+			break;
+
+		/* handle MST irq */
+		if (aconnector->mst_mgr.mst_state)
+			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+							esi,
+							ack,
+							&new_irq_handled);
+
+		if (new_irq_handled) {
+			/* ACK at DPCD to notify down stream */
+			for (retry = 0; retry < 3; retry++) {
+				ssize_t wret;
+
+				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+							  dpcd_addr + 1,
+							  ack[1]);
+				if (wret == 1)
+					break;
+			}
+
+			if (retry == 3) {
+				DRM_ERROR("Failed to ack MST event.\n");
+				return;
+			}
+
+			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+
+			new_irq_handled = false;
+		} else {
+			break;
+		}
+	}
+
+	mutex_unlock(&aconnector->handle_mst_msg_ready);
+
+	if (process_count == max_process_count)
+		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
+{
+	dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
+}
+
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
 	.add_connector = dm_dp_add_mst_connector,
+	.poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
 };
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
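dm_handle_mst_sideband_msg_ready_event() polls the sink's IRQ vector (DPCD 0x200 for pre-1.2 sinks, the 0x2002 ESI block otherwise), masks it down to the requested message class, lets the MST manager process the event, then acks it back with up to three DPCD write retries; note that the failed-ack error path returns with handle_mst_msg_ready still held. A hedged sketch of a caller draining both message classes at once (wrapper name hypothetical):

	/* Hypothetical wrapper: drain both DOWN_REP and UP_REQ queues,
	 * e.g. while recovering from link loss. */
	static void dm_drain_mst_sideband(struct amdgpu_dm_connector *aconnector)
	{
		dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr,
						       DOWN_OR_UP_MSG_RDY_EVENT);
	}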
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -49,6 +49,13 @@
 #define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
 #define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
 
+enum mst_msg_ready_type {
+	NONE_MSG_RDY_EVENT = 0,
+	DOWN_REP_MSG_RDY_EVENT = 1,
+	UP_REQ_MSG_RDY_EVENT = 2,
+	DOWN_OR_UP_MSG_RDY_EVENT = 3
+};
+
 struct amdgpu_display_manager;
 struct amdgpu_dm_connector;
 
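The selector values compose bitwise: DOWN_OR_UP_MSG_RDY_EVENT equals DOWN_REP_MSG_RDY_EVENT | UP_REQ_MSG_RDY_EVENT, which is what lets the default branch of the switch in dm_handle_mst_sideband_msg_ready_event() mask both ESI bits in one step. A compile-time sketch of that invariant:

	/* Sketch: the combined selector covers exactly both classes. */
	static_assert(DOWN_OR_UP_MSG_RDY_EVENT ==
		      (DOWN_REP_MSG_RDY_EVENT | UP_REQ_MSG_RDY_EVENT),
		      "combined selector must cover both message classes");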
@@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 void
 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
 
+void dm_handle_mst_sideband_msg_ready_event(
+	struct drm_dp_mst_topology_mgr *mgr,
+	enum mst_msg_ready_type msg_rdy_type);
+
 struct dsc_mst_fairness_vars {
 	int pbn;
 	bool dsc_enabled;
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -87,6 +87,11 @@ static int dcn31_get_active_display_cnt_wa(
 				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
 				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
 			tmds_present = true;
+
+		/* Checking stream / link detection ensuring that PHY is active */
+		if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
+			display_count++;
+
 	}
 
 	for (i = 0; i < dc->link_count; i++) {
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3278,7 +3278,8 @@ void dcn10_wait_for_mpcc_disconnect(
 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
 
-			if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+			if (pipe_ctx->stream_res.tg &&
+			    pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
 				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
 			hubp->funcs->set_blank(hubp, true);
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -215,7 +215,7 @@ void optc3_set_odm_bypass(struct timing_generator *optc,
 	optc1->opp_count = 1;
 }
 
-static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
 		struct dc_crtc_timing *timing)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -293,7 +293,7 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e
 			OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
 }
 
-static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
 {
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
@@ -351,6 +351,9 @@ void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable);
 
 void optc3_set_odm_bypass(struct timing_generator *optc,
 		const struct dc_crtc_timing *dc_crtc_timing);
+void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+		struct dc_crtc_timing *timing);
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc);
 void optc3_tg_init(struct timing_generator *optc);
 void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
 #endif /* __DC_OPTC_DCN30_H__ */
drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -11,7 +11,8 @@
 # Makefile for dcn30.
 
 DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
-	dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
+	dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o \
+	dcn301_optc.o
 
 AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c (new file, 185 lines)
@@ -0,0 +1,185 @@
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "reg_helper.h"
#include "dcn301_optc.h"
#include "dc.h"
#include "dcn_calc_math.h"
#include "dc_dmub_srv.h"

#include "dml/dcn30/dcn30_fpu.h"
#include "dc_trace.h"

#define REG(reg)\
	optc1->tg_regs->reg

#define CTX \
	optc1->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	optc1->tg_shift->field_name, optc1->tg_mask->field_name

/**
 * optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
 *
 * @optc: timing_generator instance.
 * @params: parameters used for Dynamic Refresh Rate.
 */
void optc301_set_drr(
	struct timing_generator *optc,
	const struct drr_params *params)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	if (params != NULL &&
	    params->vertical_total_max > 0 &&
	    params->vertical_total_min > 0) {

		if (params->vertical_total_mid != 0) {

			REG_SET(OTG_V_TOTAL_MID, 0,
				OTG_V_TOTAL_MID, params->vertical_total_mid - 1);

			REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
					OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
					OTG_VTOTAL_MID_FRAME_NUM,
					(uint8_t)params->vertical_total_mid_frame_num);

		}

		optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);

		REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
				OTG_V_TOTAL_MIN_SEL, 1,
				OTG_V_TOTAL_MAX_SEL, 1,
				OTG_FORCE_LOCK_ON_EVENT, 0,
				OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
				OTG_SET_V_TOTAL_MIN_MASK, 0);
		// Setup manual flow control for EOF via TRIG_A
		optc->funcs->setup_manual_trigger(optc);

	} else {
		REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
				OTG_SET_V_TOTAL_MIN_MASK, 0,
				OTG_V_TOTAL_MIN_SEL, 0,
				OTG_V_TOTAL_MAX_SEL, 0,
				OTG_FORCE_LOCK_ON_EVENT, 0);

		optc->funcs->set_vtotal_min_max(optc, 0, 0);
	}
}

void optc301_setup_manual_trigger(struct timing_generator *optc)
{
	struct optc *optc1 = DCN10TG_FROM_TG(optc);

	REG_SET_8(OTG_TRIGA_CNTL, 0,
			OTG_TRIGA_SOURCE_SELECT, 21,
			OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
			OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
			OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
			OTG_TRIGA_POLARITY_SELECT, 0,
			OTG_TRIGA_FREQUENCY_SELECT, 0,
			OTG_TRIGA_DELAY, 0,
			OTG_TRIGA_CLEAR, 1);
}

static struct timing_generator_funcs dcn30_tg_funcs = {
	.validate_timing = optc1_validate_timing,
	.program_timing = optc1_program_timing,
	.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
	.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
	.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
	.program_global_sync = optc1_program_global_sync,
	.enable_crtc = optc2_enable_crtc,
	.disable_crtc = optc1_disable_crtc,
	/* used by enable_timing_synchronization. Not need for FPGA */
	.is_counter_moving = optc1_is_counter_moving,
	.get_position = optc1_get_position,
	.get_frame_count = optc1_get_vblank_counter,
	.get_scanoutpos = optc1_get_crtc_scanoutpos,
	.get_otg_active_size = optc1_get_otg_active_size,
	.set_early_control = optc1_set_early_control,
	/* used by enable_timing_synchronization. Not need for FPGA */
	.wait_for_state = optc1_wait_for_state,
	.set_blank_color = optc3_program_blank_color,
	.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
	.triplebuffer_lock = optc3_triplebuffer_lock,
	.triplebuffer_unlock = optc2_triplebuffer_unlock,
	.enable_reset_trigger = optc1_enable_reset_trigger,
	.enable_crtc_reset = optc1_enable_crtc_reset,
	.disable_reset_trigger = optc1_disable_reset_trigger,
	.lock = optc3_lock,
	.unlock = optc1_unlock,
	.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
	.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
	.enable_optc_clock = optc1_enable_optc_clock,
	.set_drr = optc301_set_drr,
	.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
	.set_vtotal_min_max = optc3_set_vtotal_min_max,
	.set_static_screen_control = optc1_set_static_screen_control,
	.program_stereo = optc1_program_stereo,
	.is_stereo_left_eye = optc1_is_stereo_left_eye,
	.tg_init = optc3_tg_init,
	.is_tg_enabled = optc1_is_tg_enabled,
	.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
	.clear_optc_underflow = optc1_clear_optc_underflow,
	.setup_global_swap_lock = NULL,
	.get_crc = optc1_get_crc,
	.configure_crc = optc2_configure_crc,
	.set_dsc_config = optc3_set_dsc_config,
	.get_dsc_status = optc2_get_dsc_status,
	.set_dwb_source = NULL,
	.set_odm_bypass = optc3_set_odm_bypass,
	.set_odm_combine = optc3_set_odm_combine,
	.get_optc_source = optc2_get_optc_source,
	.set_out_mux = optc3_set_out_mux,
	.set_drr_trigger_window = optc3_set_drr_trigger_window,
	.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
	.set_gsl = optc2_set_gsl,
	.set_gsl_source_select = optc2_set_gsl_source_select,
	.set_vtg_params = optc1_set_vtg_params,
	.program_manual_trigger = optc2_program_manual_trigger,
	.setup_manual_trigger = optc301_setup_manual_trigger,
	.get_hw_timing = optc1_get_hw_timing,
	.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
};

void dcn301_timing_generator_init(struct optc *optc1)
{
	optc1->base.funcs = &dcn30_tg_funcs;

	optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
	optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;

	optc1->min_h_blank = 32;
	optc1->min_v_blank = 3;
	optc1->min_v_blank_interlace = 5;
	optc1->min_h_sync_width = 4;
	optc1->min_v_sync_width = 1;
}
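For intuition on what optc301_set_drr() programs: a variable-refresh window maps to a V_TOTAL min/max pair at fixed pixel clock and horizontal total, since vtotal = pixel_clock / (htotal * refresh_rate). A hedged sketch with assumed timing numbers (helper and values illustrative, not from the driver):

	/* Assumed 2200x1125 @ 148.5 MHz timing with a 48-60 Hz VRR window. */
	static unsigned int vtotal_for_refresh(unsigned int pix_khz,
					       unsigned int htotal,
					       unsigned int hz)
	{
		return (pix_khz * 1000u) / (htotal * hz);
	}

	/* vertical_total_min = vtotal_for_refresh(148500, 2200, 60) -> 1125 */
	/* vertical_total_max = vtotal_for_refresh(148500, 2200, 48) -> 1406 */

The function then hands min - 1 and max - 1 to set_vtotal_min_max(), matching the register programming above.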
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h (new file, 36 lines)
@@ -0,0 +1,36 @@
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __DC_OPTC_DCN301_H__
#define __DC_OPTC_DCN301_H__

#include "dcn20/dcn20_optc.h"
#include "dcn30/dcn30_optc.h"

void dcn301_timing_generator_init(struct optc *optc1);
void optc301_setup_manual_trigger(struct timing_generator *optc);
void optc301_set_drr(struct timing_generator *optc, const struct drr_params *params);

#endif /* __DC_OPTC_DCN301_H__ */
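The header's entry points are consumed by the dcn301 resource code, which is among the files not shown in this diff; a hedged sketch of the expected hookup, assumed from the dcn30 pattern (register table setup elided):

	/* Sketch, assumed: the dcn301 resource constructor switches each
	 * OTG over to the new dcn301 timing-generator implementation. */
	static struct timing_generator *dcn301_timing_generator_create(
			struct dc_context *ctx, uint32_t instance)
	{
		struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL);

		if (!tgn10)
			return NULL;

		tgn10->base.inst = instance;
		tgn10->base.ctx = ctx;
		/* tg_regs/tg_shift/tg_mask assignment elided */
		dcn301_timing_generator_init(tgn10);

		return &tgn10->base;
	}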
Some files were not shown because too many files have changed in this diff.