Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

net/mptcp/protocol.c
  977d293e23 ("mptcp: ensure tx skbs always have the MPTCP ext")
  efe686ffce ("mptcp: ensure tx skbs always have the MPTCP ext")

same patch merged in both trees, keep net-next.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 2fcd14d0f7 (Jakub Kicinski, 2021-09-23 11:19:49 -07:00)
332 changed files with 3501 additions and 1321 deletions


@@ -54,7 +54,7 @@ properties:
           - const: toradex,apalis_t30
           - const: nvidia,tegra30
       - items:
-          - const: toradex,apalis_t30-eval-v1.1
+          - const: toradex,apalis_t30-v1.1-eval
           - const: toradex,apalis_t30-eval
           - const: toradex,apalis_t30-v1.1
           - const: toradex,apalis_t30


@@ -9,7 +9,7 @@ function block.
 All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node.
 For a description of the MMSYS_CONFIG binding, see
-Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt.
+Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
 
 DISP function blocks
 ====================


@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/samsung,exynos-ufs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC series UFS host controller Device Tree Bindings
+
+maintainers:
+  - Alim Akhtar <alim.akhtar@samsung.com>
+
+description: |
+  Each Samsung UFS host controller instance should have its own node.
+  This binding defines Samsung-specific bindings other than what is used
+  in the common ufshcd bindings
+  [1] Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+
+properties:
+  compatible:
+    enum:
+      - samsung,exynos7-ufs
+
+  reg:
+    items:
+      - description: HCI register
+      - description: vendor specific register
+      - description: unipro register
+      - description: UFS protector register
+
+  reg-names:
+    items:
+      - const: hci
+      - const: vs_hci
+      - const: unipro
+      - const: ufsp
+
+  clocks:
+    items:
+      - description: ufs link core clock
+      - description: unipro main clock
+
+  clock-names:
+    items:
+      - const: core_clk
+      - const: sclk_unipro_main
+
+  interrupts:
+    maxItems: 1
+
+  phys:
+    maxItems: 1
+
+  phy-names:
+    const: ufs-phy
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - phys
+  - phy-names
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/exynos7-clk.h>
+
+    ufs: ufs@15570000 {
+        compatible = "samsung,exynos7-ufs";
+        reg = <0x15570000 0x100>,
+              <0x15570100 0x100>,
+              <0x15571000 0x200>,
+              <0x15572000 0x300>;
+        reg-names = "hci", "vs_hci", "unipro", "ufsp";
+        interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&clock_fsys1 ACLK_UFS20_LINK>,
+                 <&clock_fsys1 SCLK_UFSUNIPRO20_USER>;
+        clock-names = "core_clk", "sclk_unipro_main";
+        pinctrl-names = "default";
+        pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
+        phys = <&ufs_phy>;
+        phy-names = "ufs-phy";
+    };
+...


@@ -851,7 +851,7 @@ NOTES:
 - 0x88A8 traffic will not be received unless VLAN stripping is disabled with
   the following command::
-    # ethool -K <ethX> rxvlan off
+    # ethtool -K <ethX> rxvlan off
 - 0x88A8/0x8100 double VLANs cannot be used with 0x8100 or 0x8100/0x8100 VLANS
   configured on the same port. 0x88a8/0x8100 traffic will not be received if


@@ -296,7 +296,7 @@ not available.
 Device Tree bindings and board design
 =====================================
 
-This section references ``Documentation/devicetree/bindings/net/dsa/sja1105.txt``
+This section references ``Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml``
 and aims to showcase some potential switch caveats.
 
 RMII PHY role and out-of-band signaling


@@ -2804,9 +2804,8 @@ F: arch/arm/mach-pxa/include/mach/vpac270.h
 F:	arch/arm/mach-pxa/vpac270.c
 
 ARM/VT8500 ARM ARCHITECTURE
-M:	Tony Prisk <linux@prisktech.co.nz>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Maintained
+S:	Orphan
 F:	Documentation/devicetree/bindings/i2c/i2c-wmt.txt
 F:	arch/arm/mach-vt8500/
 F:	drivers/clocksource/timer-vt8500.c
@@ -13255,9 +13254,9 @@ F: Documentation/scsi/NinjaSCSI.rst
 F:	drivers/scsi/nsp32*
 
 NIOS2 ARCHITECTURE
-M:	Ley Foon Tan <ley.foon.tan@intel.com>
+M:	Dinh Nguyen <dinguyen@kernel.org>
 S:	Maintained
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 F:	arch/nios2/
 
 NITRO ENCLAVES (NE)
@@ -14342,7 +14341,8 @@ F: Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
 F:	drivers/pci/controller/pci-ixp4xx.c
 
 PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
-M:	Jonathan Derrick <jonathan.derrick@intel.com>
+M:	Nirmal Patel <nirmal.patel@linux.intel.com>
+R:	Jonathan Derrick <jonathan.derrick@linux.dev>
 L:	linux-pci@vger.kernel.org
 S:	Supported
 F:	drivers/pci/controller/vmd.c
@@ -16955,7 +16955,6 @@ F: drivers/misc/sgi-xp/
 SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
 M:	Karsten Graul <kgraul@linux.ibm.com>
-M:	Guvenc Gulce <guvenc@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
 W:	http://www.ibm.com/developerworks/linux/linux390/
@@ -17968,10 +17967,11 @@ F: Documentation/admin-guide/svga.rst
 F:	arch/x86/boot/video*
 
 SWIOTLB SUBSYSTEM
-M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:	Christoph Hellwig <hch@infradead.org>
 L:	iommu@lists.linux-foundation.org
 S:	Supported
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
+W:	http://git.infradead.org/users/hch/dma-mapping.git
+T:	git git://git.infradead.org/users/hch/dma-mapping.git
 F:	arch/*/kernel/pci-swiotlb.c
 F:	include/linux/swiotlb.h
 F:	kernel/dma/swiotlb.c
@@ -20474,7 +20474,6 @@ F: samples/bpf/xdpsock*
 F:	tools/lib/bpf/xsk*
 
 XEN BLOCK SUBSYSTEM
-M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 M:	Roger Pau Monné <roger.pau@citrix.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 S:	Supported
@@ -20522,7 +20521,7 @@ S: Supported
 F:	drivers/net/xen-netback/*
 
 XEN PCI SUBSYSTEM
-M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:	Juergen Gross <jgross@suse.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 S:	Supported
 F:	arch/x86/pci/*xen*
@@ -20545,7 +20544,8 @@ S: Supported
 F:	sound/xen/*
 
 XEN SWIOTLB SUBSYSTEM
-M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M:	Juergen Gross <jgross@suse.com>
+M:	Stefano Stabellini <sstabellini@kernel.org>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 L:	iommu@lists.linux-foundation.org
 S:	Supported


@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*


@@ -20,7 +20,7 @@ config ALPHA
 	select NEED_SG_DMA_LENGTH
 	select VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
-	select GENERIC_PCI_IOMAP if PCI
+	select GENERIC_PCI_IOMAP
 	select AUTO_IRQ_AFFINITY if SMP
 	select GENERIC_IRQ_SHOW
 	select ARCH_WANT_IPC_PARSE_VERSION
@@ -199,7 +199,6 @@ config ALPHA_EIGER
 config ALPHA_JENSEN
 	bool "Jensen"
-	depends on BROKEN
 	select HAVE_EISA
 	help
 	  DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one


@@ -16,3 +16,4 @@ extern void __divlu(void);
 extern void __remlu(void);
 extern void __divqu(void);
 extern void __remqu(void);
+extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, unsigned long, unsigned long);


@@ -111,18 +111,18 @@ __EXTERN_INLINE void jensen_set_hae(unsigned long addr)
  * convinced that I need one of the newer machines.
  */
 
-static inline unsigned int jensen_local_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr)
 {
 	return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
 }
 
-static inline void jensen_local_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr)
 {
 	*(vuip)((addr << 9) + EISA_VL82C106) = b;
 	mb();
 }
 
-static inline unsigned int jensen_bus_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr)
 {
 	long result;
@@ -131,7 +131,7 @@ static inline unsigned int jensen_bus_inb(unsigned long addr)
 	return __kernel_extbl(result, addr & 3);
 }
 
-static inline void jensen_bus_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr)
 {
 	jensen_set_hae(0);
 	*(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;


@@ -7,6 +7,11 @@
  *
  * Code supporting the Jensen.
  */
+#define __EXTERN_INLINE
+#include <asm/io.h>
+#include <asm/jensen.h>
+#undef  __EXTERN_INLINE
+
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -17,11 +22,6 @@
 #include <asm/ptrace.h>
 
-#define __EXTERN_INLINE inline
-#include <asm/io.h>
-#include <asm/jensen.h>
-#undef  __EXTERN_INLINE
-
 #include <asm/dma.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
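The reordering above works because __EXTERN_INLINE must already be defined when any header that (transitively) pulls in asm/jensen.h is read; that is how exactly one translation unit emits out-of-line definitions of helpers that every other file only inlines. A minimal user-space sketch of the idiom, with illustrative file and function names, relying on gnu_inline semantics as the kernel does:

/* io_helpers.h: by default the helpers are extern-inline, which under
 * gnu_inline rules never emits a standalone definition. */
#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline __attribute__((gnu_inline))
#endif

__EXTERN_INLINE unsigned int local_inb(unsigned long addr)
{
	return addr & 0xff;	/* stand-in for the real MMIO read */
}

/* main.c: the one file that defines __EXTERN_INLINE to nothing BEFORE
 * the include turns the same header text into the sole external
 * definition, which is why the #define has to precede every header
 * that might include io_helpers.h first. */
#define __EXTERN_INLINE
#include "io_helpers.h"
#undef __EXTERN_INLINE

#include <stdio.h>

int main(void)
{
	printf("%u\n", local_inb(0x1ff));
	return 0;
}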


@@ -14,6 +14,7 @@ ev6-$(CONFIG_ALPHA_EV6) := ev6-
 ev67-$(CONFIG_ALPHA_EV67) := ev67-
 
 lib-y =	__divqu.o __remqu.o __divlu.o __remlu.o \
+	udiv-qrnnd.o \
 	udelay.o \
 	$(ev6-y)memset.o \
 	$(ev6-y)memcpy.o \


@@ -25,6 +25,7 @@
 # along with GCC; see the file COPYING.  If not, write to the
 # Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 # MA 02111-1307, USA.
+#include <asm/export.h>
 
 	.set noreorder
 	.set noat
@@ -161,3 +162,4 @@ $Odd:
 	ret $31,($26),1
 
 	.end __udiv_qrnnd
+EXPORT_SYMBOL(__udiv_qrnnd)


@@ -7,4 +7,4 @@ ccflags-y := -w
 
 obj-$(CONFIG_MATHEMU) += math-emu.o
 
-math-emu-objs := math.o qrnnd.o
+math-emu-objs := math.o


@@ -403,5 +403,3 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
 egress:
 	return si_code;
 }
-
-EXPORT_SYMBOL(__udiv_qrnnd);


@@ -513,7 +513,7 @@ size_t sve_state_size(struct task_struct const *task)
 void sve_alloc(struct task_struct *task)
 {
 	if (task->thread.sve_state) {
-		memset(task->thread.sve_state, 0, sve_state_size(current));
+		memset(task->thread.sve_state, 0, sve_state_size(task));
 		return;
 	}


@@ -18,7 +18,6 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/nospec.h>
-#include <linux/sched.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
 #include <linux/unistd.h>
@@ -58,7 +57,7 @@
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
+unsigned long __stack_chk_guard __ro_after_init;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif


@@ -513,12 +513,15 @@ void ioport_unmap(void __iomem *addr)
 	}
 }
 
+#ifdef CONFIG_PCI
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
 	if (!INDIRECT_ADDR(addr)) {
 		iounmap(addr);
 	}
 }
+EXPORT_SYMBOL(pci_iounmap);
+#endif
 
 EXPORT_SYMBOL(ioread8);
 EXPORT_SYMBOL(ioread16);
@@ -544,4 +547,3 @@ EXPORT_SYMBOL(iowrite16_rep);
 EXPORT_SYMBOL(iowrite32_rep);
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
-EXPORT_SYMBOL(pci_iounmap);


@@ -18,6 +18,7 @@
 #include <asm/switch_to.h>
 #include <asm/syscall.h>
 #include <asm/time.h>
+#include <asm/tm.h>
 #include <asm/unistd.h>
 
 #if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
@@ -136,6 +137,48 @@ notrace long system_call_exception(long r3, long r4, long r5,
 	 */
 	irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
 
+	/*
+	 * If system call is called with TM active, set _TIF_RESTOREALL to
+	 * prevent RFSCV being used to return to userspace, because POWER9
+	 * TM implementation has problems with this instruction returning to
+	 * transactional state. Final register values are not relevant because
+	 * the transaction will be aborted upon return anyway. Or in the case
+	 * of unsupported_scv SIGILL fault, the return state does not much
+	 * matter because it's an edge case.
+	 */
+	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+	    unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
+		current_thread_info()->flags |= _TIF_RESTOREALL;
+
+	/*
+	 * If the system call was made with a transaction active, doom it and
+	 * return without performing the system call. Unless it was an
+	 * unsupported scv vector, in which case it's treated like an illegal
+	 * instruction.
+	 */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
+	    !trap_is_unsupported_scv(regs)) {
+		/* Enable TM in the kernel, and disable EE (for scv) */
+		hard_irq_disable();
+		mtmsr(mfmsr() | MSR_TM);
+
+		/* tabort, this dooms the transaction, nothing else */
+		asm volatile(".long 0x7c00071d | ((%0) << 16)"
+			     :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
+
+		/*
+		 * Userspace will never see the return value. Execution will
+		 * resume after the tbegin. of the aborted transaction with the
+		 * checkpointed register state. A context switch could occur
+		 * or signal delivered to the process before resuming the
+		 * doomed transaction context, but that should all be handled
+		 * as expected.
+		 */
+		return -ENOSYS;
+	}
+#endif // CONFIG_PPC_TRANSACTIONAL_MEM
+
 	local_irq_enable();
 
 	if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {


@@ -12,7 +12,6 @@
 #include <asm/mmu.h>
 #include <asm/ppc_asm.h>
 #include <asm/ptrace.h>
-#include <asm/tm.h>
 
 	.section ".toc","aw"
 SYS_CALL_TABLE:
@@ -55,12 +54,6 @@ COMPAT_SYS_CALL_TABLE:
 	.globl system_call_vectored_\name
 system_call_vectored_\name:
 _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
-	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
-	bne	tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
 	SCV_INTERRUPT_TO_KERNEL
 	mr	r10,r1
 	ld	r1,PACAKSAVE(r13)
@@ -247,12 +240,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_common_real)
 	.globl system_call_common
 system_call_common:
 _ASM_NOKPROBE_SYMBOL(system_call_common)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
-	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
-	bne	tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
 	mr	r10,r1
 	ld	r1,PACAKSAVE(r13)
 	std	r10,0(r1)
@@ -425,34 +412,6 @@ SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
 RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
 #endif
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-tabort_syscall:
-_ASM_NOKPROBE_SYMBOL(tabort_syscall)
-	/* Firstly we need to enable TM in the kernel */
-	mfmsr	r10
-	li	r9, 1
-	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
-	mtmsrd	r10, 0
-
-	/* tabort, this dooms the transaction, nothing else */
-	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
-	TABORT(R9)
-
-	/*
-	 * Return directly to userspace. We have corrupted user register state,
-	 * but userspace will never see that register state. Execution will
-	 * resume after the tbegin of the aborted transaction with the
-	 * checkpointed register state.
-	 */
-	li	r9, MSR_RI
-	andc	r10, r10, r9
-	mtmsrd	r10, 1
-	mtspr	SPRN_SRR0, r11
-	mtspr	SPRN_SRR1, r12
-	RFI_TO_USER
-	b	.	/* prevent speculative execution */
-#endif
-
 /*
  * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
  * touched, no exit work created, then this can be used.


@@ -249,6 +249,7 @@ void machine_check_queue_event(void)
 {
 	int index;
 	struct machine_check_event evt;
+	unsigned long msr;
 
 	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
 		return;
@@ -262,8 +263,20 @@
 	memcpy(&local_paca->mce_info->mce_event_queue[index],
 	       &evt, sizeof(evt));
 
-	/* Queue irq work to process this event later. */
-	irq_work_queue(&mce_event_process_work);
+	/*
+	 * Queue irq work to process this event later. Before
+	 * queuing the work enable translation for non radix LPAR,
+	 * as irq_work_queue may try to access memory outside RMO
+	 * region.
+	 */
+	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
+		msr = mfmsr();
+		mtmsr(msr | MSR_IR | MSR_DR);
+		irq_work_queue(&mce_event_process_work);
+		mtmsr(msr);
+	} else {
+		irq_work_queue(&mce_event_process_work);
+	}
 }
 
 void mce_common_process_ue(struct pt_regs *regs,


@@ -2536,7 +2536,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
 	/* The following code handles the fake_suspend = 1 case */
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
-	stdu	r1, -PPC_MIN_STKFRM(r1)
+	stdu	r1, -TM_FRAME_SIZE(r1)
 
 	/* Turn on TM. */
 	mfmsr	r8
@@ -2551,10 +2551,42 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
 	nop
 
+	/*
+	 * It's possible that treclaim. may modify registers, if we have lost
+	 * track of fake-suspend state in the guest due to it using rfscv.
+	 * Save and restore registers in case this occurs.
+	 */
+	mfspr	r3, SPRN_DSCR
+	mfspr	r4, SPRN_XER
+	mfspr	r5, SPRN_AMR
+	/* SPRN_TAR would need to be saved here if the kernel ever used it */
+	mfcr	r12
+	SAVE_NVGPRS(r1)
+	SAVE_GPR(2, r1)
+	SAVE_GPR(3, r1)
+	SAVE_GPR(4, r1)
+	SAVE_GPR(5, r1)
+	stw	r12, 8(r1)
+	std	r1, HSTATE_HOST_R1(r13)
+
 	/* We have to treclaim here because that's the only way to do S->N */
 	li	r3, TM_CAUSE_KVM_RESCHED
 	TRECLAIM(R3)
 
+	GET_PACA(r13)
+	ld	r1, HSTATE_HOST_R1(r13)
+	REST_GPR(2, r1)
+	REST_GPR(3, r1)
+	REST_GPR(4, r1)
+	REST_GPR(5, r1)
+	lwz	r12, 8(r1)
+	REST_NVGPRS(r1)
+	mtspr	SPRN_DSCR, r3
+	mtspr	SPRN_XER, r4
+	mtspr	SPRN_AMR, r5
+	mtcr	r12
+	HMT_MEDIUM
+
 	/*
 	 * We were in fake suspend, so we are not going to save the
 	 * register state as the guest checkpointed state (since
@@ -2582,7 +2614,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
 	std	r5, VCPU_TFHAR(r9)
 	std	r6, VCPU_TFIAR(r9)
 
-	addi	r1, r1, PPC_MIN_STKFRM
+	addi	r1, r1, TM_FRAME_SIZE
 	ld	r0, PPC_LR_STKOFF(r1)
 	mtlr	r0
 	blr


@@ -348,9 +348,9 @@ static int xics_host_map(struct irq_domain *domain, unsigned int virq,
 	if (xics_ics->check(xics_ics, hwirq))
 		return -EINVAL;
 
-	/* No chip data for the XICS domain */
+	/* Let the ICS be the chip data for the XICS domain. For ICS native */
 	irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
-			    NULL, handle_fasteoi_irq, NULL, NULL);
+			    xics_ics, handle_fasteoi_irq, NULL, NULL);
 
 	return 0;
 }


@@ -685,16 +685,6 @@ config STACK_GUARD
 	  The minimum size for the stack guard should be 256 for 31 bit and
 	  512 for 64 bit.
 
-config WARN_DYNAMIC_STACK
-	def_bool n
-	prompt "Emit compiler warnings for function with dynamic stack usage"
-	help
-	  This option enables the compiler option -mwarn-dynamicstack. If the
-	  compiler supports this options generates warnings for functions
-	  that dynamically allocate stack space using alloca.
-
-	  Say N if you are unsure.
-
 endmenu
 
 menu "I/O subsystem"


@@ -85,13 +85,6 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
 endif
 endif
 
-ifdef CONFIG_WARN_DYNAMIC_STACK
-  ifneq ($(call cc-option,-mwarn-dynamicstack),)
-    KBUILD_CFLAGS += -mwarn-dynamicstack
-    KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
-  endif
-endif
-
 ifdef CONFIG_EXPOLINE
   ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),)
     CC_FLAGS_EXPOLINE := -mindirect-branch=thunk


@@ -10,6 +10,7 @@ CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
 CONFIG_PREEMPT=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -503,6 +504,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -661,7 +663,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
@@ -720,6 +721,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
 CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
@@ -774,7 +777,6 @@ CONFIG_RANDOM32_SELFTEST=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_DMA_API_DEBUG=y
-CONFIG_STRING_SELFTEST=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
@@ -853,12 +855,12 @@ CONFIG_FAIL_FUNCTION=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_MIN_HEAP=y
-CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_STRING_SELFTEST=y
 CONFIG_TEST_BITOPS=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_LIVEPATCH=m


@@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y
 CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -494,6 +495,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -648,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
@@ -708,6 +709,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
 CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m


@@ -55,7 +55,7 @@ int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
 			int num_devices, const char *buf);
 extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
-extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
+int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv);
 
 extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
 extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);


@@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)		\
 ({								\
-	/* Branch instruction needs 6 bytes */			\
-	int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
+	int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2;	\
 	_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
 	REG_SET_SEEN(b1);					\
 	REG_SET_SEEN(b2);					\
@@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT4(0xb9080000, dst_reg, src_reg);
 		break;
 	case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
-		if (!imm)
-			break;
-		/* alfi %dst,imm */
-		EMIT6_IMM(0xc20b0000, dst_reg, imm);
+		if (imm != 0) {
+			/* alfi %dst,imm */
+			EMIT6_IMM(0xc20b0000, dst_reg, imm);
+		}
 		EMIT_ZERO(dst_reg);
 		break;
 	case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
@@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT4(0xb9090000, dst_reg, src_reg);
 		break;
 	case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
-		if (!imm)
-			break;
-		/* alfi %dst,-imm */
-		EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+		if (imm != 0) {
+			/* alfi %dst,-imm */
+			EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+		}
 		EMIT_ZERO(dst_reg);
 		break;
 	case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
 		if (!imm)
 			break;
-		/* agfi %dst,-imm */
-		EMIT6_IMM(0xc2080000, dst_reg, -imm);
+		if (imm == -0x80000000) {
+			/* algfi %dst,0x80000000 */
+			EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
+		} else {
+			/* agfi %dst,-imm */
+			EMIT6_IMM(0xc2080000, dst_reg, -imm);
+		}
 		break;
 	/*
 	 * BPF_MUL
@@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT4(0xb90c0000, dst_reg, src_reg);
 		break;
 	case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
-		if (imm == 1)
-			break;
-		/* msfi %r5,imm */
-		EMIT6_IMM(0xc2010000, dst_reg, imm);
+		if (imm != 1) {
+			/* msfi %r5,imm */
+			EMIT6_IMM(0xc2010000, dst_reg, imm);
+		}
 		EMIT_ZERO(dst_reg);
 		break;
 	case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
@@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		if (BPF_OP(insn->code) == BPF_MOD)
 			/* lhgi %dst,0 */
 			EMIT4_IMM(0xa7090000, dst_reg, 0);
+		else
+			EMIT_ZERO(dst_reg);
 		break;
 	}
 	/* lhi %w0,0 */
@@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT4(0xb9820000, dst_reg, src_reg);
 		break;
 	case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
-		if (!imm)
-			break;
-		/* xilf %dst,imm */
-		EMIT6_IMM(0xc0070000, dst_reg, imm);
+		if (imm != 0) {
+			/* xilf %dst,imm */
+			EMIT6_IMM(0xc0070000, dst_reg, imm);
+		}
 		EMIT_ZERO(dst_reg);
 		break;
 	case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
@@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
 		break;
 	case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
-		if (imm == 0)
-			break;
-		/* sll %dst,imm(%r0) */
-		EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+		if (imm != 0) {
+			/* sll %dst,imm(%r0) */
+			EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+		}
 		EMIT_ZERO(dst_reg);
 		break;
 	case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
@@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
 		break;
 	case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
-		if (imm == 0)
-			break;
-		/* srl %dst,imm(%r0) */
-		EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+		if (imm != 0) {
+			/* srl %dst,imm(%r0) */
+			EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+		}
 		EMIT_ZERO(dst_reg);
 		break;
 	case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
@@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 		EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
 		break;
 	case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
-		if (imm == 0)
-			break;
-		/* sra %dst,imm(%r0) */
-		EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+		if (imm != 0) {
+			/* sra %dst,imm(%r0) */
+			EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+		}
 		EMIT_ZERO(dst_reg);
 		break;
 	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
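One subtlety behind the imm-handling hunks: even when the immediate makes the operation an arithmetic no-op (add 0, multiply by 1, shift by 0), BPF's 32-bit ALU semantics still require the destination's upper 32 bits to be cleared, so the early break that skipped EMIT_ZERO() was a correctness bug. A minimal user-space illustration of the required semantics (not the JIT itself):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* BPF ALU32: the 32-bit result is zero-extended into the 64-bit
 * register, even when the operation itself changes nothing. */
static uint64_t alu32_add(uint64_t dst, int32_t imm)
{
	return (uint32_t)((uint32_t)dst + (uint32_t)imm);
}

int main(void)
{
	uint64_t dst = 0xdeadbeef00000001ULL;

	/* Skipping the zero-extension for imm == 0 would leave
	 * 0xdeadbeef in the upper half, the bug these hunks fix. */
	assert(alu32_add(dst, 0) == 0x1ULL);
	printf("0x%llx\n", (unsigned long long)alu32_add(dst, 0));
	return 0;
}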


@@ -159,7 +159,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
 	mmap_read_lock(current->mm);
 	ret = -EINVAL;
-	vma = find_vma(current->mm, mmio_addr);
+	vma = vma_lookup(current->mm, mmio_addr);
 	if (!vma)
 		goto out_unlock_mmap;
 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
@@ -298,7 +298,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
 	mmap_read_lock(current->mm);
 	ret = -EINVAL;
-	vma = find_vma(current->mm, mmio_addr);
+	vma = vma_lookup(current->mm, mmio_addr);
 	if (!vma)
 		goto out_unlock_mmap;
 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))


@@ -80,30 +80,30 @@ $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
 $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,lzo)
 
-$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
 	$(call if_changed,uimage,bzip2)
 
-$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
 	$(call if_changed,uimage,gzip)
 
-$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
 	$(call if_changed,uimage,lzma)
 
-$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
+$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE
 	$(call if_changed,uimage,xz)
 
-$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
 	$(call if_changed,uimage,lzo)
 
-$(obj)/uImage.bin: $(obj)/vmlinux.bin
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,uimage,none)
 
 OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
-$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
+$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux FORCE
 	$(call if_changed,objcopy)
 
 OBJCOPYFLAGS_uImage.srec := -I binary -O srec
-$(obj)/uImage.srec: $(obj)/uImage
+$(obj)/uImage.srec: $(obj)/uImage FORCE
 	$(call if_changed,objcopy)
 
 $(obj)/uImage: $(obj)/uImage.$(suffix-y)


@@ -356,7 +356,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs)
 {
-	if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
+	size = PAGE_ALIGN(size);
+	if (!sparc_dma_free_resource(cpu_addr, size))
 		return;
 
 	dma_make_coherent(dma_addr, size);


@@ -19,8 +19,10 @@ void ioport_unmap(void __iomem *addr)
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
 
+#ifdef CONFIG_PCI
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
 	/* nothing to do */
 }
 EXPORT_SYMBOL(pci_iounmap);
+#endif


@@ -339,6 +339,11 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
 config ARCH_HIBERNATION_POSSIBLE
 	def_bool y
 
+config ARCH_NR_GPIO
+	int
+	default 1024 if X86_64
+	default 512
+
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y


@@ -4,6 +4,12 @@
 
 tune		= $(call cc-option,-mtune=$(1),$(2))
 
+ifdef CONFIG_CC_IS_CLANG
+align		:= -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0)
+else
+align		:= -falign-functions=0 -falign-jumps=0 -falign-loops=0
+endif
+
 cflags-$(CONFIG_M486SX)		+= -march=i486
 cflags-$(CONFIG_M486)		+= -march=i486
 cflags-$(CONFIG_M586)		+= -march=i586
@@ -19,11 +25,11 @@ cflags-$(CONFIG_MK6)		+= -march=k6
 # They make zero difference whatsosever to performance at this time.
 cflags-$(CONFIG_MK7)		+= -march=athlon
 cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
-cflags-$(CONFIG_MCRUSOE)	+= -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
-cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCRUSOE)	+= -march=i686 $(align)
+cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) $(align)
 cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
 cflags-$(CONFIG_MWINCHIP3D)	+= $(call cc-option,-march=winchip2,-march=i586)
-cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-option,-march=c3,-march=i486) $(align)
 cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
 cflags-$(CONFIG_MVIAC7)		+= -march=i686
 cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)


@@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
 static void kill_me_now(struct callback_head *ch)
 {
+	struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
+
+	p->mce_count = 0;
 	force_sig(SIGBUS);
 }
 
@@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
 	int flags = MF_ACTION_REQUIRED;
 	int ret;
 
+	p->mce_count = 0;
 	pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
 
 	if (!p->mce_ripv)
@@ -1290,17 +1294,34 @@
 	}
 }
 
-static void queue_task_work(struct mce *m, int kill_current_task)
+static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
 {
-	current->mce_addr = m->addr;
-	current->mce_kflags = m->kflags;
-	current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
-	current->mce_whole_page = whole_page(m);
+	int count = ++current->mce_count;
 
-	if (kill_current_task)
-		current->mce_kill_me.func = kill_me_now;
-	else
-		current->mce_kill_me.func = kill_me_maybe;
+	/* First call, save all the details */
+	if (count == 1) {
+		current->mce_addr = m->addr;
+		current->mce_kflags = m->kflags;
+		current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+		current->mce_whole_page = whole_page(m);
+
+		if (kill_current_task)
+			current->mce_kill_me.func = kill_me_now;
+		else
+			current->mce_kill_me.func = kill_me_maybe;
+	}
+
+	/* Ten is likely overkill. Don't expect more than two faults before task_work() */
+	if (count > 10)
+		mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
+
+	/* Second or later call, make sure page address matches the one from first call */
+	if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
+		mce_panic("Consecutive machine checks to different user pages", m, msg);
+
+	/* Do not call task_work_add() more than once */
+	if (count > 1)
+		return;
 
 	task_work_add(current, &current->mce_kill_me, TWA_RESUME);
 }
@@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
 		/* If this triggers there is no way to recover. Die hard. */
 		BUG_ON(!on_thread_stack() || !user_mode(regs));
 
-		queue_task_work(&m, kill_current_task);
+		queue_task_work(&m, msg, kill_current_task);
 
 	} else {
 		/*
@@ -1456,7 +1477,7 @@
 		}
 
 		if (m.kflags & MCE_IN_KERNEL_COPYIN)
-			queue_task_work(&m, kill_current_task);
+			queue_task_work(&m, msg, kill_current_task);
 	}
 out:
 	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
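The rewritten queue_task_work() uses a per-task counter to stay idempotent when several machine checks land before task_work runs: only the first occurrence records details and queues the work, later ones just validate. A condensed user-space model of that control flow (the types, panic(), and queue flag are stand-ins):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

struct task { unsigned long mce_addr; int mce_count; int queued; };

static void panic(const char *msg)
{
	fprintf(stderr, "panic: %s\n", msg);
	exit(1);
}

static void queue_task_work(struct task *t, unsigned long addr)
{
	int count = ++t->mce_count;

	if (count == 1)
		t->mce_addr = addr;		/* first call saves the details */
	if (count > 10)
		panic("too many consecutive machine checks");
	if (count > 1 && (t->mce_addr >> PAGE_SHIFT) != (addr >> PAGE_SHIFT))
		panic("consecutive machine checks to different pages");
	if (count > 1)
		return;				/* work is already queued */
	t->queued = 1;				/* task_work_add() equivalent */
}

int main(void)
{
	struct task t = { 0 };

	queue_task_work(&t, 0x1000);
	queue_task_work(&t, 0x1008);	/* same page: tolerated */
	printf("queued=%d count=%d\n", t.queued, t.mce_count);
	return 0;
}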


@@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr)
 		return 0;
 
 	p4d = p4d_offset(pgd, addr);
-	if (p4d_none(*p4d))
+	if (!p4d_present(*p4d))
 		return 0;
 
 	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud))
+	if (!pud_present(*pud))
 		return 0;
 
 	if (pud_large(*pud))
 		return pfn_valid(pud_pfn(*pud));
 
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	if (!pmd_present(*pmd))
 		return 0;
 
 	if (pmd_large(*pmd))

@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
 	int err = 0;
 
 	start = sanitize_phys(start);
-	end = sanitize_phys(end);
+
+	/*
+	 * The end address passed into this function is exclusive, but
+	 * sanitize_phys() expects an inclusive address.
+	 */
+	end = sanitize_phys(end - 1) + 1;
 	if (start >= end) {
 		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
 		     start, end - 1, cattr_name(req_type));
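The comment added above captures an easy-to-miss convention: memtype_reserve() takes an exclusive end address while sanitize_phys() clamps an inclusive one, so the last valid byte is sanitized and the exclusive form rebuilt. A small sketch of why sanitizing the exclusive end directly goes wrong (PHYS_LIMIT is an illustrative clamp, not the real MAXPHYADDR mask):

#include <stdint.h>
#include <stdio.h>

#define PHYS_LIMIT 0x0000ffffffffffffULL	/* hypothetical top valid address */

/* Clamps an inclusive physical address, as sanitize_phys() does. */
static uint64_t sanitize_phys(uint64_t addr)
{
	return addr > PHYS_LIMIT ? PHYS_LIMIT : addr;
}

int main(void)
{
	uint64_t end = PHYS_LIMIT + 1;	/* exclusive end of a full range */

	/* Old code: clamping the exclusive end silently drops the last byte. */
	uint64_t bad = sanitize_phys(end);

	/* Fixed code: sanitize the last byte, then convert back to exclusive. */
	uint64_t good = sanitize_phys(end - 1) + 1;

	printf("bad=0x%llx good=0x%llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}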


@@ -1214,6 +1214,11 @@ static void __init xen_dom0_set_legacy_features(void)
 	x86_platform.legacy.rtc = 1;
 }
 
+static void __init xen_domu_set_legacy_features(void)
+{
+	x86_platform.legacy.rtc = 0;
+}
+
 /* First C function to be called on Xen boot */
 asmlinkage __visible void __init xen_start_kernel(void)
 {
@@ -1359,6 +1364,8 @@
 		add_preferred_console("xenboot", 0, NULL);
 		if (pci_xen)
 			x86_init.pci.arch_init = pci_xen_init;
+		x86_platform.set_legacy_features =
+				xen_domu_set_legacy_features;
 	} else {
 		const struct dom0_vga_console_info *info =
 			(void *)((char *)xen_start_info +


@@ -1518,14 +1518,17 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
 	if (pinned) {
 		struct page *page = pfn_to_page(pfn);
 
-		if (static_branch_likely(&xen_struct_pages_ready))
+		pinned = false;
+		if (static_branch_likely(&xen_struct_pages_ready)) {
+			pinned = PagePinned(page);
 			SetPagePinned(page);
+		}
 
 		xen_mc_batch();
 
 		__set_pfn_prot(pfn, PAGE_KERNEL_RO);
 
-		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
 			__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
 		xen_mc_issue(PARAVIRT_LAZY_MMU);


@@ -1182,10 +1182,6 @@ int blkcg_init_queue(struct request_queue *q)
 	if (preloaded)
 		radix_tree_preload_end();
 
-	ret = blk_iolatency_init(q);
-	if (ret)
-		goto err_destroy_all;
-
 	ret = blk_ioprio_init(q);
 	if (ret)
 		goto err_destroy_all;
@@ -1194,6 +1190,12 @@
 	if (ret)
 		goto err_destroy_all;
 
+	ret = blk_iolatency_init(q);
+	if (ret) {
+		blk_throtl_exit(q);
+		goto err_destroy_all;
+	}
+
 	return 0;
 
 err_destroy_all:
@@ -1364,10 +1366,14 @@ int blkcg_activate_policy(struct request_queue *q,
 	/* alloc failed, nothing's initialized yet, free everything */
 	spin_lock_irq(&q->queue_lock);
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+		struct blkcg *blkcg = blkg->blkcg;
+
+		spin_lock(&blkcg->lock);
 		if (blkg->pd[pol->plid]) {
 			pol->pd_free_fn(blkg->pd[pol->plid]);
 			blkg->pd[pol->plid] = NULL;
 		}
+		spin_unlock(&blkcg->lock);
 	}
 	spin_unlock_irq(&q->queue_lock);
 	ret = -ENOMEM;
@@ -1399,12 +1405,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	__clear_bit(pol->plid, q->blkcg_pols);
 
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+		struct blkcg *blkcg = blkg->blkcg;
+
+		spin_lock(&blkcg->lock);
 		if (blkg->pd[pol->plid]) {
 			if (pol->pd_offline_fn)
 				pol->pd_offline_fn(blkg->pd[pol->plid]);
 			pol->pd_free_fn(blkg->pd[pol->plid]);
 			blkg->pd[pol->plid] = NULL;
 		}
+		spin_unlock(&blkcg->lock);
 	}
 
 	spin_unlock_irq(&q->queue_lock);


@@ -426,8 +426,15 @@ EXPORT_SYMBOL(blk_integrity_register);
  */
 void blk_integrity_unregister(struct gendisk *disk)
 {
+	struct blk_integrity *bi = &disk->queue->integrity;
+
+	if (!bi->profile)
+		return;
+
+	/* ensure all bios are off the integrity workqueue */
+	blk_flush_integrity();
 	blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
-	memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+	memset(bi, 0, sizeof(*bi));
 }
 EXPORT_SYMBOL(blk_integrity_unregister);


@@ -208,7 +208,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
 
 	spin_lock_irqsave(&tags->lock, flags);
 	rq = tags->rqs[bitnr];
-	if (!rq || !refcount_inc_not_zero(&rq->ref))
+	if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
 		rq = NULL;
 	spin_unlock_irqrestore(&tags->lock, flags);
 	return rq;
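The extra rq->tag != bitnr test closes a window in which the cached pointer in tags->rqs[] can already refer to a request recycled for another tag. A toy model of the validate-after-lookup pattern (kernel locking and refcounting reduced to plain fields):

#include <stdio.h>

struct request { int tag; int ref; };

/* Mirrors blk_mq_find_and_get_req(): the slot may be stale, so the
 * request's own tag is re-checked before a reference is taken. */
static struct request *find_and_get_req(struct request **rqs, int bitnr)
{
	struct request *rq = rqs[bitnr];

	if (!rq || rq->tag != bitnr || rq->ref == 0)
		return NULL;
	rq->ref++;
	return rq;
}

int main(void)
{
	struct request recycled = { .tag = 7, .ref = 1 };
	struct request *rqs[8] = { [0] = &recycled };

	/* Slot 0 still points at a request that now belongs to tag 7,
	 * so the lookup for tag 0 must fail. */
	printf("%s\n", find_and_get_req(rqs, 0) ? "got rq" : "stale, rejected");
	return 0;
}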


@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/rtc.h>
 #include <linux/suspend.h>
+#include <linux/init.h>
 
 #include <linux/mc146818rtc.h>
 
@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
 	const char *file = *(const char **)(tracedata + 2);
 	unsigned int user_hash_value, file_hash_value;
 
+	if (!x86_platform.legacy.rtc)
+		return;
+
 	user_hash_value = user % USERHASH;
 	file_hash_value = hash_string(lineno, file, FILEHASH);
 	set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
 
 static int __init early_resume_init(void)
 {
+	if (!x86_platform.legacy.rtc)
+		return 0;
+
 	hash_value_early_read = read_magic_time();
 	register_pm_notifier(&pm_trace_nb);
 	return 0;
@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
 	unsigned int val = hash_value_early_read;
 	unsigned int user, file, dev;
 
+	if (!x86_platform.legacy.rtc)
+		return 0;
+
 	user = val % USERHASH;
 	val = val / USERHASH;
 	file = val % FILEHASH;


@@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l
 	if (count)
 		return count;
 
-	kobject_put(&attr_set->kobj);
 	mutex_destroy(&attr_set->update_lock);
+	kobject_put(&attr_set->kobj);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(gov_attr_set_put);
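The swap above fixes an order-of-teardown bug: kobject_put() can drop the last reference and free the object that embeds the mutex, after which mutex_destroy() would touch freed memory. A minimal analogue of the rule (destroy members before the final put) using plain refcounting:

#include <stdlib.h>
#include <pthread.h>

struct attr_set {
	pthread_mutex_t update_lock;
	int refcount;
};

static void put(struct attr_set *s)
{
	if (--s->refcount == 0)
		free(s);	/* frees the embedded mutex too */
}

static void attr_set_release(struct attr_set *s)
{
	/* Fixed order: tear down the member first; putting the reference
	 * first could free s and make the destroy a use-after-free. */
	pthread_mutex_destroy(&s->update_lock);
	put(s);
}

int main(void)
{
	struct attr_set *s = calloc(1, sizeof(*s));

	pthread_mutex_init(&s->update_lock, NULL);
	s->refcount = 1;
	attr_set_release(s);
	return 0;
}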


@@ -3205,11 +3205,15 @@ static int __init intel_pstate_init(void)
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 		return -ENODEV;
 
-	if (no_load)
-		return -ENODEV;
-
 	id = x86_match_cpu(hwp_support_ids);
 	if (id) {
+		bool hwp_forced = intel_pstate_hwp_is_enabled();
+
+		if (hwp_forced)
+			pr_info("HWP enabled by BIOS\n");
+		else if (no_load)
+			return -ENODEV;
+
 		copy_cpu_funcs(&core_funcs);
 		/*
 		 * Avoid enabling HWP for processors without EPP support,
@@ -3219,8 +3223,7 @@
 		 * If HWP is enabled already, though, there is no choice but to
 		 * deal with it.
 		 */
-		if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
-		    intel_pstate_hwp_is_enabled()) {
+		if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
 			hwp_active++;
 			hwp_mode_bdw = id->driver_data;
 			intel_pstate.attr = hwp_cpufreq_attrs;
@@ -3235,7 +3238,11 @@
 			goto hwp_cpu_matched;
 		}
+		pr_info("HWP not enabled\n");
 	} else {
+		if (no_load)
+			return -ENODEV;
+
 		id = x86_match_cpu(intel_pstate_cpu_ids);
 		if (!id) {
 			pr_info("CPU model not supported\n");
@@ -3314,10 +3321,9 @@ static int __init intel_pstate_setup(char *str)
 	else if (!strcmp(str, "passive"))
 		default_driver = &intel_cpufreq;
 
-	if (!strcmp(str, "no_hwp")) {
-		pr_info("HWP disabled\n");
+	if (!strcmp(str, "no_hwp"))
 		no_hwp = 1;
-	}
+
 	if (!strcmp(str, "force"))
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))


@@ -758,7 +758,7 @@ enum amd_hw_ip_block_type {
 	MAX_HWIP
 };
 
-#define HWIP_MAX_INSTANCE	8
+#define HWIP_MAX_INSTANCE	10
 
 struct amd_powerplay {
 	void *pp_handle;


@@ -192,6 +192,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
 		kgd2kfd_suspend(adev->kfd.dev, run_pm);
 }
 
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
+{
+	int r = 0;
+
+	if (adev->kfd.dev)
+		r = kgd2kfd_resume_iommu(adev->kfd.dev);
+
+	return r;
+}
+
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
 {
 	int r = 0;


@@ -137,6 +137,7 @@ int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
 
 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 			const void *ih_ring_entry);
@@ -327,6 +328,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 			 const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
@@ -365,6 +367,11 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
 }
 
+static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
 static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
 	return 0;


@ -1544,20 +1544,18 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
struct dentry *ent; struct dentry *ent;
int r, i; int r, i;
ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev, ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
&fops_ib_preempt); &fops_ib_preempt);
if (!ent) { if (IS_ERR(ent)) {
DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n"); DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
return -EIO; return PTR_ERR(ent);
} }
ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev, ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
&fops_sclk_set); &fops_sclk_set);
if (!ent) { if (IS_ERR(ent)) {
DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n"); DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
return -EIO; return PTR_ERR(ent);
} }
/* Register debugfs entries for amdgpu_ttm */ /* Register debugfs entries for amdgpu_ttm */
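
A hedged sketch of the error-handling idiom this hunk adopts: debugfs_create_file() reports failure through ERR_PTR() encoding rather than NULL, so IS_ERR()/PTR_ERR() is the correct check. Everything below except the debugfs API is illustrative:

struct dentry *ent;

ent = debugfs_create_file("example_node", 0600, root, priv, &example_fops);
if (IS_ERR(ent))        /* a NULL check would never fire here */
        return PTR_ERR(ent);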

View File

@ -2394,6 +2394,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r) if (r)
goto init_failed; goto init_failed;
r = amdgpu_amdkfd_resume_iommu(adev);
if (r)
goto init_failed;
r = amdgpu_device_ip_hw_init_phase1(adev); r = amdgpu_device_ip_hw_init_phase1(adev);
if (r) if (r)
goto init_failed; goto init_failed;
@ -3148,6 +3152,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{ {
int r; int r;
r = amdgpu_amdkfd_resume_iommu(adev);
if (r)
return r;
r = amdgpu_device_ip_resume_phase1(adev); r = amdgpu_device_ip_resume_phase1(adev);
if (r) if (r)
return r; return r;
@ -4601,6 +4609,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
dev_warn(tmp_adev->dev, "asic atom init failed!"); dev_warn(tmp_adev->dev, "asic atom init failed!");
} else { } else {
dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
r = amdgpu_amdkfd_resume_iommu(tmp_adev);
if (r)
goto out;
r = amdgpu_device_ip_resume_phase1(tmp_adev); r = amdgpu_device_ip_resume_phase1(tmp_adev);
if (r) if (r)
goto out; goto out;

View File

@ -598,7 +598,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
break; break;
default: default:
adev->gmc.tmz_enabled = false; adev->gmc.tmz_enabled = false;
dev_warn(adev->dev, dev_info(adev->dev,
"Trusted Memory Zone (TMZ) feature not supported\n"); "Trusted Memory Zone (TMZ) feature not supported\n");
break; break;
} }

View File

@ -757,7 +757,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
return res; return res;
} }
inline uint32_t amdgpu_ras_eeprom_max_record_count(void) uint32_t amdgpu_ras_eeprom_max_record_count(void)
{ {
return RAS_MAX_RECORD_COUNT; return RAS_MAX_RECORD_COUNT;
} }

View File

@ -120,7 +120,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control, int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records, const u32 num); struct eeprom_table_record *records, const u32 num);
inline uint32_t amdgpu_ras_eeprom_max_record_count(void); uint32_t amdgpu_ras_eeprom_max_record_count(void);
void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control); void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);

View File

@ -428,8 +428,8 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
ent = debugfs_create_file(name, ent = debugfs_create_file(name,
S_IFREG | S_IRUGO, root, S_IFREG | S_IRUGO, root,
ring, &amdgpu_debugfs_ring_fops); ring, &amdgpu_debugfs_ring_fops);
if (!ent) if (IS_ERR(ent))
return -ENOMEM; return PTR_ERR(ent);
i_size_write(ent->d_inode, ring->ring_size + 12); i_size_write(ent->d_inode, ring->ring_size + 12);
ring->ent = ent; ring->ent = ent;

View File

@ -515,6 +515,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
goto out; goto out;
} }
if (bo->type == ttm_bo_type_device &&
new_mem->mem_type == TTM_PL_VRAM &&
old_mem->mem_type != TTM_PL_VRAM) {
/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
* accesses the BO after it's moved.
*/
abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
}
if (adev->mman.buffer_funcs_enabled) { if (adev->mman.buffer_funcs_enabled) {
if (((old_mem->mem_type == TTM_PL_SYSTEM && if (((old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) || new_mem->mem_type == TTM_PL_VRAM) ||
@ -545,15 +554,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r; return r;
} }
if (bo->type == ttm_bo_type_device &&
new_mem->mem_type == TTM_PL_VRAM &&
old_mem->mem_type != TTM_PL_VRAM) {
/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
* accesses the BO after it's moved.
*/
abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
}
out: out:
/* update statistics */ /* update statistics */
atomic64_add(bo->base.size, &adev->num_bytes_moved); atomic64_add(bo->base.size, &adev->num_bytes_moved);

View File

@ -468,6 +468,7 @@ static const struct kfd_device_info navi10_device_info = {
.needs_iommu_device = false, .needs_iommu_device = false,
.supports_cwsr = true, .supports_cwsr = true,
.needs_pci_atomics = true, .needs_pci_atomics = true,
.no_atomic_fw_version = 145,
.num_sdma_engines = 2, .num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0, .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8, .num_sdma_queues_per_engine = 8,
@ -487,6 +488,7 @@ static const struct kfd_device_info navi12_device_info = {
.needs_iommu_device = false, .needs_iommu_device = false,
.supports_cwsr = true, .supports_cwsr = true,
.needs_pci_atomics = true, .needs_pci_atomics = true,
.no_atomic_fw_version = 145,
.num_sdma_engines = 2, .num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0, .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8, .num_sdma_queues_per_engine = 8,
@ -506,6 +508,7 @@ static const struct kfd_device_info navi14_device_info = {
.needs_iommu_device = false, .needs_iommu_device = false,
.supports_cwsr = true, .supports_cwsr = true,
.needs_pci_atomics = true, .needs_pci_atomics = true,
.no_atomic_fw_version = 145,
.num_sdma_engines = 2, .num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0, .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8, .num_sdma_queues_per_engine = 8,
@ -525,6 +528,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = {
.needs_iommu_device = false, .needs_iommu_device = false,
.supports_cwsr = true, .supports_cwsr = true,
.needs_pci_atomics = true, .needs_pci_atomics = true,
.no_atomic_fw_version = 92,
.num_sdma_engines = 4, .num_sdma_engines = 4,
.num_xgmi_sdma_engines = 0, .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8, .num_sdma_queues_per_engine = 8,
@ -544,6 +548,7 @@ static const struct kfd_device_info navy_flounder_device_info = {
.needs_iommu_device = false, .needs_iommu_device = false,
.supports_cwsr = true, .supports_cwsr = true,
.needs_pci_atomics = true, .needs_pci_atomics = true,
.no_atomic_fw_version = 92,
.num_sdma_engines = 2, .num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0, .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8, .num_sdma_queues_per_engine = 8,
@ -562,7 +567,8 @@ static const struct kfd_device_info vangogh_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED, .mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false, .needs_iommu_device = false,
.supports_cwsr = true, .supports_cwsr = true,
.needs_pci_atomics = false, .needs_pci_atomics = true,
.no_atomic_fw_version = 92,
.num_sdma_engines = 1, .num_sdma_engines = 1,
.num_xgmi_sdma_engines = 0, .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2, .num_sdma_queues_per_engine = 2,
@ -582,6 +588,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
.needs_iommu_device = false, .needs_iommu_device = false,
.supports_cwsr = true, .supports_cwsr = true,
.needs_pci_atomics = true, .needs_pci_atomics = true,
.no_atomic_fw_version = 92,
.num_sdma_engines = 2, .num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0, .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8, .num_sdma_queues_per_engine = 8,
@ -601,6 +608,7 @@ static const struct kfd_device_info beige_goby_device_info = {
.needs_iommu_device = false, .needs_iommu_device = false,
.supports_cwsr = true, .supports_cwsr = true,
.needs_pci_atomics = true, .needs_pci_atomics = true,
.no_atomic_fw_version = 92,
.num_sdma_engines = 1, .num_sdma_engines = 1,
.num_xgmi_sdma_engines = 0, .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8, .num_sdma_queues_per_engine = 8,
@ -619,7 +627,8 @@ static const struct kfd_device_info yellow_carp_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED, .mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false, .needs_iommu_device = false,
.supports_cwsr = true, .supports_cwsr = true,
.needs_pci_atomics = false, .needs_pci_atomics = true,
.no_atomic_fw_version = 92,
.num_sdma_engines = 1, .num_sdma_engines = 1,
.num_xgmi_sdma_engines = 0, .num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2, .num_sdma_queues_per_engine = 2,
@ -708,20 +717,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
if (!kfd) if (!kfd)
return NULL; return NULL;
/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
* 32 and 64-bit requests are possible and must be
* supported.
*/
kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
if (device_info->needs_pci_atomics &&
!kfd->pci_atomic_requested) {
dev_info(kfd_device,
"skipped device %x:%x, PCI rejects atomics\n",
pdev->vendor, pdev->device);
kfree(kfd);
return NULL;
}
kfd->kgd = kgd; kfd->kgd = kgd;
kfd->device_info = device_info; kfd->device_info = device_info;
kfd->pdev = pdev; kfd->pdev = pdev;
@ -821,6 +816,23 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
- kfd->vm_info.first_vmid_kfd + 1; - kfd->vm_info.first_vmid_kfd + 1;
/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
* 32 and 64-bit requests are possible and must be
* supported.
*/
kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd);
if (!kfd->pci_atomic_requested &&
kfd->device_info->needs_pci_atomics &&
(!kfd->device_info->no_atomic_fw_version ||
kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) {
dev_info(kfd_device,
"skipped device %x:%x, PCI rejects atomics %d<%d\n",
kfd->pdev->vendor, kfd->pdev->device,
kfd->mec_fw_version,
kfd->device_info->no_atomic_fw_version);
return false;
}
/* Verify module parameters regarding mapped process number*/ /* Verify module parameters regarding mapped process number*/
if ((hws_max_conc_proc < 0) if ((hws_max_conc_proc < 0)
|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) { || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
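
A worked reading of the relocated gate, as a sketch with assumed firmware numbers (only the no_atomic_fw_version values come from this diff):

/* Hypothetical helper restating the predicate above. */
static bool example_reject_device(bool pci_atomic_requested,
                                  bool needs_pci_atomics,
                                  uint32_t mec_fw_version,
                                  uint32_t no_atomic_fw_version)
{
        return !pci_atomic_requested && needs_pci_atomics &&
               (!no_atomic_fw_version ||
                mec_fw_version < no_atomic_fw_version);
}
/* Navi1x (no_atomic_fw_version = 145), atomics unavailable:
 *   mec_fw_version = 144 -> true, device skipped
 *   mec_fw_version = 146 -> false, FW runs without PCIe atomics
 * The check presumably moved into kgd2kfd_device_init() because
 * mec_fw_version is only known once the device has been brought up. */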
@ -1057,17 +1069,21 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
return ret; return ret;
} }
static int kfd_resume(struct kfd_dev *kfd) int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{ {
int err = 0; int err = 0;
err = kfd_iommu_resume(kfd); err = kfd_iommu_resume(kfd);
if (err) { if (err)
dev_err(kfd_device, dev_err(kfd_device,
"Failed to resume IOMMU for device %x:%x\n", "Failed to resume IOMMU for device %x:%x\n",
kfd->pdev->vendor, kfd->pdev->device); kfd->pdev->vendor, kfd->pdev->device);
return err; return err;
} }
static int kfd_resume(struct kfd_dev *kfd)
{
int err = 0;
err = kfd->dqm->ops.start(kfd->dqm); err = kfd->dqm->ops.start(kfd->dqm);
if (err) { if (err) {

View File

@ -207,6 +207,7 @@ struct kfd_device_info {
bool supports_cwsr; bool supports_cwsr;
bool needs_iommu_device; bool needs_iommu_device;
bool needs_pci_atomics; bool needs_pci_atomics;
uint32_t no_atomic_fw_version;
unsigned int num_sdma_engines; unsigned int num_sdma_engines;
unsigned int num_xgmi_sdma_engines; unsigned int num_xgmi_sdma_engines;
unsigned int num_sdma_queues_per_engine; unsigned int num_sdma_queues_per_engine;

View File

@ -998,6 +998,8 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
uint32_t agp_base, agp_bot, agp_top; uint32_t agp_base, agp_bot, agp_top;
PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base; PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
memset(pa_config, 0, sizeof(*pa_config));
logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
@ -6024,21 +6026,23 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
return 0; return 0;
#if defined(CONFIG_DRM_AMD_DC_DCN) #if defined(CONFIG_DRM_AMD_DC_DCN)
work = kzalloc(sizeof(*work), GFP_ATOMIC); if (dm->vblank_control_workqueue) {
if (!work) work = kzalloc(sizeof(*work), GFP_ATOMIC);
return -ENOMEM; if (!work)
return -ENOMEM;
INIT_WORK(&work->work, vblank_control_worker); INIT_WORK(&work->work, vblank_control_worker);
work->dm = dm; work->dm = dm;
work->acrtc = acrtc; work->acrtc = acrtc;
work->enable = enable; work->enable = enable;
if (acrtc_state->stream) { if (acrtc_state->stream) {
dc_stream_retain(acrtc_state->stream); dc_stream_retain(acrtc_state->stream);
work->stream = acrtc_state->stream; work->stream = acrtc_state->stream;
}
queue_work(dm->vblank_control_workqueue, &work->work);
} }
queue_work(dm->vblank_control_workqueue, &work->work);
#endif #endif
return 0; return 0;
@ -6792,14 +6796,15 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
#if defined(CONFIG_DRM_AMD_DC_DCN) #if defined(CONFIG_DRM_AMD_DC_DCN)
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
struct dc_state *dc_state) struct dc_state *dc_state,
struct dsc_mst_fairness_vars *vars)
{ {
struct dc_stream_state *stream = NULL; struct dc_stream_state *stream = NULL;
struct drm_connector *connector; struct drm_connector *connector;
struct drm_connector_state *new_con_state; struct drm_connector_state *new_con_state;
struct amdgpu_dm_connector *aconnector; struct amdgpu_dm_connector *aconnector;
struct dm_connector_state *dm_conn_state; struct dm_connector_state *dm_conn_state;
int i, j, clock, bpp; int i, j, clock;
int vcpi, pbn_div, pbn = 0; int vcpi, pbn_div, pbn = 0;
for_each_new_connector_in_state(state, connector, new_con_state, i) { for_each_new_connector_in_state(state, connector, new_con_state, i) {
@ -6838,9 +6843,15 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
} }
pbn_div = dm_mst_get_pbn_divider(stream->link); pbn_div = dm_mst_get_pbn_divider(stream->link);
bpp = stream->timing.dsc_cfg.bits_per_pixel;
clock = stream->timing.pix_clk_100hz / 10; clock = stream->timing.pix_clk_100hz / 10;
pbn = drm_dp_calc_pbn_mode(clock, bpp, true); /* pbn is calculated by compute_mst_dsc_configs_for_state */
for (j = 0; j < dc_state->stream_count; j++) {
if (vars[j].aconnector == aconnector) {
pbn = vars[j].pbn;
break;
}
}
vcpi = drm_dp_mst_atomic_enable_dsc(state, vcpi = drm_dp_mst_atomic_enable_dsc(state,
aconnector->port, aconnector->port,
pbn, pbn_div, pbn, pbn_div,
@ -7519,6 +7530,32 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
} }
} }
static void amdgpu_set_panel_orientation(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
const struct drm_display_mode *native_mode;
if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
return;
encoder = amdgpu_dm_connector_to_encoder(connector);
if (!encoder)
return;
amdgpu_encoder = to_amdgpu_encoder(encoder);
native_mode = &amdgpu_encoder->native_mode;
if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
return;
drm_connector_set_panel_orientation_with_quirk(connector,
DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
native_mode->hdisplay,
native_mode->vdisplay);
}
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
struct edid *edid) struct edid *edid)
{ {
@ -7547,6 +7584,8 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
* restored here. * restored here.
*/ */
amdgpu_dm_update_freesync_caps(connector, edid); amdgpu_dm_update_freesync_caps(connector, edid);
amdgpu_set_panel_orientation(connector);
} else { } else {
amdgpu_dm_connector->num_modes = 0; amdgpu_dm_connector->num_modes = 0;
} }
@ -8058,8 +8097,26 @@ static bool is_content_protection_different(struct drm_connector_state *state,
state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled /* Stream removed and re-enabled
* hot-plug, headless s3, dpms *
* Can sometimes overlap with the HPD case,
* thus set update_hdcp to false to avoid
* setting HDCP multiple times.
*
* Handles: DESIRED -> DESIRED (Special case)
*/
if (!(old_state->crtc && old_state->crtc->enabled) &&
state->crtc && state->crtc->enabled &&
connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
dm_con_state->update_hdcp = false;
return true;
}
/* Hot-plug, headless s3, dpms
*
* Only start HDCP if the display is connected/enabled.
* update_hdcp flag will be set to false until the next
* HPD comes in.
* *
* Handles: DESIRED -> DESIRED (Special case) * Handles: DESIRED -> DESIRED (Special case)
*/ */
@ -8648,7 +8705,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* If PSR or idle optimizations are enabled then flush out * If PSR or idle optimizations are enabled then flush out
* any pending work before hardware programming. * any pending work before hardware programming.
*/ */
flush_workqueue(dm->vblank_control_workqueue); if (dm->vblank_control_workqueue)
flush_workqueue(dm->vblank_control_workqueue);
#endif #endif
bundle->stream_update.stream = acrtc_state->stream; bundle->stream_update.stream = acrtc_state->stream;
@ -8983,7 +9041,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
/* if there mode set or reset, disable eDP PSR */ /* if there mode set or reset, disable eDP PSR */
if (mode_set_reset_required) { if (mode_set_reset_required) {
#if defined(CONFIG_DRM_AMD_DC_DCN) #if defined(CONFIG_DRM_AMD_DC_DCN)
flush_workqueue(dm->vblank_control_workqueue); if (dm->vblank_control_workqueue)
flush_workqueue(dm->vblank_control_workqueue);
#endif #endif
amdgpu_dm_psr_disable_all(dm); amdgpu_dm_psr_disable_all(dm);
} }
@ -10243,6 +10302,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
int ret, i; int ret, i;
bool lock_and_validation_needed = false; bool lock_and_validation_needed = false;
struct dm_crtc_state *dm_old_crtc_state; struct dm_crtc_state *dm_old_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_mst_fairness_vars vars[MAX_PIPES];
#endif
trace_amdgpu_dm_atomic_check_begin(state); trace_amdgpu_dm_atomic_check_begin(state);
@ -10473,10 +10535,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail; goto fail;
#if defined(CONFIG_DRM_AMD_DC_DCN) #if defined(CONFIG_DRM_AMD_DC_DCN)
if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
goto fail; goto fail;
ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context); ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
if (ret) if (ret)
goto fail; goto fail;
#endif #endif
@ -10492,7 +10554,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail; goto fail;
status = dc_validate_global_state(dc, dm_state->context, false); status = dc_validate_global_state(dc, dm_state->context, false);
if (status != DC_OK) { if (status != DC_OK) {
DC_LOG_WARNING("DC global validation failure: %s (%d)", drm_dbg_atomic(dev,
"DC global validation failure: %s (%d)",
dc_status_to_str(status), status); dc_status_to_str(status), status);
ret = -EINVAL; ret = -EINVAL;
goto fail; goto fail;

View File

@ -518,12 +518,7 @@ struct dsc_mst_fairness_params {
uint32_t num_slices_h; uint32_t num_slices_h;
uint32_t num_slices_v; uint32_t num_slices_v;
uint32_t bpp_overwrite; uint32_t bpp_overwrite;
}; struct amdgpu_dm_connector *aconnector;
struct dsc_mst_fairness_vars {
int pbn;
bool dsc_enabled;
int bpp_x16;
}; };
static int kbps_to_peak_pbn(int kbps) static int kbps_to_peak_pbn(int kbps)
@ -750,12 +745,12 @@ static void try_disable_dsc(struct drm_atomic_state *state,
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
struct dc_state *dc_state, struct dc_state *dc_state,
struct dc_link *dc_link) struct dc_link *dc_link,
struct dsc_mst_fairness_vars *vars)
{ {
int i; int i;
struct dc_stream_state *stream; struct dc_stream_state *stream;
struct dsc_mst_fairness_params params[MAX_PIPES]; struct dsc_mst_fairness_params params[MAX_PIPES];
struct dsc_mst_fairness_vars vars[MAX_PIPES];
struct amdgpu_dm_connector *aconnector; struct amdgpu_dm_connector *aconnector;
int count = 0; int count = 0;
bool debugfs_overwrite = false; bool debugfs_overwrite = false;
@ -776,6 +771,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].timing = &stream->timing; params[count].timing = &stream->timing;
params[count].sink = stream->sink; params[count].sink = stream->sink;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
params[count].aconnector = aconnector;
params[count].port = aconnector->port; params[count].port = aconnector->port;
params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE) if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
@ -798,6 +794,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
} }
/* Try no compression */ /* Try no compression */
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
vars[i].aconnector = params[i].aconnector;
vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i].dsc_enabled = false; vars[i].dsc_enabled = false;
vars[i].bpp_x16 = 0; vars[i].bpp_x16 = 0;
@ -851,7 +848,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
} }
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
struct dc_state *dc_state) struct dc_state *dc_state,
struct dsc_mst_fairness_vars *vars)
{ {
int i, j; int i, j;
struct dc_stream_state *stream; struct dc_stream_state *stream;
@ -882,7 +880,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
return false; return false;
mutex_lock(&aconnector->mst_mgr.lock); mutex_lock(&aconnector->mst_mgr.lock);
if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) { if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) {
mutex_unlock(&aconnector->mst_mgr.lock); mutex_unlock(&aconnector->mst_mgr.lock);
return false; return false;
} }

View File

@ -39,8 +39,17 @@ void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
#if defined(CONFIG_DRM_AMD_DC_DCN) #if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_mst_fairness_vars {
int pbn;
bool dsc_enabled;
int bpp_x16;
struct amdgpu_dm_connector *aconnector;
};
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
struct dc_state *dc_state); struct dc_state *dc_state,
struct dsc_mst_fairness_vars *vars);
#endif #endif
#endif #endif

View File

@ -62,7 +62,7 @@ inline void dc_assert_fp_enabled(void)
depth = *pcpu; depth = *pcpu;
put_cpu_ptr(&fpu_recursion_depth); put_cpu_ptr(&fpu_recursion_depth);
ASSERT(depth > 1); ASSERT(depth >= 1);
} }
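
A hedged sketch of why the assertion is relaxed to >= 1: each DC_FP_START() increments the per-CPU recursion depth, so a depth of exactly 1 is the ordinary, valid FP context and must not trip the assert:

DC_FP_START();          /* depth: 0 -> 1 */
dc_assert_fp_enabled(); /* passes: depth >= 1 */
DC_FP_START();          /* nested section, depth: 1 -> 2 */
DC_FP_END();            /* depth: 2 -> 1 */
DC_FP_END();            /* depth: 1 -> 0 */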
/** /**

View File

@ -2586,13 +2586,21 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
int dc_link_get_backlight_level(const struct dc_link *link) int dc_link_get_backlight_level(const struct dc_link *link)
{ {
struct abm *abm = get_abm_from_stream_res(link); struct abm *abm = get_abm_from_stream_res(link);
struct panel_cntl *panel_cntl = link->panel_cntl;
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
bool fw_set_brightness = true;
if (abm == NULL || abm->funcs->get_current_backlight == NULL) if (dmcu)
fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
return panel_cntl->funcs->get_current_backlight(panel_cntl);
else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
return (int) abm->funcs->get_current_backlight(abm);
else
return DC_ERROR_UNEXPECTED; return DC_ERROR_UNEXPECTED;
return (int) abm->funcs->get_current_backlight(abm);
} }
int dc_link_get_target_backlight_pwm(const struct dc_link *link) int dc_link_get_target_backlight_pwm(const struct dc_link *link)

View File

@ -1,4 +1,26 @@
/* Copyright 2015 Advanced Micro Devices, Inc. */ /*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*/
#include "dm_services.h" #include "dm_services.h"
#include "dc.h" #include "dc.h"
#include "dc_link_dp.h" #include "dc_link_dp.h"
@ -1840,9 +1862,13 @@ bool perform_link_training_with_retries(
dp_disable_link_phy(link, signal); dp_disable_link_phy(link, signal);
/* Abort link training if failure due to sink being unplugged. */ /* Abort link training if failure due to sink being unplugged. */
if (status == LINK_TRAINING_ABORT) if (status == LINK_TRAINING_ABORT) {
break; enum dc_connection_type type = dc_connection_none;
else if (do_fallback) {
dc_link_detect_sink(link, &type);
if (type == dc_connection_none)
break;
} else if (do_fallback) {
decide_fallback_link_setting(*link_setting, &current_setting, status); decide_fallback_link_setting(*link_setting, &current_setting, status);
/* Fail link training if reduced link bandwidth no longer meets /* Fail link training if reduced link bandwidth no longer meets
* stream requirements. * stream requirements.

View File

@ -49,7 +49,6 @@
static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl) static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
{ {
uint64_t current_backlight; uint64_t current_backlight;
uint32_t round_result;
uint32_t bl_period, bl_int_count; uint32_t bl_period, bl_int_count;
uint32_t bl_pwm, fractional_duty_cycle_en; uint32_t bl_pwm, fractional_duty_cycle_en;
uint32_t bl_period_mask, bl_pwm_mask; uint32_t bl_period_mask, bl_pwm_mask;
@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c
current_backlight = div_u64(current_backlight, bl_period); current_backlight = div_u64(current_backlight, bl_period);
current_backlight = (current_backlight + 1) >> 1; current_backlight = (current_backlight + 1) >> 1;
current_backlight = (uint64_t)(current_backlight) * bl_period;
round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
round_result = (round_result >> (bl_int_count-1)) & 1;
current_backlight >>= bl_int_count;
current_backlight += round_result;
return (uint32_t)(current_backlight); return (uint32_t)(current_backlight);
} }

View File

@ -33,63 +33,47 @@
#define TABLE_PMSTATUSLOG 3 // Called by Tools for Agm logging #define TABLE_PMSTATUSLOG 3 // Called by Tools for Agm logging
#define TABLE_DPMCLOCKS 4 // Called by Driver; defined here, but not used, for backward compatible #define TABLE_DPMCLOCKS 4 // Called by Driver; defined here, but not used, for backward compatible
#define TABLE_MOMENTARY_PM 5 // Called by Tools; defined here, but not used, for backward compatible #define TABLE_MOMENTARY_PM 5 // Called by Tools; defined here, but not used, for backward compatible
#define TABLE_COUNT 6 #define TABLE_SMU_METRICS 6 // Called by Driver
#define TABLE_COUNT 7
#define NUM_DSPCLK_LEVELS 8 typedef struct SmuMetricsTable_t {
#define NUM_SOCCLK_DPM_LEVELS 8 //CPU status
#define NUM_DCEFCLK_DPM_LEVELS 4 uint16_t CoreFrequency[6]; //[MHz]
#define NUM_FCLK_DPM_LEVELS 4 uint32_t CorePower[6]; //[mW]
#define NUM_MEMCLK_DPM_LEVELS 4 uint16_t CoreTemperature[6]; //[centi-Celsius]
uint16_t L3Frequency[2]; //[MHz]
uint16_t L3Temperature[2]; //[centi-Celsius]
uint16_t C0Residency[6]; //Percentage
#define NUMBER_OF_PSTATES 8 // GFX status
#define NUMBER_OF_CORES 8 uint16_t GfxclkFrequency; //[MHz]
uint16_t GfxTemperature; //[centi-Celsius]
typedef enum { // SOC IP info
S3_TYPE_ENTRY, uint16_t SocclkFrequency; //[MHz]
S5_TYPE_ENTRY, uint16_t VclkFrequency; //[MHz]
} Sleep_Type_e; uint16_t DclkFrequency; //[MHz]
uint16_t MemclkFrequency; //[MHz]
typedef enum { // power, VF info for CPU/GFX telemetry rails, and then socket power total
GFX_OFF = 0, uint32_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_GFX
GFX_ON = 1, uint32_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_GFX
} GFX_Mode_e; uint32_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_GFX
uint32_t CurrentSocketPower; //[mW]
typedef enum { uint16_t SocTemperature; //[centi-Celsius]
CPU_P0 = 0, uint16_t EdgeTemperature;
CPU_P1, uint16_t ThrottlerStatus;
CPU_P2, uint16_t Spare;
CPU_P3,
CPU_P4,
CPU_P5,
CPU_P6,
CPU_P7
} CPU_PState_e;
typedef enum { } SmuMetricsTable_t;
CPU_CORE0 = 0,
CPU_CORE1,
CPU_CORE2,
CPU_CORE3,
CPU_CORE4,
CPU_CORE5,
CPU_CORE6,
CPU_CORE7
} CORE_ID_e;
typedef enum { typedef struct SmuMetrics_t {
DF_DPM0 = 0, SmuMetricsTable_t Current;
DF_DPM1, SmuMetricsTable_t Average;
DF_DPM2, uint32_t SampleStartTime;
DF_DPM3, uint32_t SampleStopTime;
DF_PState_Count uint32_t Accnt;
} DF_PState_e; } SmuMetrics_t;
typedef enum {
GFX_DPM0 = 0,
GFX_DPM1,
GFX_DPM2,
GFX_DPM3,
GFX_PState_Count
} GFX_PState_e;
#endif #endif

View File

@ -226,7 +226,10 @@
__SMU_DUMMY_MAP(SetUclkDpmMode), \ __SMU_DUMMY_MAP(SetUclkDpmMode), \
__SMU_DUMMY_MAP(LightSBR), \ __SMU_DUMMY_MAP(LightSBR), \
__SMU_DUMMY_MAP(GfxDriverResetRecovery), \ __SMU_DUMMY_MAP(GfxDriverResetRecovery), \
__SMU_DUMMY_MAP(BoardPowerCalibration), __SMU_DUMMY_MAP(BoardPowerCalibration), \
__SMU_DUMMY_MAP(RequestGfxclk), \
__SMU_DUMMY_MAP(ForceGfxVid), \
__SMU_DUMMY_MAP(UnforceGfxVid),
#undef __SMU_DUMMY_MAP #undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type #define __SMU_DUMMY_MAP(type) SMU_MSG_##type

View File

@ -65,6 +65,13 @@
#define PPSMC_MSG_SetDriverTableVMID 0x34 #define PPSMC_MSG_SetDriverTableVMID 0x34
#define PPSMC_MSG_SetSoftMinCclk 0x35 #define PPSMC_MSG_SetSoftMinCclk 0x35
#define PPSMC_MSG_SetSoftMaxCclk 0x36 #define PPSMC_MSG_SetSoftMaxCclk 0x36
#define PPSMC_Message_Count 0x37 #define PPSMC_MSG_GetGfxFrequency 0x37
#define PPSMC_MSG_GetGfxVid 0x38
#define PPSMC_MSG_ForceGfxFreq 0x39
#define PPSMC_MSG_UnForceGfxFreq 0x3A
#define PPSMC_MSG_ForceGfxVid 0x3B
#define PPSMC_MSG_UnforceGfxVid 0x3C
#define PPSMC_MSG_GetEnabledSmuFeatures 0x3D
#define PPSMC_Message_Count 0x3E
#endif #endif

View File

@ -1404,7 +1404,7 @@ static int smu_disable_dpms(struct smu_context *smu)
*/ */
if (smu->uploading_custom_pp_table && if (smu->uploading_custom_pp_table &&
(adev->asic_type >= CHIP_NAVI10) && (adev->asic_type >= CHIP_NAVI10) &&
(adev->asic_type <= CHIP_DIMGREY_CAVEFISH)) (adev->asic_type <= CHIP_BEIGE_GOBY))
return smu_disable_all_features_with_exception(smu, return smu_disable_all_features_with_exception(smu,
true, true,
SMU_FEATURE_COUNT); SMU_FEATURE_COUNT);

View File

@ -771,8 +771,12 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
struct smu_11_0_dpm_context *dpm_context = NULL; struct smu_11_0_dpm_context *dpm_context = NULL;
uint32_t gen_speed, lane_width; uint32_t gen_speed, lane_width;
if (amdgpu_ras_intr_triggered()) smu_cmn_get_sysfs_buf(&buf, &size);
return sysfs_emit(buf, "unavailable\n");
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
return size;
}
dpm_context = smu_dpm->dpm_context; dpm_context = smu_dpm->dpm_context;

View File

@ -44,6 +44,27 @@
#undef pr_info #undef pr_info
#undef pr_debug #undef pr_debug
/* unit: MHz */
#define CYAN_SKILLFISH_SCLK_MIN 1000
#define CYAN_SKILLFISH_SCLK_MAX 2000
#define CYAN_SKILLFISH_SCLK_DEFAULT 1800
/* unit: mV */
#define CYAN_SKILLFISH_VDDC_MIN 700
#define CYAN_SKILLFISH_VDDC_MAX 1129
#define CYAN_SKILLFISH_VDDC_MAGIC 5118 // 0x13fe
static struct gfx_user_settings {
uint32_t sclk;
uint32_t vddc;
} cyan_skillfish_user_settings;
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
FEATURE_MASK(FEATURE_SOC_DPM_BIT) | \
FEATURE_MASK(FEATURE_GFX_DPM_BIT))
static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = { static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0),
@ -52,14 +73,473 @@ static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverTableDramAddrLow, 0), MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverTableDramAddrLow, 0),
MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0),
MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0),
MSG_MAP(RequestGfxclk, PPSMC_MSG_RequestGfxclk, 0),
MSG_MAP(ForceGfxVid, PPSMC_MSG_ForceGfxVid, 0),
MSG_MAP(UnforceGfxVid, PPSMC_MSG_UnforceGfxVid, 0),
}; };
static struct cmn2asic_mapping cyan_skillfish_table_map[SMU_TABLE_COUNT] = {
TAB_MAP_VALID(SMU_METRICS),
};
static int cyan_skillfish_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
sizeof(SmuMetrics_t),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
goto err0_out;
smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
smu_table->metrics_time = 0;
return 0;
err1_out:
smu_table->gpu_metrics_table_size = 0;
kfree(smu_table->metrics_table);
err0_out:
return -ENOMEM;
}
static int cyan_skillfish_init_smc_tables(struct smu_context *smu)
{
int ret = 0;
ret = cyan_skillfish_tables_init(smu);
if (ret)
return ret;
return smu_v11_0_init_smc_tables(smu);
}
static int cyan_skillfish_finit_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
kfree(smu_table->metrics_table);
smu_table->metrics_table = NULL;
kfree(smu_table->gpu_metrics_table);
smu_table->gpu_metrics_table = NULL;
smu_table->gpu_metrics_table_size = 0;
smu_table->metrics_time = 0;
return 0;
}
static int
cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
mutex_lock(&smu->metrics_lock);
ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
if (ret) {
mutex_unlock(&smu->metrics_lock);
return ret;
}
switch (member) {
case METRICS_CURR_GFXCLK:
*value = metrics->Current.GfxclkFrequency;
break;
case METRICS_CURR_SOCCLK:
*value = metrics->Current.SocclkFrequency;
break;
case METRICS_CURR_VCLK:
*value = metrics->Current.VclkFrequency;
break;
case METRICS_CURR_DCLK:
*value = metrics->Current.DclkFrequency;
break;
case METRICS_CURR_UCLK:
*value = metrics->Current.MemclkFrequency;
break;
case METRICS_AVERAGE_SOCKETPOWER:
*value = (metrics->Current.CurrentSocketPower << 8) /
1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = metrics->Current.GfxTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_HOTSPOT:
*value = metrics->Current.SocTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_VOLTAGE_VDDSOC:
*value = metrics->Current.Voltage[0];
break;
case METRICS_VOLTAGE_VDDGFX:
*value = metrics->Current.Voltage[1];
break;
case METRICS_THROTTLER_STATUS:
*value = metrics->Current.ThrottlerStatus;
break;
default:
*value = UINT_MAX;
break;
}
mutex_unlock(&smu->metrics_lock);
return ret;
}
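
Usage sketch for the accessor above (the caller is hypothetical): the helper takes smu->metrics_lock internally, so a caller simply asks for one member:

uint32_t gfxclk_mhz;
int err = cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK,
                                              &gfxclk_mhz);
if (!err)
        dev_dbg(smu->adev->dev, "gfxclk: %u MHz\n", gfxclk_mhz);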
static int cyan_skillfish_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data,
uint32_t *size)
{
int ret = 0;
if (!data || !size)
return -EINVAL;
mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_CURR_GFXCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_CURR_UCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_POWER:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_HOTSPOT,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_EDGE_TEMP:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_EDGE,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDNB:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_VOLTAGE_VDDSOC,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDGFX:
ret = cyan_skillfish_get_smu_metrics_data(smu,
METRICS_VOLTAGE_VDDGFX,
(uint32_t *)data);
*size = 4;
break;
default:
ret = -EOPNOTSUPP;
break;
}
mutex_unlock(&smu->sensor_lock);
return ret;
}
static int cyan_skillfish_get_current_clk_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
{
MetricsMember_t member_type;
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
member_type = METRICS_CURR_GFXCLK;
break;
case SMU_FCLK:
case SMU_MCLK:
member_type = METRICS_CURR_UCLK;
break;
case SMU_SOCCLK:
member_type = METRICS_CURR_SOCCLK;
break;
case SMU_VCLK:
member_type = METRICS_CURR_VCLK;
break;
case SMU_DCLK:
member_type = METRICS_CURR_DCLK;
break;
default:
return -EINVAL;
}
return cyan_skillfish_get_smu_metrics_data(smu, member_type, value);
}
static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type,
char *buf)
{
int ret = 0, size = 0;
uint32_t cur_value = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
switch (clk_type) {
case SMU_OD_SCLK:
ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &cur_value);
if (ret)
return ret;
size += sysfs_emit_at(buf, size,"%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
break;
case SMU_OD_VDDC_CURVE:
ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, &cur_value);
if (ret)
return ret;
size += sysfs_emit_at(buf, size,"%s:\n", "OD_VDDC");
size += sysfs_emit_at(buf, size, "0: %umV *\n", cur_value);
break;
case SMU_OD_RANGE:
size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n",
CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
break;
case SMU_GFXCLK:
case SMU_SCLK:
case SMU_FCLK:
case SMU_MCLK:
case SMU_SOCCLK:
case SMU_VCLK:
case SMU_DCLK:
ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value);
if (ret)
return ret;
size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
break;
default:
dev_warn(smu->adev->dev, "Unsupported clock type\n");
return ret;
}
return size;
}
static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
uint32_t feature_mask[2];
uint64_t feature_enabled;
/* we need to re-init after suspend so return false */
if (adev->in_suspend)
return false;
ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
if (ret)
return false;
feature_enabled = (uint64_t)feature_mask[0] |
((uint64_t)feature_mask[1] << 32);
return !!(feature_enabled & SMC_DPM_FEATURE);
}
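
A worked illustration of the mask test above, with assumed bit positions (the real FEATURE_*_BIT values come from the ASIC headers):

/* Suppose the FCLK/SOC/GFX DPM bits were 0, 1 and 6: */
uint64_t ex_mask = FEATURE_MASK(0) | FEATURE_MASK(1) | FEATURE_MASK(6);
/* ex_mask == 0x43; with feature_mask[0] = 0x40 and feature_mask[1] = 0,
 * feature_enabled == 0x40 and !!(0x40 & 0x43) == true: GFX DPM is on,
 * so DPM counts as running. */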
}
static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v2_2 *gpu_metrics =
(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
SmuMetrics_t metrics;
int i, ret = 0;
ret = smu_cmn_get_metrics_table(smu, &metrics, true);
if (ret)
return ret;
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
gpu_metrics->average_soc_power = metrics.Current.Power[0];
gpu_metrics->average_gfx_power = metrics.Current.Power[1];
gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
for (i = 0; i < 6; i++) {
gpu_metrics->temperature_core[i] = metrics.Current.CoreTemperature[i];
gpu_metrics->average_core_power[i] = metrics.Average.CorePower[i];
gpu_metrics->current_coreclk[i] = metrics.Current.CoreFrequency[i];
}
for (i = 0; i < 2; i++) {
gpu_metrics->temperature_l3[i] = metrics.Current.L3Temperature[i];
gpu_metrics->current_l3clk[i] = metrics.Current.L3Frequency[i];
}
gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v2_2);
}
static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long input[], uint32_t size)
{
int ret = 0;
uint32_t vid;
switch (type) {
case PP_OD_EDIT_VDDC_CURVE:
if (size != 3 || input[0] != 0) {
dev_err(smu->adev->dev, "Invalid parameter!\n");
return -EINVAL;
}
if (input[1] <= CYAN_SKILLFISH_SCLK_MIN ||
input[1] > CYAN_SKILLFISH_SCLK_MAX) {
dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n",
CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
return -EINVAL;
}
if (input[2] <= CYAN_SKILLFISH_VDDC_MIN ||
input[2] > CYAN_SKILLFISH_VDDC_MAX) {
dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
return -EINVAL;
}
cyan_skillfish_user_settings.sclk = input[1];
cyan_skillfish_user_settings.vddc = input[2];
break;
case PP_OD_RESTORE_DEFAULT_TABLE:
if (size != 0) {
dev_err(smu->adev->dev, "Invalid parameter!\n");
return -EINVAL;
}
cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT;
cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC;
break;
case PP_OD_COMMIT_DPM_TABLE:
if (size != 0) {
dev_err(smu->adev->dev, "Invalid parameter!\n");
return -EINVAL;
}
if (cyan_skillfish_user_settings.sclk < CYAN_SKILLFISH_SCLK_MIN ||
cyan_skillfish_user_settings.sclk > CYAN_SKILLFISH_SCLK_MAX) {
dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n",
CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
return -EINVAL;
}
if ((cyan_skillfish_user_settings.vddc != CYAN_SKILLFISH_VDDC_MAGIC) &&
(cyan_skillfish_user_settings.vddc < CYAN_SKILLFISH_VDDC_MIN ||
cyan_skillfish_user_settings.vddc > CYAN_SKILLFISH_VDDC_MAX)) {
dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
return -EINVAL;
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestGfxclk,
cyan_skillfish_user_settings.sclk, NULL);
if (ret) {
dev_err(smu->adev->dev, "Set sclk failed!\n");
return ret;
}
if (cyan_skillfish_user_settings.vddc == CYAN_SKILLFISH_VDDC_MAGIC) {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_UnforceGfxVid, NULL);
if (ret) {
dev_err(smu->adev->dev, "Unforce vddc failed!\n");
return ret;
}
} else {
/*
* PMFW accepts SVI2 VID code, convert voltage to VID:
* vid = (uint32_t)((1.55 - voltage) * 160.0 + 0.00001)
*/
vid = (1550 - cyan_skillfish_user_settings.vddc) * 160 / 1000;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ForceGfxVid, vid, NULL);
if (ret) {
dev_err(smu->adev->dev, "Force vddc failed!\n");
return ret;
}
}
break;
default:
return -EOPNOTSUPP;
}
return ret;
}
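
A worked instance of the SVI2 conversion above, for a hypothetical user setting of 1100 mV:

/* Integer form used by this function:
 *   vid = (1550 - 1100) * 160 / 1000 = 72000 / 1000 = 72
 * Float reference from the comment:
 *   (1.55 - 1.10) * 160.0 + 0.00001 = 72.00001 -> 72
 * Both forms agree across the valid 700-1129 mV range. */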
static const struct pptable_funcs cyan_skillfish_ppt_funcs = { static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status, .check_fw_status = smu_v11_0_check_fw_status,
.check_fw_version = smu_v11_0_check_fw_version, .check_fw_version = smu_v11_0_check_fw_version,
.init_power = smu_v11_0_init_power, .init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power, .fini_power = smu_v11_0_fini_power,
.init_smc_tables = cyan_skillfish_init_smc_tables,
.fini_smc_tables = cyan_skillfish_finit_smc_tables,
.read_sensor = cyan_skillfish_read_sensor,
.print_clk_levels = cyan_skillfish_print_clk_levels,
.is_dpm_running = cyan_skillfish_is_dpm_running,
.get_gpu_metrics = cyan_skillfish_get_gpu_metrics,
.od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table,
.register_irq_handler = smu_v11_0_register_irq_handler, .register_irq_handler = smu_v11_0_register_irq_handler,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
@ -72,5 +552,6 @@ void cyan_skillfish_set_ppt_funcs(struct smu_context *smu)
{ {
smu->ppt_funcs = &cyan_skillfish_ppt_funcs; smu->ppt_funcs = &cyan_skillfish_ppt_funcs;
smu->message_map = cyan_skillfish_message_map; smu->message_map = cyan_skillfish_message_map;
smu->table_map = cyan_skillfish_table_map;
smu->is_apu = true; smu->is_apu = true;
} }

View File

@ -1279,6 +1279,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
struct smu_11_0_overdrive_table *od_settings = smu->od_settings; struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
uint32_t min_value, max_value; uint32_t min_value, max_value;
smu_cmn_get_sysfs_buf(&buf, &size);
switch (clk_type) { switch (clk_type) {
case SMU_GFXCLK: case SMU_GFXCLK:
case SMU_SCLK: case SMU_SCLK:
@ -1392,7 +1394,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_RANGE: case SMU_OD_RANGE:
if (!smu->od_enabled || !od_table || !od_settings) if (!smu->od_enabled || !od_table || !od_settings)
break; break;
size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) { if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN, navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
@ -2272,7 +2274,27 @@ static int navi10_baco_enter(struct smu_context *smu)
{ {
struct amdgpu_device *adev = smu->adev; struct amdgpu_device *adev = smu->adev;
if (adev->in_runpm) /*
* This handles the case below:
* amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded
*
* For NAVI10 and later ASICs, we rely on PMFW to handle the runpm. To
* make that possible, PMFW needs to acknowledge the dstate transition
* process for both the gfx (function 0) and audio (function 1) functions
* of the ASIC.
*
* The PCI device's initial runpm status is RUNPM_SUSPENDED, and so is
* that of the device representing the audio function of the ASIC. That
* means even if the sound driver (snd_hda_intel) has not been loaded
* yet, a runpm suspend may already have kicked in on the ASIC. Without
* the dstate transition notification from the audio function, PMFW
* cannot handle BACO entry/exit correctly, which will hang the driver
* on runpm resume.
*
* To address this, we fall back to the legacy message flow (the driver
* masters the timing for BACO entry/exit) when the sound driver is
* missing.
*/
if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
else else
return smu_v11_0_baco_enter(smu); return smu_v11_0_baco_enter(smu);
@ -2282,7 +2304,7 @@ static int navi10_baco_exit(struct smu_context *smu)
{ {
struct amdgpu_device *adev = smu->adev; struct amdgpu_device *adev = smu->adev;
if (adev->in_runpm) { if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */ /* Wait for PMFW handling for the Dstate change */
msleep(10); msleep(10);
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);

View File

@ -1058,6 +1058,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
uint32_t min_value, max_value; uint32_t min_value, max_value;
uint32_t smu_version; uint32_t smu_version;
smu_cmn_get_sysfs_buf(&buf, &size);
switch (clk_type) { switch (clk_type) {
case SMU_GFXCLK: case SMU_GFXCLK:
case SMU_SCLK: case SMU_SCLK:
@ -1180,7 +1182,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
if (!smu->od_enabled || !od_table || !od_settings) if (!smu->od_enabled || !od_table || !od_settings)
break; break;
size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) { if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) {
sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN, sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN,
@ -2187,7 +2189,7 @@ static int sienna_cichlid_baco_enter(struct smu_context *smu)
{ {
struct amdgpu_device *adev = smu->adev; struct amdgpu_device *adev = smu->adev;
if (adev->in_runpm) if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
else else
return smu_v11_0_baco_enter(smu); return smu_v11_0_baco_enter(smu);
@ -2197,7 +2199,7 @@ static int sienna_cichlid_baco_exit(struct smu_context *smu)
{ {
struct amdgpu_device *adev = smu->adev; struct amdgpu_device *adev = smu->adev;
if (adev->in_runpm) { if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
/* Wait for PMFW handling for the Dstate change */ /* Wait for PMFW handling for the Dstate change */
msleep(10); msleep(10);
return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);

View File

@ -589,10 +589,12 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
if (ret) if (ret)
return ret; return ret;
smu_cmn_get_sysfs_buf(&buf, &size);
switch (clk_type) { switch (clk_type) {
case SMU_OD_SCLK: case SMU_OD_SCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n", size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n", size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@ -601,7 +603,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
break; break;
case SMU_OD_CCLK: case SMU_OD_CCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
size += sysfs_emit_at(buf, size, "0: %10uMhz\n", size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n", size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@ -610,7 +612,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
break; break;
case SMU_OD_RANGE: case SMU_OD_RANGE:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
@ -688,10 +690,12 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
if (ret) if (ret)
return ret; return ret;
smu_cmn_get_sysfs_buf(&buf, &size);
switch (clk_type) { switch (clk_type) {
case SMU_OD_SCLK: case SMU_OD_SCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n", size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n", size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@ -700,7 +704,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
break; break;
case SMU_OD_CCLK: case SMU_OD_CCLK:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
size += sysfs_emit_at(buf, size, "0: %10uMhz\n", size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n", size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
@ -709,7 +713,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
break; break;
case SMU_OD_RANGE: case SMU_OD_RANGE:
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",


@@ -497,6 +497,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
     if (ret)
         return ret;
 
+    smu_cmn_get_sysfs_buf(&buf, &size);
+
     switch (clk_type) {
     case SMU_OD_RANGE:
         if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {


@@ -733,15 +733,19 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
     uint32_t freq_values[3] = {0};
     uint32_t min_clk, max_clk;
 
-    if (amdgpu_ras_intr_triggered())
-        return sysfs_emit(buf, "unavailable\n");
+    smu_cmn_get_sysfs_buf(&buf, &size);
+
+    if (amdgpu_ras_intr_triggered()) {
+        size += sysfs_emit_at(buf, size, "unavailable\n");
+        return size;
+    }
 
     dpm_context = smu_dpm->dpm_context;
 
     switch (type) {
     case SMU_OD_SCLK:
-        size = sysfs_emit(buf, "%s:\n", "GFXCLK");
+        size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
         fallthrough;
     case SMU_SCLK:
         ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now);
@@ -795,7 +799,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
         break;
 
     case SMU_OD_MCLK:
-        size = sysfs_emit(buf, "%s:\n", "MCLK");
+        size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
         fallthrough;
     case SMU_MCLK:
         ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now);


@@ -1052,16 +1052,18 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
     int i, size = 0, ret = 0;
     uint32_t cur_value = 0, value = 0, count = 0;
 
+    smu_cmn_get_sysfs_buf(&buf, &size);
+
     switch (clk_type) {
     case SMU_OD_SCLK:
-        size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
+        size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
         size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
         (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
         size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
         (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
         break;
     case SMU_OD_RANGE:
-        size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
+        size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
         size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
         smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
         break;


@@ -1053,3 +1053,24 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
 
     return ret;
 }
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
+{
+    struct pci_dev *p = NULL;
+    bool snd_driver_loaded;
+
+    /*
+     * If the ASIC comes with no audio function, we always assume
+     * it is "enabled".
+     */
+    p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+            adev->pdev->bus->number, 1);
+    if (!p)
+        return true;
+
+    snd_driver_loaded = pci_is_enabled(p) ? true : false;
+
+    pci_dev_put(p);
+
+    return snd_driver_loaded;
+}


@@ -110,5 +110,20 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
 int smu_cmn_set_mp1_state(struct smu_context *smu,
               enum pp_mp1_state mp1_state);
 
+/*
+ * Helper function to make sysfs_emit_at() happy. Align buf to
+ * the current page boundary and record the offset.
+ */
+static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset)
+{
+    if (!*buf || !offset)
+        return;
+
+    *offset = offset_in_page(*buf);
+    *buf -= *offset;
+}
+
+bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
+
 #endif
 #endif
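For orientation: sysfs_emit() and sysfs_emit_at() WARN unless buf points at the start of the PAGE_SIZE sysfs buffer, but the amdgpu print_clk_levels() callbacks can be handed a pointer that is already offset into that page. The helper above rewinds buf to the page boundary and seeds the running offset, after which callers only ever append with sysfs_emit_at() instead of restarting the buffer with sysfs_emit(), which is exactly the conversion the hunks above perform. A minimal sketch of the resulting caller pattern (example_print_levels() and the literal frequencies are hypothetical, not amdgpu code):

/* buf may point into the middle of the sysfs page on entry */
static int example_print_levels(struct smu_context *smu, char *buf)
{
    int size = 0;

    smu_cmn_get_sysfs_buf(&buf, &size); /* buf now page-aligned, size = old offset */

    size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
    size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 200u);
    size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 1100u);

    return size; /* cumulative count, including the original offset */
}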


@@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
     if (switch_mmu_context) {
         struct etnaviv_iommu_context *old_context = gpu->mmu_context;
 
-        etnaviv_iommu_context_get(mmu_context);
-        gpu->mmu_context = mmu_context;
+        gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
         etnaviv_iommu_context_put(old_context);
     }


@@ -294,8 +294,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
         list_del(&mapping->obj_node);
     }
 
-    etnaviv_iommu_context_get(mmu_context);
-    mapping->context = mmu_context;
+    mapping->context = etnaviv_iommu_context_get(mmu_context);
     mapping->use = 1;
 
     ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,


@@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
         goto err_submit_objects;
 
     submit->ctx = file->driver_priv;
-    etnaviv_iommu_context_get(submit->ctx->mmu);
-    submit->mmu_context = submit->ctx->mmu;
+    submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
     submit->exec_state = args->exec_state;
     submit->flags = args->flags;


@@ -569,6 +569,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
     /* We rely on the GPU running, so program the clock */
     etnaviv_gpu_update_clock(gpu);
 
+    gpu->fe_running = false;
+    gpu->exec_state = -1;
+    if (gpu->mmu_context)
+        etnaviv_iommu_context_put(gpu->mmu_context);
+    gpu->mmu_context = NULL;
+
     return 0;
 }
 
@@ -637,19 +643,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
               VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
               VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
     }
+
+    gpu->fe_running = true;
 }
 
-static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+                      struct etnaviv_iommu_context *context)
 {
-    u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
-                &gpu->mmu_context->cmdbuf_mapping);
     u16 prefetch;
+    u32 address;
 
     /* setup the MMU */
-    etnaviv_iommu_restore(gpu, gpu->mmu_context);
+    etnaviv_iommu_restore(gpu, context);
 
     /* Start command processor */
     prefetch = etnaviv_buffer_init(gpu);
+    address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+                &gpu->mmu_context->cmdbuf_mapping);
 
     etnaviv_gpu_start_fe(gpu, address, prefetch);
 }
@@ -832,7 +842,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
     /* Now program the hardware */
     mutex_lock(&gpu->lock);
     etnaviv_gpu_hw_init(gpu);
-    gpu->exec_state = -1;
     mutex_unlock(&gpu->lock);
 
     pm_runtime_mark_last_busy(gpu->dev);
@@ -1057,8 +1066,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
     spin_unlock(&gpu->event_spinlock);
 
     etnaviv_gpu_hw_init(gpu);
-    gpu->exec_state = -1;
-    gpu->mmu_context = NULL;
 
     mutex_unlock(&gpu->lock);
     pm_runtime_mark_last_busy(gpu->dev);
@@ -1370,14 +1377,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
         goto out_unlock;
     }
 
-    if (!gpu->mmu_context) {
-        etnaviv_iommu_context_get(submit->mmu_context);
-        gpu->mmu_context = submit->mmu_context;
-        etnaviv_gpu_start_fe_idleloop(gpu);
-    } else {
-        etnaviv_iommu_context_get(gpu->mmu_context);
-        submit->prev_mmu_context = gpu->mmu_context;
-    }
+    if (!gpu->fe_running)
+        etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+    if (submit->prev_mmu_context)
+        etnaviv_iommu_context_put(submit->prev_mmu_context);
+    submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
 
     if (submit->nr_pmrs) {
         gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
@@ -1579,7 +1584,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
 
 static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 {
-    if (gpu->initialized && gpu->mmu_context) {
+    if (gpu->initialized && gpu->fe_running) {
         /* Replace the last WAIT with END */
         mutex_lock(&gpu->lock);
         etnaviv_buffer_end(gpu);
@@ -1592,8 +1597,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
          */
         etnaviv_gpu_wait_idle(gpu, 100);
 
-        etnaviv_iommu_context_put(gpu->mmu_context);
-        gpu->mmu_context = NULL;
+        gpu->fe_running = false;
     }
 
     gpu->exec_state = -1;
@@ -1741,6 +1745,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
     etnaviv_gpu_hw_suspend(gpu);
 #endif
 
+    if (gpu->mmu_context)
+        etnaviv_iommu_context_put(gpu->mmu_context);
+
     if (gpu->initialized) {
         etnaviv_cmdbuf_free(&gpu->buffer);
         etnaviv_iommu_global_fini(gpu);


@@ -101,6 +101,7 @@ struct etnaviv_gpu {
     struct workqueue_struct *wq;
     struct drm_gpu_scheduler sched;
     bool initialized;
+    bool fe_running;
 
     /* 'ring'-buffer: */
     struct etnaviv_cmdbuf buffer;


@@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
     struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
     u32 pgtable;
 
+    if (gpu->mmu_context)
+        etnaviv_iommu_context_put(gpu->mmu_context);
+    gpu->mmu_context = etnaviv_iommu_context_get(context);
+
     /* set base addresses */
     gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
     gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);


@@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
     if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
         return;
 
+    if (gpu->mmu_context)
+        etnaviv_iommu_context_put(gpu->mmu_context);
+    gpu->mmu_context = etnaviv_iommu_context_get(context);
+
     prefetch = etnaviv_buffer_config_mmuv2(gpu,
                 (u32)v2_context->mtlb_dma,
                 (u32)context->global->bad_page_dma);
@@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
     if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
         return;
 
+    if (gpu->mmu_context)
+        etnaviv_iommu_context_put(gpu->mmu_context);
+    gpu->mmu_context = etnaviv_iommu_context_get(context);
+
     gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
           lower_32_bits(context->global->v2.pta_dma));
     gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,


@@ -199,6 +199,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
      */
     list_for_each_entry_safe(m, n, &list, scan_node) {
         etnaviv_iommu_remove_mapping(context, m);
+        etnaviv_iommu_context_put(m->context);
         m->context = NULL;
         list_del_init(&m->mmu_node);
         list_del_init(&m->scan_node);


@@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
 struct etnaviv_iommu_context *
 etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
                struct etnaviv_cmdbuf_suballoc *suballoc);
-static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+static inline struct etnaviv_iommu_context *
+etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
 {
     kref_get(&ctx->refcount);
+    return ctx;
 }
 void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
 void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
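Returning the pointer from the get turns the two-step "take a reference, then assign" sequence used by the callers in the hunks above into a single statement, so the reference can never be separated from the pointer store it protects. A self-contained sketch of the idiom (foo/foo_get are illustrative names, not etnaviv API; a plain atomic stands in for struct kref):

#include <stdatomic.h>

struct foo {
    atomic_int refcount;    /* stand-in for a struct kref */
};

/* Take a reference and hand the object straight back to the caller. */
static inline struct foo *foo_get(struct foo *f)
{
    atomic_fetch_add(&f->refcount, 1);
    return f;
}

/* usage: owner->ctx = foo_get(ctx); -- one line instead of get + assign */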


@@ -19,7 +19,6 @@ subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
 subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
 # clang warnings
 subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
-subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
 subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
 subdir-ccflags-y += $(call cc-disable-warning, frame-address)
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror


@@ -2445,11 +2445,14 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
      */
     if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
                  intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
-        sizeof(intel_dp->edp_dpcd))
+        sizeof(intel_dp->edp_dpcd)) {
         drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
                 (int)sizeof(intel_dp->edp_dpcd),
                 intel_dp->edp_dpcd);
 
+        intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
+    }
+
     /*
      * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
      * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]


@@ -848,7 +848,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
     }
 
     if (ret)
-        intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
+        ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
 
     if (intel_dp->set_idle_link_train)
         intel_dp->set_idle_link_train(intel_dp, crtc_state);


@@ -986,6 +986,9 @@ void i915_gem_context_release(struct kref *ref)
     trace_i915_context_free(ctx);
     GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
+    if (ctx->syncobj)
+        drm_syncobj_put(ctx->syncobj);
+
     mutex_destroy(&ctx->engines_mutex);
     mutex_destroy(&ctx->lut_mutex);
 
@@ -1205,9 +1208,6 @@ static void context_close(struct i915_gem_context *ctx)
     if (vm)
         i915_vm_close(vm);
 
-    if (ctx->syncobj)
-        drm_syncobj_put(ctx->syncobj);
-
     ctx->file_priv = ERR_PTR(-EBADF);
 
     /*
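The move matters because context_close() can run while in-flight requests still hold references to the context; only the kref release callback is guaranteed to run after the last user has gone away. A self-contained sketch of the "free in release, not in close" shape (plain C, illustrative names, a bare counter standing in for struct kref):

#include <stdlib.h>

struct ctx {
    int refs;           /* stand-in for a struct kref */
    void *syncobj;      /* resource still reachable by queued work */
};

static void ctx_release(struct ctx *c)  /* runs only at the final put */
{
    free(c->syncobj);   /* safe: no reference to c can remain */
    free(c);
}

static void ctx_put(struct ctx *c)
{
    if (--c->refs == 0)
        ctx_release(c);
}

/* close() only drops the file's reference; freeing syncobj here would
 * leave queued work dereferencing freed memory. */
static void ctx_close(struct ctx *c)
{
    ctx_put(c);
}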


@@ -59,13 +59,13 @@ static int igt_dmabuf_import_self(void *arg)
         err = PTR_ERR(import);
         goto out_dmabuf;
     }
+    import_obj = to_intel_bo(import);
 
     if (import != &obj->base) {
         pr_err("i915_gem_prime_import created a new object!\n");
         err = -EINVAL;
         goto out_import;
     }
-    import_obj = to_intel_bo(import);
 
     i915_gem_object_lock(import_obj, NULL);
     err = __i915_gem_object_get_pages(import_obj);
@@ -128,6 +128,8 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg)
         pr_err("i915_gem_prime_import failed with the wrong err=%ld\n",
                PTR_ERR(import));
         err = PTR_ERR(import);
+    } else {
+        err = 0;
     }
 
     dma_buf_put(dmabuf);
@@ -176,6 +178,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
         err = PTR_ERR(import);
         goto out_dmabuf;
     }
+    import_obj = to_intel_bo(import);
 
     if (import == &obj->base) {
         pr_err("i915_gem_prime_import reused gem object!\n");
@@ -183,8 +186,6 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
         goto out_import;
     }
 
-    import_obj = to_intel_bo(import);
-
     i915_gem_object_lock(import_obj, NULL);
     err = __i915_gem_object_get_pages(import_obj);
     if (err) {


@@ -581,6 +581,20 @@ static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
     return I915_MMAP_TYPE_GTT;
 }
 
+static struct drm_i915_gem_object *
+create_sys_or_internal(struct drm_i915_private *i915,
+               unsigned long size)
+{
+    if (HAS_LMEM(i915)) {
+        struct intel_memory_region *sys_region =
+            i915->mm.regions[INTEL_REGION_SMEM];
+
+        return __i915_gem_object_create_user(i915, size, &sys_region, 1);
+    }
+
+    return i915_gem_object_create_internal(i915, size);
+}
+
 static bool assert_mmap_offset(struct drm_i915_private *i915,
                    unsigned long size,
                    int expected)
@@ -589,7 +603,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
     u64 offset;
     int ret;
 
-    obj = i915_gem_object_create_internal(i915, size);
+    obj = create_sys_or_internal(i915, size);
     if (IS_ERR(obj))
         return expected && expected == PTR_ERR(obj);
 
@@ -633,6 +647,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
     struct drm_mm_node *hole, *next;
     int loop, err = 0;
     u64 offset;
+    int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
 
     /* Disable background reaper */
     disable_retire_worker(i915);
@@ -683,14 +698,14 @@ static int igt_mmap_offset_exhaustion(void *arg)
     }
 
     /* Too large */
-    if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
+    if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
         pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
         err = -EINVAL;
         goto out;
     }
 
     /* Fill the hole, further allocation attempts should then fail */
-    obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+    obj = create_sys_or_internal(i915, PAGE_SIZE);
     if (IS_ERR(obj)) {
         err = PTR_ERR(obj);
         pr_err("Unable to create object for reclaimed hole\n");
@@ -703,7 +718,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
         goto err_obj;
     }
 
-    if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
+    if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
         pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
         err = -EINVAL;
         goto err_obj;
@@ -839,10 +854,9 @@ static int wc_check(struct drm_i915_gem_object *obj)
 
 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
 {
-    struct drm_i915_private *i915 = to_i915(obj->base.dev);
     bool no_map;
 
-    if (HAS_LMEM(i915))
+    if (obj->ops->mmap_offset)
         return type == I915_MMAP_TYPE_FIXED;
     else if (type == I915_MMAP_TYPE_FIXED)
         return false;


@@ -1973,8 +1973,14 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
 u32 intel_rps_read_punit_req(struct intel_rps *rps)
 {
     struct intel_uncore *uncore = rps_to_uncore(rps);
+    struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
+    intel_wakeref_t wakeref;
+    u32 freq = 0;
 
-    return intel_uncore_read(uncore, GEN6_RPNSWREQ);
+    with_intel_runtime_pm_if_in_use(rpm, wakeref)
+        freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+
+    return freq;
 }
 
 static u32 intel_rps_get_req(u32 pureq)
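The wrapper only samples the register when something else already holds a runtime-PM reference, so a sysfs read no longer powers the GPU up (or races its suspend) just to report a frequency. The core of that "take a reference only if the count is already non-zero" pattern, as a self-contained sketch in plain C (pm_get_if_in_use and friends are hypothetical names, not the i915 API):

#include <stdatomic.h>
#include <stdint.h>

struct pm { atomic_int usecount; };

/* Succeeds only by incrementing a non-zero count, so it piggy-backs on an
 * existing power reference and never wakes the device itself. */
static int pm_get_if_in_use(struct pm *pm)
{
    int c = atomic_load(&pm->usecount);

    while (c > 0)
        if (atomic_compare_exchange_weak(&pm->usecount, &c, c + 1))
            return 1;
    return 0;
}

static uint32_t read_req(struct pm *pm, uint32_t (*mmio_read)(void))
{
    uint32_t freq = 0;

    if (pm_get_if_in_use(pm)) {
        freq = mmio_read();             /* device known to be awake */
        atomic_fetch_sub(&pm->usecount, 1);
    }
    return freq;                        /* 0 when the device is asleep */
}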


@@ -172,11 +172,6 @@ void intel_uc_driver_remove(struct intel_uc *uc)
     __uc_free_load_err_log(uc);
 }
 
-static inline bool guc_communication_enabled(struct intel_guc *guc)
-{
-    return intel_guc_ct_enabled(&guc->ct);
-}
-
 /*
  * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
  * register using the same bits used in the CT message payload. Since our
@@ -210,7 +205,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc)
 static void guc_handle_mmio_msg(struct intel_guc *guc)
 {
     /* we need communication to be enabled to reply to GuC */
-    GEM_BUG_ON(!guc_communication_enabled(guc));
+    GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));
 
     spin_lock_irq(&guc->irq_lock);
     if (guc->mmio_msg) {
@@ -226,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc)
     struct drm_i915_private *i915 = gt->i915;
     int ret;
 
-    GEM_BUG_ON(guc_communication_enabled(guc));
+    GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));
 
     ret = i915_inject_probe_error(i915, -ENXIO);
     if (ret)
@@ -662,7 +657,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
         return 0;
 
     /* Make sure we enable communication if and only if it's disabled */
-    GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));
+    GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));
 
     if (enable_communication)
         guc_enable_communication(guc);


@@ -57,7 +57,7 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
         args->v0.count = 0;
         args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
         args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
-        args->v0.pwrsrc = -ENOSYS;
+        args->v0.pwrsrc = -ENODEV;
         args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
     }


@@ -119,7 +119,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 #endif
 
     if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
-        rdev->agp = radeon_agp_head_init(rdev->ddev);
+        rdev->agp = radeon_agp_head_init(dev);
     if (rdev->agp) {
         rdev->agp->agp_mtrr = arch_phys_wc_add(
             rdev->agp->agp_info.aper_base,


@@ -167,8 +167,6 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
     struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
     bool connected = false;
 
-    WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
-
     if (vc4_hdmi->hpd_gpio &&
         gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
         connected = true;
@@ -189,12 +187,10 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
             }
         }
 
-        pm_runtime_put(&vc4_hdmi->pdev->dev);
         return connector_status_connected;
     }
 
     cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
-    pm_runtime_put(&vc4_hdmi->pdev->dev);
     return connector_status_disconnected;
 }
 
@@ -436,7 +432,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
     struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
     struct drm_connector *connector = &vc4_hdmi->connector;
     struct drm_connector_state *cstate = connector->state;
-    struct drm_crtc *crtc = cstate->crtc;
+    struct drm_crtc *crtc = encoder->crtc;
     const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
     union hdmi_infoframe frame;
     int ret;
@@ -541,11 +537,8 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
 
 static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
 {
+    struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
     struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
-    struct drm_connector *connector = &vc4_hdmi->connector;
-    struct drm_connector_state *cstate = connector->state;
-    struct drm_crtc *crtc = cstate->crtc;
-    struct drm_display_mode *mode = &crtc->state->adjusted_mode;
 
     if (!vc4_hdmi_supports_scrambling(encoder, mode))
         return;
@@ -566,18 +559,17 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
 static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
 {
     struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
-    struct drm_connector *connector = &vc4_hdmi->connector;
-    struct drm_connector_state *cstate = connector->state;
+    struct drm_crtc *crtc = encoder->crtc;
 
     /*
-     * At boot, connector->state will be NULL. Since we don't know the
+     * At boot, encoder->crtc will be NULL. Since we don't know the
      * state of the scrambler and in order to avoid any
      * inconsistency, let's disable it all the time.
      */
-    if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode))
+    if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode))
         return;
 
-    if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode))
+    if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode))
         return;
 
     if (delayed_work_pending(&vc4_hdmi->scrambling_work))
@@ -635,6 +627,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
     vc4_hdmi->variant->phy_disable(vc4_hdmi);
 
     clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
+    clk_disable_unprepare(vc4_hdmi->hsm_clock);
     clk_disable_unprepare(vc4_hdmi->pixel_clock);
 
     ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
@@ -898,9 +891,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
         vc4_hdmi_encoder_get_connector_state(encoder, state);
     struct vc4_hdmi_connector_state *vc4_conn_state =
         conn_state_to_vc4_hdmi_conn_state(conn_state);
-    struct drm_crtc_state *crtc_state =
-        drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-    struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+    struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
     struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
     unsigned long bvb_rate, pixel_rate, hsm_rate;
     int ret;
@@ -947,6 +938,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
         return;
     }
 
+    ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+    if (ret) {
+        DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
+        clk_disable_unprepare(vc4_hdmi->pixel_clock);
+        return;
+    }
+
     vc4_hdmi_cec_update_clk_div(vc4_hdmi);
 
     if (pixel_rate > 297000000)
@@ -959,6 +957,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
     ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
     if (ret) {
         DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
+        clk_disable_unprepare(vc4_hdmi->hsm_clock);
         clk_disable_unprepare(vc4_hdmi->pixel_clock);
         return;
     }
@@ -966,6 +965,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
     ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
     if (ret) {
         DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
+        clk_disable_unprepare(vc4_hdmi->hsm_clock);
         clk_disable_unprepare(vc4_hdmi->pixel_clock);
         return;
     }
@@ -985,11 +985,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
 static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
                          struct drm_atomic_state *state)
 {
-    struct drm_connector_state *conn_state =
-        vc4_hdmi_encoder_get_connector_state(encoder, state);
-    struct drm_crtc_state *crtc_state =
-        drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-    struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+    struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
     struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
     struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 
@@ -1012,11 +1008,7 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
 static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
                           struct drm_atomic_state *state)
 {
-    struct drm_connector_state *conn_state =
-        vc4_hdmi_encoder_get_connector_state(encoder, state);
-    struct drm_crtc_state *crtc_state =
-        drm_atomic_get_new_crtc_state(state, conn_state->crtc);
-    struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+    struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
     struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
     struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
     bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -1204,8 +1196,8 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
 
 static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
 {
-    struct drm_connector *connector = &vc4_hdmi->connector;
-    struct drm_crtc *crtc = connector->state->crtc;
+    struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
+    struct drm_crtc *crtc = encoder->crtc;
     const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
     u32 n, cts;
     u64 tmp;
@@ -1238,13 +1230,13 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai)
 static int vc4_hdmi_audio_startup(struct device *dev, void *data)
 {
     struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-    struct drm_connector *connector = &vc4_hdmi->connector;
+    struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
 
     /*
      * If the HDMI encoder hasn't probed, or the encoder is
     * currently in DVI mode, treat the codec dai as missing.
      */
-    if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
+    if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
                 VC4_HDMI_RAM_PACKET_ENABLE))
         return -ENODEV;
 
@@ -2114,29 +2106,6 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
     return 0;
 }
 
-#ifdef CONFIG_PM
-static int vc4_hdmi_runtime_suspend(struct device *dev)
-{
-    struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-
-    clk_disable_unprepare(vc4_hdmi->hsm_clock);
-
-    return 0;
-}
-
-static int vc4_hdmi_runtime_resume(struct device *dev)
-{
-    struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
-    int ret;
-
-    ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
-    if (ret)
-        return ret;
-
-    return 0;
-}
-#endif
-
 static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
     const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2391,18 +2360,11 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
     {}
 };
 
-static const struct dev_pm_ops vc4_hdmi_pm_ops = {
-    SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
-               vc4_hdmi_runtime_resume,
-               NULL)
-};
-
 struct platform_driver vc4_hdmi_driver = {
     .probe = vc4_hdmi_dev_probe,
    .remove = vc4_hdmi_dev_remove,
     .driver = {
         .name = "vc4_hdmi",
         .of_match_table = vc4_hdmi_dt_match,
-        .pm = &vc4_hdmi_pm_ops,
     },
 };


@@ -351,9 +351,25 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
 static void b53_mdio_remove(struct mdio_device *mdiodev)
 {
     struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
-    struct dsa_switch *ds = dev->ds;
 
-    dsa_unregister_switch(ds);
+    if (!dev)
+        return;
+
+    b53_switch_remove(dev);
+
+    dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void b53_mdio_shutdown(struct mdio_device *mdiodev)
+{
+    struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+    if (!dev)
+        return;
+
+    b53_switch_shutdown(dev);
+
+    dev_set_drvdata(&mdiodev->dev, NULL);
 }
 
 static const struct of_device_id b53_of_match[] = {
@@ -373,6 +389,7 @@ MODULE_DEVICE_TABLE(of, b53_of_match);
 static struct mdio_driver b53_mdio_driver = {
     .probe = b53_mdio_probe,
     .remove = b53_mdio_remove,
+    .shutdown = b53_mdio_shutdown,
     .mdiodrv.driver = {
         .name = "bcm53xx",
         .of_match_table = b53_of_match,
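The remove/shutdown pair above is written so that whichever callback runs first tears the switch down and clears drvdata, turning the other into a no-op instead of a use-after-free. A compact sketch of that hand-off (plain C, illustrative names; not the b53 or mdio API):

struct dev { void *drvdata; };

static void teardown(void *priv) { /* unregister switch, free state */ }

static void example_remove(struct dev *d)
{
    void *priv = d->drvdata;

    if (!priv)
        return;             /* shutdown already ran */

    teardown(priv);
    d->drvdata = NULL;      /* make a later shutdown a no-op */
}

/* .shutdown has the same shape, calling its shutdown-specific teardown. */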

Some files were not shown because too many files have changed in this diff.