Merge branch 'topic/control-lookup-rwlock' into for-next

Pull control lookup optimization changes.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
Commit: 4004f3029e
Author: Takashi Iwai
Date:   2024-08-09 14:25:16 +02:00
380 changed files with 4155 additions and 2587 deletions


@ -4798,11 +4798,9 @@
profile= [KNL] Enable kernel profiling via /proc/profile
Format: [<profiletype>,]<number>
Param: <profiletype>: "schedule", "sleep", or "kvm"
Param: <profiletype>: "schedule" or "kvm"
[defaults to kernel profiling]
Param: "schedule" - profile schedule points.
Param: "sleep" - profile D-state sleeping (millisecs).
Requires CONFIG_SCHEDSTATS
Param: "kvm" - profile VM exits.
Param: <number> - step/bucket size as a power of 2 for
statistical time based profiling.
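For example (illustrative values), booting with "profile=schedule,2"
enables schedule-point profiling with 2^2-unit buckets, while
"profile=kvm" profiles VM exits at the default granularity.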


@ -122,10 +122,18 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A76 | #1490853 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A76 | #3324349 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A77 | #1491015 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A77 | #3324348 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A78 | #3324344 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A78C | #3324346,3324347| ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 |
@ -138,8 +146,14 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A725 | #3456106 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X1 | #1502854 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X1 | #3324344 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X1C | #3324346 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X2 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X2 | #2224489 | ARM64_ERRATUM_2224489 |
@ -160,6 +174,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #3324349 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N2 | #2139208 | ARM64_ERRATUM_2139208 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N2 | #2067961 | ARM64_ERRATUM_2067961 |
@ -170,6 +186,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V1 | #1619801 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V1 | #3324341 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V2 | #3324336 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 |


@ -35,6 +35,9 @@ properties:
ports-implemented:
const: 1
power-domains:
maxItems: 1
sata-port@0:
$ref: /schemas/ata/snps,dwc-ahci-common.yaml#/$defs/dwc-ahci-port


@ -199,10 +199,11 @@ additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
codec@1,0{
compatible = "slim217,250";
reg = <1 0>;
reset-gpios = <&tlmm 64 0>;
reset-gpios = <&tlmm 64 GPIO_ACTIVE_LOW>;
slim-ifc-dev = <&wcd9340_ifd>;
#sound-dai-cells = <1>;
interrupt-parent = <&tlmm>;


@ -42,7 +42,7 @@ examples:
pinctrl-names = "default", "sleep";
pinctrl-0 = <&wcd_reset_n>;
pinctrl-1 = <&wcd_reset_n_sleep>;
reset-gpios = <&tlmm 83 GPIO_ACTIVE_HIGH>;
reset-gpios = <&tlmm 83 GPIO_ACTIVE_LOW>;
vdd-buck-supply = <&vreg_l17b_1p8>;
vdd-rxtx-supply = <&vreg_l18b_1p8>;
vdd-px-supply = <&vreg_l18b_1p8>;


@ -34,9 +34,10 @@ unevaluatedProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
codec {
compatible = "qcom,wcd9380-codec";
reset-gpios = <&tlmm 32 0>;
reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>;
#sound-dai-cells = <1>;
qcom,tx-device = <&wcd938x_tx>;
qcom,rx-device = <&wcd938x_rx>;


@ -52,10 +52,10 @@ unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
codec {
compatible = "qcom,wcd9390-codec";
reset-gpios = <&tlmm 32 IRQ_TYPE_NONE>;
reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>;
#sound-dai-cells = <1>;
qcom,tx-device = <&wcd939x_tx>;
qcom,rx-device = <&wcd939x_rx>;


@ -4,8 +4,6 @@ Generic Thermal Sysfs driver How To
Written by Sujith Thomas <sujith.thomas@intel.com>, Zhang Rui <rui.zhang@intel.com>
Updated: 2 January 2008
Copyright (c) 2008 Intel Corporation
@ -38,23 +36,23 @@ temperature) and throttle appropriate devices.
::
struct thermal_zone_device
*thermal_zone_device_register(char *type,
int trips, int mask, void *devdata,
struct thermal_zone_device_ops *ops,
const struct thermal_zone_params *tzp,
int passive_delay, int polling_delay))
struct thermal_zone_device *
thermal_zone_device_register_with_trips(const char *type,
const struct thermal_trip *trips,
int num_trips, void *devdata,
const struct thermal_zone_device_ops *ops,
const struct thermal_zone_params *tzp,
unsigned int passive_delay,
unsigned int polling_delay)
This interface function adds a new thermal zone device (sensor) to
This interface function adds a new thermal zone device (sensor) to the
/sys/class/thermal folder as `thermal_zone[0-*]`. It tries to bind all the
thermal cooling devices registered at the same time.
thermal cooling devices registered to it at the same time.
type:
the thermal zone type.
trips:
the total number of trip points this thermal zone supports.
mask:
Bit string: If 'n'th bit is set, then trip point 'n' is writable.
the table of trip points for this thermal zone.
devdata:
device private data
ops:
@ -67,32 +65,29 @@ temperature) and throttle appropriate devices.
.get_temp:
get the current temperature of the thermal zone.
.set_trips:
set the trip points window. Whenever the current temperature
is updated, the trip points immediately below and above the
current temperature are found.
.get_mode:
get the current mode (enabled/disabled) of the thermal zone.
- "enabled" means the kernel thermal management is
enabled.
- "disabled" will prevent kernel thermal driver action
upon trip points so that user applications can take
charge of thermal management.
.set_mode:
set the mode (enabled/disabled) of the thermal zone.
.get_trip_type:
get the type of certain trip point.
.get_trip_temp:
get the temperature above which the certain trip point
will be fired.
set the trip points window. Whenever the current temperature
is updated, the trip points immediately below and above the
current temperature are found.
.change_mode:
change the mode (enabled/disabled) of the thermal zone.
.set_trip_temp:
set the temperature of a given trip point.
.get_crit_temp:
get the critical temperature for this thermal zone.
.set_emul_temp:
set the emulation temperature which helps in debugging
different threshold temperature points.
set the emulation temperature which helps in debugging
different threshold temperature points.
.get_trend:
get the trend of most recent zone temperature changes.
.hot:
hot trip point crossing handler.
.critical:
critical trip point crossing handler.
tzp:
thermal zone platform parameters.
passive_delay:
number of milliseconds to wait between polls when
performing passive cooling.
number of milliseconds to wait between polls when performing passive
cooling.
polling_delay:
number of milliseconds to wait between polls when checking
whether trip points have been crossed (0 for interrupt driven systems).
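A minimal registration sketch against the signature documented above (not
part of this patch; the struct thermal_trip field names and the trip values
are assumptions based on the 6.11 API):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/thermal.h>

static int demo_get_temp(struct thermal_zone_device *tz, int *temp)
{
	*temp = 45000;	/* hypothetical sensor reading, millicelsius */
	return 0;
}

static const struct thermal_zone_device_ops demo_ops = {
	.get_temp = demo_get_temp,
};

static const struct thermal_trip demo_trips[] = {
	{ .temperature = 75000, .hysteresis = 2000, .type = THERMAL_TRIP_PASSIVE },
};

static int __init demo_tz_init(void)
{
	struct thermal_zone_device *tz;

	/* poll every 1000 ms while passive cooling, every 5000 ms otherwise */
	tz = thermal_zone_device_register_with_trips("demo", demo_trips,
						     ARRAY_SIZE(demo_trips),
						     NULL, &demo_ops, NULL,
						     1000, 5000);
	return PTR_ERR_OR_ZERO(tz);
}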


@ -1753,6 +1753,7 @@ operations:
request:
attributes:
- header
- context
reply:
attributes:
- header
@ -1761,7 +1762,6 @@ operations:
- indir
- hkey
- input_xfrm
dump: *rss-get-op
-
name: plca-get-cfg
doc: Get PLCA params.


@ -1875,6 +1875,7 @@ Kernel response contents:
===================================== ====== ==========================
``ETHTOOL_A_RSS_HEADER`` nested reply header
``ETHTOOL_A_RSS_CONTEXT`` u32 context number
``ETHTOOL_A_RSS_HFUNC`` u32 RSS hash func
``ETHTOOL_A_RSS_INDIR`` binary Indir table bytes
``ETHTOOL_A_RSS_HKEY`` binary Hash key bytes


@ -21,9 +21,9 @@ are often referred to as greyscale formats.
.. raw:: latex
\scriptsize
\tiny
.. tabularcolumns:: |p{3.6cm}|p{3.0cm}|p{1.3cm}|p{2.6cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|
.. tabularcolumns:: |p{3.6cm}|p{2.4cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|
.. flat-table:: Luma-Only Image Formats
:header-rows: 1


@ -6368,7 +6368,7 @@ a single guest_memfd file, but the bound ranges must not overlap).
See KVM_SET_USER_MEMORY_REGION2 for additional details.
4.143 KVM_PRE_FAULT_MEMORY
------------------------
---------------------------
:Capability: KVM_CAP_PRE_FAULT_MEMORY
:Architectures: none
@ -6405,6 +6405,12 @@ for the current vCPU state. KVM maps memory as if the vCPU generated a
stage-2 read page fault, e.g. faults in memory as needed, but doesn't break
CoW. However, KVM does not mark any newly created stage-2 PTE as Accessed.
In the case of confidential VM types where there is an initial set up of
private guest memory before the guest is 'finalized'/measured, this ioctl
should only be issued after completing all the necessary setup to put the
guest into a 'finalized' state so that the above semantics can be reliably
ensured.
In some cases, multiple vCPUs might share the page tables. In this
case, the ioctl can be called in parallel.
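A hedged userspace sketch of the ioctl described above, assuming the 6.11
uapi layout of struct kvm_pre_fault_memory and that gpa/size are advanced
on partial progress; the address range is illustrative:

#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int pre_fault_range(int vcpu_fd)
{
	struct kvm_pre_fault_memory range = {
		.gpa  = 0x100000,	/* hypothetical guest-physical base */
		.size = 0x200000,	/* 2 MiB */
	};

	while (range.size) {
		if (ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &range) < 0) {
			if (errno == EINTR || errno == EAGAIN)
				continue;	/* gpa/size track the remainder */
			perror("KVM_PRE_FAULT_MEMORY");
			return -1;
		}
	}
	return 0;
}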


@ -5306,7 +5306,7 @@ F: drivers/media/cec/i2c/ch7322.c
CIRRUS LOGIC AUDIO CODEC DRIVERS
M: David Rhodes <david.rhodes@cirrus.com>
M: Richard Fitzgerald <rf@opensource.cirrus.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linux-sound@vger.kernel.org
L: patches@opensource.cirrus.com
S: Maintained
F: Documentation/devicetree/bindings/sound/cirrus,cs*
@ -5375,7 +5375,7 @@ F: sound/soc/codecs/lochnagar-sc.c
CIRRUS LOGIC MADERA CODEC DRIVERS
M: Charles Keepax <ckeepax@opensource.cirrus.com>
M: Richard Fitzgerald <rf@opensource.cirrus.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linux-sound@vger.kernel.org
L: patches@opensource.cirrus.com
S: Supported
W: https://github.com/CirrusLogic/linux-drivers/wiki
@ -15936,6 +15936,7 @@ F: include/linux/in.h
F: include/linux/indirect_call_wrapper.h
F: include/linux/net.h
F: include/linux/netdevice.h
F: include/linux/skbuff.h
F: include/net/
F: include/uapi/linux/in.h
F: include/uapi/linux/net.h
@ -18556,7 +18557,7 @@ F: drivers/usb/misc/qcom_eud.c
QCOM IPA DRIVER
M: Alex Elder <elder@kernel.org>
L: netdev@vger.kernel.org
S: Supported
S: Maintained
F: drivers/net/ipa/
QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 11
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@ -534,8 +534,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
#define ioread16be(p) swab16(ioread16(p))
#define ioread32be(p) swab32(ioread32(p))
#define ioread64be(p) swab64(ioread64(p))
#define iowrite16be(v,p) iowrite16(swab16(v), (p))
#define iowrite32be(v,p) iowrite32(swab32(v), (p))
#define iowrite64be(v,p) iowrite64(swab64(v), (p))
#define inb_p inb
#define inw_p inw
@ -634,8 +636,6 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
*/
#define ioread64 ioread64
#define iowrite64 iowrite64
#define ioread64be ioread64be
#define iowrite64be iowrite64be
#define ioread8_rep ioread8_rep
#define ioread16_rep ioread16_rep
#define ioread32_rep ioread32_rep


@ -87,6 +87,7 @@ config ARM
select HAVE_ARCH_PFN_VALID
select HAVE_ARCH_SECCOMP
select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT
select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARM_LPAE
@ -116,6 +117,7 @@ config ARM
select HAVE_KERNEL_XZ
select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
select HAVE_KRETPROBES if HAVE_KPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OPTPROBES if !THUMB2_KERNEL
@ -736,7 +738,7 @@ config ARM_ERRATA_764319
bool "ARM errata: Read to DBGPRSR and DBGOSLSR may generate Undefined instruction"
depends on CPU_V7
help
This option enables the workaround for the 764319 Cortex A-9 erratum.
This option enables the workaround for the 764319 Cortex-A9 erratum.
CP14 read accesses to the DBGPRSR and DBGOSLSR registers generate an
unexpected Undefined Instruction exception when the DBGSWENABLE
external pin is set to 0, even when the CP14 accesses are performed


@ -9,6 +9,7 @@ OBJS =
HEAD = head.o
OBJS += misc.o decompress.o
CFLAGS_decompress.o += $(DISABLE_STACKLEAK_PLUGIN)
ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y)
OBJS += debug.o
AFLAGS_head.o += -DDEBUG


@ -125,7 +125,7 @@ SECTIONS
. = BSS_START;
__bss_start = .;
.bss : { *(.bss) }
.bss : { *(.bss .bss.*) }
_end = .;
. = ALIGN(8); /* the stack must be 64-bit aligned */


@ -157,7 +157,7 @@ timclk: clock-1000000 {
clocks = <&xtal24mhz>;
};
pclk: clock-24000000 {
pclk: clock-pclk {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
clock-div = <1>;


@ -26,6 +26,13 @@ struct stackframe {
#endif
};
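/*
 * Task stacks are THREAD_SIZE aligned, so when the stack pointer lies
 * within current's stack the two values differ only in their low bits,
 * making the XOR below smaller than THREAD_SIZE.
 */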
static inline bool on_thread_stack(void)
{
unsigned long delta = current_stack_pointer ^ (unsigned long)current->stack;
return delta < THREAD_SIZE;
}
static __always_inline
void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
{


@ -42,7 +42,7 @@
#define PROC_INFO \
. = ALIGN(4); \
__proc_info_begin = .; \
*(.proc.info.init) \
KEEP(*(.proc.info.init)) \
__proc_info_end = .;
#define IDMAP_TEXT \


@ -1065,6 +1065,7 @@ vector_addrexcptn:
.globl vector_fiq
.section .vectors, "ax", %progbits
.reloc .text, R_ARM_NONE, .
W(b) vector_rst
W(b) vector_und
ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_swi )
@ -1078,6 +1079,7 @@ THUMB( .reloc ., R_ARM_THM_PC12, .L__vector_swi )
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
.section .vectors.bhb.loop8, "ax", %progbits
.reloc .text, R_ARM_NONE, .
W(b) vector_rst
W(b) vector_bhb_loop8_und
ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi )
@ -1090,6 +1092,7 @@ THUMB( .reloc ., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi )
W(b) vector_bhb_loop8_fiq
.section .vectors.bhb.bpiall, "ax", %progbits
.reloc .text, R_ARM_NONE, .
W(b) vector_rst
W(b) vector_bhb_bpiall_und
ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi )


@ -119,6 +119,9 @@ no_work_pending:
ct_user_enter save = 0
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
bl stackleak_erase_on_task_stack
#endif
restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)


@ -395,11 +395,6 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
return 0;
}
struct mod_unwind_map {
const Elf_Shdr *unw_sec;
const Elf_Shdr *txt_sec;
};
static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
const Elf_Shdr *sechdrs, const char *name)
{


@ -85,8 +85,7 @@ static bool
callchain_trace(void *data, unsigned long pc)
{
struct perf_callchain_entry_ctx *entry = data;
perf_callchain_store(entry, pc);
return true;
return perf_callchain_store(entry, pc) == 0;
}
void


@ -63,7 +63,7 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
ARM_MMU_KEEP(*(__ex_table))
ARM_MMU_KEEP(KEEP(*(__ex_table)))
__stop___ex_table = .;
}
@ -83,7 +83,7 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
*(.arch.info.init)
KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {


@ -74,7 +74,7 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
ARM_MMU_KEEP(*(__ex_table))
ARM_MMU_KEEP(KEEP(*(__ex_table)))
__stop___ex_table = .;
}
@ -99,7 +99,7 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
*(.arch.info.init)
KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {
@ -116,7 +116,7 @@ SECTIONS
#endif
.init.pv_table : {
__pv_table_begin = .;
*(.pv_table)
KEEP(*(.pv_table))
__pv_table_end = .;
}


@ -29,7 +29,7 @@ int alpine_cpu_wakeup(unsigned int phys_cpu, uint32_t phys_resume_addr)
/*
* Set CPU resume address -
* secure firmware running on boot will jump to this address
* after setting proper CPU mode, and initialiing e.g. secure
* after setting proper CPU mode, and initializing e.g. secure
* regs (the same mode all CPUs are booted to - usually HYP)
*/
writel(phys_resume_addr,


@ -17,7 +17,7 @@ void cpu_arm7tdmi_proc_init(void);
__ADDRESSABLE(cpu_arm7tdmi_proc_init);
void cpu_arm7tdmi_proc_fin(void);
__ADDRESSABLE(cpu_arm7tdmi_proc_fin);
void cpu_arm7tdmi_reset(void);
void cpu_arm7tdmi_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm7tdmi_reset);
int cpu_arm7tdmi_do_idle(void);
__ADDRESSABLE(cpu_arm7tdmi_do_idle);
@ -32,7 +32,7 @@ void cpu_arm720_proc_init(void);
__ADDRESSABLE(cpu_arm720_proc_init);
void cpu_arm720_proc_fin(void);
__ADDRESSABLE(cpu_arm720_proc_fin);
void cpu_arm720_reset(void);
void cpu_arm720_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm720_reset);
int cpu_arm720_do_idle(void);
__ADDRESSABLE(cpu_arm720_do_idle);
@ -49,7 +49,7 @@ void cpu_arm740_proc_init(void);
__ADDRESSABLE(cpu_arm740_proc_init);
void cpu_arm740_proc_fin(void);
__ADDRESSABLE(cpu_arm740_proc_fin);
void cpu_arm740_reset(void);
void cpu_arm740_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm740_reset);
int cpu_arm740_do_idle(void);
__ADDRESSABLE(cpu_arm740_do_idle);
@ -64,7 +64,7 @@ void cpu_arm9tdmi_proc_init(void);
__ADDRESSABLE(cpu_arm9tdmi_proc_init);
void cpu_arm9tdmi_proc_fin(void);
__ADDRESSABLE(cpu_arm9tdmi_proc_fin);
void cpu_arm9tdmi_reset(void);
void cpu_arm9tdmi_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm9tdmi_reset);
int cpu_arm9tdmi_do_idle(void);
__ADDRESSABLE(cpu_arm9tdmi_do_idle);
@ -79,7 +79,7 @@ void cpu_arm920_proc_init(void);
__ADDRESSABLE(cpu_arm920_proc_init);
void cpu_arm920_proc_fin(void);
__ADDRESSABLE(cpu_arm920_proc_fin);
void cpu_arm920_reset(void);
void cpu_arm920_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm920_reset);
int cpu_arm920_do_idle(void);
__ADDRESSABLE(cpu_arm920_do_idle);
@ -102,7 +102,7 @@ void cpu_arm922_proc_init(void);
__ADDRESSABLE(cpu_arm922_proc_init);
void cpu_arm922_proc_fin(void);
__ADDRESSABLE(cpu_arm922_proc_fin);
void cpu_arm922_reset(void);
void cpu_arm922_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm922_reset);
int cpu_arm922_do_idle(void);
__ADDRESSABLE(cpu_arm922_do_idle);
@ -119,7 +119,7 @@ void cpu_arm925_proc_init(void);
__ADDRESSABLE(cpu_arm925_proc_init);
void cpu_arm925_proc_fin(void);
__ADDRESSABLE(cpu_arm925_proc_fin);
void cpu_arm925_reset(void);
void cpu_arm925_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm925_reset);
int cpu_arm925_do_idle(void);
__ADDRESSABLE(cpu_arm925_do_idle);
@ -159,7 +159,7 @@ void cpu_arm940_proc_init(void);
__ADDRESSABLE(cpu_arm940_proc_init);
void cpu_arm940_proc_fin(void);
__ADDRESSABLE(cpu_arm940_proc_fin);
void cpu_arm940_reset(void);
void cpu_arm940_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm940_reset);
int cpu_arm940_do_idle(void);
__ADDRESSABLE(cpu_arm940_do_idle);
@ -174,7 +174,7 @@ void cpu_arm946_proc_init(void);
__ADDRESSABLE(cpu_arm946_proc_init);
void cpu_arm946_proc_fin(void);
__ADDRESSABLE(cpu_arm946_proc_fin);
void cpu_arm946_reset(void);
void cpu_arm946_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm946_reset);
int cpu_arm946_do_idle(void);
__ADDRESSABLE(cpu_arm946_do_idle);
@ -429,7 +429,7 @@ void cpu_v7_proc_init(void);
__ADDRESSABLE(cpu_v7_proc_init);
void cpu_v7_proc_fin(void);
__ADDRESSABLE(cpu_v7_proc_fin);
void cpu_v7_reset(void);
void cpu_v7_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_v7_reset);
int cpu_v7_do_idle(void);
__ADDRESSABLE(cpu_v7_do_idle);


@ -1069,18 +1069,28 @@ config ARM64_ERRATUM_3117295
If unsure, say Y.
config ARM64_ERRATUM_3194386
bool "Cortex-{A720,X4,X925}/Neoverse-V3: workaround for MSR SSBS not self-synchronizing"
bool "Cortex-*/Neoverse-*: workaround for MSR SSBS not self-synchronizing"
default y
help
This option adds the workaround for the following errata:
* ARM Cortex-A76 erratum 3324349
* ARM Cortex-A77 erratum 3324348
* ARM Cortex-A78 erratum 3324344
* ARM Cortex-A78C erratum 3324346
* ARM Cortex-A78C erratum 3324347
* ARM Cortex-A710 erratum 3324338
* ARM Cortex-A720 erratum 3456091
* ARM Cortex-A725 erratum 3456106
* ARM Cortex-X1 erratum 3324344
* ARM Cortex-X1C erratum 3324346
* ARM Cortex-X2 erratum 3324338
* ARM Cortex-X3 erratum 3324335
* ARM Cortex-X4 erratum 3194386
* ARM Cortex-X925 erratum 3324334
* ARM Neoverse-N1 erratum 3324349
* ARM Neoverse N2 erratum 3324339
* ARM Neoverse-V1 erratum 3324341
* ARM Neoverse V2 erratum 3324336
* ARM Neoverse-V3 erratum 3312417
@ -1088,11 +1098,11 @@ config ARM64_ERRATUM_3194386
subsequent speculative instructions, which may permit unexpected
speculative store bypassing.
Work around this problem by placing a speculation barrier after
kernel changes to SSBS. The presence of the SSBS special-purpose
register is hidden from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such
that userspace will use the PR_SPEC_STORE_BYPASS prctl to change
SSBS.
Work around this problem by placing a Speculation Barrier (SB) or
Instruction Synchronization Barrier (ISB) after kernel changes to
SSBS. The presence of the SSBS special-purpose register is hidden
from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such that userspace
will use the PR_SPEC_STORE_BYPASS prctl to change SSBS.
If unsure, say Y.
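A hedged sketch of the sequence this help text describes (illustrative
assembly, not the kernel's actual macros):

	msr	ssbs, #1	// kernel update of PSTATE.SSBS
	sb			// speculation barrier; ISB where SB is unimplemented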


@ -86,12 +86,14 @@
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
#define ARM_CPU_PART_CORTEX_X1C 0xD4C
#define ARM_CPU_PART_CORTEX_X3 0xD4E
#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
#define ARM_CPU_PART_CORTEX_A720 0xD81
#define ARM_CPU_PART_CORTEX_X4 0xD82
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define ARM_CPU_PART_CORTEX_X925 0xD85
#define ARM_CPU_PART_CORTEX_A725 0xD87
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
@ -165,12 +167,14 @@
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3)
#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)


@ -13,6 +13,7 @@
#include <linux/types.h>
#include <asm/insn.h>
#define HAVE_JUMP_LABEL_BATCH
#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
#define JUMP_TABLE_ENTRY(key, label) \


@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
syscall_abis_32 +=
syscall_abis_64 += renameat newstat rlimit memfd_secret
syscall_abis_64 += renameat rlimit memfd_secret
syscalltbl = arch/arm64/tools/syscall_%.tbl


@ -434,15 +434,24 @@ static const struct midr_range erratum_spec_unpriv_load_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3194386
static const struct midr_range erratum_spec_ssbs_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
{}
};
#endif


@ -7,11 +7,12 @@
*/
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <linux/smp.h>
#include <asm/insn.h>
#include <asm/patching.h>
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type)
{
void *addr = (void *)jump_entry_code(entry);
u32 insn;
@ -25,4 +26,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
}
aarch64_insn_patch_text_nosync(addr, insn);
return true;
}
void arch_jump_label_transform_apply(void)
{
kick_all_cpus_sync();
}


@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
syscall_abis_64 += newstat
# No special ABIs on loongarch so far
syscall_abis_64 +=


@ -20,6 +20,7 @@ config PARISC
select ARCH_SUPPORTS_HUGETLBFS if PA20
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_STACKWALK
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_DEBUG_VM_PGTABLE
select HAVE_RELIABLE_STACKTRACE
select DMA_OPS


@ -20,7 +20,16 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#ifdef CONFIG_PA20
#define ARCH_DMA_MINALIGN 128
#else
#define ARCH_DMA_MINALIGN 32
#endif
#define ARCH_KMALLOC_MINALIGN 16 /* ldcw requires 16-byte alignment */
#define arch_slab_minalign() ((unsigned)dcache_stride)
#define cache_line_size() dcache_stride
#define dma_get_cache_alignment cache_line_size
#define __read_mostly __section(".data..read_mostly")


@ -114,7 +114,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
jit_data->header =
bpf_jit_binary_alloc(prog_size + extable_size,
&jit_data->image,
sizeof(u32),
sizeof(long),
bpf_fill_ill_insns);
if (!jit_data->header) {
prog = orig_prog;


@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
syscall_abis_32 += riscv memfd_secret
syscall_abis_64 += riscv newstat rlimit memfd_secret
syscall_abis_64 += riscv rlimit memfd_secret


@ -432,28 +432,26 @@ static void __init riscv_resolve_isa(unsigned long *source_isa,
bitmap_copy(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX);
for_each_set_bit(bit, source_isa, RISCV_ISA_EXT_MAX) {
ext = riscv_get_isa_ext_data(bit);
if (!ext)
continue;
if (ext->validate) {
if (ext && ext->validate) {
ret = ext->validate(ext, resolved_isa);
if (ret == -EPROBE_DEFER) {
loop = true;
continue;
} else if (ret) {
/* Disable the extension entirely */
clear_bit(ext->id, source_isa);
clear_bit(bit, source_isa);
continue;
}
}
set_bit(ext->id, resolved_isa);
set_bit(bit, resolved_isa);
/* No need to keep it in source isa now that it is enabled */
clear_bit(ext->id, source_isa);
clear_bit(bit, source_isa);
/* Single letter extensions get set in hwcap */
if (ext->id < RISCV_ISA_EXT_BASE)
*this_hwcap |= isa2hwcap[ext->id];
if (bit < RISCV_ISA_EXT_BASE)
*this_hwcap |= isa2hwcap[bit];
}
} while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa)));
}


@ -71,7 +71,7 @@ void __init sbi_ipi_init(void)
* the masking/unmasking of virtual IPIs is done
* via generic IPI-Mux
*/
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
"irqchip/sbi-ipi:starting",
sbi_ipi_starting_cpu, NULL);


@ -61,26 +61,27 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
if (!user_mode(regs)) {
no_context(regs, addr);
return;
}
if (fault & VM_FAULT_OOM) {
/*
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
if (!user_mode(regs)) {
no_context(regs, addr);
return;
}
pagefault_out_of_memory();
return;
} else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) {
no_context(regs, addr);
return;
}
do_trap(regs, SIGBUS, BUS_ADRERR, addr);
return;
} else if (fault & VM_FAULT_SIGSEGV) {
do_trap(regs, SIGSEGV, SEGV_MAPERR, addr);
return;
}
BUG();
}


@ -234,8 +234,6 @@ static void __init setup_bootmem(void)
*/
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
phys_ram_end = memblock_end_of_DRAM();
/*
* Make sure we align the start of the memory on a PMD boundary so that
* at worst, we map the linear mapping with PMD mappings.
@ -250,6 +248,16 @@ static void __init setup_bootmem(void)
if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU))
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
/*
* The size of the linear page mapping may restrict the amount of
* usable RAM.
*/
if (IS_ENABLED(CONFIG_64BIT)) {
max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
memblock_cap_memory_range(phys_ram_base,
max_mapped_addr - phys_ram_base);
}
/*
* Reserve physical address space that would be mapped to virtual
* addresses greater than (void *)(-PAGE_SIZE) because:
@ -266,6 +274,7 @@ static void __init setup_bootmem(void)
memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
}
phys_ram_end = memblock_end_of_DRAM();
min_low_pfn = PFN_UP(phys_ram_base);
max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
@ -1284,8 +1293,6 @@ static void __init create_linear_mapping_page_table(void)
if (start <= __pa(PAGE_OFFSET) &&
__pa(PAGE_OFFSET) < end)
start = __pa(PAGE_OFFSET);
if (end >= __pa(PAGE_OFFSET) + memory_limit)
end = __pa(PAGE_OFFSET) + memory_limit;
create_linear_mapping_range(start, end, 0, NULL);
}


@ -7,6 +7,7 @@
* Author: Li Zhengyu (lizhengyu3@huawei.com)
*
*/
#include <asm/asm.h>
#include <linux/linkage.h>
.text
@ -34,6 +35,7 @@ SYM_CODE_END(purgatory_start)
.data
.align LGREG
SYM_DATA(riscv_kernel_entry, .quad 0)
.end


@ -113,7 +113,7 @@ void load_fpu_state(struct fpu *state, int flags)
int mask;
if (flags & KERNEL_FPC)
fpu_lfpc(&state->fpc);
fpu_lfpc_safe(&state->fpc);
if (!cpu_has_vx()) {
if (flags & KERNEL_VXR_V0V7)
load_fp_regs_vx(state->vxrs);


@ -59,14 +59,6 @@ SECTIONS
} :text = 0x0700
RO_DATA(PAGE_SIZE)
.data.rel.ro : {
*(.data.rel.ro .data.rel.ro.*)
}
.got : {
__got_start = .;
*(.got)
__got_end = .;
}
. = ALIGN(PAGE_SIZE);
_sdata = .; /* Start of data section */
@ -80,6 +72,15 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
.data.rel.ro : {
*(.data.rel.ro .data.rel.ro.*)
}
.got : {
__got_start = .;
*(.got)
__got_end = .;
}
RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
.data.rel : {
*(.data.rel*)


@ -3,6 +3,7 @@
#include <linux/ptdump.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/mm.h>
#include <linux/kfence.h>
#include <linux/kasan.h>
@ -15,13 +16,15 @@
static unsigned long max_addr;
struct addr_marker {
int is_start;
unsigned long start_address;
const char *name;
};
enum address_markers_idx {
IDENTITY_BEFORE_NR = 0,
IDENTITY_BEFORE_END_NR,
KVA_NR = 0,
LOWCORE_START_NR,
LOWCORE_END_NR,
AMODE31_START_NR,
AMODE31_END_NR,
KERNEL_START_NR,
@ -30,8 +33,8 @@ enum address_markers_idx {
KFENCE_START_NR,
KFENCE_END_NR,
#endif
IDENTITY_AFTER_NR,
IDENTITY_AFTER_END_NR,
IDENTITY_START_NR,
IDENTITY_END_NR,
VMEMMAP_NR,
VMEMMAP_END_NR,
VMALLOC_NR,
@ -59,43 +62,44 @@ enum address_markers_idx {
};
static struct addr_marker address_markers[] = {
[IDENTITY_BEFORE_NR] = {0, "Identity Mapping Start"},
[IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"},
[AMODE31_START_NR] = {0, "Amode31 Area Start"},
[AMODE31_END_NR] = {0, "Amode31 Area End"},
[KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
[KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
[KVA_NR] = {0, 0, "Kernel Virtual Address Space"},
[LOWCORE_START_NR] = {1, 0, "Lowcore Start"},
[LOWCORE_END_NR] = {0, 0, "Lowcore End"},
[IDENTITY_START_NR] = {1, 0, "Identity Mapping Start"},
[IDENTITY_END_NR] = {0, 0, "Identity Mapping End"},
[AMODE31_START_NR] = {1, 0, "Amode31 Area Start"},
[AMODE31_END_NR] = {0, 0, "Amode31 Area End"},
[KERNEL_START_NR] = {1, (unsigned long)_stext, "Kernel Image Start"},
[KERNEL_END_NR] = {0, (unsigned long)_end, "Kernel Image End"},
#ifdef CONFIG_KFENCE
[KFENCE_START_NR] = {0, "KFence Pool Start"},
[KFENCE_END_NR] = {0, "KFence Pool End"},
[KFENCE_START_NR] = {1, 0, "KFence Pool Start"},
[KFENCE_END_NR] = {0, 0, "KFence Pool End"},
#endif
[IDENTITY_AFTER_NR] = {(unsigned long)_end, "Identity Mapping Start"},
[IDENTITY_AFTER_END_NR] = {0, "Identity Mapping End"},
[VMEMMAP_NR] = {0, "vmemmap Area Start"},
[VMEMMAP_END_NR] = {0, "vmemmap Area End"},
[VMALLOC_NR] = {0, "vmalloc Area Start"},
[VMALLOC_END_NR] = {0, "vmalloc Area End"},
[VMEMMAP_NR] = {1, 0, "vmemmap Area Start"},
[VMEMMAP_END_NR] = {0, 0, "vmemmap Area End"},
[VMALLOC_NR] = {1, 0, "vmalloc Area Start"},
[VMALLOC_END_NR] = {0, 0, "vmalloc Area End"},
#ifdef CONFIG_KMSAN
[KMSAN_VMALLOC_SHADOW_START_NR] = {0, "Kmsan vmalloc Shadow Start"},
[KMSAN_VMALLOC_SHADOW_END_NR] = {0, "Kmsan vmalloc Shadow End"},
[KMSAN_VMALLOC_ORIGIN_START_NR] = {0, "Kmsan vmalloc Origins Start"},
[KMSAN_VMALLOC_ORIGIN_END_NR] = {0, "Kmsan vmalloc Origins End"},
[KMSAN_MODULES_SHADOW_START_NR] = {0, "Kmsan Modules Shadow Start"},
[KMSAN_MODULES_SHADOW_END_NR] = {0, "Kmsan Modules Shadow End"},
[KMSAN_MODULES_ORIGIN_START_NR] = {0, "Kmsan Modules Origins Start"},
[KMSAN_MODULES_ORIGIN_END_NR] = {0, "Kmsan Modules Origins End"},
[KMSAN_VMALLOC_SHADOW_START_NR] = {1, 0, "Kmsan vmalloc Shadow Start"},
[KMSAN_VMALLOC_SHADOW_END_NR] = {0, 0, "Kmsan vmalloc Shadow End"},
[KMSAN_VMALLOC_ORIGIN_START_NR] = {1, 0, "Kmsan vmalloc Origins Start"},
[KMSAN_VMALLOC_ORIGIN_END_NR] = {0, 0, "Kmsan vmalloc Origins End"},
[KMSAN_MODULES_SHADOW_START_NR] = {1, 0, "Kmsan Modules Shadow Start"},
[KMSAN_MODULES_SHADOW_END_NR] = {0, 0, "Kmsan Modules Shadow End"},
[KMSAN_MODULES_ORIGIN_START_NR] = {1, 0, "Kmsan Modules Origins Start"},
[KMSAN_MODULES_ORIGIN_END_NR] = {0, 0, "Kmsan Modules Origins End"},
#endif
[MODULES_NR] = {0, "Modules Area Start"},
[MODULES_END_NR] = {0, "Modules Area End"},
[ABS_LOWCORE_NR] = {0, "Lowcore Area Start"},
[ABS_LOWCORE_END_NR] = {0, "Lowcore Area End"},
[MEMCPY_REAL_NR] = {0, "Real Memory Copy Area Start"},
[MEMCPY_REAL_END_NR] = {0, "Real Memory Copy Area End"},
[MODULES_NR] = {1, 0, "Modules Area Start"},
[MODULES_END_NR] = {0, 0, "Modules Area End"},
[ABS_LOWCORE_NR] = {1, 0, "Lowcore Area Start"},
[ABS_LOWCORE_END_NR] = {0, 0, "Lowcore Area End"},
[MEMCPY_REAL_NR] = {1, 0, "Real Memory Copy Area Start"},
[MEMCPY_REAL_END_NR] = {0, 0, "Real Memory Copy Area End"},
#ifdef CONFIG_KASAN
[KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"},
[KASAN_SHADOW_END_NR] = {KASAN_SHADOW_END, "Kasan Shadow End"},
[KASAN_SHADOW_START_NR] = {1, KASAN_SHADOW_START, "Kasan Shadow Start"},
[KASAN_SHADOW_END_NR] = {0, KASAN_SHADOW_END, "Kasan Shadow End"},
#endif
{ -1, NULL }
{1, -1UL, NULL}
};
struct pg_state {
@ -163,6 +167,19 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
static void note_page_update_state(struct pg_state *st, unsigned long addr, unsigned int prot, int level)
{
struct seq_file *m = st->seq;
while (addr >= st->marker[1].start_address) {
st->marker++;
pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
}
st->start_address = addr;
st->current_prot = prot;
st->level = level;
}
static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val)
{
int width = sizeof(unsigned long) * 2;
@ -186,9 +203,7 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
addr = max_addr;
if (st->level == -1) {
pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
st->start_address = addr;
st->current_prot = prot;
st->level = level;
note_page_update_state(st, addr, prot, level);
} else if (prot != st->current_prot || level != st->level ||
addr >= st->marker[1].start_address) {
note_prot_wx(st, addr);
@ -202,13 +217,7 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
}
pt_dump_seq_printf(m, "%9lu%c ", delta, *unit);
print_prot(m, st->current_prot, st->level);
while (addr >= st->marker[1].start_address) {
st->marker++;
pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
}
st->start_address = addr;
st->current_prot = prot;
st->level = level;
note_page_update_state(st, addr, prot, level);
}
}
@ -280,22 +289,25 @@ static int ptdump_show(struct seq_file *m, void *v)
DEFINE_SHOW_ATTRIBUTE(ptdump);
#endif /* CONFIG_PTDUMP_DEBUGFS */
/*
* Heapsort from lib/sort.c is not a stable sorting algorithm, do a simple
* insertion sort to preserve the original order of markers with the same
* start address.
*/
static void sort_address_markers(void)
static int ptdump_cmp(const void *a, const void *b)
{
struct addr_marker tmp;
int i, j;
const struct addr_marker *ama = a;
const struct addr_marker *amb = b;
for (i = 1; i < ARRAY_SIZE(address_markers) - 1; i++) {
tmp = address_markers[i];
for (j = i - 1; j >= 0 && address_markers[j].start_address > tmp.start_address; j--)
address_markers[j + 1] = address_markers[j];
address_markers[j + 1] = tmp;
}
if (ama->start_address > amb->start_address)
return 1;
if (ama->start_address < amb->start_address)
return -1;
/*
* If the start addresses of two markers are identical consider the
* marker which defines the start of an area higher than the one which
* defines the end of an area. This keeps pairs of markers sorted.
*/
if (ama->is_start)
return 1;
if (amb->is_start)
return -1;
return 0;
}
static int pt_dump_init(void)
@ -303,6 +315,8 @@ static int pt_dump_init(void)
#ifdef CONFIG_KFENCE
unsigned long kfence_start = (unsigned long)__kfence_pool;
#endif
unsigned long lowcore = (unsigned long)get_lowcore();
/*
* Figure out the maximum virtual address being accessible with the
* kernel ASCE. We need this to keep the page table walker functions
@ -310,7 +324,10 @@ static int pt_dump_init(void)
*/
max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
address_markers[LOWCORE_START_NR].start_address = lowcore;
address_markers[LOWCORE_END_NR].start_address = lowcore + sizeof(struct lowcore);
address_markers[IDENTITY_START_NR].start_address = __identity_base;
address_markers[IDENTITY_END_NR].start_address = __identity_base + ident_map_size;
address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31;
address_markers[MODULES_NR].start_address = MODULES_VADDR;
@ -337,7 +354,8 @@ static int pt_dump_init(void)
address_markers[KMSAN_MODULES_ORIGIN_START_NR].start_address = KMSAN_MODULES_ORIGIN_START;
address_markers[KMSAN_MODULES_ORIGIN_END_NR].start_address = KMSAN_MODULES_ORIGIN_END;
#endif
sort_address_markers();
sort(address_markers, ARRAY_SIZE(address_markers) - 1,
sizeof(address_markers[0]), ptdump_cmp, NULL);
#ifdef CONFIG_PTDUMP_DEBUGFS
debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
#endif /* CONFIG_PTDUMP_DEBUGFS */


@ -108,6 +108,8 @@ void mark_rodata_ro(void)
{
unsigned long size = __end_ro_after_init - __start_ro_after_init;
if (MACHINE_HAS_NX)
system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
__set_memory_ro(__start_ro_after_init, __end_ro_after_init);
pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}
@ -170,13 +172,6 @@ void __init mem_init(void)
setup_zero_pages(); /* Setup zeroed pages. */
}
void free_initmem(void)
{
set_memory_rwnx((unsigned long)_sinittext,
(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
free_initmem_default(POISON_FREE_INITMEM);
}
unsigned long memory_block_size_bytes(void)
{
/*


@ -661,7 +661,6 @@ void __init vmem_map_init(void)
{
__set_memory_rox(_stext, _etext);
__set_memory_ro(_etext, __end_rodata);
__set_memory_rox(_sinittext, _einittext);
__set_memory_rox(__stext_amode31, __etext_amode31);
/*
* If the BEAR-enhancement facility is not installed the first
@ -670,16 +669,8 @@ void __init vmem_map_init(void)
*/
if (!static_key_enabled(&cpu_has_bear))
set_memory_x(0, 1);
if (debug_pagealloc_enabled()) {
/*
* Use RELOC_HIDE() as long as __va(0) translates to NULL,
* since performing pointer arithmetic on a NULL pointer
* has undefined behavior and generates compiler warnings.
*/
__set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
}
if (MACHINE_HAS_NX)
system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
if (debug_pagealloc_enabled())
__set_memory_4k(__va(0), __va(0) + ident_map_size);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}


@ -71,7 +71,9 @@ static struct mconsole_command *mconsole_parse(struct mc_request *req)
return NULL;
}
#ifndef MIN
#define MIN(a,b) ((a)<(b) ? (a):(b))
#endif
#define STRINGX(x) #x
#define STRING(x) STRINGX(x)


@ -163,7 +163,7 @@ struct sev_config {
*/
use_cas : 1,
__reserved : 62;
__reserved : 61;
};
static struct sev_config sev_cfg __read_mostly;


@ -344,6 +344,7 @@
332 common statx sys_statx
333 common io_pgetevents sys_io_pgetevents
334 common rseq sys_rseq
335 common uretprobe sys_uretprobe
# don't use numbers 387 through 423, add new calls after the last
# 'common' entry
424 common pidfd_send_signal sys_pidfd_send_signal
@ -385,7 +386,6 @@
460 common lsm_set_self_attr sys_lsm_set_self_attr
461 common lsm_list_modules sys_lsm_list_modules
462 common mseal sys_mseal
467 common uretprobe sys_uretprobe
#
# Due to a historical design error, certain syscalls are numbered differently

View File

@ -1520,20 +1520,23 @@ static void x86_pmu_start(struct perf_event *event, int flags)
void perf_event_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
unsigned long *cntr_mask, *fixed_cntr_mask;
struct event_constraint *pebs_constraints;
struct cpu_hw_events *cpuc;
u64 pebs, debugctl;
int cpu = smp_processor_id();
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
unsigned long flags;
int idx;
int cpu, idx;
guard(irqsave)();
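/* With IRQs off, the CPU and its cpu_hw_events can no longer change under us. */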
cpu = smp_processor_id();
cpuc = &per_cpu(cpu_hw_events, cpu);
cntr_mask = hybrid(cpuc->pmu, cntr_mask);
fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
if (!*(u64 *)cntr_mask)
return;
local_irq_save(flags);
if (x86_pmu.version >= 2) {
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
@ -1577,7 +1580,6 @@ void perf_event_print_debug(void)
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
cpu, idx, pmc_count);
}
local_irq_restore(flags);
}
void x86_pmu_stop(struct perf_event *event, int flags)


@ -64,7 +64,7 @@
* perf code: 0x00
* Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
* KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
* RPL,SPR,MTL,ARL,LNL
* RPL,SPR,MTL,ARL,LNL,SRF
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
@ -693,7 +693,8 @@ static const struct cstate_model srf_cstates __initconst = {
.core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
BIT(PERF_CSTATE_CORE_C6_RES),
.pkg_events = BIT(PERF_CSTATE_PKG_C6_RES),
.pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
BIT(PERF_CSTATE_PKG_C6_RES),
.module_events = BIT(PERF_CSTATE_MODULE_C6_RES),
};


@ -2,6 +2,10 @@
#ifndef _ASM_X86_CMDLINE_H
#define _ASM_X86_CMDLINE_H
#include <asm/setup.h>
extern char builtin_cmdline[COMMAND_LINE_SIZE];
int cmdline_find_option_bool(const char *cmdline_ptr, const char *option);
int cmdline_find_option(const char *cmdline_ptr, const char *option,
char *buffer, int bufsize);


@ -1305,6 +1305,7 @@ struct kvm_arch {
u8 vm_type;
bool has_private_mem;
bool has_protected_state;
bool pre_fault_allowed;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
struct list_head active_mmu_pages;
struct list_head zapped_obsolete_pages;


@ -462,7 +462,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
switch (c->x86_model) {
case 0x00 ... 0x2f:
case 0x40 ... 0x4f:
case 0x70 ... 0x7f:
case 0x60 ... 0x7f:
setup_force_cpu_cap(X86_FEATURE_ZEN5);
break;
default:


@ -306,7 +306,7 @@ static void freq_invariance_enable(void)
WARN_ON_ONCE(1);
return;
}
static_branch_enable(&arch_scale_freq_key);
static_branch_enable_cpuslocked(&arch_scale_freq_key);
register_freq_invariance_syscore_ops();
pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
}
@ -323,8 +323,10 @@ static void __init bp_init_freq_invariance(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
if (intel_set_max_freq_ratio())
if (intel_set_max_freq_ratio()) {
guard(cpus_read_lock)();
freq_invariance_enable();
}
}
static void disable_freq_invariance_workfn(struct work_struct *work)


@ -164,7 +164,7 @@ unsigned long saved_video_mode;
static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
char builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
bool builtin_cmdline_added __ro_after_init;
#endif


@ -141,8 +141,8 @@ config KVM_AMD_SEV
depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
select ARCH_HAS_CC_PLATFORM
select KVM_GENERIC_PRIVATE_MEM
select HAVE_KVM_GMEM_PREPARE
select HAVE_KVM_GMEM_INVALIDATE
select HAVE_KVM_ARCH_GMEM_PREPARE
select HAVE_KVM_ARCH_GMEM_INVALIDATE
help
Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
with Encrypted State (SEV-ES) on AMD processors.


@ -1743,7 +1743,7 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
s64 min_period = min_timer_period_us * 1000LL;
if (apic->lapic_timer.period < min_period) {
pr_info_ratelimited(
pr_info_once(
"vcpu %i: requested %lld ns "
"lapic timer period limited to %lld ns\n",
apic->vcpu->vcpu_id,


@ -4335,7 +4335,7 @@ static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
if (req_max_level)
max_level = min(max_level, req_max_level);
return req_max_level;
return max_level;
}
static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
@ -4743,6 +4743,9 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
u64 end;
int r;
if (!vcpu->kvm->arch.pre_fault_allowed)
return -EOPNOTSUPP;
/*
* reload is efficient when called repeatedly, so we can do it on
* every iteration.
@ -7510,7 +7513,7 @@ static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
if (level == PG_LEVEL_2M)
return kvm_range_has_memory_attributes(kvm, start, end, attrs);
return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs);
for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
if (hugepage_test_mixed(slot, gfn, level - 1) ||


@ -2279,18 +2279,11 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
bool assigned;
int level;
if (!kvm_mem_is_private(kvm, gfn)) {
pr_debug("%s: Failed to ensure GFN 0x%llx has private memory attribute set\n",
__func__, gfn);
ret = -EINVAL;
goto err;
}
ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
if (ret || assigned) {
pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",
__func__, gfn, ret, assigned);
ret = -EINVAL;
ret = ret ? -EINVAL : -EEXIST;
goto err;
}
@ -2549,6 +2542,14 @@ static int snp_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
data->gctx_paddr = __psp_pa(sev->snp_context);
ret = sev_issue_cmd(kvm, SEV_CMD_SNP_LAUNCH_FINISH, data, &argp->error);
/*
* Now that there will be no more SNP_LAUNCH_UPDATE ioctls, private pages
* can be given to the guest simply by marking the RMP entry as private.
* This can happen on first access and also with KVM_PRE_FAULT_MEMORY.
*/
if (!ret)
kvm->arch.pre_fault_allowed = true;
kfree(id_auth);
e_free_id_block:


@ -4949,6 +4949,7 @@ static int svm_vm_init(struct kvm *kvm)
to_kvm_sev_info(kvm)->need_init = true;
kvm->arch.has_private_mem = (type == KVM_X86_SNP_VM);
kvm->arch.pre_fault_allowed = !kvm->arch.has_private_mem;
}
if (!pause_filter_count || !pause_filter_thresh)


@ -12646,6 +12646,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.vm_type = type;
kvm->arch.has_private_mem =
(type == KVM_X86_SW_PROTECTED_VM);
/* Decided by the vendor code for other VM types. */
kvm->arch.pre_fault_allowed =
type == KVM_X86_DEFAULT_VM || type == KVM_X86_SW_PROTECTED_VM;
ret = kvm_page_track_init(kvm);
if (ret)
@ -13641,19 +13644,14 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
#ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
bool kvm_arch_gmem_prepare_needed(struct kvm *kvm)
{
return kvm->arch.vm_type == KVM_X86_SNP_VM;
}
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
{
return kvm_x86_call(gmem_prepare)(kvm, pfn, gfn, max_order);
}
#endif
#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
{
kvm_x86_call(gmem_invalidate)(start, end);


@ -207,18 +207,29 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
int cmdline_find_option_bool(const char *cmdline, const char *option)
{
if (IS_ENABLED(CONFIG_CMDLINE_BOOL))
WARN_ON_ONCE(!builtin_cmdline_added);
int ret;
return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
ret = __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
if (ret > 0)
return ret;
if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && !builtin_cmdline_added)
return __cmdline_find_option_bool(builtin_cmdline, COMMAND_LINE_SIZE, option);
return ret;
}
int cmdline_find_option(const char *cmdline, const char *option, char *buffer,
int bufsize)
{
if (IS_ENABLED(CONFIG_CMDLINE_BOOL))
WARN_ON_ONCE(!builtin_cmdline_added);
int ret;
return __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option,
buffer, bufsize);
ret = __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option, buffer, bufsize);
if (ret > 0)
return ret;
if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && !builtin_cmdline_added)
return __cmdline_find_option(builtin_cmdline, COMMAND_LINE_SIZE, option, buffer, bufsize);
return ret;
}
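A hedged usage sketch of the new fallback (the option name is illustrative):
with CONFIG_CMDLINE_BOOL=y, a lookup performed before builtin_cmdline has
been appended to boot_command_line now also consults the built-in string:

	if (cmdline_find_option_bool(boot_command_line, "nosmt"))
		pr_info("nosmt present on the effective command line\n");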


@ -88,12 +88,14 @@ SYM_FUNC_END(__get_user_4)
EXPORT_SYMBOL(__get_user_4)
SYM_FUNC_START(__get_user_8)
#ifndef CONFIG_X86_64
xor %ecx,%ecx
#endif
check_range size=8
ASM_STAC
#ifdef CONFIG_X86_64
UACCESS movq (%_ASM_AX),%rdx
#else
xor %ecx,%ecx
UACCESS movl (%_ASM_AX),%edx
UACCESS movl 4(%_ASM_AX),%ecx
#endif


@ -374,14 +374,14 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
*/
*target_pmd = *pmd;
addr += PMD_SIZE;
addr = round_up(addr + 1, PMD_SIZE);
} else if (level == PTI_CLONE_PTE) {
/* Walk the page-table down to the pte level */
pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte)) {
addr += PAGE_SIZE;
addr = round_up(addr + 1, PAGE_SIZE);
continue;
}
@ -401,7 +401,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
/* Clone the PTE */
*target_pte = *pte;
addr += PAGE_SIZE;
addr = round_up(addr + 1, PAGE_SIZE);
} else {
BUG();
@ -496,7 +496,7 @@ static void pti_clone_entry_text(void)
{
pti_clone_pgtable((unsigned long) __entry_text_start,
(unsigned long) __entry_text_end,
PTI_CLONE_PMD);
PTI_LEVEL_KERNEL_IMAGE);
}
/*


@ -413,6 +413,7 @@ config BT_ATH3K
config BT_MTKSDIO
tristate "MediaTek HCI SDIO driver"
depends on MMC
depends on USB || !BT_HCIBTUSB_MTK
select BT_MTK
help
MediaTek Bluetooth HCI SDIO driver.
@ -425,6 +426,7 @@ config BT_MTKSDIO
config BT_MTKUART
tristate "MediaTek HCI UART driver"
depends on SERIAL_DEV_BUS
depends on USB || !BT_HCIBTUSB_MTK
select BT_MTK
help
MediaTek Bluetooth HCI UART driver.
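(The added "depends on USB || !BT_HCIBTUSB_MTK" lines follow the usual
Kconfig pattern for an optional dependency: when BT_HCIBTUSB_MTK pulls the
USB transport code into the shared btmtk module, these drivers cannot be
built-in while USB support is modular.)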


@ -3085,6 +3085,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
btintel_set_dsm_reset_method(hdev, &ver_tlv);
err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
if (err)
goto exit_error;
btintel_register_devcoredump_support(hdev);
btintel_print_fseq_info(hdev);
break;


@ -437,6 +437,7 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(btmtk_process_coredump);
#if IS_ENABLED(CONFIG_BT_HCIBTUSB_MTK)
static void btmtk_usb_wmt_recv(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
@ -1262,7 +1263,8 @@ int btmtk_usb_suspend(struct hci_dev *hdev)
struct btmtk_data *btmtk_data = hci_get_priv(hdev);
/* Stop urb anchor for iso data transmission */
usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
return 0;
}
@ -1487,6 +1489,7 @@ int btmtk_usb_shutdown(struct hci_dev *hdev)
return 0;
}
EXPORT_SYMBOL_GPL(btmtk_usb_shutdown);
#endif
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Mark Chen <mark-yw.chen@mediatek.com>");


@ -18,6 +18,7 @@ config STARFIVE_STARLINK_CACHE
bool "StarFive StarLink Cache controller"
depends on RISCV
depends on ARCH_STARFIVE
depends on 64BIT
select RISCV_DMA_NONCOHERENT
select RISCV_NONSTANDARD_CACHE_OPS
help


@ -45,7 +45,6 @@
#define I10NM_NUM_CHANNELS MAX(I10NM_NUM_DDR_CHANNELS, I10NM_NUM_HBM_CHANNELS)
#define I10NM_NUM_DIMMS MAX(I10NM_NUM_DDR_DIMMS, I10NM_NUM_HBM_DIMMS)
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define NUM_IMC MAX(SKX_NUM_IMC, I10NM_NUM_IMC)
#define NUM_CHANNELS MAX(SKX_NUM_CHANNELS, I10NM_NUM_CHANNELS)
#define NUM_DIMMS MAX(SKX_NUM_DIMMS, I10NM_NUM_DIMMS)


@ -27,7 +27,8 @@ cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_STACKLEAK_PLUGIN) \
cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
-DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
-DEFI_HAVE_STRCMP -fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
$(call cc-option,-mno-single-pic-base) \
$(DISABLE_STACKLEAK_PLUGIN)
cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax \
$(DISABLE_STACKLEAK_PLUGIN)
cflags-$(CONFIG_LOONGARCH) += -fpie
@ -57,6 +58,10 @@ KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
# disable LTO
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
# The .data section would be renamed to .data.efistub, therefore, remove
# `-fdata-sections` flag from KBUILD_CFLAGS_KERNEL
KBUILD_CFLAGS_KERNEL := $(filter-out -fdata-sections, $(KBUILD_CFLAGS_KERNEL))
lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
file.o mem.o random.o randomalloc.o pci.o \
skip_spaces.o lib-cmdline.o lib-ctype.o \


@ -268,6 +268,7 @@ config DRM_EXEC
config DRM_GPUVM
tristate
depends on DRM
select DRM_EXEC
help
GPU-VM representation providing helpers to manage a GPU's virtual
address space

View File

@ -1778,7 +1778,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
int r;
int i, r;
addr /= AMDGPU_GPU_PAGE_SIZE;
@ -1793,13 +1793,13 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
return -EINVAL;
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
if (r)
return r;
}
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
for (i = 0; i < (*bo)->placement.num_placement; i++)
(*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
if (r)
return r;
return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}

View File

@ -103,7 +103,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
if (!amdgpu_mes_log_enable)
return 0;
r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
@ -113,7 +113,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
return r;
}
memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);
memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);
return 0;
@ -1573,7 +1573,7 @@ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);
mem, adev->mes.event_log_size, false);
return 0;
}
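
Both memset() and seq_hex_dump() now use the per-IP event_log_size instead of the global AMDGPU_MES_LOG_BUFFER_SIZE, since MES 11 and MES 12 size their logs differently (0x4000 vs 0xC000 bytes, per the header hunks further below). A reduced sketch of that shape, with illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mes_like {
	unsigned int event_log_size;	/* set per IP version before init */
	void *event_log_cpu_addr;
};

static int common_log_init(struct mes_like *m)
{
	m->event_log_cpu_addr = malloc(m->event_log_size);
	if (!m->event_log_cpu_addr)
		return -1;
	/* clear exactly the per-variant size, not a global constant */
	memset(m->event_log_cpu_addr, 0, m->event_log_size);
	return 0;
}

int main(void)
{
	struct mes_like v11 = { .event_log_size = 0x4000 };	/* MES 11 */
	struct mes_like v12 = { .event_log_size = 0xC000 };	/* MES 12 */

	if (common_log_init(&v11) || common_log_init(&v12))
		return 1;
	printf("logs sized per IP version\n");
	free(v11.event_log_cpu_addr);
	free(v12.event_log_cpu_addr);
	return 0;
}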

View File

@ -52,7 +52,6 @@ enum amdgpu_mes_priority_level {
#define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximum log buffer size for MES */
struct amdgpu_mes_funcs;
@ -135,8 +134,9 @@ struct amdgpu_mes {
unsigned long *doorbell_bitmap;
/* MES event log buffer */
struct amdgpu_bo *event_log_gpu_obj;
uint64_t event_log_gpu_addr;
uint32_t event_log_size;
struct amdgpu_bo *event_log_gpu_obj;
uint64_t event_log_gpu_addr;
void *event_log_cpu_addr;
/* ip specific functions */

View File

@ -1163,6 +1163,8 @@ static int mes_v11_0_sw_init(void *handle)
adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini;
adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
r = amdgpu_mes_init(adev);
if (r)
return r;

View File

@ -551,8 +551,10 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.oversubscription_timer = 50;
mes_set_hw_res_pkt.unmapped_doorbell_handling = 1;
mes_set_hw_res_pkt.enable_mes_event_int_logging = 0;
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
}
return mes_v12_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
@ -1237,6 +1239,8 @@ static int mes_v12_0_sw_init(void *handle)
adev->mes.kiq_hw_init = &mes_v12_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
r = amdgpu_mes_init(adev);
if (r)
return r;

View File

@ -35,8 +35,10 @@
#include "dc_stream_priv.h"
#define DC_LOGGER dc->ctx->logger
#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#define MAX(x, y) ((x > y) ? x : y)
#endif
/*******************************************************************************
* Private functions

View File

@ -25,7 +25,9 @@
#include "hdcp.h"
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
#define HDCP_I2C_ADDR 0x3a /* 0x74 >> 1*/
#define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */
#define HDCP_MAX_AUX_TRANSACTION_SIZE 16

View File

@ -28,6 +28,9 @@
#define MES_API_VERSION 1
/* Maximum log buffer size for MES. Needs to be updated if MES expands MES_EVT_INTR_HIST_LOG */
#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000
/* Driver submits one API (cmd) as a single Frame, and this command size is the
* same for all APIs to ease debugging and parsing of the ring buffer.
*/

View File

@ -28,6 +28,9 @@
#define MES_API_VERSION 0x14
/* Maximum log buffer size for MES. Needs to be updated if MES expands MES_EVT_INTR_HIST_LOG_12 */
#define AMDGPU_MES_LOG_BUFFER_SIZE 0xC000
/* Driver submits one API (cmd) as a single Frame, and this command size is the same for all APIs
* to ease debugging and parsing of the ring buffer.
*/

View File

@ -618,7 +618,8 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int r = 0;
if (!pp_funcs || !pp_funcs->load_firmware || adev->flags & AMD_IS_APU)
if (!pp_funcs || !pp_funcs->load_firmware ||
(is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
return 0;
mutex_lock(&adev->pm.mutex);
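
The tightened condition changes which devices skip SMU firmware loading: previously every APU returned early, now only APUs on the software-SMU path do, so legacy powerplay APUs load firmware again. A small truth-table-style restatement, with made-up helper names:

#include <stdbool.h>
#include <stdio.h>

/* skip_old/skip_new restate the old and new guard; names are illustrative */
static bool skip_old(bool is_apu, bool sw_smu)
{
	(void)sw_smu;		/* old code ignored the SMU path entirely */
	return is_apu;
}

static bool skip_new(bool is_apu, bool sw_smu)
{
	return sw_smu && is_apu;	/* skip only software-SMU APUs */
}

int main(void)
{
	/* legacy powerplay APU: load was skipped before, now it runs */
	printf("legacy APU skip: old=%d new=%d\n",
	       skip_old(true, false), skip_new(true, false));
	/* software-SMU APU: still skipped in both versions */
	printf("sw-SMU APU skip: old=%d new=%d\n",
	       skip_old(true, true), skip_new(true, true));
	return 0;
}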

View File

@ -22,12 +22,18 @@
*/
#include <asm/div64.h>
#define SHIFT_AMOUNT 16 /* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */
enum ppevvmath_constants {
/* We multiply all original integers with 2^SHIFT_AMOUNT to get the fInt representation */
SHIFT_AMOUNT = 16,
#define PRECISION 5 /* Change this value to change the number of decimal places in the final output - 5 is a good default */
/* Change this value to change the number of decimal places in the final output - 5 is a good default */
PRECISION = 5,
#define SHIFTED_2 (2 << SHIFT_AMOUNT)
#define MAX (1 << (SHIFT_AMOUNT - 1)) - 1 /* 32767 - Might change in the future */
SHIFTED_2 = (2 << SHIFT_AMOUNT),
/* 32767 - Might change in the future */
MAX = (1 << (SHIFT_AMOUNT - 1)) - 1,
};
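
Beyond being the tidier form, the enum constants describe a Q16 fixed-point format: an integer n is represented as n << SHIFT_AMOUNT. A tiny standalone illustration of that encoding (the real fInt type and its helpers live in ppevvmath.h):

#include <stdio.h>

enum { SHIFT_AMOUNT = 16 };	/* Q16: scale integers by 2^16 */

int main(void)
{
	int three = 3 << SHIFT_AMOUNT;		/* 3.0 -> 196608 */
	int half = 1 << (SHIFT_AMOUNT - 1);	/* 0.5 -> 32768 */

	printf("3.5 in Q16 = %d\n", three + half);
	printf("back to int (truncating) = %d\n",
	       (three + half) >> SHIFT_AMOUNT);	/* 3 */
	return 0;
}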
/* -------------------------------------------------------------------------------
* NEW TYPE - fINT

View File

@ -66,6 +66,7 @@
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
#define DEBUGSMC_MSG_Mode1Reset 2
#define LINK_SPEED_MAX 3
static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
@ -221,7 +222,6 @@ static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COU
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT),
};
#if 0
static const uint8_t smu_v14_0_2_throttler_map[] = {
[THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
[THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
@ -241,7 +241,6 @@ static const uint8_t smu_v14_0_2_throttler_map[] = {
[THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT),
[THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
};
#endif
static int
smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu,
@ -1869,6 +1868,88 @@ static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu,
return ret;
}
static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v1_3 *gpu_metrics =
(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
SmuMetricsExternal_t metrics_ext;
SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
int ret = 0;
ret = smu_cmn_get_metrics_table(smu,
&metrics_ext,
true);
if (ret)
return ret;
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0],
metrics->AvgTemperature[TEMP_VR_MEM1]);
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);
gpu_metrics->average_socket_power = metrics->AverageSocketPower;
gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
else
gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
else
gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;
gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_0];
gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_0];
gpu_metrics->throttle_status =
smu_v14_0_2_get_throttler_status(metrics);
gpu_metrics->indep_throttle_status =
smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
smu_v14_0_2_throttler_map);
gpu_metrics->current_fan_speed = metrics->AvgFanRpm;
gpu_metrics->pcie_link_width = metrics->PcieWidth;
if ((metrics->PcieRate - 1) > LINK_SPEED_MAX)
gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1);
else
gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate);
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_VDD_SOC];
gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VDDIO_MEM];
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v1_3);
}
static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
.set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
@ -1905,6 +1986,7 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.enable_thermal_alert = smu_v14_0_enable_thermal_alert,
.disable_thermal_alert = smu_v14_0_disable_thermal_alert,
.notify_memory_pool_location = smu_v14_0_notify_memory_pool_location,
.get_gpu_metrics = smu_v14_0_2_get_gpu_metrics,
.set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range,
.init_pptable_microcode = smu_v14_0_init_pptable_microcode,
.populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk,
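
With the throttler map no longer compiled out, the new smu_v14_0_2_get_gpu_metrics() is hooked into the per-ASIC function table so the common SMU code can service metrics queries. A reduced sketch of that registration pattern, with illustrative types rather than the real pptable_funcs layout:

#include <stddef.h>
#include <stdio.h>

struct metrics_ops {
	long (*get_gpu_metrics)(void **table);	/* returns payload size */
};

static long fake_get_gpu_metrics(void **table)
{
	static int metrics[4] = { 1, 2, 3, 4 };	/* placeholder payload */

	*table = metrics;
	return sizeof(metrics);
}

static const struct metrics_ops ops = {
	.get_gpu_metrics = fake_get_gpu_metrics,	/* the hook-up step */
};

int main(void)
{
	void *tbl = NULL;
	long n = ops.get_gpu_metrics(&tbl);

	printf("got %ld bytes of metrics at %p\n", n, tbl);
	return 0;
}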

View File

@ -794,7 +794,7 @@ static const char *smu_get_feature_name(struct smu_context *smu,
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
char *buf)
{
int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
uint64_t feature_mask;
int i, feature_index;
uint32_t count = 0;
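
The switch from max() to MAX() is about constant expressions: the kernel's lowercase max() expands to a statement expression with temporaries, which cannot size an array, while a plain conditional macro can. A minimal illustration, using a simplified MAX() and made-up feature counts:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))	/* constant-expression safe */

enum { FEATURE_COUNT = 8, FEATURE_MAX = 12 };	/* illustrative values */

static int sort_feature[MAX(FEATURE_COUNT, FEATURE_MAX)];	/* OK at file scope */

int main(void)
{
	printf("array holds %zu entries\n",
	       sizeof(sort_feature) / sizeof(sort_feature[0]));	/* 12 */
	return 0;
}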

View File

@ -158,7 +158,14 @@ void ast_dp_launch(struct drm_device *dev)
ASTDP_HOST_EDID_READ_DONE);
}
bool ast_dp_power_is_on(struct ast_device *ast)
{
u8 vgacre3;
vgacre3 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xe3);
return !(vgacre3 & AST_DP_PHY_SLEEP);
}
void ast_dp_power_on_off(struct drm_device *dev, bool on)
{

View File

@ -391,6 +391,11 @@ static int ast_drm_freeze(struct drm_device *dev)
static int ast_drm_thaw(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
ast_enable_vga(ast->ioregs);
ast_open_key(ast->ioregs);
ast_enable_mmio(dev->dev, ast->ioregs);
ast_post_gpu(dev);
return drm_mode_config_helper_resume(dev);

View File

@ -472,6 +472,7 @@ void ast_init_3rdtx(struct drm_device *dev);
bool ast_astdp_is_connected(struct ast_device *ast);
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
void ast_dp_launch(struct drm_device *dev);
bool ast_dp_power_is_on(struct ast_device *ast);
void ast_dp_power_on_off(struct drm_device *dev, bool no);
void ast_dp_set_on_off(struct drm_device *dev, bool no);
void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode);

View File

@ -28,6 +28,7 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/pci.h>
@ -1687,11 +1688,35 @@ static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
struct drm_device *dev = connector->dev;
struct ast_device *ast = to_ast_device(connector->dev);
enum drm_connector_status status = connector_status_disconnected;
struct drm_connector_state *connector_state = connector->state;
bool is_active = false;
mutex_lock(&ast->modeset_lock);
if (connector_state && connector_state->crtc) {
struct drm_crtc_state *crtc_state = connector_state->crtc->state;
if (crtc_state && crtc_state->active)
is_active = true;
}
if (!is_active && !ast_dp_power_is_on(ast)) {
ast_dp_power_on_off(dev, true);
msleep(50);
}
if (ast_astdp_is_connected(ast))
return connector_status_connected;
return connector_status_disconnected;
status = connector_status_connected;
if (!is_active && status == connector_status_disconnected)
ast_dp_power_on_off(dev, false);
mutex_unlock(&ast->modeset_lock);
return status;
}
static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {

View File

@ -1070,7 +1070,10 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
if (async_flip && prop != config->prop_fb_id) {
if (async_flip &&
prop != config->prop_fb_id &&
prop != config->prop_in_fence_fd &&
prop != config->prop_fb_damage_clips) {
ret = drm_atomic_plane_get_property(plane, plane_state,
prop, &old_val);
ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
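
The relaxed check turns the single FB_ID exception into a small allow-list: IN_FENCE_FD and FB_DAMAGE_CLIPS may now also change during an async page flip, while every other plane property still may not. A sketch of that filter as an allow-list, with an illustrative enum rather than the DRM property machinery:

#include <stdbool.h>
#include <stdio.h>

enum prop { PROP_FB_ID, PROP_IN_FENCE_FD, PROP_FB_DAMAGE_CLIPS, PROP_ZPOS };

static bool async_flip_may_change(enum prop p)
{
	switch (p) {
	case PROP_FB_ID:		/* previously the only exception */
	case PROP_IN_FENCE_FD:		/* newly allowed */
	case PROP_FB_DAMAGE_CLIPS:	/* newly allowed */
		return true;
	default:
		return false;		/* must stay unchanged during async flip */
	}
}

int main(void)
{
	printf("FB_ID: %d, ZPOS: %d\n",
	       async_flip_may_change(PROP_FB_ID),
	       async_flip_may_change(PROP_ZPOS));	/* 1, 0 */
	return 0;
}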

View File

@ -355,7 +355,7 @@ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
err_drm_gem_vmap_unlocked:
drm_gem_unlock(gem);
return 0;
return ret;
}
EXPORT_SYMBOL(drm_client_buffer_vmap_local);

View File

@ -624,6 +624,17 @@ static void drm_fb_helper_add_damage_clip(struct drm_fb_helper *helper, u32 x, u
static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
u32 width, u32 height)
{
/*
* This function may be invoked by panic() to flush the frame
* buffer, where all CPUs except the panic CPU are stopped.
* During the following schedule_work(), the panic CPU needs
* the worker_pool lock, which might be held by a stopped CPU,
* causing schedule_work() and panic() to block. Return early on
* oops_in_progress to prevent this blocking.
*/
if (oops_in_progress)
return;
drm_fb_helper_add_damage_clip(helper, x, y, width, height);
schedule_work(&helper->damage_work);

View File

@ -414,6 +414,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
},
.driver_data = (void *)&lcd1600x2560_leftside_up,
}, { /* OrangePi Neo */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "NEO-01"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Samsung GalaxyBook 10.6 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),

View File

@ -1658,7 +1658,7 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
}
static int
skl_ddi_calculate_wrpll(int clock /* in Hz */,
skl_ddi_calculate_wrpll(int clock,
int ref_clock,
struct skl_wrpll_params *wrpll_params)
{
@ -1683,7 +1683,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
};
unsigned int dco, d, i;
unsigned int p0, p1, p2;
u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
for (d = 0; d < ARRAY_SIZE(dividers); d++) {
for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
@ -1808,7 +1808,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
struct skl_wrpll_params wrpll_params = {};
int ret;
ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
i915->display.dpll.ref_clks.nssc, &wrpll_params);
if (ret)
return ret;
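
Keeping the clock in kHz and widening before the multiply avoids a 32-bit overflow: the AFE clock is 5x the pixel clock, and computing that in Hz as an int overflows for anything above INT_MAX / 5, roughly 429 MHz. A worked example, assuming an illustrative 600 MHz port clock:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int clock_khz = 600000;	/* 600 MHz pixel clock, illustrative */

	/* Old path: the caller passed Hz and the 5x scaling happened in
	 * 32-bit int before widening, overflowing above ~429 MHz.
	 */
	long long hz = (long long)clock_khz * 1000;
	printf("5 x %lld Hz fits in int? %s\n", hz,
	       5 * hz <= INT_MAX ? "yes" : "no (overflow in old code)");

	/* New path: keep kHz, widen to u64 first, then scale to Hz x 5. */
	uint64_t afe = (uint64_t)clock_khz * 1000 * 5;
	printf("AFE clock = %llu Hz\n", (unsigned long long)afe);
	return 0;
}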

View File

@ -251,7 +251,7 @@
#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_STREAM_STATUS(trans) : \
PIPE_HDCP2_STREAM_STATUS(pipe))
PIPE_HDCP2_STREAM_STATUS(port))
#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
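
The one-character fix is a classic macro-hygiene bug: the macro parameter is named port, but the expansion referenced pipe, so it compiled only when some pipe variable happened to be in scope at the call site and then silently selected the wrong register. A distilled illustration:

#include <stdio.h>

#define STATUS_BAD(port)  (pipe + 1)	/* bug: binds the caller's `pipe` */
#define STATUS_GOOD(port) ((port) + 1)	/* uses the actual argument */

int main(void)
{
	int pipe = 100, port = 7;

	printf("bad=%d good=%d\n", STATUS_BAD(port), STATUS_GOOD(port));
	/* prints bad=101 (wrong value picked up silently), good=8 */
	return 0;
}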
