mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-04 04:02:26 +00:00
Misc cleanups, including a large series from Thomas Gleixner to
cure sparse warnings.

Signed-off-by: Ingo Molnar <mingo@kernel.org>

-----BEGIN PGP SIGNATURE-----

iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmXvAFQRHG1pbmdvQGtl
cm5lbC5vcmcACgkQEnMQ0APhK1hkDRAAwASVCQ88kiGqNQtHibXlK54mAFGsc0xv
T8OPds15DUzoLg/y8lw0X0DHly6MdGXVmygybejNIw2BN4lhLjQ7f4Ria7rv7LDy
FcI1jfvysEMyYRFHGRefb/GBFzuEfKoROwf+QylGmKz0ZK674gNMngsI9pwOBdbe
wElq3IkHoNuTUfH9QA4BvqGam1n122nvVTop3g0PMHWzx9ky8hd/BEUjXFZhfINL
zZk3fwUbER2QYbhHt+BN2GRbdf2BrKvqTkXpKxyXTdnpiqAo0CzBGKerZ62H82qG
n737Nib1lrsfM5yDHySnau02aamRXaGvCJUd6gpac1ZmNpZMWhEOT/0Tr/Nj5ztF
lUAvKqMZn/CwwQky1/XxD0LHegnve0G+syqQt/7x7o1ELdiwTzOWMCx016UeodzB
yyHf3Xx9J8nt3snlrlZBaGEfegg9ePLu5Vir7iXjg3vrloUW8A+GZM62NVxF4HVV
QWF80BfWf8zbLQ/OS1382t1shaioIe5pEXzIjcnyVIZCiiP2/5kP2O6P4XVbwVlo
Ca5eEt8U1rtsLUZaCzI2ZRTQf/8SLMQWyaV+ZmkVwcVdFoARC31EgdE5wYYoZOf6
7Vl+rXd+rZCuTWk0ZgznCZEm75aaqukaQCBa2V8hIVociLFVzhg/Tjedv7s0CspA
hNfxdN1LDZc=
=0eJ7
-----END PGP SIGNATURE-----

Merge tag 'x86-cleanups-2024-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cleanups from Ingo Molnar:
 "Misc cleanups, including a large series from Thomas Gleixner to
  cure sparse warnings"

* tag 'x86-cleanups-2024-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/nmi: Drop unused declaration of proc_nmi_enabled()
  x86/callthunks: Use EXPORT_PER_CPU_SYMBOL_GPL() for per CPU variables
  x86/cpu: Provide a declaration for itlb_multihit_kvm_mitigation
  x86/cpu: Use EXPORT_PER_CPU_SYMBOL_GPL() for x86_spec_ctrl_current
  x86/uaccess: Add missing __force to casts in __access_ok() and valid_user_address()
  x86/percpu: Cure per CPU madness on UP
  smp: Consolidate smp_prepare_boot_cpu()
  x86/msr: Add missing __percpu annotations
  x86/msr: Prepare for including <linux/percpu.h> into <asm/msr.h>
  perf/x86/amd/uncore: Fix __percpu annotation
  x86/nmi: Remove an unnecessary IS_ENABLED(CONFIG_SMP)
  x86/apm_32: Remove dead function apm_get_battery_status()
  x86/insn-eval: Fix function param name in get_eff_addr_sib()
commit fcc196579a
@@ -467,11 +467,6 @@ smp_prepare_cpus(unsigned int max_cpus)
 	smp_num_cpus = smp_num_probed;
 }
 
-void
-smp_prepare_boot_cpu(void)
-{
-}
-
 int
 __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
@@ -39,11 +39,6 @@ struct plat_smp_ops __weak plat_smp_ops;
 /* XXX: per cpu ? Only needed once in early secondary boot */
 struct task_struct *secondary_idle_tsk;
 
-/* Called from start_kernel */
-void __init smp_prepare_boot_cpu(void)
-{
-}
-
 static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
 {
 	unsigned long dt_root = of_get_flat_dt_root();
@@ -152,10 +152,6 @@ void arch_irq_work_raise(void)
 }
 #endif
 
-void __init smp_prepare_boot_cpu(void)
-{
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 }
@@ -114,10 +114,6 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 	local_irq_restore(flags);
 }
 
-void __init smp_prepare_boot_cpu(void)
-{
-}
-
 /*
  * interrupts should already be disabled from the VM
  * SP should already be correct; need to set THREADINFO_REG
@@ -57,10 +57,6 @@ static void boot_secondary(unsigned int cpu, struct task_struct *idle)
 	spin_unlock(&boot_lock);
 }
 
-void __init smp_prepare_boot_cpu(void)
-{
-}
-
 void __init smp_init_cpus(void)
 {
 	struct device_node *cpu;
@@ -42,10 +42,6 @@
 
 static DECLARE_COMPLETION(cpu_running);
 
-void __init smp_prepare_boot_cpu(void)
-{
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	int cpuid;
@@ -1206,10 +1206,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 }
 
-void smp_prepare_boot_cpu(void)
-{
-}
-
 void __init smp_setup_processor_id(void)
 {
 	if (tlb_type == spitfire)
@@ -71,7 +71,7 @@ union amd_uncore_info {
 };
 
 struct amd_uncore {
-	union amd_uncore_info * __percpu info;
+	union amd_uncore_info __percpu *info;
 	struct amd_uncore_pmu *pmus;
 	unsigned int num_pmus;
 	bool init_done;
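The hunk above moves __percpu to its conventional position: it annotates the pointer itself, marking it as a per-CPU cookie rather than a directly dereferenceable address, so sparse warns if the pointer is used without going through per_cpu_ptr()/this_cpu_ptr(). A hedged sketch of the usual pattern follows; demo_stats and demo_init() are made-up names, not part of the uncore driver.

	#include <linux/percpu.h>
	#include <linux/types.h>
	#include <linux/errno.h>

	/* Illustrative only -- not part of the uncore driver. */
	struct demo_stats {
		u64 events;
	};

	static struct demo_stats __percpu *demo_stats;

	static int demo_init(void)
	{
		struct demo_stats *st;

		demo_stats = alloc_percpu(struct demo_stats);	/* returns a __percpu pointer */
		if (!demo_stats)
			return -ENOMEM;

		/* A direct demo_stats->events dereference would draw a sparse warning;
		 * the accessors resolve the cookie to this CPU's instance. */
		st = get_cpu_ptr(demo_stats);	/* also disables preemption */
		st->events++;
		put_cpu_ptr(demo_stats);

		return 0;
	}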
@@ -17,6 +17,7 @@
 #include <linux/kvm_host.h>
 
 #include <asm/cpufeature.h>
+#include <asm/debugreg.h>
 #include <asm/hardirq.h>
 #include <asm/intel-family.h>
 #include <asm/intel_pt.h>
@@ -5,6 +5,7 @@
 #include <linux/sched/clock.h>
 
 #include <asm/cpu_entry_area.h>
+#include <asm/debugreg.h>
 #include <asm/perf_event.h>
 #include <asm/tlbflush.h>
 #include <asm/insn.h>
@@ -5,7 +5,9 @@
 #include <linux/bug.h>
+#include <linux/percpu.h>
 #include <uapi/asm/debugreg.h>
 
 #include <asm/cpufeature.h>
+#include <asm/msr.h>
 
 DECLARE_PER_CPU(unsigned long, cpu_dr7);
 
@@ -159,4 +161,26 @@ static inline unsigned long amd_get_dr_addr_mask(unsigned int dr)
 }
 #endif
 
+static inline unsigned long get_debugctlmsr(void)
+{
+	unsigned long debugctlmsr = 0;
+
+#ifndef CONFIG_X86_DEBUGCTLMSR
+	if (boot_cpu_data.x86 < 6)
+		return 0;
+#endif
+	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+
+	return debugctlmsr;
+}
+
+static inline void update_debugctlmsr(unsigned long debugctlmsr)
+{
+#ifndef CONFIG_X86_DEBUGCTLMSR
+	if (boot_cpu_data.x86 < 6)
+		return;
+#endif
+	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+}
+
 #endif /* _ASM_X86_DEBUGREG_H */
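get_debugctlmsr() and update_debugctlmsr() move here essentially verbatim from <asm/processor.h> (they are deleted from there in a later hunk). With their MSR accesses gone, <asm/processor.h> can stop including <asm/msr.h>, which in turn is what allows <asm/msr.h> to pull in <linux/percpu.h> without an include cycle. A hedged sketch of a typical caller, along the lines of what arch/x86/kernel/step.c does for block-stepping; demo_enable_branch_trap() is a made-up name, while DEBUGCTLMSR_BTF and the two helpers are real kernel definitions.

	#include <linux/types.h>
	#include <asm/debugreg.h>
	#include <asm/msr-index.h>

	/* Illustrative caller only. */
	static void demo_enable_branch_trap(bool on)
	{
		unsigned long debugctl = get_debugctlmsr();

		if (on)
			debugctl |= DEBUGCTLMSR_BTF;	/* #DB on branches, not every instruction */
		else
			debugctl &= ~DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}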
@@ -6,7 +6,7 @@
 
 #ifdef CONFIG_X86_64
 
-#include <asm/msr-index.h>
+#include <asm/msr.h>
 
 /*
  * Read/write a task's FSBASE or GSBASE. This returns the value that
@@ -12,11 +12,13 @@
 #include <uapi/asm/msr.h>
 #include <asm/shared/msr.h>
 
+#include <linux/percpu.h>
+
 struct msr_info {
 	u32 msr_no;
 	struct msr reg;
-	struct msr *msrs;
+	struct msr __percpu *msrs;
 	int err;
 };
 
 struct msr_regs_info {
@@ -323,8 +325,8 @@ static inline int wrmsrl_safe(u32 msr, u64 val)
 	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
 }
 
-struct msr *msrs_alloc(void);
-void msrs_free(struct msr *msrs);
+struct msr __percpu *msrs_alloc(void);
+void msrs_free(struct msr __percpu *msrs);
 int msr_set_bit(u32 msr, u8 bit);
 int msr_clear_bit(u32 msr, u8 bit);
 
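msrs_alloc()/msrs_free() hand out per-CPU buffers (alloc_percpu() under the hood, see the lib/msr.c hunk further down), so annotating them __percpu lets sparse check callers end to end. A hedged usage sketch follows; demo_dump_tsc_aux() is a made-up function, while the API calls and MSR_TSC_AUX are real.

	#include <linux/percpu.h>
	#include <linux/cpumask.h>
	#include <linux/printk.h>
	#include <linux/errno.h>
	#include <asm/msr.h>

	/* Illustrative only: read MSR_TSC_AUX on every online CPU and print it. */
	static int demo_dump_tsc_aux(void)
	{
		struct msr __percpu *msrs;
		int cpu;

		msrs = msrs_alloc();
		if (!msrs)
			return -ENOMEM;

		rdmsr_on_cpus(cpu_online_mask, MSR_TSC_AUX, msrs);

		for_each_online_cpu(cpu)
			pr_info("cpu%d: TSC_AUX=%llu\n", cpu,
				(unsigned long long)per_cpu_ptr(msrs, cpu)->q);

		msrs_free(msrs);
		return 0;
	}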
@@ -333,8 +335,8 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
 int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
-void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
-void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
+void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
+void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
@@ -363,14 +365,14 @@ static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 	return 0;
 }
 static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
-				 struct msr *msrs)
+				 struct msr __percpu *msrs)
 {
-	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
+	rdmsr_on_cpu(0, msr_no, raw_cpu_ptr(&msrs->l), raw_cpu_ptr(&msrs->h));
 }
 static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
-				 struct msr *msrs)
+				 struct msr __percpu *msrs)
 {
-	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
+	wrmsr_on_cpu(0, msr_no, raw_cpu_read(msrs->l), raw_cpu_read(msrs->h));
 }
 static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
 				    u32 *l, u32 *h)
@@ -14,9 +14,6 @@ extern void release_perfctr_nmi(unsigned int);
 extern int reserve_evntsel_nmi(unsigned int);
 extern void release_evntsel_nmi(unsigned int);
 
-struct ctl_table;
-extern int proc_nmi_enabled(struct ctl_table *, int ,
-		void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
 #endif /* CONFIG_X86_LOCAL_APIC */
@@ -20,7 +20,6 @@ struct vm86;
 #include <asm/page.h>
 #include <asm/pgtable_types.h>
 #include <asm/percpu.h>
-#include <asm/msr.h>
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
 #include <asm/special_insns.h>
@@ -185,13 +184,8 @@ extern struct cpuinfo_x86 new_cpu_data;
 extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
 extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS];
 
-#ifdef CONFIG_SMP
 DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu)		per_cpu(cpu_info, cpu)
-#else
-#define cpu_info		boot_cpu_data
-#define cpu_data(cpu)		boot_cpu_data
-#endif
 
 extern const struct seq_operations cpuinfo_op;
 
@@ -575,28 +569,6 @@ extern void cpu_init(void);
 extern void cpu_init_exception_handling(void);
 extern void cr4_init(void);
 
-static inline unsigned long get_debugctlmsr(void)
-{
-	unsigned long debugctlmsr = 0;
-
-#ifndef CONFIG_X86_DEBUGCTLMSR
-	if (boot_cpu_data.x86 < 6)
-		return 0;
-#endif
-	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
-
-	return debugctlmsr;
-}
-
-static inline void update_debugctlmsr(unsigned long debugctlmsr)
-{
-#ifndef CONFIG_X86_DEBUGCTLMSR
-	if (boot_cpu_data.x86 < 6)
-		return;
-#endif
-	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
-}
-
 extern void set_task_blockstep(struct task_struct *task, bool on);
 
 /* Boot loader type from the setup header: */
@@ -56,11 +56,6 @@ static inline void stop_other_cpus(void)
 	smp_ops.stop_other_cpus(1);
 }
 
-static inline void smp_prepare_boot_cpu(void)
-{
-	smp_ops.smp_prepare_boot_cpu();
-}
-
 static inline void smp_prepare_cpus(unsigned int max_cpus)
 {
 	smp_ops.smp_prepare_cpus(max_cpus);
@@ -96,4 +96,6 @@ static inline void speculative_store_bypass_ht_init(void) { }
 extern void speculation_ctrl_update(unsigned long tif);
 extern void speculation_ctrl_update_current(void);
 
+extern bool itlb_multihit_kvm_mitigation;
+
 #endif
@@ -2,11 +2,11 @@
 #ifndef _ASM_X86_SPECIAL_INSNS_H
 #define _ASM_X86_SPECIAL_INSNS_H
 
-
 #ifdef __KERNEL__
-
 #include <asm/nops.h>
 #include <asm/processor-flags.h>
+
+#include <linux/errno.h>
 #include <linux/irqflags.h>
 #include <linux/jump_label.h>
 
@@ -5,8 +5,9 @@
 #ifndef _ASM_X86_TSC_H
 #define _ASM_X86_TSC_H
 
-#include <asm/processor.h>
 #include <asm/cpufeature.h>
+#include <asm/processor.h>
+#include <asm/msr.h>
 
 /*
  * Standard way to access the cycle counter.
@@ -54,7 +54,7 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
  * half and a user half. When cast to a signed type, user pointers
  * are positive and kernel pointers are negative.
  */
-#define valid_user_address(x) ((long)(x) >= 0)
+#define valid_user_address(x) ((__force long)(x) >= 0)
 
 /*
  * User pointers can have tag bits on x86-64. This scheme tolerates
@@ -87,8 +87,9 @@ static inline bool __access_ok(const void __user *ptr, unsigned long size)
 	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
 		return valid_user_address(ptr);
 	} else {
-		unsigned long sum = size + (unsigned long)ptr;
-		return valid_user_address(sum) && sum >= (unsigned long)ptr;
+		unsigned long sum = size + (__force unsigned long)ptr;
+
+		return valid_user_address(sum) && sum >= (__force unsigned long)ptr;
 	}
 }
 #define __access_ok __access_ok
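The __force additions above silence sparse without changing generated code: __user pointers live in a separate sparse address space, so casting them to a plain integer is flagged unless the cast is explicitly marked as intentional. A simplified sketch of the machinery behind the annotations, adapted from include/linux/compiler_types.h; demo_in_user_half() is an illustrative helper, not kernel code.

	/* How __user and __force read when the code is run through sparse (__CHECKER__). */
	#ifdef __CHECKER__
	# define __user		__attribute__((noderef, address_space(__user)))
	# define __force	__attribute__((force))
	#else
	# define __user
	# define __force
	#endif

	/* Without the __force, sparse reports something like
	 * "cast removes address space '__user' of expression". */
	static inline int demo_in_user_half(const void __user *ptr)
	{
		return (__force long)ptr >= 0;
	}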
@@ -1055,35 +1055,6 @@ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
 	return APM_SUCCESS;
 }
 
-#if 0
-static int apm_get_battery_status(u_short which, u_short *status,
-				  u_short *bat, u_short *life, u_short *nbat)
-{
-	u32 eax;
-	u32 ebx;
-	u32 ecx;
-	u32 edx;
-	u32 esi;
-
-	if (apm_info.connection_version < 0x0102) {
-		/* pretend we only have one battery. */
-		if (which != 1)
-			return APM_BAD_DEVICE;
-		*nbat = 1;
-		return apm_get_power_status(status, bat, life);
-	}
-
-	if (apm_bios_call(APM_FUNC_GET_STATUS, (0x8000 | (which)), 0, &eax,
-			  &ebx, &ecx, &edx, &esi))
-		return (eax >> 8) & 0xff;
-	*status = ebx;
-	*bat = ecx;
-	*life = edx;
-	*nbat = esi;
-	return APM_SUCCESS;
-}
-#endif
-
 /**
  * apm_engage_power_management - enable PM on a device
  * @device: identity of device
@@ -42,8 +42,8 @@ DEFINE_PER_CPU(u64, __x86_call_count);
 DEFINE_PER_CPU(u64, __x86_ret_count);
 DEFINE_PER_CPU(u64, __x86_stuffs_count);
 DEFINE_PER_CPU(u64, __x86_ctxsw_count);
-EXPORT_SYMBOL_GPL(__x86_ctxsw_count);
-EXPORT_SYMBOL_GPL(__x86_call_count);
+EXPORT_PER_CPU_SYMBOL_GPL(__x86_ctxsw_count);
+EXPORT_PER_CPU_SYMBOL_GPL(__x86_call_count);
 #endif
 
 extern s32 __call_sites[], __call_sites_end[];
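EXPORT_SYMBOL_GPL() on a DEFINE_PER_CPU() variable exports the raw symbol but loses the per-CPU typing that modules and sparse rely on; EXPORT_PER_CPU_SYMBOL_GPL() is the matching export macro. A hedged sketch of the intended pairing, with a made-up counter name (demo_evt_count):

	#include <linux/percpu.h>
	#include <linux/export.h>
	#include <linux/types.h>

	/* Illustrative per-CPU counter, exported for GPL modules. */
	DEFINE_PER_CPU(u64, demo_evt_count);
	EXPORT_PER_CPU_SYMBOL_GPL(demo_evt_count);

	/* A module would then bump its local CPU's copy like this: */
	static void demo_note_event(void)
	{
		this_cpu_inc(demo_evt_count);
	}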
@@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
 
 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
-EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
+EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
 
 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
 EXPORT_SYMBOL_GPL(x86_pred_cmd);
@@ -71,6 +71,9 @@
 
 #include "cpu.h"
 
+DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
+EXPORT_PER_CPU_SYMBOL(cpu_info);
+
 u32 elf_hwcap2 __read_mostly;
 
 /* Number of siblings per CPU package */
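With cpu_info now defined here unconditionally (and the UP #define aliases removed from <asm/processor.h> in an earlier hunk), SMP and UP kernels share one definition and one accessor. A hedged sketch of the accessor as callers see it; demo_print_family() is a made-up helper.

	#include <linux/printk.h>
	#include <asm/processor.h>

	/* cpu_data(cpu) now expands to per_cpu(cpu_info, cpu) on SMP and UP alike. */
	static void demo_print_family(unsigned int cpu)
	{
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		pr_info("cpu%u: family %u, model %u\n", cpu, c->x86, c->x86_model);
	}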
@@ -7,6 +7,8 @@
  * Author:
  *	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
  */
+#include <linux/bug.h>
+#include <linux/limits.h>
 
 #include <asm/cpufeature.h>
 #include <asm/intel_pconfig.h>
@@ -6,6 +6,7 @@
  * Authors: Fenghua Yu <fenghua.yu@intel.com>,
  *          H. Peter Anvin <hpa@linux.intel.com>
  */
+#include <linux/printk.h>
 
 #include <asm/processor.h>
 #include <asm/archrandom.h>
@@ -2,6 +2,8 @@
 /*
  * x86 FPU bug checks:
  */
+#include <linux/printk.h>
+
 #include <asm/cpufeature.h>
 #include <asm/fpu/api.h>
 
@@ -503,7 +503,7 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
 	if (IS_ENABLED(CONFIG_NMI_CHECK_CPU))
 		raw_atomic_long_inc(&nsp->idt_calls);
 
-	if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) {
+	if (arch_cpu_is_offline(smp_processor_id())) {
 		if (microcode_nmi_handler_enabled())
 			microcode_offline_nmi_handler();
 		return;
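The IS_ENABLED(CONFIG_SMP) test removed above was redundant: arch_cpu_is_offline() already evaluates to false on UP, and IS_ENABLED() is only a compile-time constant, so the compiler drops the dead branch either way. For reference, this is (slightly simplified) how <linux/kconfig.h> turns a CONFIG_ symbol into a 0/1 constant; the real IS_ENABLED() additionally handles =m via CONFIG_FOO_MODULE.

	#define __ARG_PLACEHOLDER_1			0,
	#define __take_second_arg(__ignored, val, ...)	val
	#define ____is_defined(arg1_or_junk)		__take_second_arg(arg1_or_junk 1, 0)
	#define ___is_defined(val)			____is_defined(__ARG_PLACEHOLDER_##val)
	#define __is_defined(x)				___is_defined(x)
	#define IS_ENABLED(option)			__is_defined(option)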
@@ -1206,6 +1206,16 @@ void __init i386_reserve_resources(void)
 
 #endif /* CONFIG_X86_32 */
 
+#ifndef CONFIG_SMP
+void __init smp_prepare_boot_cpu(void)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	*c = boot_cpu_data;
+	c->initialized = true;
+}
+#endif
+
 static struct notifier_block kernel_offset_notifier = {
 	.notifier_call = dump_kernel_offset
 };
@@ -101,10 +101,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
 EXPORT_PER_CPU_SYMBOL(cpu_die_map);
 
-/* Per CPU bogomips and other parameters */
-DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
-EXPORT_PER_CPU_SYMBOL(cpu_info);
-
 /* CPUs which are the primary SMT threads */
 struct cpumask __cpu_primary_thread_mask __read_mostly;
 
@@ -1078,6 +1074,11 @@ void __init smp_prepare_cpus_common(void)
 	set_cpu_sibling_map(0);
 }
 
+void __init smp_prepare_boot_cpu(void)
+{
+	smp_ops.smp_prepare_boot_cpu();
+}
+
 #ifdef CONFIG_X86_64
 /* Establish whether parallel bringup can be supported. */
 bool __init arch_cpuhp_init_parallel_bringup(void)
@@ -6,7 +6,9 @@
 #include <linux/sched/task_stack.h>
 #include <linux/mm.h>
 #include <linux/ptrace.h>
+
 #include <asm/desc.h>
+#include <asm/debugreg.h>
 #include <asm/mmu_context.h>
 
 unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
@@ -53,12 +53,11 @@
 #include <asm/cmpxchg.h>
 #include <asm/io.h>
 #include <asm/set_memory.h>
+#include <asm/spec-ctrl.h>
 #include <asm/vmx.h>
 
 #include "trace.h"
 
-extern bool itlb_multihit_kvm_mitigation;
-
 static bool nx_hugepage_mitigation_hard_disabled;
 
 int __read_mostly nx_huge_pages = -1;
@@ -1129,15 +1129,15 @@ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs,
  * get_eff_addr_sib() - Obtain referenced effective address via SIB
  * @insn:	Instruction. Must be valid.
  * @regs:	Register values as seen when entering kernel mode
- * @regoff:	Obtained operand offset, in pt_regs, associated with segment
+ * @base_offset: Obtained operand offset, in pt_regs, associated with segment
  * @eff_addr:	Obtained effective address
  *
  * Obtain the effective address referenced by the SIB byte of @insn. After
  * identifying the registers involved in the indexed, register-indirect memory
  * reference, its value is obtained from the operands in @regs. The computed
  * address is stored @eff_addr. Also, the register operand that indicates the
- * associated segment is stored in @regoff, this parameter can later be used to
- * determine such segment.
+ * associated segment is stored in @base_offset; this parameter can later be
+ * used to determine such segment.
  *
  * Returns:
 *
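For context, get_eff_addr_sib() implements the standard x86 SIB address computation: the effective address is the base register plus the index register scaled by 2^scale, with any displacement applied by the callers. A hedged, self-contained sketch of that arithmetic; demo_sib_eff_addr() is illustrative, not the kernel's decoder.

	#include <stdint.h>

	/* scale is the raw 2-bit SIB.scale field (0..3), i.e. a multiplier of 1, 2, 4 or 8. */
	static int64_t demo_sib_eff_addr(int64_t base, int64_t index, unsigned int scale)
	{
		return base + index * (1LL << scale);
	}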
@@ -9,10 +9,9 @@ static void __rdmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
 	struct msr *reg;
-	int this_cpu = raw_smp_processor_id();
 
 	if (rv->msrs)
-		reg = per_cpu_ptr(rv->msrs, this_cpu);
+		reg = this_cpu_ptr(rv->msrs);
 	else
 		reg = &rv->reg;
 
@@ -23,10 +22,9 @@ static void __wrmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
 	struct msr *reg;
-	int this_cpu = raw_smp_processor_id();
 
 	if (rv->msrs)
-		reg = per_cpu_ptr(rv->msrs, this_cpu);
+		reg = this_cpu_ptr(rv->msrs);
 	else
 		reg = &rv->reg;
 
@@ -97,7 +95,7 @@ int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 EXPORT_SYMBOL(wrmsrl_on_cpu);
 
 static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
-			    struct msr *msrs,
+			    struct msr __percpu *msrs,
 			    void (*msr_func) (void *info))
 {
 	struct msr_info rv;
@@ -124,7 +122,7 @@ static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
  * @msrs:	array of MSR values
  *
  */
-void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
 {
 	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
 }
@@ -138,7 +136,7 @@ EXPORT_SYMBOL(rdmsr_on_cpus);
  * @msrs:	array of MSR values
 *
 */
-void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
 {
 	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
 }
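__rdmsr_on_cpu() and __wrmsr_on_cpu() run from the IPI on the target CPU itself, so the open-coded per_cpu_ptr(ptr, raw_smp_processor_id()) collapses to this_cpu_ptr(ptr), which is shorter and can use x86's cheaper this-CPU addressing. A hedged sketch of the equivalence; demo_percpu_buf and demo_on_this_cpu() are made-up names.

	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/bug.h>
	#include <asm/msr.h>

	static struct msr __percpu *demo_percpu_buf;

	/* Only meaningful when called on the CPU that owns the data, e.g. from an
	 * IPI handler or with preemption disabled. */
	static void demo_on_this_cpu(void)
	{
		struct msr *a = per_cpu_ptr(demo_percpu_buf, raw_smp_processor_id());
		struct msr *b = this_cpu_ptr(demo_percpu_buf);

		WARN_ON(a != b);	/* same per-CPU instance, two spellings */
	}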
@@ -6,9 +6,9 @@
 #define CREATE_TRACE_POINTS
 #include <asm/msr-trace.h>
 
-struct msr *msrs_alloc(void)
+struct msr __percpu *msrs_alloc(void)
 {
-	struct msr *msrs = NULL;
+	struct msr __percpu *msrs = NULL;
 
 	msrs = alloc_percpu(struct msr);
 	if (!msrs) {
@@ -20,7 +20,7 @@ struct msr *msrs_alloc(void)
 }
 EXPORT_SYMBOL(msrs_alloc);
 
-void msrs_free(struct msr *msrs)
+void msrs_free(struct msr __percpu *msrs)
 {
 	free_percpu(msrs);
 }
@@ -105,6 +105,12 @@ static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
 	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
 }
 
+/*
+ * Architecture specific boot CPU setup. Defined as empty weak function in
+ * init/main.c. Architectures can override it.
+ */
+void smp_prepare_boot_cpu(void);
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -171,12 +177,6 @@ void generic_smp_call_function_single_interrupt(void);
 #define generic_smp_call_function_interrupt \
 	generic_smp_call_function_single_interrupt
 
-/*
- * Mark the boot cpu "online" so that it can call console drivers in
- * printk() and can access its per-cpu storage.
- */
-void smp_prepare_boot_cpu(void);
-
 extern unsigned int setup_max_cpus;
 extern void __init setup_nr_cpu_ids(void);
 extern void __init smp_init(void);
@@ -203,7 +203,6 @@ static inline void up_smp_call_function(smp_call_func_t func, void *info)
 			(up_smp_call_function(func, info))
 
 static inline void smp_send_reschedule(int cpu) { }
-#define smp_prepare_boot_cpu()			do {} while (0)
 #define smp_call_function_many(mask, func, info, wait) \
 			(up_smp_call_function(func, info))
 static inline void call_function_init(void) { }
@@ -776,6 +776,10 @@ void __init __weak smp_setup_processor_id(void)
 {
 }
 
+void __init __weak smp_prepare_boot_cpu(void)
+{
+}
+
 # if THREAD_SIZE >= PAGE_SIZE
 void __init __weak thread_stack_cache_init(void)
 {