[PATCH] x86_64: Node local pda take 2 -- cpu_pda preparation
Helper patch to change cpu_pda users to use macros to access cpu_pda instead of the cpu_pda[] array.

Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Shai Fultheim <shai@scalex86.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit df79efde82
parent 05b3cbd8bb
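The conversion below is mechanical: the backing array is renamed from cpu_pda[] to _cpu_pda[], and every user goes through the new cpu_pda(i) accessor macro instead of indexing the array directly, so the storage behind the macro can be reworked later (the node-local pda change this prepares for) without touching callers again. A minimal stand-alone sketch of the pattern, not kernel code: struct x8664_pda is reduced to the fields touched in this diff, and NR_CPUS is an illustrative value rather than the kernel's configuration.

#include <stdio.h>

#define NR_CPUS 4                       /* illustrative; not the kernel's config value */

/* Reduced stand-in for struct x8664_pda: only fields used in this diff. */
struct x8664_pda {
        unsigned __nmi_count;
        int nodenumber;
};

/* The storage keeps the underscore-prefixed name... */
static struct x8664_pda _cpu_pda[NR_CPUS];

/* ...and callers use the accessor macro, as the struct x8664_pda header hunk below adds it. */
#define cpu_pda(i) (&_cpu_pda[i])

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                cpu_pda(cpu)->nodenumber = cpu; /* was: cpu_pda[cpu].nodenumber = cpu; */

        printf("node of cpu 2: %d\n", cpu_pda(2)->nodenumber);
        return 0;
}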
@@ -70,13 +70,13 @@ skip:
         seq_printf(p, "NMI: ");
         for (j = 0; j < NR_CPUS; j++)
                 if (cpu_online(j))
-                        seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
+                        seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
         seq_putc(p, '\n');
 #ifdef CONFIG_X86_LOCAL_APIC
         seq_printf(p, "LOC: ");
         for (j = 0; j < NR_CPUS; j++)
                 if (cpu_online(j))
-                        seq_printf(p, "%10u ", cpu_pda[j].apic_timer_irqs);
+                        seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
         seq_putc(p, '\n');
 #endif
         seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
@@ -155,19 +155,19 @@ int __init check_nmi_watchdog (void)
         smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);

         for (cpu = 0; cpu < NR_CPUS; cpu++)
-                counts[cpu] = cpu_pda[cpu].__nmi_count;
+                counts[cpu] = cpu_pda(cpu)->__nmi_count;
         local_irq_enable();
         mdelay((10*1000)/nmi_hz); // wait 10 ticks

         for (cpu = 0; cpu < NR_CPUS; cpu++) {
                 if (!cpu_online(cpu))
                         continue;
-                if (cpu_pda[cpu].__nmi_count - counts[cpu] <= 5) {
+                if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
                         endflag = 1;
                         printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
                                cpu,
                                counts[cpu],
-                               cpu_pda[cpu].__nmi_count);
+                               cpu_pda(cpu)->__nmi_count);
                         nmi_active = 0;
                         lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
                         nmi_perfctr_msr = 0;
@@ -30,7 +30,7 @@ char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};

 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

-struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned;
+struct x8664_pda _cpu_pda[NR_CPUS] __cacheline_aligned;

 struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table };

@@ -110,18 +110,18 @@ void __init setup_per_cpu_areas(void)
                 }
                 if (!ptr)
                         panic("Cannot allocate cpu data for CPU %d\n", i);
-                cpu_pda[i].data_offset = ptr - __per_cpu_start;
+                cpu_pda(i)->data_offset = ptr - __per_cpu_start;
                 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
         }
 }

 void pda_init(int cpu)
 {
-        struct x8664_pda *pda = &cpu_pda[cpu];
+        struct x8664_pda *pda = cpu_pda(cpu);

         /* Setup up data that may be needed in __get_free_pages early */
         asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
-        wrmsrl(MSR_GS_BASE, cpu_pda + cpu);
+        wrmsrl(MSR_GS_BASE, pda);

         pda->cpunumber = cpu;
         pda->irqcount = -1;
@@ -792,7 +792,7 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)

 do_rest:

-        cpu_pda[cpu].pcurrent = c_idle.idle;
+        cpu_pda(cpu)->pcurrent = c_idle.idle;

         start_rip = setup_trampoline();

@@ -70,7 +70,6 @@ asmlinkage void reserved(void);
 asmlinkage void alignment_check(void);
 asmlinkage void machine_check(void);
 asmlinkage void spurious_interrupt_bug(void);
-asmlinkage void call_debug(void);

 struct notifier_block *die_chain;
 static DEFINE_SPINLOCK(die_notifier_lock);
@@ -139,7 +138,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
         switch (k + 1) {
 #if DEBUG_STKSZ > EXCEPTION_STKSZ
         case DEBUG_STACK:
-                end = cpu_pda[cpu].debugstack + DEBUG_STKSZ;
+                end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
                 break;
 #endif
         default:
@@ -186,7 +185,7 @@ void show_trace(unsigned long *stack)
 {
         unsigned long addr;
         const unsigned cpu = safe_smp_processor_id();
-        unsigned long *irqstack_end = (unsigned long *)cpu_pda[cpu].irqstackptr;
+        unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
         int i;
         unsigned used = 0;

@@ -254,8 +253,8 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp)
         unsigned long *stack;
         int i;
         const int cpu = safe_smp_processor_id();
-        unsigned long *irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr);
-        unsigned long *irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE);
+        unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
+        unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

         // debugging aid: "show_stack(NULL, NULL);" prints the
         // back trace for this cpu.
@@ -303,7 +302,7 @@ void show_registers(struct pt_regs *regs)
         int in_kernel = !user_mode(regs);
         unsigned long rsp;
         const int cpu = safe_smp_processor_id();
-        struct task_struct *cur = cpu_pda[cpu].pcurrent;
+        struct task_struct *cur = cpu_pda(cpu)->pcurrent;

         rsp = regs->rsp;

@@ -96,7 +96,7 @@ EXPORT_SYMBOL(pci_mem_start);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);

-EXPORT_SYMBOL(cpu_pda);
+EXPORT_SYMBOL(_cpu_pda);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(cpu_data);
 EXPORT_SYMBOL(__write_lock_failed);
@@ -272,7 +272,7 @@ __cpuinit void numa_add_cpu(int cpu)

 void __cpuinit numa_set_node(int cpu, int node)
 {
-        cpu_pda[cpu].nodenumber = node;
+        cpu_pda(cpu)->nodenumber = node;
         cpu_to_node[cpu] = node;
 }

@@ -27,7 +27,9 @@ struct x8664_pda {
         unsigned apic_timer_irqs;
 } ____cacheline_aligned_in_smp;

-extern struct x8664_pda cpu_pda[];
+extern struct x8664_pda _cpu_pda[];

+#define cpu_pda(i) (&_cpu_pda[i])
+
 /*
  * There is no fast way to get the base address of the PDA, all the accesses
@@ -11,7 +11,7 @@

 #include <asm/pda.h>

-#define __per_cpu_offset(cpu) (cpu_pda[cpu].data_offset)
+#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
 #define __my_cpu_offset() read_pda(data_offset)

 /* Separate out the type, so (int[3], foo) works. */