Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-01-16 09:56:46 +00:00)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] fix ia64 kprobes compilation
  [IA64] move gcc_intrin.h from header-y to unifdef-y
  [IA64] workaround tiger ia64_sal_get_physical_id_info hang
  [IA64] move defconfig to arch/ia64/configs/
  [IA64] Fix irq migration in multiple vector domain
  [IA64] signal(ia64_ia32): add a signal stack overflow check
  [IA64] signal(ia64): add a signal stack overflow check
  [IA64] CONFIG_SGI_SN2 - auto select NUMA and ACPI_NUMA
commit 71ca44dac4

arch/ia64/Kconfig
@@ -156,6 +156,8 @@ config IA64_HP_ZX1_SWIOTLB
 
 config IA64_SGI_SN2
 	bool "SGI-SN2"
+	select NUMA
+	select ACPI_NUMA
 	help
 	  Selecting this option will optimize the kernel for use on sn2 based
 	  systems, but the resulting kernel binary will not run on other

arch/ia64/Makefile
@@ -11,6 +11,8 @@
 # Copyright (C) 1998-2004 by David Mosberger-Tang <davidm@hpl.hp.com>
 #
 
+KBUILD_DEFCONFIG := generic_defconfig
+
 NM := $(CROSS_COMPILE)nm -B
 READELF := $(CROSS_COMPILE)readelf
 

arch/ia64/ia32/ia32_signal.c
@@ -766,8 +766,19 @@ get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
 
 	/* This is the X/Open sanctioned signal stack switching. */
 	if (ka->sa.sa_flags & SA_ONSTACK) {
-		if (!on_sig_stack(esp))
+		int onstack = sas_ss_flags(esp);
+
+		if (onstack == 0)
 			esp = current->sas_ss_sp + current->sas_ss_size;
+		else if (onstack == SS_ONSTACK) {
+			/*
+			 * If we are on the alternate signal stack and would
+			 * overflow it, don't. Return an always-bogus address
+			 * instead so we will die with SIGSEGV.
+			 */
+			if (!likely(on_sig_stack(esp - frame_size)))
+				return (void __user *) -1L;
+		}
 	}
 	/* Legacy stack switching not supported */
 
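
The ia32 path above and the native path in the arch/ia64/kernel/signal.c hunk further down apply the same rule: switch to the alternate stack only when not already on it, and if the task is already on the alternate stack and the new frame would run off its end, hand back a bogus address so the task dies with SIGSEGV instead of silently scribbling below the stack. A minimal user-space sketch of that check, with simplified stand-ins for on_sig_stack() and sas_ss_flags() (illustrative only, not the kernel implementation):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel's alternate-signal-stack bookkeeping. */
#define SS_ONSTACK 1
static uintptr_t sas_ss_sp   = 0x100000;  /* hypothetical altstack base */
static size_t    sas_ss_size = 0x2000;    /* hypothetical altstack size */

static int on_sig_stack(uintptr_t sp)
{
	return sp >= sas_ss_sp && sp < sas_ss_sp + sas_ss_size;
}

static int sas_ss_flags(uintptr_t sp)
{
	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

/* Mimics get_sigframe(): returns the frame address, or 0 on overflow. */
static uintptr_t place_frame(uintptr_t sp, size_t frame_size)
{
	int onstack = sas_ss_flags(sp);

	if (onstack == 0)
		sp = sas_ss_sp + sas_ss_size;	/* switch to the altstack */
	else if (onstack == SS_ONSTACK && !on_sig_stack(sp - frame_size))
		return 0;			/* would overflow: refuse */
	return sp - frame_size;			/* frames grow downward */
}

int main(void)
{
	printf("%#lx\n", (unsigned long)place_frame(0x100200, 0x100)); /* fits */
	printf("%#lx\n", (unsigned long)place_frame(0x100080, 0x100)); /* overflows */
	printf("%#lx\n", (unsigned long)place_frame(0x200000, 0x100)); /* switches */
	return 0;
}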

arch/ia64/kernel/iosapic.c
@@ -345,7 +345,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 	if (cpus_empty(mask))
 		return;
 
-	if (reassign_irq_vector(irq, first_cpu(mask)))
+	if (irq_prepare_move(irq, first_cpu(mask)))
 		return;
 
 	dest = cpu_physical_id(first_cpu(mask));
@@ -397,6 +397,7 @@ iosapic_end_level_irq (unsigned int irq)
 	struct iosapic_rte_info *rte;
 	int do_unmask_irq = 0;
 
+	irq_complete_move(irq);
 	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
 		do_unmask_irq = 1;
 		mask_irq(irq);
@@ -450,6 +451,7 @@ iosapic_ack_edge_irq (unsigned int irq)
 {
 	irq_desc_t *idesc = irq_desc + irq;
 
+	irq_complete_move(irq);
 	move_native_irq(irq);
 	/*
 	 * Once we have recorded IRQ_PENDING already, we can mask the

arch/ia64/kernel/irq_ia64.c
@@ -260,6 +260,8 @@ void __setup_vector_irq(int cpu)
 }
 
 #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+#define IA64_IRQ_MOVE_VECTOR	IA64_DEF_FIRST_DEVICE_VECTOR
+
 static enum vector_domain_type {
 	VECTOR_DOMAIN_NONE,
 	VECTOR_DOMAIN_PERCPU
@@ -272,6 +274,101 @@ static cpumask_t vector_allocation_domain(int cpu)
 	return CPU_MASK_ALL;
 }
 
+static int __irq_prepare_move(int irq, int cpu)
+{
+	struct irq_cfg *cfg = &irq_cfg[irq];
+	int vector;
+	cpumask_t domain;
+
+	if (cfg->move_in_progress || cfg->move_cleanup_count)
+		return -EBUSY;
+	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
+		return -EINVAL;
+	if (cpu_isset(cpu, cfg->domain))
+		return 0;
+	domain = vector_allocation_domain(cpu);
+	vector = find_unassigned_vector(domain);
+	if (vector < 0)
+		return -ENOSPC;
+	cfg->move_in_progress = 1;
+	cfg->old_domain = cfg->domain;
+	cfg->vector = IRQ_VECTOR_UNASSIGNED;
+	cfg->domain = CPU_MASK_NONE;
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
+	return 0;
+}
+
+int irq_prepare_move(int irq, int cpu)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	ret = __irq_prepare_move(irq, cpu);
+	spin_unlock_irqrestore(&vector_lock, flags);
+	return ret;
+}
+
+void irq_complete_move(unsigned irq)
+{
+	struct irq_cfg *cfg = &irq_cfg[irq];
+	cpumask_t cleanup_mask;
+	int i;
+
+	if (likely(!cfg->move_in_progress))
+		return;
+
+	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
+		return;
+
+	cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+	for_each_cpu_mask(i, cleanup_mask)
+		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
+	cfg->move_in_progress = 0;
+}
+
+static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
+{
+	int me = smp_processor_id();
+	ia64_vector vector;
+	unsigned long flags;
+
+	for (vector = IA64_FIRST_DEVICE_VECTOR;
+	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
+		int irq;
+		struct irq_desc *desc;
+		struct irq_cfg *cfg;
+		irq = __get_cpu_var(vector_irq)[vector];
+		if (irq < 0)
+			continue;
+
+		desc = irq_desc + irq;
+		cfg = irq_cfg + irq;
+		spin_lock(&desc->lock);
+		if (!cfg->move_cleanup_count)
+			goto unlock;
+
+		if (!cpu_isset(me, cfg->old_domain))
+			goto unlock;
+
+		spin_lock_irqsave(&vector_lock, flags);
+		__get_cpu_var(vector_irq)[vector] = -1;
+		cpu_clear(me, vector_table[vector]);
+		spin_unlock_irqrestore(&vector_lock, flags);
+		cfg->move_cleanup_count--;
+	unlock:
+		spin_unlock(&desc->lock);
+	}
+	return IRQ_HANDLED;
+}
+
+static struct irqaction irq_move_irqaction = {
+	.handler =	smp_irq_move_cleanup_interrupt,
+	.flags =	IRQF_DISABLED,
+	.name =		"irq_move"
+};
+
 static int __init parse_vector_domain(char *arg)
 {
 	if (!arg)
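
Together with the iosapic.c and msi_ia64.c hunks, migration is now a two-phase handshake: irq_prepare_move() binds a fresh vector in the target CPU's domain while remembering old_domain, and irq_complete_move(), called from the ack/end paths once an interrupt actually arrives outside the old domain, fires IA64_IRQ_MOVE_VECTOR IPIs so every CPU of the old domain releases its vector_irq[] slot in smp_irq_move_cleanup_interrupt(). A condensed, self-contained toy model of that handshake (plain C, bitmask cpumasks, made-up names; not the kernel code):

#include <stdio.h>

/* Toy model of the two-phase vector migration; cpumasks are plain bitmasks. */
struct toy_cfg {
	unsigned domain;	/* CPUs that own the current vector */
	unsigned old_domain;	/* CPUs that still hold the previous one */
	int move_in_progress;
	int move_cleanup_count;
};

static int popcount(unsigned m)
{
	int n = 0;

	for (; m; m &= m - 1)
		n++;
	return n;
}

/* Phase 1: pick the new home but keep the old vector alive for now. */
static void prepare_move(struct toy_cfg *cfg, unsigned new_domain)
{
	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return;			/* previous migration not finished */
	cfg->old_domain = cfg->domain;
	cfg->domain = new_domain;	/* the kernel binds a fresh vector here */
	cfg->move_in_progress = 1;
}

/* Phase 2: called from the ack/end path on the CPU that took the IRQ. */
static void complete_move(struct toy_cfg *cfg, int cpu)
{
	if (!cfg->move_in_progress)
		return;
	if (cfg->old_domain & (1u << cpu))
		return;			/* still arriving on the old domain */
	/* New domain is live: the kernel IPIs the old-domain CPUs here. */
	cfg->move_cleanup_count = popcount(cfg->old_domain);
	cfg->move_in_progress = 0;
}

/* What each old-domain CPU does in its cleanup interrupt. */
static void cleanup_on_cpu(struct toy_cfg *cfg, int cpu)
{
	if (cfg->move_cleanup_count && (cfg->old_domain & (1u << cpu)))
		cfg->move_cleanup_count--;	/* free the vector_irq[] slot */
}

int main(void)
{
	struct toy_cfg cfg = { .domain = 1u << 0 };	/* IRQ starts on CPU 0 */

	prepare_move(&cfg, 1u << 2);	/* migrate to CPU 2 */
	complete_move(&cfg, 2);		/* first interrupt lands on CPU 2 */
	cleanup_on_cpu(&cfg, 0);	/* CPU 0 releases the old vector */
	printf("cleanup pending: %d\n", cfg.move_cleanup_count);	/* 0 */
	return 0;
}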
@@ -303,36 +400,6 @@ void destroy_and_reserve_irq(unsigned int irq)
 	spin_unlock_irqrestore(&vector_lock, flags);
 }
 
-static int __reassign_irq_vector(int irq, int cpu)
-{
-	struct irq_cfg *cfg = &irq_cfg[irq];
-	int vector;
-	cpumask_t domain;
-
-	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
-		return -EINVAL;
-	if (cpu_isset(cpu, cfg->domain))
-		return 0;
-	domain = vector_allocation_domain(cpu);
-	vector = find_unassigned_vector(domain);
-	if (vector < 0)
-		return -ENOSPC;
-	__clear_irq_vector(irq);
-	BUG_ON(__bind_irq_vector(irq, vector, domain));
-	return 0;
-}
-
-int reassign_irq_vector(int irq, int cpu)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&vector_lock, flags);
-	ret = __reassign_irq_vector(irq, cpu);
-	spin_unlock_irqrestore(&vector_lock, flags);
-	return ret;
-}
-
 /*
  * Dynamic irq allocate and deallocation for MSI
  */
@@ -578,6 +645,13 @@ init_IRQ (void)
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
 	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
 	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
+	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
+		BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
+		IA64_FIRST_DEVICE_VECTOR++;
+		register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
+	}
+#endif
 #endif
 #ifdef CONFIG_PERFMON
 	pfm_init_percpu();

arch/ia64/kernel/kprobes.c
@@ -1001,6 +1001,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	return 1;
 }
 
+/* ia64 does not need this */
+void __kprobes jprobe_return(void)
+{
+}
+
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

arch/ia64/kernel/msi_ia64.c
@@ -57,7 +57,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 	if (!cpu_online(cpu))
 		return;
 
-	if (reassign_irq_vector(irq, cpu))
+	if (irq_prepare_move(irq, cpu))
 		return;
 
 	read_msi_msg(irq, &msg);
@@ -119,6 +119,7 @@ void ia64_teardown_msi_irq(unsigned int irq)
 
 static void ia64_ack_msi_irq(unsigned int irq)
 {
+	irq_complete_move(irq);
 	move_native_irq(irq);
 	ia64_eoi();
 }

arch/ia64/kernel/sal.c
@@ -109,6 +109,13 @@ check_versions (struct ia64_sal_systab *systab)
 		sal_revision = SAL_VERSION_CODE(2, 8);
 		sal_version = SAL_VERSION_CODE(0, 0);
 	}
+
+	if (ia64_platform_is("sn2") && (sal_revision == SAL_VERSION_CODE(2, 9)))
+		/*
+		 * SGI Altix has hard-coded version 2.9 in their prom
+		 * but they actually implement 3.2, so let's fix it here.
+		 */
+		sal_revision = SAL_VERSION_CODE(3, 2);
 }
 
 static void __init

arch/ia64/kernel/signal.c
@@ -342,15 +342,33 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
 
 	new_sp = scr->pt.r12;
 	tramp_addr = (unsigned long) __kernel_sigtramp;
-	if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(new_sp) == 0) {
-		new_sp = current->sas_ss_sp + current->sas_ss_size;
-		/*
-		 * We need to check for the register stack being on the signal stack
-		 * separately, because it's switched separately (memory stack is switched
-		 * in the kernel, register stack is switched in the signal trampoline).
-		 */
-		if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
-			new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
+	if (ka->sa.sa_flags & SA_ONSTACK) {
+		int onstack = sas_ss_flags(new_sp);
+
+		if (onstack == 0) {
+			new_sp = current->sas_ss_sp + current->sas_ss_size;
+			/*
+			 * We need to check for the register stack being on the
+			 * signal stack separately, because it's switched
+			 * separately (memory stack is switched in the kernel,
+			 * register stack is switched in the signal trampoline).
+			 */
+			if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
+				new_rbs = ALIGN(current->sas_ss_sp,
+						sizeof(long));
+		} else if (onstack == SS_ONSTACK) {
+			unsigned long check_sp;
+
+			/*
+			 * If we are on the alternate signal stack and would
+			 * overflow it, don't. Return an always-bogus address
+			 * instead so we will die with SIGSEGV.
+			 */
+			check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN;
+			if (!likely(on_sig_stack(check_sp)))
+				return force_sigsegv_info(sig, (void __user *)
+							  check_sp);
+		}
 	}
 	frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);
 
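
A small piece of the hunk above replaces the open-coded round-up of sas_ss_sp with ALIGN(current->sas_ss_sp, sizeof(long)); for a power-of-two alignment the two expressions are identical. A stand-alone check of that equivalence, with ALIGN() re-defined locally for illustration (the kernel's macro reduces to the same arithmetic for this use):

#include <assert.h>
#include <stdio.h>

/* Power-of-two round-up, locally defined for this sketch. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	for (unsigned long sp = 0; sp < 64; sp++) {
		unsigned long open_coded =
			(sp + sizeof(long) - 1) & ~(sizeof(long) - 1);

		assert(ALIGN(sp, sizeof(long)) == open_coded);
	}
	printf("ALIGN(sp, sizeof(long)) matches the open-coded round-up\n");
	return 0;
}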

include/asm-ia64/Kbuild
@@ -3,7 +3,6 @@ include include/asm-generic/Kbuild.asm
 header-y += break.h
 header-y += fpu.h
 header-y += fpswa.h
-header-y += gcc_intrin.h
 header-y += ia64regs.h
 header-y += intel_intrin.h
 header-y += intrinsics.h
@@ -12,5 +11,6 @@ header-y += ptrace_offsets.h
 header-y += rse.h
 header-y += ucontext.h
 
+unifdef-y += gcc_intrin.h
 unifdef-y += perfmon.h
 unifdef-y += ustack.h
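
For context on the Kbuild change: header-y exports a header to userspace as-is, while unifdef-y first runs it through unifdef to strip #ifdef __KERNEL__ sections, so the move suggests gcc_intrin.h carries kernel-only parts alongside the user-visible ones. A schematic, hypothetical header of the shape that wants the unifdef-y treatment (not the real gcc_intrin.h):

/* hypothetical example header, not the real gcc_intrin.h */
#ifndef _ASM_IA64_EXAMPLE_H
#define _ASM_IA64_EXAMPLE_H

#define IA64_USER_VISIBLE_MACRO	1	/* exported to userspace verbatim */

#ifdef __KERNEL__
/* kernel-only part: stripped by unifdef before the header is exported */
#define IA64_KERNEL_ONLY_MACRO	2
#endif

#endif /* _ASM_IA64_EXAMPLE_H */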

include/asm-ia64/hw_irq.h
@@ -93,6 +93,9 @@ extern __u8 isa_irq_to_vector_map[16];
 struct irq_cfg {
 	ia64_vector vector;
 	cpumask_t domain;
+	cpumask_t old_domain;
+	unsigned move_cleanup_count;
+	u8 move_in_progress : 1;
 };
 extern spinlock_t vector_lock;
 extern struct irq_cfg irq_cfg[NR_IRQS];
@@ -106,12 +109,19 @@ extern int assign_irq_vector (int irq); /* allocate a free vector */
 extern void free_irq_vector (int vector);
 extern int reserve_irq_vector (int vector);
 extern void __setup_vector_irq(int cpu);
-extern int reassign_irq_vector(int irq, int cpu);
 extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
 extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
 extern int check_irq_used (int irq);
 extern void destroy_and_reserve_irq (unsigned int irq);
 
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+extern int irq_prepare_move(int irq, int cpu);
+extern void irq_complete_move(unsigned int irq);
+#else
+static inline int irq_prepare_move(int irq, int cpu) { return 0; }
+static inline void irq_complete_move(unsigned int irq) {}
+#endif
+
 static inline void ia64_resend_irq(unsigned int vector)
 {
 	platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
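
The #if/#else pair above is the usual compile-out idiom: real declarations when the feature is built in, empty inline stubs otherwise, so callers such as iosapic.c and msi_ia64.c can call irq_prepare_move()/irq_complete_move() without any #ifdefs of their own. A generic, self-contained illustration of the idiom (all names here are made up):

#include <stdio.h>

#define CONFIG_FEATURE_FOO 1	/* flip to 0 to build the stub side */

#if CONFIG_FEATURE_FOO
static int foo_prepare(int id)
{
	printf("preparing %d\n", id);
	return 0;
}
static void foo_complete(int id)
{
	printf("completing %d\n", id);
}
#else
/* Compiled-out stubs: callers need no #ifdef of their own. */
static inline int foo_prepare(int id) { (void)id; return 0; }
static inline void foo_complete(int id) { (void)id; }
#endif

int main(void)
{
	if (foo_prepare(7) == 0)
		foo_complete(7);
	return 0;
}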

include/asm-ia64/kprobes.h
@@ -121,10 +121,6 @@ extern int kprobes_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_exceptions_notify(struct notifier_block *self,
 				    unsigned long val, void *data);
 
-/* ia64 does not need this */
-static inline void jprobe_return(void)
-{
-}
 extern void invalidate_stacked_regs(void);
 extern void flush_register_stack(void);
 extern void arch_remove_kprobe(struct kprobe *p);

include/asm-ia64/sal.h
@@ -807,6 +807,10 @@ static inline s64
 ia64_sal_physical_id_info(u16 *splid)
 {
 	struct ia64_sal_retval isrv;
+
+	if (sal_revision < SAL_VERSION_CODE(3,2))
+		return -1;
+
 	SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0);
 	if (splid)
 		*splid = isrv.v0;