mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86/paravirt: Use normal calling sequences for irq enable/disable
  x86: fix kernel panic on 32 bits when profiling
  x86: Fix Suspend to RAM freeze on Acer Aspire 1511Lmi laptop
  x86, vmi: Mark VMI deprecated and schedule it for removal
commit ea87644105
@@ -451,3 +451,33 @@ Why:	OSS sound_core grabs all legacy minors (0-255) of SOUND_MAJOR
 	will also allow making ALSA OSS emulation independent of
 	sound_core.  The dependency will be broken then too.
 Who:	Tejun Heo <tj@kernel.org>
+
+----------------------------
+
+What:	Support for VMware's guest paravirtualization technique [VMI] will be
+	dropped.
+When:	2.6.37 or earlier.
+Why:	With the recent innovations in CPU hardware acceleration technologies
+	from Intel and AMD, VMware ran a few experiments to compare these
+	techniques to guest paravirtualization technique on VMware's platform.
+	These hardware assisted virtualization techniques have outperformed the
+	performance benefits provided by VMI in most of the workloads. VMware
+	expects that these hardware features will be ubiquitous in a couple of
+	years, as a result, VMware has started a phased retirement of this
+	feature from the hypervisor. We will be removing this feature from the
+	Kernel too. Right now we are targeting 2.6.37 but can retire earlier if
+	technical reasons (read opportunity to remove major chunk of pvops)
+	arise.
+
+	Please note that VMI has always been an optimization and non-VMI kernels
+	still work fine on VMware's platform.
+	Latest versions of VMware's product which support VMI are,
+	Workstation 7.0 and VSphere 4.0 on ESX side, future maintenance
+	releases for these products will continue supporting VMI.
+
+	For more details about VMI retirement take a look at this,
+	http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html
+
+Who:	Alok N Kataria <akataria@vmware.com>
+
+----------------------------
@@ -491,7 +491,7 @@ if PARAVIRT_GUEST
 source "arch/x86/xen/Kconfig"
 
 config VMI
-	bool "VMI Guest support"
+	bool "VMI Guest support (DEPRECATED)"
 	select PARAVIRT
 	depends on X86_32
 	---help---
@@ -500,6 +500,15 @@ config VMI
 	  at the moment), by linking the kernel to a GPL-ed ROM module
 	  provided by the hypervisor.
 
+	  As of September 2009, VMware has started a phased retirement
+	  of this feature from VMware's products. Please see
+	  feature-removal-schedule.txt for details.  If you are
+	  planning to enable this option, please note that you cannot
+	  live migrate a VMI enabled VM to a future VMware product,
+	  which doesn't support VMI. So if you expect your kernel to
+	  seamlessly migrate to newer VMware products, keep this
+	  disabled.
+
 config KVM_CLOCK
 	bool "KVM paravirtualized clock"
 	select PARAVIRT
@@ -840,42 +840,22 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
 
 static inline unsigned long __raw_local_save_flags(void)
 {
-	unsigned long f;
-
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     : "=a"(f)
-		     : paravirt_type(pv_irq_ops.save_fl),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc");
-	return f;
+	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 }
 
 static inline void raw_local_irq_restore(unsigned long f)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     : "=a"(f)
-		     : PV_FLAGS_ARG(f),
-		       paravirt_type(pv_irq_ops.restore_fl),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc");
+	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
 }
 
 static inline void raw_local_irq_disable(void)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     :
-		     : paravirt_type(pv_irq_ops.irq_disable),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc");
+	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
 }
 
 static inline void raw_local_irq_enable(void)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     :
-		     : paravirt_type(pv_irq_ops.irq_enable),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc");
+	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
 }
 
 static inline unsigned long __raw_local_irq_save(void)
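The hunk above rewrites the four irq-flag helpers in asm/paravirt.h to go through the generic PVOP_CALLEE0/PVOP_VCALLEE0/PVOP_VCALLEE1 machinery instead of open-coded asm, which is what the merged commit title means by "normal calling sequences". As a hedged usage sketch (illustrative only, not part of the patch; real callers normally go through the local_irq_save()/local_irq_restore() macros), the helpers pair up at a call site like this:

/* Illustrative sketch only, not from the patch. */
static void example_with_irqs_off(void)
{
	unsigned long flags;

	flags = __raw_local_irq_save();	/* save_fl, then irq_disable, via pv_irq_ops */
	/* ... short section that must run with interrupts disabled ... */
	raw_local_irq_restore(flags);	/* hand the saved flags back to restore_fl */
}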
@@ -494,10 +494,11 @@ int paravirt_disable_iospace(void);
 #define EXTRA_CLOBBERS
 #define VEXTRA_CLOBBERS
 #else  /* CONFIG_X86_64 */
+/* [re]ax isn't an arg, but the return val */
 #define PVOP_VCALL_ARGS						\
 	unsigned long __edi = __edi, __esi = __esi,		\
-		__edx = __edx, __ecx = __ecx
-#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax
+		__edx = __edx, __ecx = __ecx, __eax = __eax
+#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS
 
 #define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
 #define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
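One detail in the PVOP_VCALL_ARGS change above that can look odd is the "__eax = __eax" initializer: it is the usual GCC self-initialization idiom for locals that exist only to be named as asm operands and are written by the asm statement itself, so the compiler should not warn about them being used before initialization. A minimal user-space sketch of the idiom (hypothetical names, illustrative only, not kernel code):

#include <stdio.h>

/* Stand-in for PVOP_VCALL_ARGS: self-initialized locals silence
 * "may be used uninitialized" warnings for asm-operand style variables. */
#define DECLARE_SCRATCH_ARGS \
	unsigned long reg_a = reg_a, reg_b = reg_b

int main(void)
{
	DECLARE_SCRATCH_ARGS;

	/* In the real macros an asm block fills these; here we just assign. */
	reg_a = 1;
	reg_b = 2;
	printf("reg_a=%lu reg_b=%lu\n", reg_a, reg_b);
	return 0;
}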
@@ -509,6 +510,7 @@ int paravirt_disable_iospace(void);
 				"=c" (__ecx)
 #define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)
 
+/* void functions are still allowed [re]ax for scratch */
 #define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
 #define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS
 
@@ -583,8 +585,8 @@ int paravirt_disable_iospace(void);
 		      VEXTRA_CLOBBERS,					\
 		      pre, post, ##__VA_ARGS__)
 
-#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...)			\
-	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
+#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
+	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
 		      PVOP_VCALLEE_CLOBBERS, ,				\
 		      pre, post, ##__VA_ARGS__)
 
@@ -38,7 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
 #ifdef CONFIG_FRAME_POINTER
 		return *(unsigned long *)(regs->bp + sizeof(long));
 #else
-		unsigned long *sp = (unsigned long *)regs->sp;
+		unsigned long *sp =
+			(unsigned long *)kernel_stack_pointer(regs);
 		/*
 		 * Return address is either directly at stack pointer
 		 * or above a saved flags. Eflags has bits 22-31 zero,
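The profile_pc() change works because, on 32-bit x86, a pt_regs frame captured for a trap that came from kernel mode does not hold a valid saved stack pointer: the CPU pushes SS/ESP only on a privilege change, so dereferencing regs->sp for an in-kernel profiling sample can follow a bogus pointer and panic. kernel_stack_pointer() (asm/ptrace.h) hides that detail; as a hedged paraphrase of roughly what it did in this era (an assumption for illustration, not part of the patch):

/* Hedged paraphrase, not part of the patch: for in-kernel traps on x86_32 the
 * interrupted stack pointer is effectively the address of the sp slot itself,
 * since no SS/ESP were pushed; on x86_64 regs->sp is always valid. */
static inline unsigned long kernel_stack_pointer_sketch(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (unsigned long)&regs->sp;
#else
	return regs->sp;
#endif
}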
@@ -3,8 +3,16 @@
 #include <asm/trampoline.h>
 #include <asm/e820.h>
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
+#define __trampinit
+#define __trampinitdata
+#else
+#define __trampinit __cpuinit
+#define __trampinitdata __cpuinitdata
+#endif
+
 /* ready for x86_64 and x86 */
-unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE);
+unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);
 
 void __init reserve_trampoline_memory(void)
 {
@@ -26,7 +34,7 @@ void __init reserve_trampoline_memory(void)
  * bootstrap into the page concerned. The caller
  * has made sure it's suitably aligned.
  */
-unsigned long __cpuinit setup_trampoline(void)
+unsigned long __trampinit setup_trampoline(void)
 {
 	memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
 	return virt_to_phys(trampoline_base);
@@ -32,8 +32,12 @@
 #include <asm/segment.h>
 #include <asm/processor-flags.h>
 
+#ifdef CONFIG_ACPI_SLEEP
+	.section .rodata, "a", @progbits
+#else
 /* We can free up the trampoline after bootup if cpu hotplug is not supported. */
 __CPUINITRODATA
+#endif
 	.code16
 
 ENTRY(trampoline_data)
@@ -648,7 +648,7 @@ static inline int __init activate_vmi(void)
 
 	pv_info.paravirt_enabled = 1;
 	pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
-	pv_info.name = "vmi";
+	pv_info.name = "vmi [deprecated]";
 
 	pv_init_ops.patch = vmi_patch;
 