x86, asm: Flip RESTORE_ARGS arguments logic

... thus getting rid of the "else" part of the conditional statement in
the macro.

No functionality change.

Signed-off-by: Borislav Petkov <bp@alien8.de>
Link: http://lkml.kernel.org/r/1306873314-32523-4-git-send-email-bp@alien8.de
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
This commit is contained in:
Borislav Petkov 2011-05-31 22:21:53 +02:00 committed by H. Peter Anvin
parent cac0e0a78f
commit 838feb4754
3 changed files with 12 additions and 17 deletions

View File

@@ -173,7 +173,7 @@ sysexit_from_sys_call:
 	andl	$~0x200,EFLAGS-R11(%rsp)
 	movl	RIP-R11(%rsp),%edx		/* User %eip */
 	CFI_REGISTER rip,rdx
-	RESTORE_ARGS 1,24,1,1,1,1
+	RESTORE_ARGS 0,24,0,0,0,0
 	xorq	%r8,%r8
 	xorq	%r9,%r9
 	xorq	%r10,%r10
@@ -328,7 +328,7 @@ cstar_dispatch:
 	jnz sysretl_audit
 sysretl_from_sys_call:
 	andl $~TS_COMPAT,TI_status(%r10)
-	RESTORE_ARGS 1,-ARG_SKIP,1,1,1
+	RESTORE_ARGS 0,-ARG_SKIP,0,0,0
 	movl RIP-ARGOFFSET(%rsp),%ecx
 	CFI_REGISTER rip,rcx
 	movl EFLAGS-ARGOFFSET(%rsp),%r11d

View File

@@ -109,32 +109,27 @@ For 32-bit we have the following conventions - kernel is built with
 #define ARG_SKIP	(9*8)

-.macro RESTORE_ARGS skiprax=0, addskip=0, skiprcx=0, skipr11=0, \
-		    skipr8910=0, skiprdx=0
+.macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
+		    rstor_r8910=1, rstor_rdx=1
-	.if \skipr11
-	.else
+	.if \rstor_r11
 	movq_cfi_restore 0*8, r11
 	.endif
-	.if \skipr8910
-	.else
+	.if \rstor_r8910
 	movq_cfi_restore 1*8, r10
 	movq_cfi_restore 2*8, r9
 	movq_cfi_restore 3*8, r8
 	.endif
-	.if \skiprax
-	.else
+	.if \rstor_rax
 	movq_cfi_restore 4*8, rax
 	.endif
-	.if \skiprcx
-	.else
+	.if \rstor_rcx
 	movq_cfi_restore 5*8, rcx
 	.endif
-	.if \skiprdx
-	.else
+	.if \rstor_rdx
 	movq_cfi_restore 6*8, rdx
 	.endif
@@ -193,7 +188,7 @@ For 32-bit we have the following conventions - kernel is built with
 .macro RESTORE_ALL addskip=0
 	RESTORE_REST
-	RESTORE_ARGS 0, \addskip
+	RESTORE_ARGS 1, \addskip
 .endm
.macro icebp .macro icebp

View File

@@ -508,7 +508,7 @@ sysret_check:
 	TRACE_IRQS_ON
 	movq RIP-ARGOFFSET(%rsp),%rcx
 	CFI_REGISTER rip,rcx
-	RESTORE_ARGS 0,-ARG_SKIP,1
+	RESTORE_ARGS 1,-ARG_SKIP,0
 	/*CFI_REGISTER rflags,r11*/
 	movq	PER_CPU_VAR(old_rsp), %rsp
 	USERGS_SYSRET64
@@ -858,7 +858,7 @@ retint_restore_args:	/* return to kernel space */
 	 */
 	TRACE_IRQS_IRETQ
 restore_args:
-	RESTORE_ARGS 0,8,0
+	RESTORE_ARGS 1,8,1
irq_return:
 	INTERRUPT_RETURN