Commit 3701d863 authored by Ingo Molnar, committed by Thomas Gleixner

x86: fixup more paravirt fallout

Use a common irq_return entry point for all the iret places, which
need the paravirt INTERRUPT return wrapper.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 1ec7fd50
...@@ -409,7 +409,8 @@ restore_nocheck_notrace: ...@@ -409,7 +409,8 @@ restore_nocheck_notrace:
RESTORE_REGS RESTORE_REGS
addl $4, %esp # skip orig_eax/error_code addl $4, %esp # skip orig_eax/error_code
CFI_ADJUST_CFA_OFFSET -4 CFI_ADJUST_CFA_OFFSET -4
1: INTERRUPT_RETURN ENTRY(irq_return)
INTERRUPT_RETURN
.section .fixup,"ax" .section .fixup,"ax"
iret_exc: iret_exc:
pushl $0 # no error code pushl $0 # no error code
...@@ -418,7 +419,7 @@ iret_exc: ...@@ -418,7 +419,7 @@ iret_exc:
.previous .previous
.section __ex_table,"a" .section __ex_table,"a"
.align 4 .align 4
.long 1b,iret_exc .long irq_return,iret_exc
.previous .previous
CFI_RESTORE_STATE CFI_RESTORE_STATE
...@@ -865,20 +866,16 @@ nmi_espfix_stack: ...@@ -865,20 +866,16 @@ nmi_espfix_stack:
RESTORE_REGS RESTORE_REGS
lss 12+4(%esp), %esp # back to espfix stack lss 12+4(%esp), %esp # back to espfix stack
CFI_ADJUST_CFA_OFFSET -24 CFI_ADJUST_CFA_OFFSET -24
1: INTERRUPT_RETURN jmp irq_return
CFI_ENDPROC CFI_ENDPROC
.section __ex_table,"a"
.align 4
.long 1b,iret_exc
.previous
KPROBE_END(nmi) KPROBE_END(nmi)
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
ENTRY(native_iret) ENTRY(native_iret)
1: iret iret
.section __ex_table,"a" .section __ex_table,"a"
.align 4 .align 4
.long 1b,iret_exc .long native_iret, iret_exc
.previous .previous
END(native_iret) END(native_iret)
......
...@@ -581,16 +581,24 @@ retint_restore_args: /* return to kernel space */ ...@@ -581,16 +581,24 @@ retint_restore_args: /* return to kernel space */
*/ */
TRACE_IRQS_IRETQ TRACE_IRQS_IRETQ
restore_args: restore_args:
RESTORE_ARGS 0,8,0 RESTORE_ARGS 0,8,0
#ifdef CONFIG_PARAVIRT
ENTRY(irq_return)
INTERRUPT_RETURN INTERRUPT_RETURN
#endif
.section __ex_table, "a"
.quad irq_return, bad_iret
.previous
#ifdef CONFIG_PARAVIRT
ENTRY(native_iret) ENTRY(native_iret)
iretq iretq
.section __ex_table,"a" .section __ex_table,"a"
.quad native_iret, bad_iret .quad native_iret, bad_iret
.previous .previous
#endif
.section .fixup,"ax" .section .fixup,"ax"
bad_iret: bad_iret:
/* /*
...@@ -804,7 +812,7 @@ paranoid_swapgs\trace: ...@@ -804,7 +812,7 @@ paranoid_swapgs\trace:
SWAPGS_UNSAFE_STACK SWAPGS_UNSAFE_STACK
paranoid_restore\trace: paranoid_restore\trace:
RESTORE_ALL 8 RESTORE_ALL 8
INTERRUPT_RETURN jmp irq_return
paranoid_userspace\trace: paranoid_userspace\trace:
GET_THREAD_INFO(%rcx) GET_THREAD_INFO(%rcx)
movl threadinfo_flags(%rcx),%ebx movl threadinfo_flags(%rcx),%ebx
...@@ -919,7 +927,7 @@ error_kernelspace: ...@@ -919,7 +927,7 @@ error_kernelspace:
iret run with kernel gs again, so don't set the user space flag. iret run with kernel gs again, so don't set the user space flag.
B stepping K8s sometimes report an truncated RIP for IRET B stepping K8s sometimes report an truncated RIP for IRET
exceptions returning to compat mode. Check for these here too. */ exceptions returning to compat mode. Check for these here too. */
leaq native_iret(%rip),%rbp leaq irq_return(%rip),%rbp
cmpq %rbp,RIP(%rsp) cmpq %rbp,RIP(%rsp)
je error_swapgs je error_swapgs
movl %ebp,%ebp /* zero extend */ movl %ebp,%ebp /* zero extend */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment