Commit b77797fb authored by Jeremy Fitzhardinge, committed by Ingo Molnar

xen: fold xen_sysexit into xen_iret

xen_sysexit and xen_iret were doing essentially the same thing.  Rather
than having a separate implementation for xen_sysexit, we can just strip
the stack back to an iret frame and jump into xen_iret.  This removes
a lot of code and complexity - specifically, another critical region.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 2bd50036
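
Why the stripping works: on 32-bit x86 the tail of a full pt_regs frame is exactly the five words that iret pops (eip, cs, eflags, esp, ss), so pointing %esp at PT_EIP leaves a ready-made iret frame. A minimal compilable sketch of that layout, assuming the 2.6.25-era i386 pt_regs field order; the struct name and the program are illustrative stand-ins, not kernel code:

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for the i386 struct pt_regs of this era;
 * PT_EAX, PT_EFLAGS, PT_EIP in the asm are offsets into this. */
struct pt_regs_sketch {
	long bx, cx, dx, si, di, bp, ax;	/* PT_EAX points at ax */
	long ds, es, fs;
	long orig_ax;
	long ip, cs, flags, sp, ss;		/* the tail iret pops */
};

int main(void)
{
	/* "lea PT_EIP(%esp), %esp" leaves %esp at .ip; from there up,
	 * the frame is exactly what iret expects. */
	printf("words above PT_EIP: %zu\n",
	       (sizeof(struct pt_regs_sketch)
	        - offsetof(struct pt_regs_sketch, ip)) / sizeof(long));
	return 0;
}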
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1044,15 +1044,8 @@ ENTRY(xen_hypervisor_callback)
 	jmp  xen_iret_crit_fixup
 
-1:	cmpl $xen_sysexit_start_crit,%eax
-	jb   2f
-	cmpl $xen_sysexit_end_crit,%eax
-	jae  2f
-
-	jmp  xen_sysexit_crit_fixup
-
 ENTRY(xen_do_upcall)
-2:	mov %esp, %eax
+1:	mov %esp, %eax
 	call xen_evtchn_do_upcall
 	jmp  ret_from_intr
 	CFI_ENDPROC
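The dropped lines above were one of two EIP range checks in xen_hypervisor_callback: %eax holds the EIP at which the upcall interrupted the guest, and each cmpl/jb, cmpl/jae pair tests it against a half-open address range before routing to the matching fixup. In hedged C (the function name is illustrative):

#include <stdbool.h>

/* What the removed cmpl/jb ... cmpl/jae pair computes: does the
 * interrupted EIP fall in [start, end)?  jb skips the fixup when
 * eip < start; jae skips it when eip >= end. */
bool in_critical_range(unsigned long eip,
                       unsigned long start, unsigned long end)
{
	return eip >= start && eip < end;
}

With xen_sysexit gone, only the xen_iret range check remains, so the second test and its fixup target can be deleted outright.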
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -107,6 +107,20 @@ ENDPATCH(xen_restore_fl_direct)
 	ENDPROC(xen_restore_fl_direct)
 	RELOC(xen_restore_fl_direct, 2b+1)
 
+/*
+   We can't use sysexit directly, because we're not running in ring0.
+   But we can easily fake it up using iret.  Assuming xen_sysexit
+   is jumped to with a standard stack frame, we can just strip it
+   back to a standard iret frame and use iret.
+ */
+ENTRY(xen_sysexit)
+	movl PT_EAX(%esp), %eax		/* Shouldn't be necessary? */
+	orl $X86_EFLAGS_IF, PT_EFLAGS(%esp)
+	lea PT_EIP(%esp), %esp
+	jmp xen_iret
+ENDPROC(xen_sysexit)
+
 /*
  This is run where a normal iret would be run, with the same stack setup:
 	8: eflags
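
Two details in the new xen_sysexit carry the weight: the orl forces IF on in the saved eflags, since sysexit always resumes userspace with interrupts enabled, and the lea discards everything below PT_EIP so only the iret frame remains (the movl reloading %eax is kept defensively; its own comment questions whether it is needed). A hedged C rendering, with illustrative stand-in types for the PT_* offsets:

#define X86_EFLAGS_IF 0x00000200L	/* eflags interrupt-enable bit */

/* Stand-ins for the asm's frame layout (illustrative, not kernel code). */
struct frame {
	long scratch[11];		/* bx..ax, ds, es, fs, orig_ax */
	long ip, cs, flags, sp, ss;	/* the tail iret pops */
};
struct iret_frame { long ip, cs, flags, sp, ss; };

/* What the instruction sequence does: force IF on in the saved
 * eflags, then point the stack at the frame tail and take the
 * common xen_iret path. */
static struct iret_frame *strip_to_iret(struct frame *regs)
{
	regs->flags |= X86_EFLAGS_IF;		/* orl $X86_EFLAGS_IF, PT_EFLAGS(%esp) */
	return (struct iret_frame *)&regs->ip;	/* lea PT_EIP(%esp), %esp */
}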
@@ -276,62 +290,6 @@ ENTRY(xen_iret_crit_fixup)
 2:	jmp xen_do_upcall
 
-ENTRY(xen_sysexit)
-	/* Store vcpu_info pointer for easy access.  Do it this
-	   way to avoid having to reload %fs */
-#ifdef CONFIG_SMP
-	GET_THREAD_INFO(%eax)
-	movl TI_cpu(%eax),%eax
-	movl __per_cpu_offset(,%eax,4),%eax
-	mov per_cpu__xen_vcpu(%eax),%eax
-#else
-	movl per_cpu__xen_vcpu, %eax
-#endif
-
-	/* We can't actually use sysexit in a pv guest,
-	   so fake it up with iret */
-	pushl $__USER_DS		/* user stack segment */
-	pushl %ecx			/* user esp */
-	pushl PT_EFLAGS+2*4(%esp)	/* user eflags */
-	pushl $__USER_CS		/* user code segment */
-	pushl %edx			/* user eip */
-
-xen_sysexit_start_crit:
-	/* Unmask events... */
-	movb $0, XEN_vcpu_info_mask(%eax)
-	/* ...and test for pending.
-	   There's a preempt window here, but it doesn't
-	   matter because we're within the critical section. */
-	testb $0xff, XEN_vcpu_info_pending(%eax)
-
-	/* If there's something pending, mask events again so we
-	   can directly inject it back into the kernel. */
-	jnz 1f
-
-	movl PT_EAX+5*4(%esp),%eax
-2:	iret
-1:	movb $1, XEN_vcpu_info_mask(%eax)
-xen_sysexit_end_crit:
-
-	addl $5*4, %esp			/* remove iret frame */
-	/* no need to re-save regs, but need to restore kernel %fs */
-	mov $__KERNEL_PERCPU, %eax
-	mov %eax, %fs
-	jmp xen_do_upcall
-
-.section __ex_table,"a"
-	.align 4
-	.long 2b,iret_exc
-.previous
-
-	.globl xen_sysexit_start_crit, xen_sysexit_end_crit
-
-/*
-   sysexit fixup is easy, since the old frame is still sitting there
-   on the stack.  We just need to remove the new recursive
-   interrupt and return.
- */
-ENTRY(xen_sysexit_crit_fixup)
-	addl $PT_OLDESP+5*4, %esp	/* remove frame+iret */
-	jmp xen_do_upcall
-
 /*
    Force an event check by making a hypercall,
    but preserve regs before making the call.
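For reference, the critical section being deleted did the following: after hand-building a fake user iret frame, it unmasked event delivery, tested for events that arrived in that window, and re-masked if one had to be injected via xen_do_upcall before returning. A hedged C sketch of that unmask-and-check, with an illustrative stand-in for the vcpu_info fields the asm touched:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the two Xen vcpu_info fields the asm
 * used (XEN_vcpu_info_mask / XEN_vcpu_info_pending were their
 * offsets); the real layout lives in the Xen interface headers. */
struct vcpu_info_sketch {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
};

/* Returns true when the faked iret may proceed directly; false when
 * an event arrived in the window and must be injected first. */
bool sysexit_unmask_and_check(struct vcpu_info_sketch *v)
{
	v->evtchn_upcall_mask = 0;		/* movb $0, XEN_vcpu_info_mask(%eax) */
	if (v->evtchn_upcall_pending) {		/* testb $0xff, ..._pending(%eax) */
		v->evtchn_upcall_mask = 1;	/* re-mask; deliver via xen_do_upcall */
		return false;
	}
	return true;				/* 2: iret */
}

With xen_sysexit folded into xen_iret, this window, and the xen_sysexit_crit_fixup that repaired it when an upcall landed inside it, both disappear; xen_iret's single critical region now covers the sysexit path as well.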