Commit 9226d125 authored by Zachary Amsden, committed by Andi Kleen

[PATCH] i386: paravirt CPU hypercall batching mode

The VMI ROM has a mode where hypercalls can be queued and batched.  This turns
out to be a significant win during context switch, but must be done at a
specific point before side effects to CPU state are visible to subsequent
instructions.  This is similar to the MMU batching hooks already provided.
The same hooks could be used by the Xen backend to implement a context switch
multicall.

To explain a bit more about lazy modes in the paravirt patches: the idea is that only one of lazy CPU or lazy MMU mode can be active at any given time. Lazy MMU mode is similar to this lazy CPU mode and allows batching of multiple PTE updates (say, inside a remap loop; a usage sketch follows the diff below). To avoid keeping a state machine that tracks when to flush pending CPU or MMU updates, we just allow one mode or the other to be active. Although there is no real reason a more comprehensive scheme could not be implemented, there is also no demonstrated need for the extra complexity.
Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@suse.de>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
parent c119ecce
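
To make the batching mode concrete: below is a minimal, compilable userspace model of what a backend might do behind the new set_lazy_mode hook. Only the PARAVIRT_LAZY_* constants and the set_lazy_mode(int) signature come from this patch; struct hypercall, MAX_BATCH, issue(), queue_or_issue(), and backend_set_lazy_mode() are hypothetical stand-ins for illustration, not VMI ROM or Xen code.

#include <stdio.h>

/* Constants as defined in the include/asm-i386/paravirt.h hunk below. */
#define PARAVIRT_LAZY_NONE 0
#define PARAVIRT_LAZY_MMU  1
#define PARAVIRT_LAZY_CPU  2

#define MAX_BATCH 8	/* hypothetical queue depth */

struct hypercall { int op; unsigned long arg; };

static struct hypercall queue[MAX_BATCH];
static int queued;
static int lazy_mode = PARAVIRT_LAZY_NONE;

/* Stand-in for the real transition into the hypervisor. */
static void issue(struct hypercall *calls, int n)
{
	printf("hypercall batch of %d\n", n);
}

static void flush_queue(void)
{
	if (queued) {
		issue(queue, queued);
		queued = 0;
	}
}

/* What a backend might install as paravirt_ops.set_lazy_mode. */
static void backend_set_lazy_mode(int mode)
{
	/*
	 * Leaving lazy mode must flush the queue, because side effects
	 * to CPU state must be visible to subsequent instructions.
	 */
	if (mode == PARAVIRT_LAZY_NONE)
		flush_queue();
	lazy_mode = mode;
}

/* Individual paravirt operations queue while lazy, issue otherwise. */
static void queue_or_issue(int op, unsigned long arg)
{
	struct hypercall hc = { op, arg };

	if (lazy_mode == PARAVIRT_LAZY_NONE) {
		issue(&hc, 1);
		return;
	}
	queue[queued++] = hc;
	if (queued == MAX_BATCH)
		flush_queue();	/* queue full: flush early */
}

int main(void)
{
	/* Models context_switch() entering lazy CPU mode ... */
	backend_set_lazy_mode(PARAVIRT_LAZY_CPU);
	queue_or_issue(1, 0x1000);	/* e.g. a GDT update from load_TLS */
	queue_or_issue(2, 0);		/* e.g. a stack-switch operation */
	/* ... and __switch_to() leaving it: both ops go out as one batch. */
	backend_set_lazy_mode(PARAVIRT_LAZY_NONE);
	return 0;
}

The win the commit message describes comes from the deferred flush: the operations queued while in lazy CPU mode leave as a single batch rather than as separate transitions into the hypervisor.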
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -545,6 +545,7 @@ struct paravirt_ops paravirt_ops = {
 	.apic_write_atomic = native_apic_write_atomic,
 	.apic_read = native_apic_read,
 #endif
+	.set_lazy_mode = (void *)native_nop,
 
 	.flush_tlb_user = native_flush_tlb,
 	.flush_tlb_kernel = native_flush_tlb_global,
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -669,14 +669,6 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	 */
 	load_TLS(next, cpu);
 
-	/*
-	 * Restore %gs if needed (which is common)
-	 */
-	if (prev->gs | next->gs)
-		loadsegment(gs, next->gs);
-
-	write_pda(pcurrent, next_p);
-
 	/*
 	 * Now maybe handle debug registers and/or IO bitmaps
 	 */
@@ -686,6 +678,15 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 
 	disable_tsc(prev_p, next_p);
 
+	/*
+	 * Leave lazy mode, flushing any hypercalls made here.
+	 * This must be done before restoring TLS segments so
+	 * the GDT and LDT are properly updated, and must be
+	 * done before math_state_restore, so the TS bit is up
+	 * to date.
+	 */
+	arch_leave_lazy_cpu_mode();
+
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
@@ -693,6 +694,14 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	 */
 	if (next_p->fpu_counter > 5)
 		math_state_restore();
 
+	/*
+	 * Restore %gs if needed (which is common)
+	 */
+	if (prev->gs | next->gs)
+		loadsegment(gs, next->gs);
+
+	write_pda(pcurrent, next_p);
+
 	return prev_p;
 }
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -182,6 +182,19 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define arch_leave_lazy_mmu_mode()	do {} while (0)
 #endif
 
+/*
+ * A facility to provide batching of the reload of page tables with the
+ * actual context switch code for paravirtualized guests.  By convention,
+ * only one of the lazy modes (CPU, MMU) should be active at any given
+ * time, entry should never be nested, and entry and exits should always
+ * be paired.  This is for sanity of maintaining and reasoning about the
+ * kernel code.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode()	do {} while (0)
+#define arch_leave_lazy_cpu_mode()	do {} while (0)
+#endif
+
 /*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier.  Although no
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -146,6 +146,8 @@ struct paravirt_ops
 	void (fastcall *pmd_clear)(pmd_t *pmdp);
 #endif
 
+	void (fastcall *set_lazy_mode)(int mode);
+
 	/* These two are jmp to, not actually called. */
 	void (fastcall *irq_enable_sysexit)(void);
 	void (fastcall *iret)(void);
@@ -386,6 +388,19 @@ static inline void pmd_clear(pmd_t *pmdp)
 }
 #endif
 
+/* Lazy mode for batching updates / context switch */
+#define PARAVIRT_LAZY_NONE 0
+#define PARAVIRT_LAZY_MMU  1
+#define PARAVIRT_LAZY_CPU  2
+
+#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_CPU)
+#define arch_leave_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_MMU)
+#define arch_leave_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
+
 /* These all sit in the .parainstructions section to tell us what to patch. */
 struct paravirt_patch {
 	u8 *instr; /* original instructions */
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1853,6 +1853,13 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm = next->mm;
 	struct mm_struct *oldmm = prev->active_mm;
 
+	/*
+	 * For paravirt, this is coupled with an exit in switch_to to
+	 * combine the page table reload and the switch backend into
+	 * one hypercall.
+	 */
+	arch_enter_lazy_cpu_mode();
+
 	if (!mm) {
 		next->active_mm = oldmm;
 		atomic_inc(&oldmm->mm_count);
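
For the MMU half of the convention, the remap-loop case mentioned in the commit message can be sketched the same way. This fragment reuses the hypothetical helpers from the sketch before the diff (backend_set_lazy_mode, queue_or_issue) and is illustrative only; remap_range and the set_pte opcode are invented names. The two macros expand to set_lazy_mode calls exactly as in the include/asm-i386/paravirt.h hunk.

#define arch_enter_lazy_mmu_mode() backend_set_lazy_mode(PARAVIRT_LAZY_MMU)
#define arch_leave_lazy_mmu_mode() backend_set_lazy_mode(PARAVIRT_LAZY_NONE)

/* Queue one PTE update per page; a full queue flushes early, and the
 * final leave flushes whatever remains, so the loop costs one or a few
 * batched transitions instead of one per page. */
static void remap_range(unsigned long va, int npages)
{
	int i;

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < npages; i++)
		queue_or_issue(3 /* hypothetical set_pte op */, va + i * 4096);
	arch_leave_lazy_mmu_mode();
}

Note the pairing discipline from the include/asm-generic/pgtable.h comment: entry is never nested and every enter has a matching leave, so the backend only ever has to track one pending mode.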