Commit b1c78c0f authored by Andi Kleen, committed by Andi Kleen

[PATCH] Clean up and minor fixes to TLB flush

- Convert CR* accesses to dedicated inline functions and rewrite
the rest as C inlines
- Don't do a double flush for global flushes (pointed out by Zach Amsden).
  The double flush was a workaround for a bug in old CPUs without 64-bit support and is obsolete.
- Add a proper memory clobber to invlpg
- Remove an unused extern
Signed-off-by: Andi Kleen <ak@suse.de>
parent 3f14c746
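
For context, a rough sketch of the two mechanisms the commit message relies on (illustrative standalone C with GCC-style inline asm, assuming ring-0 execution; the names X86_CR4_PGE as defined here, flush_all_sketch and flush_one_sketch are made up for this note and are not part of the patch): clearing and restoring CR4.PGE already flushes the whole TLB, global entries included, so the old macro's extra CR3 reload is redundant; and invlpg wants a "memory" clobber so the compiler cannot reorder page-table updates across the flush.

/* Illustrative only: not kernel code. Requires CPL0 to touch CR4. */
#define X86_CR4_PGE (1UL << 7)		/* Page Global Enable bit in CR4 */

static inline unsigned long read_cr4_sketch(void)
{
	unsigned long cr4;
	asm volatile("mov %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4_sketch(unsigned long cr4)
{
	asm volatile("mov %0,%%cr4" :: "r" (cr4) : "memory");
}

/* Toggling CR4.PGE flushes all TLB entries, global ones included;
 * no additional CR3 reload is needed on top of it. */
static inline void flush_all_sketch(void)
{
	unsigned long cr4 = read_cr4_sketch();
	write_cr4_sketch(cr4 & ~X86_CR4_PGE);	/* clear PGE: flush everything */
	write_cr4_sketch(cr4);			/* restore previous PGE setting */
}

/* The "memory" clobber keeps the compiler from caching or reordering
 * memory accesses around the invlpg, so a stale translation for addr
 * cannot be used after the flush. */
static inline void flush_one_sketch(unsigned long addr)
{
	asm volatile("invlpg (%0)" :: "r" (addr) : "memory");
}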
@@ -25,8 +25,6 @@ extern int nonx_setup(char *str);
 extern void paging_init(void);
 extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
 
-extern unsigned long pgkern_mask;
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
...
@@ -4,44 +4,44 @@
 #include <linux/mm.h>
 #include <asm/processor.h>
 
-#define __flush_tlb()						\
-	do {							\
-		unsigned long tmpreg;				\
-								\
-		__asm__ __volatile__(				\
-			"movq %%cr3, %0;  # flush TLB \n"	\
-			"movq %0, %%cr3;              \n"	\
-			: "=r" (tmpreg)				\
-			:: "memory");				\
-	} while (0)
-
-/*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-#define __flush_tlb_global()					\
-	do {							\
-		unsigned long tmpreg, cr4, cr4_orig;		\
-								\
-		__asm__ __volatile__(				\
-			"movq %%cr4, %2;  # turn off PGE     \n"	\
-			"movq %2, %1;                        \n"	\
-			"andq %3, %1;                        \n"	\
-			"movq %1, %%cr4;                     \n"	\
-			"movq %%cr3, %0;  # flush TLB        \n"	\
-			"movq %0, %%cr3;                     \n"	\
-			"movq %2, %%cr4;  # turn PGE back on \n"	\
-			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig)	\
-			: "i" (~X86_CR4_PGE)			\
-			: "memory");				\
-	} while (0)
-
-extern unsigned long pgkern_mask;
-
-#define __flush_tlb_all() __flush_tlb_global()
+static inline unsigned long get_cr3(void)
+{
+	unsigned long cr3;
+	asm volatile("mov %%cr3,%0" : "=r" (cr3));
+	return cr3;
+}
+
+static inline void set_cr3(unsigned long cr3)
+{
+	asm volatile("mov %0,%%cr3" :: "r" (cr3) : "memory");
+}
+
+static inline void __flush_tlb(void)
+{
+	set_cr3(get_cr3());
+}
+
+static inline unsigned long get_cr4(void)
+{
+	unsigned long cr4;
+	asm volatile("mov %%cr4,%0" : "=r" (cr4));
+	return cr4;
+}
+
+static inline void set_cr4(unsigned long cr4)
+{
+	asm volatile("mov %0,%%cr4" :: "r" (cr4) : "memory");
+}
+
+static inline void __flush_tlb_all(void)
+{
+	unsigned long cr4 = get_cr4();
+	set_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
+	set_cr4(cr4);			/* write old PGE again and flush TLBs */
+}
 
 #define __flush_tlb_one(addr) \
-	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+	__asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
 
 /*
...