Commit 0b9c99b6 authored by Thomas Gleixner, committed by Ingo Molnar

x86: cleanup tlbflush.h variants

Bring the tlbflush.h variants into sync to prepare for merging them
and for paravirt support.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1075cf7a
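
The mechanical core of the sync is visible in the hunks below: the i386 statement macros become static inline functions with the bodies the 64-bit side already uses. As a standalone illustration of that conversion (not part of the patch; fake_cr3 and the CR3 accessors are stand-ins so it compiles outside the kernel):

/* Standalone sketch of the macro-to-inline conversion; fake_cr3,
 * read_cr3() and write_cr3() are stand-ins, not the kernel's. */
#include <stdint.h>
#include <stdio.h>

static uintptr_t fake_cr3 = 0x1000;      /* stand-in for the CR3 register */
static uintptr_t read_cr3(void)  { return fake_cr3; }
static void write_cr3(uintptr_t v) { fake_cr3 = v; }

/* Old style: a do-while statement macro, no prototype, no type checking. */
#define OLD_FLUSH_TLB()                 \
        do {                            \
                write_cr3(read_cr3()); \
        } while (0)

/* New style, as this patch introduces: same operation, a real function. */
static inline void __native_flush_tlb(void)
{
        write_cr3(read_cr3()); /* reloading CR3 flushes non-global TLB entries */
}

int main(void)
{
        OLD_FLUSH_TLB();
        __native_flush_tlb();
        printf("cr3 = %#lx\n", (unsigned long)fake_cr3);
        return 0;
}

The inline form gives prototypes and type checking on the address argument, which is what lets the 32-bit and 64-bit headers converge on one shape.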
@@ -55,7 +55,6 @@ union smp_flush_state {
 		cpumask_t flush_cpumask;
 		struct mm_struct *flush_mm;
 		unsigned long flush_va;
-#define FLUSH_ALL	-1ULL
 		spinlock_t tlbstate_lock;
 	};
 	char pad[SMP_CACHE_BYTES];
@@ -153,7 +152,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 	if (f->flush_mm == read_pda(active_mm)) {
 		if (read_pda(mmu_state) == TLBSTATE_OK) {
-			if (f->flush_va == FLUSH_ALL)
+			if (f->flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
 				__flush_tlb_one(f->flush_va);
@@ -166,11 +165,12 @@ out:
 	add_pda(irq_tlb_count, 1);
 }

-static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
-						unsigned long va)
+void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
+			     unsigned long va)
 {
 	int sender;
 	union smp_flush_state *f;
+	cpumask_t cpumask = *cpumaskp;

 	/* Caller has disabled preemption */
 	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
@@ -223,7 +223,7 @@ void flush_tlb_current_task(void)
 	local_flush_tlb();
 	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
@@ -242,7 +242,7 @@ void flush_tlb_mm (struct mm_struct * mm)
 		leave_mm(smp_processor_id());
 	}
 	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
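
The new native_flush_tlb_others() signature above takes the cpumask by pointer rather than by value; a paravirt backend can then hook the same call without copying a potentially large mask at every call site, and the native version makes its own local copy because it clears bits as CPUs respond. A self-contained sketch of the pattern, with demo_* stand-ins rather than kernel types:

/* Sketch of pass-by-pointer cpumask handoff; demo_* names are
 * illustrative stand-ins, not kernel definitions. */
#define DEMO_NR_CPUS 512
typedef struct {
        unsigned long bits[DEMO_NR_CPUS / (8 * sizeof(unsigned long))];
} demo_cpumask_t;

static void demo_send_ipi(const demo_cpumask_t *mask)
{
        (void)mask;     /* a real version would raise invalidate IPIs here */
}

void demo_flush_tlb_others(const demo_cpumask_t *cpumaskp, unsigned long va)
{
        /* Callee copies only because it must clear bits as CPUs respond. */
        demo_cpumask_t cpumask = *cpumaskp;

        demo_send_ipi(&cpumask);
        (void)va;
}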
...
@@ -800,7 +800,6 @@ static void smp_reschedule_interrupt(void)
 static struct mm_struct *flush_mm;
 static unsigned long flush_va;
 static DEFINE_SPINLOCK(tlbstate_lock);
-#define FLUSH_ALL	0xffffffff

 /*
  * We cannot call mmdrop() because we are in interrupt context,
@@ -834,7 +833,7 @@ static void smp_invalidate_interrupt(void)
 	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
 		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
-			if (flush_va == FLUSH_ALL)
+			if (flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
 				__flush_tlb_one(flush_va);
@@ -903,7 +902,7 @@ void flush_tlb_current_task(void)
 	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 	local_flush_tlb();
 	if (cpu_mask)
-		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
@@ -923,7 +922,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 		leave_mm(smp_processor_id());
 	}
 	if (cpu_mask)
-		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
...
@@ -57,7 +57,7 @@ static void __boot_ioremap(unsigned long phys_addr, unsigned long nrpages,
 	pte = boot_vaddr_to_pte(virtual_source);
 	for (i=0; i < nrpages; i++, phys_addr += PAGE_SIZE, pte++) {
 		set_pte(pte, pfn_pte(phys_addr>>PAGE_SHIFT, PAGE_KERNEL));
-		__flush_tlb_one(&vaddr[i*PAGE_SIZE]);
+		__flush_tlb_one((unsigned long) &vaddr[i*PAGE_SIZE]);
 	}
 }
...
@@ -163,6 +163,12 @@
 #define cpu_has_clflush		boot_cpu_has(X86_FEATURE_CLFLSH)
 #define cpu_has_bts		boot_cpu_has(X86_FEATURE_BTS)

+#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
+# define cpu_has_invlpg		1
+#else
+# define cpu_has_invlpg		(boot_cpu_data.x86 > 3)
+#endif
+
 #ifdef CONFIG_X86_64

 #undef  cpu_has_vme
@@ -183,6 +189,9 @@
 #undef  cpu_has_centaur_mcr
 #define cpu_has_centaur_mcr	0

+#undef  cpu_has_pge
+#define cpu_has_pge		1
+
 #endif /* CONFIG_X86_64 */

 #endif /* _ASM_X86_CPUFEATURE_H */
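
The cpufeature.h additions keep the unified inline helpers cheap: on 64-bit builds cpu_has_invlpg and cpu_has_pge evaluate to the constant 1, so the fallback branches in __flush_tlb_one() and __flush_tlb_all() compile away. A sketch of that constant folding (DEMO_64BIT and the demo_* names are stand-ins, not kernel definitions):

/* Sketch only: DEMO_64BIT stands in for CONFIG_X86_64, demo_* for the
 * kernel helpers, so this compiles on its own. */
static int demo_cpu_family = 6;         /* stand-in for boot_cpu_data.x86 */

#ifdef DEMO_64BIT
# define demo_cpu_has_invlpg 1          /* constant: else-branch folds away */
#else
# define demo_cpu_has_invlpg (demo_cpu_family > 3)  /* runtime test on i386 */
#endif

static void demo_flush_single(unsigned long addr) { (void)addr; /* INVLPG */ }
static void demo_flush_nonglobal(void) { /* full CR3 reload */ }

static inline void demo_flush_tlb_one(unsigned long addr)
{
        if (demo_cpu_has_invlpg)
                demo_flush_single(addr);        /* flush one page */
        else
                demo_flush_nonglobal();         /* pre-INVLPG (i386) fallback */
}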
@@ -1,8 +1,11 @@
-#ifndef _I386_TLBFLUSH_H
-#define _I386_TLBFLUSH_H
+#ifndef _X86_TLBFLUSH_H
+#define _X86_TLBFLUSH_H

 #include <linux/mm.h>
+#include <linux/sched.h>
+
 #include <asm/processor.h>
+#include <asm/system.h>

 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
@@ -12,62 +15,41 @@
 #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
 #endif

-#define __native_flush_tlb()						\
-	do {								\
-		unsigned int tmpreg;					\
-									\
-		__asm__ __volatile__(					\
-			"movl %%cr3, %0;              \n"		\
-			"movl %0, %%cr3;  # flush TLB \n"		\
-			: "=r" (tmpreg)					\
-			:: "memory");					\
-	} while (0)
-
-/*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-#define __native_flush_tlb_global()					\
-	do {								\
-		unsigned int tmpreg, cr4, cr4_orig;			\
-									\
-		__asm__ __volatile__(					\
-			"movl %%cr4, %2;  # turn off PGE     \n"	\
-			"movl %2, %1;                        \n"	\
-			"andl %3, %1;                        \n"	\
-			"movl %1, %%cr4;                     \n"	\
-			"movl %%cr3, %0;                     \n"	\
-			"movl %0, %%cr3;  # flush TLB        \n"	\
-			"movl %2, %%cr4;  # turn PGE back on \n"	\
-			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig)	\
-			: "i" (~X86_CR4_PGE)				\
-			: "memory");					\
-	} while (0)
-
-#define __native_flush_tlb_single(addr)					\
-	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
-
-# define __flush_tlb_all()						\
-	do {								\
-		if (cpu_has_pge)					\
-			__flush_tlb_global();				\
-		else							\
-			__flush_tlb();					\
-	} while (0)
-
-#define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
-
-#ifdef CONFIG_X86_INVLPG
-# define __flush_tlb_one(addr) __flush_tlb_single(addr)
-#else
-# define __flush_tlb_one(addr)						\
-	do {								\
-		if (cpu_has_invlpg)					\
-			__flush_tlb_single(addr);			\
-		else							\
-			__flush_tlb();					\
-	} while (0)
-#endif
+static inline void __native_flush_tlb(void)
+{
+	write_cr3(read_cr3());
+}
+
+static inline void __native_flush_tlb_global(void)
+{
+	unsigned long cr4 = read_cr4();
+
+	/* clear PGE */
+	write_cr4(cr4 & ~X86_CR4_PGE);
+	/* write old PGE again and flush TLBs */
+	write_cr4(cr4);
+}
+
+static inline void __native_flush_tlb_single(unsigned long addr)
+{
+	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory");
+}
+
+static inline void __flush_tlb_all(void)
+{
+	if (cpu_has_pge)
+		__flush_tlb_global();
+	else
+		__flush_tlb();
+}
+
+static inline void __flush_tlb_one(unsigned long addr)
+{
+	if (cpu_has_invlpg)
+		__flush_tlb_single(addr);
+	else
+		__flush_tlb();
+}

 /*
  * TLB flushing:
@@ -86,11 +68,8 @@
 #define TLB_FLUSH_ALL	0xffffffff

 #ifndef CONFIG_SMP
-
-#include <linux/sched.h>
-
 #define flush_tlb() __flush_tlb()
 #define flush_tlb_all() __flush_tlb_all()
 #define local_flush_tlb() __flush_tlb()
@@ -102,21 +81,22 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 }

 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long addr)
 {
 	if (vma->vm_mm == current->active_mm)
 		__flush_tlb_one(addr);
 }

 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
 	if (vma->vm_mm == current->active_mm)
 		__flush_tlb();
 }

 static inline void native_flush_tlb_others(const cpumask_t *cpumask,
-			struct mm_struct *mm, unsigned long va)
+					   struct mm_struct *mm,
+					   unsigned long va)
 {
 }
@@ -124,8 +104,7 @@ static inline void native_flush_tlb_others(const cpumask_t *cpumask,
 #include <asm/smp.h>

-#define local_flush_tlb() \
-	__flush_tlb()
+#define local_flush_tlb() __flush_tlb()

 extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
@@ -134,7 +113,8 @@ extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 #define flush_tlb() flush_tlb_current_task()

-static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
 {
 	flush_tlb_mm(vma->vm_mm);
 }
@@ -152,17 +132,17 @@ struct tlb_state
 	char __cacheline_padding[L1_CACHE_BYTES-8];
 };
 DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+
 #endif /* SMP */

 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va)		\
-	native_flush_tlb_others(&mask, mm, va)
+#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
 #endif

 static inline void flush_tlb_kernel_range(unsigned long start,
 					  unsigned long end)
 {
 	flush_tlb_all();
 }

-#endif /* _I386_TLBFLUSH_H */
+#endif /* _X86_TLBFLUSH_H */
@@ -1,25 +1,54 @@
-#ifndef _X8664_TLBFLUSH_H
-#define _X8664_TLBFLUSH_H
+#ifndef _X86_TLBFLUSH_H
+#define _X86_TLBFLUSH_H

 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <asm/processor.h>
 #include <asm/system.h>

-static inline void __flush_tlb(void)
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define __flush_tlb() __native_flush_tlb()
+#define __flush_tlb_global() __native_flush_tlb_global()
+#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
+#endif
+
+static inline void __native_flush_tlb(void)
 {
 	write_cr3(read_cr3());
 }

-static inline void __flush_tlb_all(void)
+static inline void __native_flush_tlb_global(void)
 {
 	unsigned long cr4 = read_cr4();
-	write_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
-	write_cr4(cr4);			/* write old PGE again and flush TLBs */
+
+	/* clear PGE */
+	write_cr4(cr4 & ~X86_CR4_PGE);
+	/* write old PGE again and flush TLBs */
+	write_cr4(cr4);
 }

-#define __flush_tlb_one(addr) \
-	__asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
+static inline void __native_flush_tlb_single(unsigned long addr)
+{
+	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory");
+}
+
+static inline void __flush_tlb_all(void)
+{
+	if (cpu_has_pge)
+		__flush_tlb_global();
+	else
+		__flush_tlb();
+}
+
+static inline void __flush_tlb_one(unsigned long addr)
+{
+	if (cpu_has_invlpg)
+		__flush_tlb_single(addr);
+	else
+		__flush_tlb();
+}

 /*
  * TLB flushing:
@@ -37,6 +66,8 @@ static inline void __flush_tlb_all(void)
  *    range a few INVLPGs in a row are a win.
  */

+#define TLB_FLUSH_ALL	-1ULL
+
 #ifndef CONFIG_SMP

 #define flush_tlb() __flush_tlb()
@@ -50,25 +81,30 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 }

 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long addr)
 {
 	if (vma->vm_mm == current->active_mm)
 		__flush_tlb_one(addr);
 }

 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
 	if (vma->vm_mm == current->active_mm)
 		__flush_tlb();
 }

-#else
+static inline void native_flush_tlb_others(const cpumask_t *cpumask,
+					   struct mm_struct *mm,
+					   unsigned long va)
+{
+}
+
+#else /* SMP */

 #include <asm/smp.h>

-#define local_flush_tlb() \
-	__flush_tlb()
+#define local_flush_tlb() __flush_tlb()

 extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
@@ -77,24 +113,28 @@ extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 #define flush_tlb() flush_tlb_current_task()

-static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
 {
 	flush_tlb_mm(vma->vm_mm);
 }

+void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
+			     unsigned long va);
+
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2

-/* Roughly an IPI every 20MB with 4k pages for freeing page table
-   ranges. Cost is about 42k of memory for each CPU. */
-#define ARCH_FREE_PTE_NR 5350
+#endif /* SMP */

+#ifndef CONFIG_PARAVIRT
+#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
 #endif

 static inline void flush_tlb_kernel_range(unsigned long start,
 					  unsigned long end)
 {
 	flush_tlb_all();
 }

-#endif /* _X8664_TLBFLUSH_H */
+#endif /* _X86_TLBFLUSH_H */
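
Taken together, the renames leave both header variants with the same shape: native implementations under __native_*/native_* names, and the generic names bound to them only when CONFIG_PARAVIRT is off, so a hypervisor backend can later supply its own bindings. A sketch of the indirection this prepares (demo_* names are illustrative, not the kernel's actual paravirt interface):

/* Sketch of the paravirt hook point this cleanup prepares; all demo_*
 * names are illustrative, not the kernel's paravirt structures. */
static void demo_native_flush(void)
{
        /* native case: write_cr3(read_cr3()) */
}

#ifdef DEMO_PARAVIRT
struct demo_pv_mmu_ops {
        void (*flush_tlb)(void);
};
static struct demo_pv_mmu_ops demo_pv = {
        .flush_tlb = demo_native_flush, /* a hypervisor backend can repoint this */
};
# define demo_flush_tlb() demo_pv.flush_tlb()
#else
# define demo_flush_tlb() demo_native_flush()   /* default, as in the patch */
#endif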