Commit 3c868823 authored by Andi Kleen, committed by Ingo Molnar

x86: c_p_a() fix: reorder TLB / cache flushes to follow Intel recommendation

Intel recommends flushing the TLBs first and then the caches on
caching attribute changes. c_p_a() previously did it the other way
round; reorder it to match.

The procedure is still not fully compliant with the Intel documentation,
which recommends an all-CPU synchronization step between the TLB flushes
and the cache flushes.

However, on all newer Intel CPUs this is moot anyway, because they
support Self-Snoop and can skip the cache flush step.

[ mingo@elte.hu: decoupled from clflush and ported it to x86.git ]
Signed-off-by: Andi Kleen <ak@suse.de>
Acked-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 6ba9b7d8
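
The "all-CPU synchronization step" mentioned above means: every CPU first
flushes its TLB, all CPUs then wait for one another, and only afterwards are
the caches flushed (a step that Self-Snoop capable CPUs can skip). Below is a
loose, illustrative C sketch of that ordering only -- the helper functions are
stand-ins invented for this note, not kernel APIs:

/* Stand-in helpers -- illustration only, not kernel functions. */
static void flush_tlb_this_cpu(void)  { /* e.g. reload CR3 / INVLPG     */ }
static void all_cpus_rendezvous(void) { /* e.g. an IPI-driven barrier   */ }
static int  cpu_has_self_snoop(void)  { return 0; /* CPUID feature flag */ }
static void flush_caches(void)        { /* e.g. WBINVD on this CPU      */ }

/*
 * Rough ordering for a caching attribute change, run on each CPU after
 * the page-table entry has already been updated:
 */
static void cpa_flush_sequence(void)
{
	flush_tlb_this_cpu();		/* 1. flush TLBs first              */
	all_cpus_rendezvous();		/* 2. all-CPU synchronization step  */

	if (!cpu_has_self_snoop())
		flush_caches();		/* 3. then flush the caches         */
}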
@@ -87,6 +87,12 @@ static void flush_kernel_map(void *arg)
 	struct list_head *lh = (struct list_head *)arg;
 	struct page *p;
 
+	/*
+	 * Flush all to work around Errata in early athlons regarding
+	 * large page flushing.
+	 */
+	__flush_tlb_all();
+
 	/* High level code is not ready for clflush yet */
 	if (0 && cpu_has_clflush) {
 		list_for_each_entry(p, lh, lru)
@@ -95,12 +101,6 @@ static void flush_kernel_map(void *arg)
 		if (boot_cpu_data.x86_model >= 4)
 			wbinvd();
 	}
-
-	/*
-	 * Flush all to work around Errata in early athlons regarding
-	 * large page flushing.
-	 */
-	__flush_tlb_all();
 }
 
 static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
...
@@ -82,6 +82,8 @@ static void flush_kernel_map(void *arg)
 	struct list_head *l = (struct list_head *)arg;
 	struct page *pg;
 
+	__flush_tlb_all();
+
 	/* When clflush is available always use it because it is
 	   much cheaper than WBINVD. */
 	/* clflush is still broken. Disable for now. */
@@ -94,7 +96,6 @@ static void flush_kernel_map(void *arg)
 			clflush_cache_range(addr, PAGE_SIZE);
 		}
 	}
-	__flush_tlb_all();
 }
 
 static inline void flush_map(struct list_head *l)
...
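
The second hunk keeps the comment that CLFLUSH is preferred over WBINVD when
available: WBINVD writes back and invalidates the entire cache, while CLFLUSH
evicts only the cache lines that actually cover the changed pages. The
following is a minimal user-space sketch of what a clflush_cache_range()-style
loop does, not the kernel's implementation; it assumes a 64-byte cache line,
whereas the kernel reads the real line size from CPUID:

#include <stdint.h>

#define CACHE_LINE_SIZE 64	/* assumption for this sketch */

/* Flush every cache line covering [addr, addr + size). */
static void clflush_range_sketch(void *addr, unsigned long size)
{
	uintptr_t p   = (uintptr_t)addr & ~((uintptr_t)CACHE_LINE_SIZE - 1);
	uintptr_t end = (uintptr_t)addr + size;

	for (; p < end; p += CACHE_LINE_SIZE)
		asm volatile("clflush %0" : "+m" (*(volatile char *)p));
}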