Commit d7c8f21a authored by Thomas Gleixner, committed by Ingo Molnar

x86: cpa: move flush to cpa

The set_memory_* and set_pages_* family of APIs currently requires the
callers to do a global TLB flush after the function call; forgetting this is
a very nasty deathtrap. This patch moves the global TLB flush into each of
the set_memory_*/set_pages_* functions themselves and removes it from the
callers, so the API can no longer be misused.
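
For illustration, a hypothetical caller (not taken from this patch) changes
like this:

    /* before: the flush was the caller's job and easy to forget */
    err = set_memory_uc(addr, numpages);
    global_flush_tlb();

    /* after: set_memory_uc() issues the global TLB flush internally */
    err = set_memory_uc(addr, numpages);
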
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent d1028a15
@@ -572,7 +572,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
                 panic("Cannot allocate GATT table");
         if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
                 panic("Could not set GART PTEs to uncacheable pages");
-        global_flush_tlb();
 
         memset(gatt, 0, gatt_size);
         agp_gatt_table = gatt;
...
@@ -752,15 +752,11 @@ void mark_rodata_ro(void)
                 printk("Write protecting the kernel text: %luk\n", size >> 10);
 
 #ifdef CONFIG_CPA_DEBUG
-                global_flush_tlb();
-
                 printk("Testing CPA: Reverting %lx-%lx\n", start, start+size);
                 set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
-                global_flush_tlb();
 
                 printk("Testing CPA: write protecting again\n");
                 set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
-                global_flush_tlb();
 #endif
         }
 #endif
@@ -770,22 +766,12 @@ void mark_rodata_ro(void)
         printk("Write protecting the kernel read-only data: %luk\n",
                 size >> 10);
 
-        /*
-         * set_pages_*() requires a global_flush_tlb() call after it.
-         * We do this after the printk so that if something went wrong in the
-         * change, the printk gets out at least to give a better debug hint
-         * of who is the culprit.
-         */
-        global_flush_tlb();
-
 #ifdef CONFIG_CPA_DEBUG
         printk("Testing CPA: undo %lx-%lx\n", start, start + size);
         set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
-        global_flush_tlb();
 
         printk("Testing CPA: write protecting again\n");
         set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
-        global_flush_tlb();
 #endif
 }
 #endif
...
@@ -610,22 +610,12 @@ void mark_rodata_ro(void)
         printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                (end - start) >> 10);
 
-        /*
-         * set_memory_*() requires a global_flush_tlb() call after it.
-         * We do this after the printk so that if something went wrong in the
-         * change, the printk gets out at least to give a better debug hint
-         * of who is the culprit.
-         */
-        global_flush_tlb();
-
 #ifdef CONFIG_CPA_DEBUG
         printk("Testing CPA: undo %lx-%lx\n", start, end);
         set_memory_rw(start, (end-start) >> PAGE_SHIFT);
-        global_flush_tlb();
 
         printk("Testing CPA: again\n");
         set_memory_ro(start, (end-start) >> PAGE_SHIFT);
-        global_flush_tlb();
 #endif
 }
 #endif
...
@@ -96,8 +96,6 @@ static int ioremap_change_attr(unsigned long paddr, unsigned long size,
                 err = set_memory_wb(vaddr, nrpages);
                 break;
         }
-        if (!err)
-                global_flush_tlb();
 
         return err;
 }
...
@@ -22,6 +22,36 @@ within(unsigned long addr, unsigned long start, unsigned long end)
         return addr >= start && addr < end;
 }
 
+/*
+ * Flushing functions
+ */
+void clflush_cache_range(void *addr, int size)
+{
+        int i;
+
+        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
+                clflush(addr+i);
+}
+
+static void flush_kernel_map(void *arg)
+{
+        /*
+         * Flush all to work around Errata in early athlons regarding
+         * large page flushing.
+         */
+        __flush_tlb_all();
+
+        if (boot_cpu_data.x86_model >= 4)
+                wbinvd();
+}
+
+static void global_flush_tlb(void)
+{
+        BUG_ON(irqs_disabled());
+
+        on_each_cpu(flush_kernel_map, NULL, 1, 1);
+}
+
 /*
  * Certain areas of memory on x86 require very specific protection flags,
  * for example the BIOS area or kernel text. Callers don't always get this
@@ -328,149 +358,124 @@ static int change_page_attr_clear(unsigned long addr, int numpages,
 
 int set_memory_uc(unsigned long addr, int numpages)
 {
-        pgprot_t uncached;
-
-        pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
-        return change_page_attr_set(addr, numpages, uncached);
+        int err;
+
+        err = change_page_attr_set(addr, numpages,
+                                   __pgprot(_PAGE_PCD | _PAGE_PWT));
+        global_flush_tlb();
+        return err;
 }
 EXPORT_SYMBOL(set_memory_uc);
 
 int set_memory_wb(unsigned long addr, int numpages)
 {
-        pgprot_t uncached;
-
-        pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
-        return change_page_attr_clear(addr, numpages, uncached);
+        int err;
+
+        err = change_page_attr_clear(addr, numpages,
+                                     __pgprot(_PAGE_PCD | _PAGE_PWT));
+        global_flush_tlb();
+        return err;
 }
 EXPORT_SYMBOL(set_memory_wb);
 
 int set_memory_x(unsigned long addr, int numpages)
 {
-        pgprot_t nx;
-
-        pgprot_val(nx) = _PAGE_NX;
-        return change_page_attr_clear(addr, numpages, nx);
+        int err;
+
+        err = change_page_attr_clear(addr, numpages,
+                                     __pgprot(_PAGE_NX));
+        global_flush_tlb();
+        return err;
 }
 EXPORT_SYMBOL(set_memory_x);
 
 int set_memory_nx(unsigned long addr, int numpages)
 {
-        pgprot_t nx;
-
-        pgprot_val(nx) = _PAGE_NX;
-        return change_page_attr_set(addr, numpages, nx);
+        int err;
+
+        err = change_page_attr_set(addr, numpages,
+                                   __pgprot(_PAGE_NX));
+        global_flush_tlb();
+        return err;
 }
 EXPORT_SYMBOL(set_memory_nx);
 
 int set_memory_ro(unsigned long addr, int numpages)
 {
-        pgprot_t rw;
-
-        pgprot_val(rw) = _PAGE_RW;
-        return change_page_attr_clear(addr, numpages, rw);
+        int err;
+
+        err = change_page_attr_clear(addr, numpages,
+                                     __pgprot(_PAGE_RW));
+        global_flush_tlb();
+        return err;
 }
 
 int set_memory_rw(unsigned long addr, int numpages)
 {
-        pgprot_t rw;
-
-        pgprot_val(rw) = _PAGE_RW;
-        return change_page_attr_set(addr, numpages, rw);
+        int err;
+
+        err = change_page_attr_set(addr, numpages,
+                                   __pgprot(_PAGE_RW));
+        global_flush_tlb();
+        return err;
 }
 
 int set_memory_np(unsigned long addr, int numpages)
 {
-        pgprot_t present;
-
-        pgprot_val(present) = _PAGE_PRESENT;
-        return change_page_attr_clear(addr, numpages, present);
+        int err;
+
+        err = change_page_attr_clear(addr, numpages,
+                                     __pgprot(_PAGE_PRESENT));
+        global_flush_tlb();
+        return err;
 }
 
 int set_pages_uc(struct page *page, int numpages)
 {
         unsigned long addr = (unsigned long)page_address(page);
-        pgprot_t uncached;
-
-        pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
-        return change_page_attr_set(addr, numpages, uncached);
+
+        return set_memory_uc(addr, numpages);
 }
 EXPORT_SYMBOL(set_pages_uc);
 
 int set_pages_wb(struct page *page, int numpages)
 {
         unsigned long addr = (unsigned long)page_address(page);
-        pgprot_t uncached;
-
-        pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
-        return change_page_attr_clear(addr, numpages, uncached);
+
+        return set_memory_wb(addr, numpages);
 }
 EXPORT_SYMBOL(set_pages_wb);
 
 int set_pages_x(struct page *page, int numpages)
 {
         unsigned long addr = (unsigned long)page_address(page);
-        pgprot_t nx;
-
-        pgprot_val(nx) = _PAGE_NX;
-        return change_page_attr_clear(addr, numpages, nx);
+
+        return set_memory_x(addr, numpages);
 }
 EXPORT_SYMBOL(set_pages_x);
 
 int set_pages_nx(struct page *page, int numpages)
 {
         unsigned long addr = (unsigned long)page_address(page);
-        pgprot_t nx;
-
-        pgprot_val(nx) = _PAGE_NX;
-        return change_page_attr_set(addr, numpages, nx);
+
+        return set_memory_nx(addr, numpages);
 }
 EXPORT_SYMBOL(set_pages_nx);
 
 int set_pages_ro(struct page *page, int numpages)
 {
         unsigned long addr = (unsigned long)page_address(page);
-        pgprot_t rw;
-
-        pgprot_val(rw) = _PAGE_RW;
-        return change_page_attr_clear(addr, numpages, rw);
+
+        return set_memory_ro(addr, numpages);
 }
 
 int set_pages_rw(struct page *page, int numpages)
 {
         unsigned long addr = (unsigned long)page_address(page);
-        pgprot_t rw;
-
-        pgprot_val(rw) = _PAGE_RW;
-        return change_page_attr_set(addr, numpages, rw);
+
+        return set_memory_rw(addr, numpages);
 }
 
-void clflush_cache_range(void *addr, int size)
-{
-        int i;
-
-        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
-                clflush(addr+i);
-}
-
-static void flush_kernel_map(void *arg)
-{
-        /*
-         * Flush all to work around Errata in early athlons regarding
-         * large page flushing.
-         */
-        __flush_tlb_all();
-
-        if (boot_cpu_data.x86_model >= 4)
-                wbinvd();
-}
-
-void global_flush_tlb(void)
-{
-        BUG_ON(irqs_disabled());
-
-        on_each_cpu(flush_kernel_map, NULL, 1, 1);
-}
-EXPORT_SYMBOL(global_flush_tlb);
-
 #ifdef CONFIG_DEBUG_PAGEALLOC
...
@@ -145,7 +145,6 @@ static void *m1541_alloc_page(struct agp_bridge_data *bridge)
         void *addr = agp_generic_alloc_page(agp_bridge);
         u32 temp;
 
-        global_flush_tlb();
         if (!addr)
                 return NULL;
@@ -162,7 +161,6 @@ static void ali_destroy_page(void * addr, int flags)
         if (flags & AGP_PAGE_DESTROY_UNMAP) {
                 global_cache_flush();   /* is this really needed?  --hch */
                 agp_generic_destroy_page(addr, flags);
-                global_flush_tlb();
         } else
                 agp_generic_destroy_page(addr, flags);
 }
...
@@ -527,7 +527,6 @@ static void *i460_alloc_page (struct agp_bridge_data *bridge)
         if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
                 page = agp_generic_alloc_page(agp_bridge);
-                global_flush_tlb();
         } else
                 /* Returning NULL would cause problems */
                 /* AK: really dubious code. */
@@ -539,7 +538,6 @@ static void i460_destroy_page (void *page, int flags)
 {
         if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
                 agp_generic_destroy_page(page, flags);
-                global_flush_tlb();
         }
 }
...
@@ -212,11 +212,9 @@ static void *i8xx_alloc_pages(void)
         if (set_pages_uc(page, 4) < 0) {
                 set_pages_wb(page, 4);
-                global_flush_tlb();
                 __free_pages(page, 2);
                 return NULL;
         }
-        global_flush_tlb();
         get_page(page);
         atomic_inc(&agp_bridge->current_memory_agp);
         return page_address(page);
@@ -231,7 +229,6 @@ static void i8xx_destroy_pages(void *addr)
         page = virt_to_page(addr);
         set_pages_wb(page, 4);
-        global_flush_tlb();
         put_page(page);
         __free_pages(page, 2);
         atomic_dec(&agp_bridge->current_memory_agp);
@@ -341,7 +338,6 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
         switch (pg_count) {
         case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
-                global_flush_tlb();
                 break;
         case 4:
                 /* kludge to get 4 physical pages for ARGB cursor */
@@ -404,7 +400,6 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
                 else {
                         agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
                                                              AGP_PAGE_DESTROY_UNMAP);
-                        global_flush_tlb();
                         agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
                                                              AGP_PAGE_DESTROY_FREE);
                 }
...
@@ -124,13 +124,8 @@ static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
         /*
          * Change caching policy of the linear kernel map to avoid
          * mapping type conflicts with user-space mappings.
-         * The first global_flush_tlb() is really only there to do a global
-         * wbinvd().
          */
-        global_flush_tlb();
         set_pages_uc(virt_to_page(va->logical), va->size >> PAGE_SHIFT);
-        global_flush_tlb();
 
         printk(KERN_DEBUG MODULE_NAME
                ": Allocated %ld bytes vram area at 0x%08lx\n",
@@ -156,7 +151,6 @@ static void vmlfb_free_vram_area(struct vram_area *va)
                 set_pages_wb(virt_to_page(va->logical),
                              va->size >> PAGE_SHIFT);
-                global_flush_tlb();
 
                 /*
                  * Decrease the usage count on the pages we've used
...
@@ -12,13 +12,9 @@
  * page. This avoids data corruption on some CPUs.
  */
 
-/*
- * Caller's responsibility to call global_flush_tlb() for performance
- * reasons
- */
 #define map_page_into_agp(page) set_pages_uc(page, 1)
 #define unmap_page_from_agp(page) set_pages_wb(page, 1)
-#define flush_agp_mappings() global_flush_tlb()
+#define flush_agp_mappings() do { } while (0)
 
 /*
  * Could use CLFLUSH here if the cpu supports it. But then it would
...
@@ -24,7 +24,6 @@
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
         memcpy(dst, src, len)
 
-void global_flush_tlb(void);
 int __deprecated_for_modules change_page_attr(struct page *page, int numpages,
                                               pgprot_t prot);
...
@@ -715,7 +715,6 @@ static void fill_nocache(void *buf, int size, int nocache)
                 set_pages_uc(virt_to_page(buf), size);
         else
                 set_pages_wb(virt_to_page(buf), size);
-        global_flush_tlb();
 }
 #else
 #define fill_nocache(buf, size, nocache) do { ; } while (0)