Commit 86f03989 authored by Ingo Molnar's avatar Ingo Molnar

x86: cpa: fix the self-test

Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
Signed-off-by: default avatarThomas Gleixner <tglx@linutronix.de>
parent aba8391f
...@@ -40,7 +40,7 @@ comment "Page alloc debug is incompatible with Software Suspend on i386" ...@@ -40,7 +40,7 @@ comment "Page alloc debug is incompatible with Software Suspend on i386"
config DEBUG_PAGEALLOC config DEBUG_PAGEALLOC
bool "Debug page memory allocations" bool "Debug page memory allocations"
depends on DEBUG_KERNEL depends on DEBUG_KERNEL && X86_32
help help
Unmap pages from the kernel linear mapping after free_pages(). Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types This results in a large slowdown, but helps to find certain types
......
...@@ -781,8 +781,6 @@ void mark_rodata_ro(void) ...@@ -781,8 +781,6 @@ void mark_rodata_ro(void)
void free_init_pages(char *what, unsigned long begin, unsigned long end) void free_init_pages(char *what, unsigned long begin, unsigned long end)
{ {
unsigned long addr;
#ifdef CONFIG_DEBUG_PAGEALLOC #ifdef CONFIG_DEBUG_PAGEALLOC
/* /*
* If debugging page accesses then do not free this memory but * If debugging page accesses then do not free this memory but
...@@ -793,6 +791,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) ...@@ -793,6 +791,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
begin, PAGE_ALIGN(end)); begin, PAGE_ALIGN(end));
set_memory_np(begin, (end - begin) >> PAGE_SHIFT); set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else #else
unsigned long addr;
/* /*
* We just marked the kernel text read only above, now that * We just marked the kernel text read only above, now that
* we are going to free part of that, we need to make that * we are going to free part of that, we need to make that
......
...@@ -569,22 +569,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) ...@@ -569,22 +569,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
free_page(addr); free_page(addr);
totalram_pages++; totalram_pages++;
} }
#ifdef CONFIG_DEBUG_RODATA
/*
* This will make the __init pages not present and
* not executable, so that any attempt to use a
* __init function from now on will fault immediately
 * rather than spuriously later when memory gets reused.
*
* We only do this for DEBUG_RODATA to not break up the
* 2Mb kernel mapping just for this debug feature.
*/
if (begin >= __START_KERNEL_map) {
set_memory_rw(begin, (end - begin)/PAGE_SIZE);
set_memory_np(begin, (end - begin)/PAGE_SIZE);
set_memory_nx(begin, (end - begin)/PAGE_SIZE);
}
#endif
#endif #endif
} }
......
...@@ -15,8 +15,7 @@ ...@@ -15,8 +15,7 @@
#include <asm/kdebug.h> #include <asm/kdebug.h>
enum { enum {
NTEST = 400, NTEST = 4000,
LOWEST_LEVEL = PG_LEVEL_4K,
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
LPS = (1 << PMD_SHIFT), LPS = (1 << PMD_SHIFT),
#elif defined(CONFIG_X86_PAE) #elif defined(CONFIG_X86_PAE)
...@@ -59,10 +58,10 @@ static __init int print_split(struct split_state *s) ...@@ -59,10 +58,10 @@ static __init int print_split(struct split_state *s)
continue; continue;
} }
if (level == 2 && sizeof(long) == 8) { if (level == PG_LEVEL_1G && sizeof(long) == 8) {
s->gpg++; s->gpg++;
i += GPS/PAGE_SIZE; i += GPS/PAGE_SIZE;
} else if (level != LOWEST_LEVEL) { } else if (level == PG_LEVEL_2M) {
if (!(pte_val(*pte) & _PAGE_PSE)) { if (!(pte_val(*pte) & _PAGE_PSE)) {
printk(KERN_ERR printk(KERN_ERR
"%lx level %d but not PSE %Lx\n", "%lx level %d but not PSE %Lx\n",
...@@ -162,7 +161,7 @@ static __init int exercise_pageattr(void) ...@@ -162,7 +161,7 @@ static __init int exercise_pageattr(void)
continue; continue;
} }
err = __change_page_attr_clear(addr[i], len[i], err = change_page_attr_clear(addr[i], len[i],
__pgprot(_PAGE_GLOBAL)); __pgprot(_PAGE_GLOBAL));
if (err < 0) { if (err < 0) {
printk(KERN_ERR "CPA %d failed %d\n", i, err); printk(KERN_ERR "CPA %d failed %d\n", i, err);
...@@ -175,7 +174,7 @@ static __init int exercise_pageattr(void) ...@@ -175,7 +174,7 @@ static __init int exercise_pageattr(void)
pte ? (u64)pte_val(*pte) : 0ULL); pte ? (u64)pte_val(*pte) : 0ULL);
failed++; failed++;
} }
if (level != LOWEST_LEVEL) { if (level != PG_LEVEL_4K) {
printk(KERN_ERR "CPA %lx: unexpected level %d\n", printk(KERN_ERR "CPA %lx: unexpected level %d\n",
addr[i], level); addr[i], level);
failed++; failed++;
...@@ -183,7 +182,6 @@ static __init int exercise_pageattr(void) ...@@ -183,7 +182,6 @@ static __init int exercise_pageattr(void)
} }
vfree(bm); vfree(bm);
cpa_flush_all();
failed += print_split(&sb); failed += print_split(&sb);
...@@ -197,7 +195,7 @@ static __init int exercise_pageattr(void) ...@@ -197,7 +195,7 @@ static __init int exercise_pageattr(void)
failed++; failed++;
continue; continue;
} }
err = __change_page_attr_set(addr[i], len[i], err = change_page_attr_set(addr[i], len[i],
__pgprot(_PAGE_GLOBAL)); __pgprot(_PAGE_GLOBAL));
if (err < 0) { if (err < 0) {
printk(KERN_ERR "CPA reverting failed: %d\n", err); printk(KERN_ERR "CPA reverting failed: %d\n", err);
...@@ -211,7 +209,6 @@ static __init int exercise_pageattr(void) ...@@ -211,7 +209,6 @@ static __init int exercise_pageattr(void)
} }
} }
cpa_flush_all();
failed += print_split(&sc); failed += print_split(&sc);
......
...@@ -197,10 +197,11 @@ static int split_large_page(pte_t *kpte, unsigned long address) ...@@ -197,10 +197,11 @@ static int split_large_page(pte_t *kpte, unsigned long address)
unsigned long addr; unsigned long addr;
pte_t *pbase, *tmp; pte_t *pbase, *tmp;
struct page *base; struct page *base;
int i, level; unsigned int i, level;
#ifdef CONFIG_DEBUG_PAGEALLOC #ifdef CONFIG_DEBUG_PAGEALLOC
gfp_flags = GFP_ATOMIC; gfp_flags = __GFP_HIGH | __GFP_NOFAIL | __GFP_NOWARN;
gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
#endif #endif
base = alloc_pages(gfp_flags, 0); base = alloc_pages(gfp_flags, 0);
if (!base) if (!base)
...@@ -224,6 +225,7 @@ static int split_large_page(pte_t *kpte, unsigned long address) ...@@ -224,6 +225,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
paravirt_alloc_pt(&init_mm, page_to_pfn(base)); paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif #endif
pgprot_val(ref_prot) &= ~_PAGE_NX;
for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot)); set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));
...@@ -248,7 +250,8 @@ out_unlock: ...@@ -248,7 +250,8 @@ out_unlock:
} }
static int static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot) __change_page_attr(unsigned long address, unsigned long pfn,
pgprot_t mask_set, pgprot_t mask_clr)
{ {
struct page *kpte_page; struct page *kpte_page;
int level, err = 0; int level, err = 0;
...@@ -267,15 +270,20 @@ repeat: ...@@ -267,15 +270,20 @@ repeat:
BUG_ON(PageLRU(kpte_page)); BUG_ON(PageLRU(kpte_page));
BUG_ON(PageCompound(kpte_page)); BUG_ON(PageCompound(kpte_page));
prot = static_protections(prot, address);
if (level == PG_LEVEL_4K) { if (level == PG_LEVEL_4K) {
WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE); pgprot_t new_prot = pte_pgprot(*kpte);
set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot))); pte_t new_pte, old_pte = *kpte;
} else {
/* Clear the PSE bit for the 4k level pages ! */
pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;
pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
pgprot_val(new_prot) |= pgprot_val(mask_set);
new_prot = static_protections(new_prot, address);
new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
BUG_ON(pte_pfn(new_pte) != pte_pfn(old_pte));
set_pte_atomic(kpte, new_pte);
} else {
err = split_large_page(kpte, address); err = split_large_page(kpte, address);
if (!err) if (!err)
goto repeat; goto repeat;
...@@ -297,22 +305,26 @@ repeat: ...@@ -297,22 +305,26 @@ repeat:
* Modules and drivers should use the set_memory_* APIs instead. * Modules and drivers should use the set_memory_* APIs instead.
*/ */
static int change_page_attr_addr(unsigned long address, pgprot_t prot) static int
change_page_attr_addr(unsigned long address, pgprot_t mask_set,
pgprot_t mask_clr)
{ {
int err = 0, kernel_map = 0; int err = 0, kernel_map = 0;
unsigned long pfn = __pa(address) >> PAGE_SHIFT; unsigned long pfn;
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
if (address >= __START_KERNEL_map && if (address >= __START_KERNEL_map &&
address < __START_KERNEL_map + KERNEL_TEXT_SIZE) { address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
address = (unsigned long)__va(__pa(address)); address = (unsigned long)__va(__pa((void *)address));
kernel_map = 1; kernel_map = 1;
} }
#endif #endif
if (!kernel_map || pte_present(pfn_pte(0, prot))) { pfn = __pa(address) >> PAGE_SHIFT;
err = __change_page_attr(address, pfn, prot);
if (!kernel_map || 1) {
err = __change_page_attr(address, pfn, mask_set, mask_clr);
if (err) if (err)
return err; return err;
} }
...@@ -324,12 +336,15 @@ static int change_page_attr_addr(unsigned long address, pgprot_t prot) ...@@ -324,12 +336,15 @@ static int change_page_attr_addr(unsigned long address, pgprot_t prot)
*/ */
if (__pa(address) < KERNEL_TEXT_SIZE) { if (__pa(address) < KERNEL_TEXT_SIZE) {
unsigned long addr2; unsigned long addr2;
pgprot_t prot2;
addr2 = __START_KERNEL_map + __pa(address); addr2 = __pa(address) + __START_KERNEL_map - phys_base;
/* Make sure the kernel mappings stay executable */ /* Make sure the kernel mappings stay executable */
prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot))); pgprot_val(mask_clr) |= _PAGE_NX;
err = __change_page_attr(addr2, pfn, prot2); /*
* Our high aliases are imprecise, so do not propagate
* failures back to users:
*/
__change_page_attr(addr2, pfn, mask_set, mask_clr);
} }
#endif #endif
...@@ -339,26 +354,13 @@ static int change_page_attr_addr(unsigned long address, pgprot_t prot) ...@@ -339,26 +354,13 @@ static int change_page_attr_addr(unsigned long address, pgprot_t prot)
static int __change_page_attr_set_clr(unsigned long addr, int numpages, static int __change_page_attr_set_clr(unsigned long addr, int numpages,
pgprot_t mask_set, pgprot_t mask_clr) pgprot_t mask_set, pgprot_t mask_clr)
{ {
pgprot_t new_prot; unsigned int i;
int level; int ret;
pte_t *pte;
int i, ret;
for (i = 0; i < numpages ; i++) {
pte = lookup_address(addr, &level);
if (!pte)
return -EINVAL;
new_prot = pte_pgprot(*pte);
pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
pgprot_val(new_prot) |= pgprot_val(mask_set);
ret = change_page_attr_addr(addr, new_prot); for (i = 0; i < numpages ; i++, addr += PAGE_SIZE) {
ret = change_page_attr_addr(addr, mask_set, mask_clr);
if (ret) if (ret)
return ret; return ret;
addr += PAGE_SIZE;
} }
return 0; return 0;
......
...@@ -240,6 +240,7 @@ enum { ...@@ -240,6 +240,7 @@ enum {
PG_LEVEL_NONE, PG_LEVEL_NONE,
PG_LEVEL_4K, PG_LEVEL_4K,
PG_LEVEL_2M, PG_LEVEL_2M,
PG_LEVEL_1G,
}; };
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment