Commit d01ad8dd authored by Jan Beulich, committed by Andi Kleen

[PATCH] x86: Improve handling of kernel mappings in change_page_attr

Fix various broken corner cases in i386 and x86-64 change_page_attr.

AK: split off from tighten kernel image access rights
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andi Kleen <ak@suse.de>
parent 90a0a06a
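
For context: change_page_attr() is the 2.6-era interface implemented by the files touched here. It rewrites the protections of existing kernel mappings, and callers must flush afterwards with global_flush_tlb(). A minimal usage sketch of the i386 variant, declared in asm/cacheflush.h (illustrative only, not part of this patch):

    #include <asm/cacheflush.h>     /* change_page_attr(), global_flush_tlb() */

    /* Temporarily make one kernel page uncacheable, then restore it. */
    static void remap_uncached(struct page *page)
    {
            change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
            global_flush_tlb();     /* flush stale cached translations */

            /* ... use the uncached mapping ... */

            change_page_attr(page, 1, PAGE_KERNEL);
            global_flush_tlb();
    }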
@@ -142,7 +142,7 @@ __change_page_attr(struct page *page, pgprot_t prot)
                 return -EINVAL;
         kpte_page = virt_to_page(kpte);
         if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
-                if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
+                if (!pte_huge(*kpte)) {
                         set_pte_atomic(kpte, mk_pte(page, prot));
                 } else {
                         pgprot_t ref_prot;
@@ -158,7 +158,7 @@ __change_page_attr(struct page *page, pgprot_t prot)
                         kpte_page = split;
                 }
                 page_private(kpte_page)++;
-        } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
+        } else if (!pte_huge(*kpte)) {
                 set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                 BUG_ON(page_private(kpte_page) == 0);
                 page_private(kpte_page)--;
...
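
Both hunks above swap the open-coded _PAGE_PSE test for the pte_huge() accessor; the behaviour is unchanged. A sketch of the equivalence, assuming the i386 definition of that era (quoted approximately; the exact form may differ between trees):

    /* include/asm-i386/pgtable.h, approximate definition of the era */
    #define pte_huge(x)     ((x).pte_low & _PAGE_PSE)

    /* so the new test */
    if (!pte_huge(*kpte)) { /* ... */ }
    /* reads the same bit as the old open-coded check */
    if ((pte_val(*kpte) & _PAGE_PSE) == 0) { /* ... */ }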
@@ -179,16 +179,24 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 {
         unsigned long phys_base_pfn = __pa_symbol(__START_KERNEL_map) >> PAGE_SHIFT;
-        int err = 0;
+        int err = 0, kernel_map = 0;
         int i;

+        if (address >= __START_KERNEL_map
+            && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
+                address = (unsigned long)__va(__pa(address));
+                kernel_map = 1;
+        }
+
         down_write(&init_mm.mmap_sem);
         for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                 unsigned long pfn = __pa(address) >> PAGE_SHIFT;

-                err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
-                if (err)
-                        break;
+                if (!kernel_map || pte_present(pfn_pte(0, prot))) {
+                        err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
+                        if (err)
+                                break;
+                }
                 /* Handle kernel mapping too which aliases part of the
                  * lowmem */
                 if ((pfn >= phys_base_pfn) &&
...
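
The added block above is the heart of the fix: an address inside the high kernel-image mapping (__START_KERNEL_map up to __START_KERNEL_map + KERNEL_TEXT_SIZE) is first folded down to its direct-mapping alias via __va(__pa(address)), and kernel_map records that this happened. The loop then touches the direct mapping only when the requested protections keep the page present; pte_present(pfn_pte(0, prot)) constructs a throwaway PTE for pfn 0 purely to test the present bit, so a request that unmaps a kernel-text alias cannot also knock the page out of lowmem. A condensed sketch of the range check (helper name hypothetical, for illustration only):

    /* Hypothetical helper equivalent to the new range check above. */
    static inline int addr_in_kernel_image(unsigned long address)
    {
            return address >= __START_KERNEL_map &&
                   address < __START_KERNEL_map + KERNEL_TEXT_SIZE;
    }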
@@ -159,6 +159,7 @@ void paging_init(void);
 extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 #define __PAGE_KERNEL_RO                (__PAGE_KERNEL & ~_PAGE_RW)
+#define __PAGE_KERNEL_RX                (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
 #define __PAGE_KERNEL_NOCACHE           (__PAGE_KERNEL | _PAGE_PCD)
 #define __PAGE_KERNEL_LARGE             (__PAGE_KERNEL | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC        (__PAGE_KERNEL_EXEC | _PAGE_PSE)
@@ -166,6 +167,7 @@ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 #define PAGE_KERNEL             __pgprot(__PAGE_KERNEL)
 #define PAGE_KERNEL_RO          __pgprot(__PAGE_KERNEL_RO)
 #define PAGE_KERNEL_EXEC        __pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RX          __pgprot(__PAGE_KERNEL_RX)
 #define PAGE_KERNEL_NOCACHE     __pgprot(__PAGE_KERNEL_NOCACHE)
 #define PAGE_KERNEL_LARGE       __pgprot(__PAGE_KERNEL_LARGE)
 #define PAGE_KERNEL_LARGE_EXEC  __pgprot(__PAGE_KERNEL_LARGE_EXEC)
...
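
The new RX protection (executable but write-protected) added above is groundwork for the follow-on "tighten kernel image access rights" patch. A hedged sketch of the intended use on i386 (symbols from asm-generic/sections.h; page count and call site illustrative, not from this patch):

    #include <asm/cacheflush.h>     /* change_page_attr(), global_flush_tlb() */
    #include <asm/sections.h>       /* _text[], _etext[] */

    /* Write-protect kernel text while keeping it executable. */
    int npages = ((unsigned long)_etext - (unsigned long)_text) >> PAGE_SHIFT;

    change_page_attr(virt_to_page(_text), npages, PAGE_KERNEL_RX);
    global_flush_tlb();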