Commit 9a3dc780 authored by Ingo Molnar's avatar Ingo Molnar

x86: cpa: simplify locking

further simplify cpa locking: since the largepage-split is a
slowpath, use the pgd_lock for the whole operation, instead
of the mmap_sem.

This also makes it suitable for DEBUG_PAGEALLOC purposes again.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 7afe15b9
...@@ -37,9 +37,8 @@ pte_t *lookup_address(unsigned long address, int *level) ...@@ -37,9 +37,8 @@ pte_t *lookup_address(unsigned long address, int *level)
return pte_offset_kernel(pmd, address); return pte_offset_kernel(pmd, address);
} }
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{ {
unsigned long flags;
struct page *page; struct page *page;
/* change init_mm */ /* change init_mm */
...@@ -47,7 +46,6 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) ...@@ -47,7 +46,6 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
if (SHARED_KERNEL_PMD) if (SHARED_KERNEL_PMD)
return; return;
spin_lock_irqsave(&pgd_lock, flags);
for (page = pgd_list; page; page = (struct page *)page->index) { for (page = pgd_list; page; page = (struct page *)page->index) {
pgd_t *pgd; pgd_t *pgd;
pud_t *pud; pud_t *pud;
...@@ -58,12 +56,12 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) ...@@ -58,12 +56,12 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
pmd = pmd_offset(pud, address); pmd = pmd_offset(pud, address);
set_pte_atomic((pte_t *)pmd, pte); set_pte_atomic((pte_t *)pmd, pte);
} }
spin_unlock_irqrestore(&pgd_lock, flags);
} }
static int split_large_page(pte_t *kpte, unsigned long address) static int split_large_page(pte_t *kpte, unsigned long address)
{ {
pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte)); pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
unsigned long flags;
unsigned long addr; unsigned long addr;
pte_t *pbase, *tmp; pte_t *pbase, *tmp;
struct page *base; struct page *base;
...@@ -73,7 +71,7 @@ static int split_large_page(pte_t *kpte, unsigned long address) ...@@ -73,7 +71,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
if (!base) if (!base)
return -ENOMEM; return -ENOMEM;
down_write(&init_mm.mmap_sem); spin_lock_irqsave(&pgd_lock, flags);
/* /*
* Check for races, another CPU might have split this page * Check for races, another CPU might have split this page
* up for us already: * up for us already:
...@@ -95,11 +93,11 @@ static int split_large_page(pte_t *kpte, unsigned long address) ...@@ -95,11 +93,11 @@ static int split_large_page(pte_t *kpte, unsigned long address)
/* /*
* Install the new, split up pagetable: * Install the new, split up pagetable:
*/ */
set_pmd_pte(kpte, address, mk_pte(base, ref_prot)); __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
base = NULL; base = NULL;
out_unlock: out_unlock:
up_write(&init_mm.mmap_sem); spin_unlock_irqrestore(&pgd_lock, flags);
if (base) if (base)
__free_pages(base, 0); __free_pages(base, 0);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment