Commit eb5b5f02 authored by Thomas Gleixner

x86: cpa, use page pool

Switch the split page code to use the page pool. We do this
unconditionally to avoid different behaviour with and without
DEBUG_PAGEALLOC enabled.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 76ebd054
...@@ -411,20 +411,29 @@ void __init cpa_init(void) ...@@ -411,20 +411,29 @@ void __init cpa_init(void)
static int split_large_page(pte_t *kpte, unsigned long address) static int split_large_page(pte_t *kpte, unsigned long address)
{ {
unsigned long flags, pfn, pfninc = 1; unsigned long flags, pfn, pfninc = 1;
gfp_t gfp_flags = GFP_KERNEL;
unsigned int i, level; unsigned int i, level;
pte_t *pbase, *tmp; pte_t *pbase, *tmp;
pgprot_t ref_prot; pgprot_t ref_prot;
struct page *base; struct page *base;
#ifdef CONFIG_DEBUG_PAGEALLOC /*
gfp_flags = GFP_ATOMIC | __GFP_NOWARN; * Get a page from the pool. The pool list is protected by the
#endif * pgd_lock, which we have to take anyway for the split
base = alloc_pages(gfp_flags, 0); * operation:
if (!base) */
spin_lock_irqsave(&pgd_lock, flags);
if (list_empty(&page_pool)) {
spin_unlock_irqrestore(&pgd_lock, flags);
return -ENOMEM; return -ENOMEM;
}
base = list_first_entry(&page_pool, struct page, lru);
list_del(&base->lru);
pool_pages--;
if (pool_pages < pool_low)
pool_low = pool_pages;
spin_lock_irqsave(&pgd_lock, flags);
/* /*
* Check for races, another CPU might have split this page * Check for races, another CPU might have split this page
* up for us already: * up for us already:
...@@ -469,11 +478,17 @@ static int split_large_page(pte_t *kpte, unsigned long address) ...@@ -469,11 +478,17 @@ static int split_large_page(pte_t *kpte, unsigned long address)
base = NULL; base = NULL;
out_unlock: out_unlock:
/*
* If we dropped out via the lookup_address check under
* pgd_lock then stick the page back into the pool:
*/
if (base) {
list_add(&base->lru, &page_pool);
pool_pages++;
} else
pool_used++;
spin_unlock_irqrestore(&pgd_lock, flags); spin_unlock_irqrestore(&pgd_lock, flags);
if (base)
__free_pages(base, 0);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment