Commit 67350a5c authored by Jeremy Fitzhardinge, committed by Ingo Molnar

x86: simplify vmalloc_sync_all

vmalloc_sync_all() is only called from register_die_notifier and
alloc_vm_area.  Neither is on a performance-critical path, so
vmalloc_sync_all() itself is not on any hot path.

Given that the optimisations in vmalloc_sync_all add a fair amount of
code and complexity, and are fairly hard to evaluate for correctness,
it's better to remove them and simplify the code than to worry about
the function's absolute performance.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: xen-devel <xen-devel@lists.xensource.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 15878c0b
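For readers skimming the diff below, the removed "optimisation" cached its progress across calls: a static insync bitmap recorded which PGD slots had already been synced, and a static start cursor was advanced past fully-synced slots so later calls could skip them. A condensed sketch of that pattern, reconstructed from the removed lines of the 64-bit branch (not a verbatim copy of the kernel source):

	/* Condensed sketch of the caching pattern this patch removes;
	 * reconstructed from the removed hunks, not verbatim kernel code. */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);	/* PGD slots already synced */
	static unsigned long start = VMALLOC_START & PGDIR_MASK; /* lowest slot still worth checking */
	unsigned long address;

	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			/* ... walk pgd_list and propagate the kernel mapping ... */
			set_bit(pgd_index(address), insync);	/* remember this slot is done */
		}
		if (address == start)			/* everything below is synced, */
			start = address + PGDIR_SIZE;	/* so skip it on the next call */
	}

Both the bitmap and the cursor exist only to make repeat calls cheaper, which is exactly the benefit the commit message argues is not worth the complexity.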
@@ -903,14 +903,7 @@ LIST_HEAD(pgd_list);
 void vmalloc_sync_all(void)
 {
 #ifdef CONFIG_X86_32
-	/*
-	 * Note that races in the updates of insync and start aren't
-	 * problematic: insync can only get set bits added, and updates to
-	 * start are only improving performance (without affecting correctness
-	 * if undone).
-	 */
-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-	static unsigned long start = TASK_SIZE;
+	unsigned long start = VMALLOC_START & PGDIR_MASK;
 	unsigned long address;
 
 	if (SHARED_KERNEL_PMD)
@@ -918,7 +911,6 @@ void vmalloc_sync_all(void)
 
 	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
 	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-		if (!test_bit(pgd_index(address), insync)) {
 			unsigned long flags;
 			struct page *page;
 
@@ -929,25 +921,12 @@ void vmalloc_sync_all(void)
 					break;
 			}
 			spin_unlock_irqrestore(&pgd_lock, flags);
-			if (!page)
-				set_bit(pgd_index(address), insync);
-		}
-		if (address == start && test_bit(pgd_index(address), insync))
-			start = address + PGDIR_SIZE;
 	}
 #else /* CONFIG_X86_64 */
-	/*
-	 * Note that races in the updates of insync and start aren't
-	 * problematic: insync can only get set bits added, and updates to
-	 * start are only improving performance (without affecting correctness
-	 * if undone).
-	 */
-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-	static unsigned long start = VMALLOC_START & PGDIR_MASK;
+	unsigned long start = VMALLOC_START & PGDIR_MASK;
 	unsigned long address;
 
 	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
-		if (!test_bit(pgd_index(address), insync)) {
 			const pgd_t *pgd_ref = pgd_offset_k(address);
 			unsigned long flags;
 			struct page *page;
@@ -964,10 +943,6 @@ void vmalloc_sync_all(void)
 				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 			}
 			spin_unlock_irqrestore(&pgd_lock, flags);
-			set_bit(pgd_index(address), insync);
-		}
-		if (address == start)
-			start = address + PGDIR_SIZE;
 	}
 #endif
 }
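After the patch, each branch is just a straight walk over every vmalloc PGD on every call. Roughly, the 64-bit branch now reads as below; the lines hidden by the collapsed hunks (the pgd_none() check, the spin_lock_irqsave()/list_for_each_entry() walk and set_pgd()) are filled in from the surrounding fault.c code and should be treated as a sketch rather than an authoritative copy:

	/* Sketch of the simplified CONFIG_X86_64 branch after this patch.
	 * Context not shown in the collapsed hunks above is filled in from
	 * the surrounding fault.c source; treat as illustrative, not exact. */
	unsigned long start = VMALLOC_START & PGDIR_MASK;
	unsigned long address;

	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		unsigned long flags;
		struct page *page;

		if (pgd_none(*pgd_ref))		/* nothing mapped here in init_mm */
			continue;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);		/* copy the reference entry */
			else
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}

The only behavioural difference is that nothing is remembered between calls, so every invocation re-checks every PGD slot; given the two call sites named in the commit message, that cost is acceptable.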