Commit 2bff7383 authored by Christoph Lameter, committed by Andi Kleen

[PATCH] x86-64: use lru instead of page->index and page->private for pgd lists management.

x86_64 currently simulates a list using the index and private fields of the
page struct.  The code appears to have been inherited from i386.  But x86_64
does not use the slab allocator for pgds, pmds, etc., so the lru field is not
used by the slab and is therefore available.

This patch uses standard list operations on page->lru to implement pgd tracking.
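
For illustration only, a minimal userspace sketch of the idiom this patch switches
to: a generic doubly-linked node embedded in the tracked object, manipulated with
standard helpers instead of hand-threaded index/private pointers.  The list_head,
LIST_HEAD, list_add, list_del and list_for_each_entry definitions below are
simplified stand-ins for the kernel's <linux/list.h>, and struct fake_page is a
hypothetical stand-in for struct page; this is a sketch of the technique, not the
kernel code itself.

/*
 * Sketch: embedded list node + standard helpers (stand-ins for <linux/list.h>).
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
#define LIST_HEAD(name)		struct list_head name = LIST_HEAD_INIT(name)

/* Insert 'node' right after 'head'. */
static void list_add(struct list_head *node, struct list_head *head)
{
	node->next = head->next;
	node->prev = head;
	head->next->prev = node;
	head->next = node;
}

/* Unlink 'entry' from whatever list it is on. */
static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = NULL;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, __typeof__(*pos), member); \
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, __typeof__(*pos), member))

/* Hypothetical stand-in for struct page with an embedded lru node. */
struct fake_page {
	int id;
	struct list_head lru;
};

static LIST_HEAD(pgd_list);	/* mirrors LIST_HEAD(pgd_list) in the patch */

int main(void)
{
	struct fake_page a = { .id = 1 }, b = { .id = 2 };
	struct fake_page *p;

	/* pgd_list_add() shrinks to a single list_add() call: */
	list_add(&a.lru, &pgd_list);
	list_add(&b.lru, &pgd_list);

	/* the walk in vmalloc_sync_all() becomes list_for_each_entry(): */
	list_for_each_entry(p, &pgd_list, lru)
		printf("page %d\n", p->id);

	/* pgd_list_del() shrinks to a single list_del() call: */
	list_del(&a.lru);
	list_del(&b.lru);
	return 0;
}

The payoff of the idiom is that any object carrying an embedded list_head gets
insertion, removal and traversal for free, with no per-list bookkeeping fields.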
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 05f36927
@@ -585,7 +585,7 @@ do_sigbus:
 }
 
 DEFINE_SPINLOCK(pgd_lock);
-struct page *pgd_list;
+LIST_HEAD(pgd_list);
 
 void vmalloc_sync_all(void)
 {
@@ -605,8 +605,7 @@ void vmalloc_sync_all(void)
 		if (pgd_none(*pgd_ref))
 			continue;
 		spin_lock(&pgd_lock);
-		for (page = pgd_list; page;
-		     page = (struct page *)page->index) {
+		list_for_each_entry(page, &pgd_list, lru) {
 			pgd_t *pgd;
 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
 			if (pgd_none(*pgd))
......
@@ -44,24 +44,16 @@ static inline void pgd_list_add(pgd_t *pgd)
 	struct page *page = virt_to_page(pgd);
 
 	spin_lock(&pgd_lock);
-	page->index = (pgoff_t)pgd_list;
-	if (pgd_list)
-		pgd_list->private = (unsigned long)&page->index;
-	pgd_list = page;
-	page->private = (unsigned long)&pgd_list;
+	list_add(&page->lru, &pgd_list);
 	spin_unlock(&pgd_lock);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
-	struct page *next, **pprev, *page = virt_to_page(pgd);
+	struct page *page = virt_to_page(pgd);
 
 	spin_lock(&pgd_lock);
-	next = (struct page *)page->index;
-	pprev = (struct page **)page->private;
-	*pprev = next;
-	if (next)
-		next->private = (unsigned long)pprev;
+	list_del(&page->lru);
 	spin_unlock(&pgd_lock);
 }
......
@@ -410,7 +410,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val })
 
 extern spinlock_t pgd_lock;
-extern struct page *pgd_list;
+extern struct list_head pgd_list;
 
 void vmalloc_sync_all(void);
 extern int kern_addr_valid(unsigned long addr);
......