Commit 3c936465 authored by David S. Miller, committed by David S. Miller

[SPARC64]: Kill pgtable quicklists and use SLAB.

Taking a cue from the powerpc port.

With the per-cpu caching of both the page allocator and SLAB, the
pgtable quicklist scheme becomes relatively silly and primitive.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 05e28f9d
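
For readers coming to this cold: a pgtable quicklist is a hand-rolled, per-cpu LIFO freelist that threads its next pointer through the first word of each free page-table page. A minimal user-space sketch of the pattern being deleted (hypothetical names, not kernel code; the real versions appear in the old pgalloc.h below):

/*
 * Quicklist pattern this commit removes: free pages are chained
 * through their own first word and reused LIFO -- the same trick the
 * page allocator's per-cpu lists and SLAB already do internally.
 */
#include <stdlib.h>

#define PAGE_SZ 8192UL                  /* sparc64 base page size */

static unsigned long *quicklist;        /* head of the freelist */

static void *quick_alloc(void)
{
        unsigned long *ret = quicklist;

        if (ret) {                      /* fast path: pop a cached page */
                quicklist = (unsigned long *)*ret;
                ret[0] = 0;             /* re-zero the link word; the rest
                                           of the page is already clear */
        } else {                        /* slow path: hit the allocator */
                ret = calloc(1, PAGE_SZ);
        }
        return ret;
}

static void quick_free(void *pg)
{
        /* push: the link pointer lives inside the free page itself */
        *(unsigned long *)pg = (unsigned long)quicklist;
        quicklist = pg;
}
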
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -241,10 +241,6 @@ EXPORT_SYMBOL(verify_compat_iovec);
 #endif
 EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(pte_alloc_one_kernel);
-#ifndef CONFIG_SMP
-EXPORT_SYMBOL(pgt_quicklists);
-#endif
-
 EXPORT_SYMBOL(put_fs_struct);
 
 /* math-emu wants this */
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -141,26 +141,25 @@ unsigned long sparc64_kern_sec_context __read_mostly;
 
 int bigkernel = 0;
 
-/* XXX Tune this... */
-#define PGT_CACHE_LOW	25
-#define PGT_CACHE_HIGH	50
+kmem_cache_t *pgtable_cache __read_mostly;
 
-#ifndef CONFIG_SMP
-struct pgtable_cache_struct pgt_quicklists;
-#endif
+static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+{
+	clear_page(addr);
+}
 
-void check_pgt_cache(void)
+void pgtable_cache_init(void)
 {
-	preempt_disable();
-	if (pgtable_cache_size > PGT_CACHE_HIGH) {
-		do {
-			if (pgd_quicklist)
-				free_pgd_slow(get_pgd_fast());
-			if (pte_quicklist)
-				free_pte_slow(pte_alloc_one_fast());
-		} while (pgtable_cache_size > PGT_CACHE_LOW);
-	}
-	preempt_enable();
+	pgtable_cache = kmem_cache_create("pgtable_cache",
+					  PAGE_SIZE, PAGE_SIZE,
+					  SLAB_HWCACHE_ALIGN |
+					  SLAB_MUST_HWCACHE_ALIGN,
+					  zero_ctor,
+					  NULL);
+	if (!pgtable_cache) {
+		prom_printf("pgtable_cache_init(): Could not create!\n");
+		prom_halt();
+	}
 }
 
 #ifdef CONFIG_DEBUG_DCFLUSH
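
Worth noting for anyone reusing this pattern: a SLAB constructor runs when a slab page is first populated with objects, not on every kmem_cache_alloc(), so objects must go back to the cache in their constructed state -- here, fully zeroed, which page tables are once all their entries have been torn down. A sketch of that contract against the 2.6-era API (example_cache and example_ctor are made-up names, not part of this commit):

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <asm/page.h>           /* clear_page(), PAGE_SIZE */

static kmem_cache_t *example_cache;

/* Runs once per object when a new slab is populated, NOT per alloc. */
static void example_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
{
        clear_page(addr);
}

static int __init example_cache_setup(void)
{
        /* page-sized, page-aligned objects, like pgtable_cache above */
        example_cache = kmem_cache_create("example_cache",
                                          PAGE_SIZE, PAGE_SIZE,
                                          SLAB_HWCACHE_ALIGN,
                                          example_ctor, NULL);
        return example_cache ? 0 : -ENOMEM;
}
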
@@ -340,7 +339,6 @@ void show_mem(void)
 	       nr_swap_pages << (PAGE_SHIFT-10));
 	printk("%ld pages of RAM\n", num_physpages);
 	printk("%d free pages\n", nr_free_pages());
-	printk("%d pages in page table cache\n",pgtable_cache_size);
 }
 
 void mmu_info(struct seq_file *m)
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -17,14 +17,7 @@ typedef struct {
 	unsigned long	clock_tick;	/* %tick's per second */
 	unsigned long	udelay_val;
 
-	/* Dcache line 2 */
-	unsigned int	pgcache_size;
-	unsigned int	__pad1;
-	unsigned long	*pte_cache;
-	unsigned long	*pgd_cache;
-	unsigned long	__pad2;
-
-	/* Dcache line 3, rarely used */
+	/* Dcache line 2, rarely used */
 	unsigned int	dcache_size;
 	unsigned int	dcache_line_size;
 	unsigned int	icache_size;
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 
 #include <asm/spitfire.h>
 #include <asm/cpudata.h>
@@ -13,164 +14,59 @@
 #include <asm/page.h>
 
 /* Page table allocation/freeing. */
-#ifdef CONFIG_SMP
-/* Sliiiicck */
-#define pgt_quicklists	local_cpu_data()
-#else
-extern struct pgtable_cache_struct {
-	unsigned long *pgd_cache;
-	unsigned long *pte_cache;
-	unsigned int pgcache_size;
-} pgt_quicklists;
-#endif
-#define pgd_quicklist		(pgt_quicklists.pgd_cache)
-#define pte_quicklist		(pgt_quicklists.pte_cache)
-#define pgtable_cache_size	(pgt_quicklists.pgcache_size)
+extern kmem_cache_t *pgtable_cache;
 
-static inline void free_pgd_fast(pgd_t *pgd)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	preempt_disable();
-	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
-	pgd_quicklist = (unsigned long *) pgd;
-	pgtable_cache_size++;
-	preempt_enable();
+	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }
 
-static inline pgd_t *get_pgd_fast(void)
+static inline void pgd_free(pgd_t *pgd)
 {
-	unsigned long *ret;
-
-	preempt_disable();
-	if((ret = pgd_quicklist) != NULL) {
-		pgd_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		pgtable_cache_size--;
-		preempt_enable();
-	} else {
-		preempt_enable();
-		ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-		if(ret)
-			memset(ret, 0, PAGE_SIZE);
-	}
-	return (pgd_t *)ret;
-}
-
-static inline void free_pgd_slow(pgd_t *pgd)
-{
-	free_page((unsigned long)pgd);
+	kmem_cache_free(pgtable_cache, pgd);
 }
 
 #define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
 
-static inline pmd_t *pmd_alloc_one_fast(void)
-{
-	unsigned long *ret;
-
-	preempt_disable();
-	ret = (unsigned long *) pte_quicklist;
-	if (likely(ret)) {
-		pte_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		pgtable_cache_size--;
-	}
-	preempt_enable();
-
-	return (pmd_t *) ret;
-}
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	pmd_t *pmd;
-
-	pmd = pmd_alloc_one_fast();
-	if (unlikely(!pmd)) {
-		pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-		if (pmd)
-			memset(pmd, 0, PAGE_SIZE);
-	}
-	return pmd;
+	return kmem_cache_alloc(pgtable_cache,
+				GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline void free_pmd_fast(pmd_t *pmd)
+static inline void pmd_free(pmd_t *pmd)
 {
-	preempt_disable();
-	*(unsigned long *)pmd = (unsigned long) pte_quicklist;
-	pte_quicklist = (unsigned long *) pmd;
-	pgtable_cache_size++;
-	preempt_enable();
-}
-
-static inline void free_pmd_slow(pmd_t *pmd)
-{
-	free_page((unsigned long)pmd);
-}
-
-#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
-#define pmd_populate(MM,PMD,PTE_PAGE)		\
-	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
-
-static inline pte_t *pte_alloc_one_fast(void)
-{
-	unsigned long *ret;
-
-	preempt_disable();
-	ret = (unsigned long *) pte_quicklist;
-	if (likely(ret)) {
-		pte_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		pgtable_cache_size--;
-	}
-	preempt_enable();
-
-	return (pte_t *) ret;
+	kmem_cache_free(pgtable_cache, pmd);
 }
 
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					  unsigned long address)
 {
-	pte_t *ptep = pte_alloc_one_fast();
-
-	if (likely(ptep))
-		return ptep;
-
-	return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(pgtable_cache,
+				GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+					 unsigned long address)
 {
-	pte_t *pte = pte_alloc_one_fast();
-
-	if (likely(pte))
-		return virt_to_page(pte);
-
-	return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+	return virt_to_page(pte_alloc_one_kernel(mm, address));
 }
 
-static inline void free_pte_fast(pte_t *pte)
-{
-	preempt_disable();
-	*(unsigned long *)pte = (unsigned long) pte_quicklist;
-	pte_quicklist = (unsigned long *) pte;
-	pgtable_cache_size++;
-	preempt_enable();
-}
-
-static inline void free_pte_slow(pte_t *pte)
-{
-	free_page((unsigned long) pte);
-}
-
 static inline void pte_free_kernel(pte_t *pte)
 {
-	free_pte_fast(pte);
+	kmem_cache_free(pgtable_cache, pte);
 }
 
 static inline void pte_free(struct page *ptepage)
 {
-	free_pte_fast(page_address(ptepage));
+	pte_free_kernel(page_address(ptepage));
 }
 
-#define pmd_free(pmd)		free_pmd_fast(pmd)
-#define pgd_free(pgd)		free_pgd_fast(pgd)
-#define pgd_alloc(mm)		get_pgd_fast()
+#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
+#define pmd_populate(MM,PMD,PTE_PAGE)		\
+	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
+
+#define check_pgt_cache()	do { } while (0)
 
 #endif /* _SPARC64_PGALLOC_H */
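
To see how the new inlines compose, here is a hedged usage sketch (hypothetical caller, loosely following what the generic mm code of the era does): every level is now a page-sized, pre-zeroed object from the one pgtable_cache, and freeing is a single kmem_cache_free() with no high-water-mark trimming left for check_pgt_cache() to do.

#include <linux/errno.h>
#include <asm/pgalloc.h>        /* the inlines defined above */

/* Hypothetical caller, not part of this commit. */
static int example_alloc_tables(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_alloc(mm);
        pmd_t *pmd = pmd_alloc_one(mm, addr);
        pte_t *pte = pte_alloc_one_kernel(mm, addr);

        if (!pgd || !pmd || !pte)
                goto out_free;

        pmd_populate_kernel(mm, pmd, pte);      /* hook pte table into pmd */
        return 0;

out_free:
        if (pte)
                pte_free_kernel(pte);
        if (pmd)
                pmd_free(pmd);
        if (pgd)
                pgd_free(pgd);
        return -ENOMEM;
}
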
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -432,12 +432,7 @@ extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
 					  unsigned long);
 #define HAVE_ARCH_FB_UNMAPPED_AREA
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-extern void check_pgt_cache(void);
+extern void pgtable_cache_init(void);
 
 #endif /* !(__ASSEMBLY__) */