Commit 05e28f9d authored by David S. Miller, committed by David S. Miller

[SPARC64]: No need to D-cache color page tables any longer.

Unlike the virtual page tables, the new TSB scheme does not
require this ugly hack.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 74bf4312
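The "D-cache coloring" being removed here: UltraSPARC's D-cache is virtually indexed, so under the old virtual-page-table (VPTE) scheme the kernel kept two pte freelists and picked one via a color bit derived from the faulting address. A minimal sketch of just that color computation, runnable in userspace (assumptions: PAGE_SHIFT of 13 matching sparc64's 8K pages; the harness and addresses are made up, only VPTE_COLOR comes from the code removed below):

```c
/*
 * Illustrative only: the color bit that used to select between the
 * two pte quicklists.  Addresses 8MB apart flip the bit, so their
 * pte pages were drawn from different freelists.
 */
#include <stdio.h>

#define PAGE_SHIFT	13	/* sparc64 uses 8K base pages */
#define VPTE_COLOR(address)	(((address) >> (PAGE_SHIFT + 10)) & 1UL)

int main(void)
{
	unsigned long a = 0UL;
	unsigned long b = 1UL << (PAGE_SHIFT + 10);	/* 8MB away */

	printf("color(a)=%lu color(b)=%lu\n", VPTE_COLOR(a), VPTE_COLOR(b));
	return 0;
}
```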
arch/sparc64/mm/init.c
@@ -145,6 +145,10 @@ int bigkernel = 0;
 #define PGT_CACHE_LOW	25
 #define PGT_CACHE_HIGH	50
 
+#ifndef CONFIG_SMP
+struct pgtable_cache_struct pgt_quicklists;
+#endif
+
 void check_pgt_cache(void)
 {
 	preempt_disable();
@@ -152,10 +156,8 @@ void check_pgt_cache(void)
 		do {
 			if (pgd_quicklist)
 				free_pgd_slow(get_pgd_fast());
-			if (pte_quicklist[0])
-				free_pte_slow(pte_alloc_one_fast(NULL, 0));
-			if (pte_quicklist[1])
-				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
+			if (pte_quicklist)
+				free_pte_slow(pte_alloc_one_fast());
 		} while (pgtable_cache_size > PGT_CACHE_LOW);
 	}
 	preempt_enable();
@@ -962,67 +964,6 @@ out:
 	spin_unlock(&ctx_alloc_lock);
 }
 
-#ifndef CONFIG_SMP
-struct pgtable_cache_struct pgt_quicklists;
-#endif
-
-/* XXX We don't need to color these things in the D-cache any longer.  */
-#ifdef DCACHE_ALIASING_POSSIBLE
-#define DC_ALIAS_SHIFT	1
-#else
-#define DC_ALIAS_SHIFT	0
-#endif
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-	struct page *page;
-	unsigned long color;
-
-	{
-		pte_t *ptep = pte_alloc_one_fast(mm, address);
-
-		if (ptep)
-			return ptep;
-	}
-
-	color = VPTE_COLOR(address);
-	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT);
-	if (page) {
-		unsigned long *to_free;
-		unsigned long paddr;
-		pte_t *pte;
-
-#ifdef DCACHE_ALIASING_POSSIBLE
-		set_page_count(page, 1);
-		ClearPageCompound(page);
-
-		set_page_count((page + 1), 1);
-		ClearPageCompound(page + 1);
-#endif
-		paddr = (unsigned long) page_address(page);
-		memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));
-
-		if (!color) {
-			pte = (pte_t *) paddr;
-			to_free = (unsigned long *) (paddr + PAGE_SIZE);
-		} else {
-			pte = (pte_t *) (paddr + PAGE_SIZE);
-			to_free = (unsigned long *) paddr;
-		}
-
-#ifdef DCACHE_ALIASING_POSSIBLE
-		/* Now free the other one up, adjust cache size. */
-		preempt_disable();
-		*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
-		pte_quicklist[color ^ 0x1] = to_free;
-		pgtable_cache_size++;
-		preempt_enable();
-#endif
-
-		return pte;
-	}
-	return NULL;
-}
-
 void sparc_ultra_dump_itlb(void)
 {
 	int slot;
...
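The function deleted above is the "ugly hack" the changelog refers to: instead of allocating a single page, it allocated an order-1 (two-page) block, returned the half whose color matched the request, and pushed the other half onto the opposite color's quicklist. A rough userspace analogue of that split (assumptions: 8K pages, aligned_alloc standing in for alloc_pages(..., 1), no preemption handling; all names are illustrative except pte_cache):

```c
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE	8192UL

static unsigned long *pte_cache[2];	/* one freelist per D-cache color */

static void *alloc_colored(int color)
{
	/* grab an aligned two-page block, like alloc_pages(order=1) */
	char *pair = aligned_alloc(2 * PAGE_SIZE, 2 * PAGE_SIZE);
	unsigned long *keep, *to_free;

	if (!pair)
		return NULL;
	memset(pair, 0, 2 * PAGE_SIZE);

	/* the half with the requested color is returned... */
	keep    = (unsigned long *)(pair + (color ? PAGE_SIZE : 0));
	to_free = (unsigned long *)(pair + (color ? 0 : PAGE_SIZE));

	/* ...and the sibling goes onto the other color's freelist,
	 * linked through its own first word */
	*to_free = (unsigned long) pte_cache[color ^ 1];
	pte_cache[color ^ 1] = to_free;
	return keep;
}

int main(void)
{
	void *p0 = alloc_colored(0);
	void *p1 = alloc_colored(1);

	/* each call also stocked the opposite color's freelist */
	return !(p0 && p1 && pte_cache[0] && pte_cache[1]);
}
```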
include/asm-sparc64/cpudata.h
@@ -20,8 +20,9 @@ typedef struct {
 	/* Dcache line 2 */
 	unsigned int	pgcache_size;
 	unsigned int	__pad1;
-	unsigned long	*pte_cache[2];
+	unsigned long	*pte_cache;
 	unsigned long	*pgd_cache;
+	unsigned long	__pad2;
 
 	/* Dcache line 3, rarely used */
 	unsigned int	dcache_size;
@@ -30,8 +31,8 @@ typedef struct {
 	unsigned int	icache_line_size;
 	unsigned int	ecache_size;
 	unsigned int	ecache_line_size;
-	unsigned int	__pad2;
 	unsigned int	__pad3;
+	unsigned int	__pad4;
 } cpuinfo_sparc;
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
...
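The pad shuffle keeps the struct's cache-line layout intact: dropping one pte_cache pointer removes eight bytes from "Dcache line 2", and the new unsigned long __pad2 puts them back so the next line still starts on a 32-byte boundary, while the old trailing unsigned int __pad2 is renamed __pad4 to avoid the duplicate member name. A quick standalone check of that arithmetic (LP64 and 32-byte D-cache lines assumed; only the line-2 fields are reproduced):

```c
#include <stdio.h>

/* just the "Dcache line 2" slice of the new cpuinfo_sparc layout */
struct line2_new {
	unsigned int	pgcache_size;	/*  4 bytes */
	unsigned int	__pad1;		/*  4 bytes */
	unsigned long	*pte_cache;	/*  8 bytes (was 16 as [2]) */
	unsigned long	*pgd_cache;	/*  8 bytes */
	unsigned long	__pad2;		/*  8 bytes, restores old size */
};

int main(void)
{
	/* prints 32: one full D-cache line, same as before the change */
	printf("sizeof(line2) = %zu\n", sizeof(struct line2_new));
	return 0;
}
```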
include/asm-sparc64/pgalloc.h
@@ -19,16 +19,15 @@
 #else
 extern struct pgtable_cache_struct {
 	unsigned long *pgd_cache;
-	unsigned long *pte_cache[2];
+	unsigned long *pte_cache;
 	unsigned int pgcache_size;
 } pgt_quicklists;
 #endif
 #define pgd_quicklist		(pgt_quicklists.pgd_cache)
-#define pmd_quicklist		((unsigned long *)0)
 #define pte_quicklist		(pgt_quicklists.pte_cache)
 #define pgtable_cache_size	(pgt_quicklists.pgcache_size)
 
-static __inline__ void free_pgd_fast(pgd_t *pgd)
+static inline void free_pgd_fast(pgd_t *pgd)
 {
 	preempt_disable();
 	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
@@ -37,7 +36,7 @@ static __inline__ void free_pgd_fast(pgd_t *pgd)
 	preempt_enable();
 }
 
-static __inline__ pgd_t *get_pgd_fast(void)
+static inline pgd_t *get_pgd_fast(void)
 {
 	unsigned long *ret;
 
@@ -56,47 +55,35 @@ static __inline__ pgd_t *get_pgd_fast(void)
 	return (pgd_t *)ret;
 }
 
-static __inline__ void free_pgd_slow(pgd_t *pgd)
+static inline void free_pgd_slow(pgd_t *pgd)
 {
 	free_page((unsigned long)pgd);
 }
 
-/* XXX This crap can die, no longer using virtual page tables... */
-#ifdef DCACHE_ALIASING_POSSIBLE
-#define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
-#define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
-#else
-#define VPTE_COLOR(address)		0
-#define DCACHE_COLOR(address)		0
-#endif
-
 #define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
 
-static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static inline pmd_t *pmd_alloc_one_fast(void)
 {
 	unsigned long *ret;
-	int color = 0;
 
 	preempt_disable();
-	if (pte_quicklist[color] == NULL)
-		color = 1;
-
-	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
-		pte_quicklist[color] = (unsigned long *)(*ret);
+	ret = (unsigned long *) pte_quicklist;
+	if (likely(ret)) {
+		pte_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
 	}
 	preempt_enable();
 
-	return (pmd_t *)ret;
+	return (pmd_t *) ret;
 }
 
-static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pmd_t *pmd;
 
-	pmd = pmd_alloc_one_fast(mm, address);
-	if (!pmd) {
+	pmd = pmd_alloc_one_fast();
+	if (unlikely(!pmd)) {
 		pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
 		if (pmd)
 			memset(pmd, 0, PAGE_SIZE);
@@ -104,18 +91,16 @@ static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addre
 	return pmd;
 }
 
-static __inline__ void free_pmd_fast(pmd_t *pmd)
+static inline void free_pmd_fast(pmd_t *pmd)
 {
-	unsigned long color = DCACHE_COLOR((unsigned long)pmd);
-
 	preempt_disable();
-	*(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
-	pte_quicklist[color] = (unsigned long *) pmd;
+	*(unsigned long *)pmd = (unsigned long) pte_quicklist;
+	pte_quicklist = (unsigned long *) pmd;
 	pgtable_cache_size++;
 	preempt_enable();
 }
 
-static __inline__ void free_pmd_slow(pmd_t *pmd)
+static inline void free_pmd_slow(pmd_t *pmd)
 {
 	free_page((unsigned long)pmd);
 }
@@ -124,48 +109,54 @@ static __inline__ void free_pmd_slow(pmd_t *pmd)
 #define pmd_populate(MM,PMD,PTE_PAGE)		\
 	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
 
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	pte_t *pte = pte_alloc_one_kernel(mm, addr);
-
-	if (pte)
-		return virt_to_page(pte);
-
-	return NULL;
-}
-
-static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static inline pte_t *pte_alloc_one_fast(void)
 {
-	unsigned long color = VPTE_COLOR(address);
 	unsigned long *ret;
 
 	preempt_disable();
-	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
-		pte_quicklist[color] = (unsigned long *)(*ret);
+	ret = (unsigned long *) pte_quicklist;
+	if (likely(ret)) {
+		pte_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
 	}
 	preempt_enable();
 
-	return (pte_t *)ret;
+	return (pte_t *) ret;
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+	pte_t *ptep = pte_alloc_one_fast();
+
+	if (likely(ptep))
+		return ptep;
+
+	return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	pte_t *pte = pte_alloc_one_fast();
+
+	if (likely(pte))
+		return virt_to_page(pte);
+
+	return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 }
 
-static __inline__ void free_pte_fast(pte_t *pte)
+static inline void free_pte_fast(pte_t *pte)
 {
-	unsigned long color = DCACHE_COLOR((unsigned long)pte);
-
 	preempt_disable();
-	*(unsigned long *)pte = (unsigned long) pte_quicklist[color];
-	pte_quicklist[color] = (unsigned long *) pte;
+	*(unsigned long *)pte = (unsigned long) pte_quicklist;
+	pte_quicklist = (unsigned long *) pte;
 	pgtable_cache_size++;
 	preempt_enable();
 }
 
-static __inline__ void free_pte_slow(pte_t *pte)
+static inline void free_pte_slow(pte_t *pte)
 {
-	free_page((unsigned long)pte);
+	free_page((unsigned long) pte);
 }
 
 static inline void pte_free_kernel(pte_t *pte)
...
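Every fast path above relies on the same quicklist trick: a free page-table page stores the link to the next free page in its own first word, so the cache needs no separate list nodes, and popping a page re-zeroes word 0, handing back a fully cleared page. A userspace analogue of the single-list scheme this commit settles on (names mirror the kernel's; the real versions additionally bracket the list operations with preempt_disable()/preempt_enable()):

```c
#include <stdlib.h>

#define PAGE_SIZE	8192UL

static unsigned long *pte_quicklist;
static unsigned int pgtable_cache_size;

static void free_pte_fast(unsigned long *pte)
{
	*pte = (unsigned long) pte_quicklist;	/* link through word 0 */
	pte_quicklist = pte;
	pgtable_cache_size++;
}

static unsigned long *pte_alloc_one_fast(void)
{
	unsigned long *ret = pte_quicklist;

	if (ret) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;		/* page is all-zero again */
		pgtable_cache_size--;
	}
	return ret;
}

int main(void)
{
	unsigned long *p = calloc(1, PAGE_SIZE);

	if (!p)
		return 1;
	free_pte_fast(p);
	p = pte_alloc_one_fast();	/* same zeroed page comes back */
	free(p);
	return 0;
}
```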