Commit 5f8c9908 authored by Paul Mundt, committed by Paul Mundt

sh: generic quicklist support.

This moves SH over to the generic quicklists. As with x86_64, we have
special mappings for the PGDs, so these go on their own list.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 36f021b5
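
For orientation, here is a condensed sketch of the allocator pattern the diff below installs. It is not part of the patch: the example_* names are made up for illustration, and the comment about the constructor only running for pages that do not come off the quicklist reflects the behaviour of the generic quicklist allocator of this kernel generation, not anything stated in this commit.

/*
 * Illustrative sketch only -- not the patch itself. Two quicklists are
 * used: PGDs keep their kernel mappings across free, so they get their
 * own list with a constructor; other page-table pages are zero when
 * freed and can be handed back out without re-initialisation.
 */
#include <linux/quicklist.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/pgtable.h>

#define QUICK_PGD	0	/* kernel mappings preserved over free */
#define QUICK_PT	1	/* zero on free */

static void example_pgd_ctor(void *x)
{
	pgd_t *pgd = x;

	/* seed the kernel half of a brand-new PGD from swapper_pg_dir */
	memcpy(pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

static pgd_t *example_pgd_alloc(void)
{
	/* ctor runs only when a page is not recycled off the quicklist */
	return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT,
			       example_pgd_ctor);
}

static void example_pgd_free(pgd_t *pgd)
{
	/* no destructor: the preserved kernel mappings are what we keep */
	quicklist_free(QUICK_PGD, NULL, pgd);
}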
@@ -18,6 +18,7 @@
 #include <asm/kdebug.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
 #include <asm/ubc.h>

 static int hlt_counter;
@@ -64,6 +65,7 @@ void cpu_idle(void)
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
+		check_pgt_cache();
 	}
 }
...
@@ -218,6 +218,9 @@ endmenu

 menu "Memory management options"

+config QUICKLIST
+	def_bool y
+
 config MMU
 	bool "Support for memory management hardware"
 	depends on !CPU_SH2
...
@@ -67,6 +67,8 @@ void show_mem(void)
 	printk("%d slab pages\n", slab);
 	printk("%d pages shared\n", shared);
 	printk("%d pages swap cached\n", cached);
+	printk(KERN_INFO "Total of %ld pages in page table cache\n",
+	       quicklist_total_size());
 }

 #ifdef CONFIG_MMU
...
 #ifndef __ASM_SH_PGALLOC_H
 #define __ASM_SH_PGALLOC_H

+#include <linux/quicklist.h>
+#include <asm/page.h>
+
+#define QUICK_PGD 0	/* We preserve special mappings over free */
+#define QUICK_PT 1	/* Other page table pages that are zero on free */
+
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
 				       pte_t *pte)
 {
@@ -13,48 +19,49 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 	set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
 }

-/*
- * Allocate and free page tables.
- */
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+static inline void pgd_ctor(void *x)
 {
-	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+	pgd_t *pgd = x;

-	if (pgd) {
-		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-		memcpy(pgd + USER_PTRS_PER_PGD,
-		       swapper_pg_dir + USER_PTRS_PER_PGD,
-		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-	}
+	memcpy(pgd + USER_PTRS_PER_PGD,
+	       swapper_pg_dir + USER_PTRS_PER_PGD,
+	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}

-	return pgd;
+/*
+ * Allocate and free page tables.
+ */
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
 }

 static inline void pgd_free(pgd_t *pgd)
 {
-	free_page((unsigned long)pgd);
+	quicklist_free(QUICK_PGD, NULL, pgd);
 }

 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+	return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 }

 static inline struct page *pte_alloc_one(struct mm_struct *mm,
 					 unsigned long address)
 {
-	return alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+	void *pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
+
+	return pg ? virt_to_page(pg) : NULL;
 }

 static inline void pte_free_kernel(pte_t *pte)
 {
-	free_page((unsigned long)pte);
+	quicklist_free(QUICK_PT, NULL, pte);
 }

 static inline void pte_free(struct page *pte)
 {
-	__free_page(pte);
+	quicklist_free_page(QUICK_PT, NULL, pte);
 }

 #define __pte_free_tlb(tlb,pte)	tlb_remove_page((tlb),(pte))
@@ -66,6 +73,11 @@ static inline void pte_free(struct page *pte)
 #define pmd_free(x)			do { } while (0)
 #define __pmd_free_tlb(tlb,x)		do { } while (0)

-#define check_pgt_cache()		do { } while (0)
+static inline void check_pgt_cache(void)
+{
+	quicklist_trim(QUICK_PGD, NULL, 25, 16);
+	quicklist_trim(QUICK_PT, NULL, 25, 16);
+}

 #endif /* __ASM_SH_PGALLOC_H */
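...

A note on the check_pgt_cache() conversion above: it stops being a no-op, and the idle-loop hunk at the top of the commit now invokes it so surplus cached page-table pages drift back to the page allocator. The sketch below restates that call with the parameter meanings spelled out; treat the min_pages/max_free reading as an assumption drawn from the generic quicklist code of this era, not from this patch, and the example_* name as illustrative only.

/* Illustrative restatement, not part of the patch. */
static inline void example_check_pgt_cache(void)
{
	/*
	 * Assumed quicklist_trim(nr, dtor, min_pages, max_free) semantics:
	 * keep at least min_pages pages cached on list nr, release at most
	 * max_free of the surplus per call, applying dtor (NULL here)
	 * before each page goes back to the page allocator.
	 */
	quicklist_trim(QUICK_PGD, NULL, 25, 16);
	quicklist_trim(QUICK_PT, NULL, 25, 16);
}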
@@ -166,5 +166,5 @@ config ZONE_DMA_FLAG
 config NR_QUICK
 	int
 	depends on QUICKLIST
+	default "2" if SUPERH
 	default "1"