Commit fd062c84 authored by David Daney, committed by Ralf Baechle

MIPS: TLB support for hugetlbfs.

The TLB handlers need to check for huge pages and give them special
handling.  Huge pages consist of two contiguous sub-pages of physical
memory.

* Loading entrylo0 and entrylo1 needs to be handled specially (the
  arithmetic is sketched below the commit header).

* The page mask must be set for huge pages and then restored after
  writing the TLB entries.

* The PTE for huge pages resides in the PMD, so we halt traversal of the
  page tables there.
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent dd794392
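A minimal standalone sketch of the entrylo0/entrylo1 arithmetic the patch relies on. This is illustration only, not part of the patch: the huge page size and the physical address are assumed example values, and the PTE flag bits are left out. It shows why each EntryLo register covers one half of the huge page and why the second EntryLo value is the first plus HPAGE_SIZE >> 7.

/*
 * Standalone illustration of the EntryLo split (not kernel code).
 * HPAGE_SIZE and the physical address are assumed example values;
 * the flag bits normally carried in the low bits are ignored.
 */
#include <stdio.h>
#include <stdint.h>

#define HPAGE_SIZE (4UL << 20)	/* assumed 4 MB huge page for this example */

int main(void)
{
	uint64_t phys = 0x04000000;	/* hypothetical HPAGE_SIZE-aligned address */

	/*
	 * EntryLo carries the PFN (phys >> 12) starting at bit 6, so an
	 * EntryLo value is essentially phys >> 6.  EntryLo0 maps the first
	 * half of the huge page and EntryLo1 the second half, so the two
	 * values differ by (HPAGE_SIZE / 2) >> 6 == HPAGE_SIZE >> 7, the
	 * same constant that appears in the hunks below.
	 */
	uint64_t lo0 = phys >> 6;
	uint64_t lo1 = lo0 + (HPAGE_SIZE >> 7);

	printf("entrylo0 = %#llx -> phys %#llx\n",
	       (unsigned long long)lo0, (unsigned long long)phys);
	printf("entrylo1 = %#llx -> phys %#llx\n",
	       (unsigned long long)lo1,
	       (unsigned long long)(phys + HPAGE_SIZE / 2));
	return 0;
}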
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
@@ -295,6 +296,25 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_HUGETLB_PAGE
	/* this could be a huge page */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_val(*ptep) >> 6;
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
	ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
@@ -310,6 +330,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
		tlb_write_random();
	else
		tlb_write_indexed();
	}
	tlbw_use_hazard();
	FLUSH_ITLB_VM(vma);
	EXIT_CRITICAL(flags);
...
@@ -8,6 +8,7 @@
 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completly out of my mind.
@@ -83,6 +84,9 @@ enum label_id {
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
#ifdef CONFIG_HUGETLB_PAGE
	label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
@@ -99,6 +103,9 @@ UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
#ifdef CONFIG_HUGETLB_PAGE
UASM_L_LA(_tlb_huge_update)
#endif

/*
 * For debug purposes.
@@ -126,6 +133,7 @@ static inline void dump_handler(const u32 *handler, int count)
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_PAGEMASK	5, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC	14, 0
@@ -383,6 +391,98 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
	}
}

#ifdef CONFIG_HUGETLB_PAGE
static __cpuinit void build_huge_tlb_write_entry(u32 **p,
		struct uasm_label **l,
		struct uasm_reloc **r,
		unsigned int tmp,
		enum tlb_write_entry wmode)
{
	/* Set huge page tlb entry size */
	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
	uasm_i_mtc0(p, tmp, C0_PAGEMASK);

	build_tlb_write_entry(p, l, r, wmode);

	/* Reset default page size */
	if (PM_DEFAULT_MASK >> 16) {
		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
		uasm_il_b(p, r, label_leave);
		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
	} else if (PM_DEFAULT_MASK) {
		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
		uasm_il_b(p, r, label_leave);
		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
	} else {
		uasm_il_b(p, r, label_leave);
		uasm_i_mtc0(p, 0, C0_PAGEMASK);
	}
}

/*
 * Check if Huge PTE is present, if so then jump to LABEL.
 */
static void __cpuinit
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
		unsigned int pmd, int lid)
{
	UASM_i_LW(p, tmp, 0, pmd);
	uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
	uasm_il_bnez(p, r, tmp, lid);
}

static __cpuinit void build_huge_update_entries(u32 **p,
		unsigned int pte,
		unsigned int tmp)
{
	int small_sequence;

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;

	/* We can clobber tmp. It isn't used after this.*/
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

	UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

	uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
}

static __cpuinit void build_huge_handler_tail(u32 **p,
		struct uasm_reloc **r,
		struct uasm_label **l,
		unsigned int pte,
		unsigned int ptr)
{
#ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif

	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
@@ -689,12 +789,23 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	uasm_l_leave(&l, p);
	uasm_i_eret(&p); /* return from trap */

#ifdef CONFIG_HUGETLB_PAGE
	uasm_l_tlb_huge_update(&l, p);
	UASM_i_LW(&p, K0, 0, K1);
	build_huge_update_entries(&p, K0, K1);
	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random);
#endif

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif
@@ -733,7 +844,9 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
#if defined(CONFIG_HUGETLB_PAGE)
		const enum label_id ls = label_tlb_huge_update;
#elif defined(MODULE_START)
		const enum label_id ls = label_module_alloc;
#else
		const enum label_id ls = label_vmalloc;
@@ -1130,6 +1243,15 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * For huge tlb entries, pmd doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
#endif

	UASM_i_MFC0(p, pte, C0_BADVADDR);
	UASM_i_LW(p, ptr, 0, ptr);
	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
@@ -1187,6 +1309,19 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);
@@ -1218,6 +1353,20 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);
@@ -1250,6 +1399,20 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);
...