Commit 09f94287 authored by David S. Miller, committed by David S. Miller

[SPARC64]: TSB refinements.

Move {init_new,destroy}_context() out of line.

Do not put huge pages into the TSB, only base page size translations.
There are some clever things we could do here, but for now let's be
correct instead of fancy.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 56fb4df6
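Note: the size check added to tsb_reload in the first hunk below can be read, roughly, as the C sketch that follows. This is only an illustration, not code from the commit: the helper name is invented, and it assumes the _PAGE_SZBITS and _PAGE_ALL_SZ_BITS definitions from the sparc64 pgtable header (see the last hunk) are available via <asm/pgtable.h>.

#include <asm/pgtable.h>

/* Return non-zero if the TTE encodes the base (8K) page size.
 * tsb_reload only writes such translations into the TSB; any
 * larger (huge page) translation is left to the TLB alone.
 */
static inline int tte_is_base_page_size(unsigned long tte)
{
	return (tte & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS;
}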
@@ -55,6 +55,17 @@ tsb_reload:
	brgez,a,pn	%g5, tsb_do_fault
	 stx		%g0, [%g1]

	/* If it is larger than the base page size, don't
	 * bother putting it into the TSB.
	 */
	srlx		%g5, 32, %g2
	sethi		%hi(_PAGE_ALL_SZ_BITS >> 32), %g4
	sethi		%hi(_PAGE_SZBITS >> 32), %g7
	and		%g2, %g4, %g2
	cmp		%g2, %g7
	bne,a,pn	%xcc, tsb_tlb_reload
	 stx		%g0, [%g1]

	TSB_WRITE(%g1, %g5, %g6)

	/* Finally, load TLB and return from trap. */
@@ -8,6 +8,7 @@
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#define TSB_ENTRY_ALIGNMENT 16
@@ -82,3 +83,30 @@ void flush_tsb_user(struct mmu_gather *mp)
		}
	}
}
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	mm->context.sparc64_ctx_val = 0UL;
	if (unlikely(!page))
		return -ENOMEM;

	mm->context.sparc64_tsb = (unsigned long *) page;

	return 0;
}

void destroy_context(struct mm_struct *mm)
{
	free_page((unsigned long) mm->context.sparc64_tsb);

	spin_lock(&ctx_alloc_lock);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);

		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock(&ctx_alloc_lock);
}
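Note: the bit manipulation in destroy_context() above is a standard clear of one bit in an array of 64-bit words: nr >> 6 selects the word in mmu_context_bmap and nr & 63 selects the bit within it. Below is a small standalone demonstration of the same idiom, using a toy array and made-up values rather than kernel code.

#include <stdio.h>

static unsigned long ctx_bmap[4];		/* toy bitmap: 4 * 64 = 256 contexts */

static void ctx_set(unsigned long nr)
{
	ctx_bmap[nr >> 6] |= 1UL << (nr & 63);
}

static void ctx_clear(unsigned long nr)
{
	ctx_bmap[nr >> 6] &= ~(1UL << (nr & 63));
}

int main(void)
{
	ctx_set(70);				/* word 1, bit 6 */
	printf("%#lx\n", ctx_bmap[1]);		/* prints 0x40 */
	ctx_clear(70);
	printf("%#lx\n", ctx_bmap[1]);		/* prints 0 */
	return 0;
}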
@@ -19,36 +19,8 @@ extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
extern void get_new_mmu_context(struct mm_struct *mm);
/* Initialize a new mmu context. This is invoked when a new
 * address space instance (unique or shared) is instantiated.
 * This just needs to set mm->context to an invalid context.
 */
#define init_new_context(__tsk, __mm) \
({	unsigned long __pg = get_zeroed_page(GFP_KERNEL); \
	(__mm)->context.sparc64_ctx_val = 0UL; \
	(__mm)->context.sparc64_tsb = \
		(unsigned long *) __pg; \
	(__pg ? 0 : -ENOMEM); \
})

/* Destroy a dead context. This occurs when mmput drops the
 * mm_users count to zero, the mmaps have been released, and
 * all the page tables have been flushed. Our job is to destroy
 * any remaining processor-specific state, and in the sparc64
 * case this just means freeing up the mmu context ID held by
 * this task if valid.
 */
#define destroy_context(__mm) \
do {	free_page((unsigned long)(__mm)->context.sparc64_tsb); \
	spin_lock(&ctx_alloc_lock); \
	if (CTX_VALID((__mm)->context)) { \
		unsigned long nr = CTX_NRBITS((__mm)->context); \
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); \
	} \
	spin_unlock(&ctx_alloc_lock); \
} while(0)
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb);
@@ -116,6 +116,10 @@
#define _PAGE_W _AC(0x0000000000000002,UL) /* Writable */
#define _PAGE_G _AC(0x0000000000000001,UL) /* Global */
#define _PAGE_ALL_SZ_BITS \
	(_PAGE_SZ4MB | _PAGE_SZ512K | _PAGE_SZ64K | \
	 _PAGE_SZ8K | _PAGE_SZ32MB | _PAGE_SZ256MB)
/* Here are the SpitFire software bits we use in the TTE's.
 *
 * WARNING: If you are going to try and start using some