Commit 112e5847 authored by Paul Mundt

sh: TLB protection violation exception optimizations.

This adds a bit of rework to have the TLB protection violations skip the
TLB miss fastpath and go directly into do_page_fault(), as these require
slow-path handling.

Based on an earlier patch by SUGIOKA Toshinobu.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent e7b8b7f1
...@@ -113,34 +113,33 @@ OFF_TRA = (16*4+6*4) ...@@ -113,34 +113,33 @@ OFF_TRA = (16*4+6*4)
#if defined(CONFIG_MMU) #if defined(CONFIG_MMU)
.align 2 .align 2
ENTRY(tlb_miss_load) ENTRY(tlb_miss_load)
bra call_dpf bra call_handle_tlbmiss
mov #0, r5 mov #0, r5
.align 2 .align 2
ENTRY(tlb_miss_store) ENTRY(tlb_miss_store)
bra call_dpf bra call_handle_tlbmiss
mov #1, r5 mov #1, r5
.align 2 .align 2
ENTRY(initial_page_write) ENTRY(initial_page_write)
bra call_dpf bra call_handle_tlbmiss
mov #1, r5 mov #1, r5
.align 2 .align 2
ENTRY(tlb_protection_violation_load) ENTRY(tlb_protection_violation_load)
bra call_dpf bra call_do_page_fault
mov #0, r5 mov #0, r5
.align 2 .align 2
ENTRY(tlb_protection_violation_store) ENTRY(tlb_protection_violation_store)
bra call_dpf bra call_do_page_fault
mov #1, r5 mov #1, r5
call_dpf: call_handle_tlbmiss:
mov.l 1f, r0 mov.l 1f, r0
mov r5, r8 mov r5, r8
mov.l @r0, r6 mov.l @r0, r6
mov r6, r9
mov.l 2f, r0 mov.l 2f, r0
sts pr, r10 sts pr, r10
jsr @r0 jsr @r0
...@@ -151,16 +150,25 @@ call_dpf: ...@@ -151,16 +150,25 @@ call_dpf:
lds r10, pr lds r10, pr
rts rts
nop nop
0: mov.l 3f, r0 0:
mov r9, r6
mov r8, r5 mov r8, r5
call_do_page_fault:
mov.l 1f, r0
mov.l @r0, r6
sti
mov.l 3f, r0
mov.l 4f, r1
mov r15, r4
jmp @r0 jmp @r0
mov r15, r4 lds r1, pr
.align 2 .align 2
1: .long MMU_TEA 1: .long MMU_TEA
2: .long __do_page_fault 2: .long handle_tlbmiss
3: .long do_page_fault 3: .long do_page_fault
4: .long ret_from_exception
.align 2 .align 2
ENTRY(address_error_load) ENTRY(address_error_load)
......
...@@ -318,9 +318,9 @@ do_sigbus: ...@@ -318,9 +318,9 @@ do_sigbus:
/* /*
* Called with interrupts disabled. * Called with interrupts disabled.
*/ */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, asmlinkage int __kprobes
unsigned long writeaccess, handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
unsigned long address) unsigned long address)
{ {
pgd_t *pgd; pgd_t *pgd;
pud_t *pud; pud_t *pud;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment