Commit 12eaa328 authored by David S. Miller

[SPARC64]: Use ASI_SCRATCHPAD address 0x0 properly.

This is where the virtual address of the fault status
area belongs.

To set it up we don't make a hypervisor call; instead
we call OBP's SUNW,set-trap-table with the real address
of the fault status area as the second argument.  And
right before that call we write the virtual address into
ASI_SCRATCHPAD vaddr 0x0.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 18397944
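In outline, the new sun4v code in the setup_trap_table and after_lock_tlb paths below stores the virtual address of the per-cpu fault status area at ASI_SCRATCHPAD offset 0x0, converts that virtual address to a real address via paddr = kern_base + (mmfsa_vaddr - KERNBASE), and passes the real address as the second argument of OBP's SUNW,set-trap-table. The following is a minimal C sketch of the same sequence, for illustration only: the commit does this in assembly, the helper name here is hypothetical, and the header locations and extern declarations are assumptions.

/*
 * Hedged sketch of the registration sequence this commit implements in
 * assembly.  register_mmfsa_sketch() is hypothetical; kern_base, KERNBASE,
 * ASI_SCRATCHPAD and prom_set_trap_table_sun4v() come from the diff.
 */
#include <asm/asi.h>		/* ASI_SCRATCHPAD (assumed location) */
#include <asm/page.h>		/* KERNBASE (assumed location) */
#include <asm/oplib.h>		/* prom_set_trap_table_sun4v() added by this commit */

extern unsigned long kern_base;		/* kernel load base (real address) */
extern void sparc64_ttable_tl0(void);	/* Linux trap table base (assembly symbol) */

static void register_mmfsa_sketch(unsigned long mmfsa_vaddr)
{
	/* paddr = kern_base + (mmfsa_vaddr - KERNBASE), as in the diff. */
	unsigned long mmfsa_paddr = kern_base + (mmfsa_vaddr - KERNBASE);

	/* Write the virtual address into ASI_SCRATCHPAD vaddr 0x0 so the
	 * trap handlers can fetch it with "ldxa [%g0] ASI_SCRATCHPAD, %gN".
	 */
	__asm__ __volatile__("stxa	%0, [%%g0] %1"
			     : /* no outputs */
			     : "r" (mmfsa_vaddr), "i" (ASI_SCRATCHPAD)
			     : "memory");

	/* OBP call replacing the old HV_FAST_MMU_FAULT_AREA_CONF hypercall:
	 * trap table base first, MMFSA real address second.
	 */
	prom_set_trap_table_sun4v((unsigned long) sparc64_ttable_tl0,
				  mmfsa_paddr);
}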
@@ -521,11 +521,36 @@ setup_trap_table:
 	wrpr	%g0, 15, %pil
 	/* Make the firmware call to jump over to the Linux trap table. */
-	call	prom_set_trap_table
+	sethi	%hi(is_sun4v), %o0
+	lduw	[%o0 + %lo(is_sun4v)], %o0
+	brz,pt	%o0, 1f
+	nop
+	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
+	add	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+	stxa	%g2, [%g0] ASI_SCRATCHPAD
+	/* Compute physical address:
+	 *
+	 *	paddr = kern_base + (mmfsa_vaddr - KERNBASE)
+	 */
+	sethi	%hi(KERNBASE), %g3
+	sub	%g2, %g3, %g2
+	sethi	%hi(kern_base), %g3
+	ldx	[%g3 + %lo(kern_base)], %g3
+	add	%g2, %g3, %o1
+	call	prom_set_trap_table_sun4v
+	sethi	%hi(sparc64_ttable_tl0), %o0
+	ba,pt	%xcc, 2f
+	nop
+1:	call	prom_set_trap_table
 	sethi	%hi(sparc64_ttable_tl0), %o0
 	/* Start using proper page size encodings in ctx register. */
-	sethi	%hi(sparc64_kern_pri_context), %g3
+2:	sethi	%hi(sparc64_kern_pri_context), %g3
 	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2
 	mov	PRIMARY_CONTEXT, %g1
@@ -22,11 +22,8 @@ sun4v_cpu_mondo:
 	nop
 	/* Get &trap_block[smp_processor_id()] into %g3. */
-	__GET_CPUID(%g1)
-	sethi	%hi(trap_block), %g3
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
-	or	%g3, %lo(trap_block), %g3
-	add	%g3, %g7, %g3
+	ldxa	[%g0] ASI_SCRATCHPAD, %g3
+	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3
 	/* Get CPU mondo queue base phys address into %g7. */
 	ldx	[%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
@@ -74,11 +71,8 @@ sun4v_dev_mondo:
 	nop
 	/* Get &trap_block[smp_processor_id()] into %g3. */
-	__GET_CPUID(%g1)
-	sethi	%hi(trap_block), %g3
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
-	or	%g3, %lo(trap_block), %g3
-	add	%g3, %g7, %g3
+	ldxa	[%g0] ASI_SCRATCHPAD, %g3
+	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3
 	/* Get DEV mondo queue base phys address into %g5. */
 	ldx	[%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
@@ -143,11 +137,8 @@ sun4v_res_mondo:
 	nop
 	/* Get &trap_block[smp_processor_id()] into %g3. */
-	__GET_CPUID(%g1)
-	sethi	%hi(trap_block), %g3
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
-	or	%g3, %lo(trap_block), %g3
-	add	%g3, %g7, %g3
+	ldxa	[%g0] ASI_SCRATCHPAD, %g3
+	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3
 	/* Get RES mondo queue base phys address into %g5. */
 	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5
@@ -251,11 +242,8 @@ sun4v_nonres_mondo:
 	nop
 	/* Get &trap_block[smp_processor_id()] into %g3. */
-	__GET_CPUID(%g1)
-	sethi	%hi(trap_block), %g3
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g7
-	or	%g3, %lo(trap_block), %g3
-	add	%g3, %g7, %g3
+	ldxa	[%g0] ASI_SCRATCHPAD, %g3
+	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3
 	/* Get RES mondo queue base phys address into %g5. */
 	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5
@@ -7,26 +7,20 @@
 	.align	32
 sun4v_itlb_miss:
-	/* Load CPU ID into %g3. */
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
+	/* Load MMU Miss base into %g2. */
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
 	/* Load UTSB reg into %g1. */
-	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1
+	mov	SCRATCHPAD_UTSBREG1, %g1
+	ldxa	[%g1] ASI_SCRATCHPAD, %g1
-	/* Load &trap_block[smp_processor_id()] into %g2. */
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
 	/* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6.
 	 * Branch if kernel TLB miss. The kernel TSB and user TSB miss
 	 * code wants the missing virtual address in %g4, so that value
 	 * cannot be modified through the entirety of this handler.
 	 */
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5
+	ldx	[%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_I_CTX_OFFSET], %g5
 	srlx	%g4, 22, %g3
 	sllx	%g5, 48, %g6
 	or	%g6, %g3, %g6
@@ -90,26 +84,20 @@ sun4v_itlb_load:
 	retry
 sun4v_dtlb_miss:
-	/* Load CPU ID into %g3. */
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
+	/* Load MMU Miss base into %g2. */
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
 	/* Load UTSB reg into %g1. */
-	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1
+	mov	SCRATCHPAD_UTSBREG1, %g1
+	ldxa	[%g1] ASI_SCRATCHPAD, %g1
-	/* Load &trap_block[smp_processor_id()] into %g2. */
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
 	/* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6.
 	 * Branch if kernel TLB miss. The kernel TSB and user TSB miss
 	 * code wants the missing virtual address in %g4, so that value
 	 * cannot be modified through the entirety of this handler.
 	 */
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
 	srlx	%g4, 22, %g3
 	sllx	%g5, 48, %g6
 	or	%g6, %g3, %g6
@@ -169,17 +157,10 @@ sun4v_dtlb_load:
 	retry
 sun4v_dtlb_prot:
-	/* Load CPU ID into %g3. */
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	/* Load &trap_block[smp_processor_id()] into %g2. */
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g5
+	/* Load MMU Miss base into %g2. */
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g5
 	rdpr	%tl, %g1
 	cmp	%g1, 1
 	bgu,pn	%xcc, winfix_trampoline
@@ -187,35 +168,17 @@ sun4v_dtlb_prot:
 	ba,pt	%xcc, sparc64_realfault_common
 	mov	FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
-/* Called from trap table with &trap_block[smp_processor_id()] in
- * %g5 and SCRATCHPAD_UTSBREG1 contents in %g1.
+/* Called from trap table with TAG TARGET placed into
+ * %g6 and SCRATCHPAD_UTSBREG1 contents in %g1.
  */
 sun4v_itsb_miss:
-	ldx	[%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4
-	ldx	[%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5
-	srlx	%g4, 22, %g7
-	sllx	%g5, 48, %g6
-	or	%g6, %g7, %g6
-	brz,pn	%g5, kvmap_itlb_4v
-	nop
 	ba,pt	%xcc, sun4v_tsb_miss_common
 	mov	FAULT_CODE_ITLB, %g3
-/* Called from trap table with &trap_block[smp_processor_id()] in
- * %g5 and SCRATCHPAD_UTSBREG1 contents in %g1.
+/* Called from trap table with TAG TARGET placed into
+ * %g6 and SCRATCHPAD_UTSBREG1 contents in %g1.
  */
 sun4v_dtsb_miss:
-	ldx	[%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
-	ldx	[%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
-	srlx	%g4, 22, %g7
-	sllx	%g5, 48, %g6
-	or	%g6, %g7, %g6
-	brz,pn	%g5, kvmap_dtlb_4v
-	nop
 	mov	FAULT_CODE_DTLB, %g3
 /* Create TSB pointer into %g1. This is something like:
@@ -239,15 +202,10 @@ sun4v_tsb_miss_common:
 /* Instruction Access Exception, tl0. */
 sun4v_iacc:
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_TYPE_OFFSET], %g3
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldx	[%g2 + HV_FAULT_I_TYPE_OFFSET], %g3
+	ldx	[%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_I_CTX_OFFSET], %g5
 	sllx	%g3, 16, %g3
 	or	%g5, %g3, %g5
 	ba,pt	%xcc, etrap
@@ -260,15 +218,10 @@ sun4v_iacc:
 /* Instruction Access Exception, tl1. */
 sun4v_iacc_tl1:
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_TYPE_OFFSET], %g3
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldx	[%g2 + HV_FAULT_I_TYPE_OFFSET], %g3
+	ldx	[%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_I_CTX_OFFSET], %g5
 	sllx	%g3, 16, %g3
 	or	%g5, %g3, %g5
 	ba,pt	%xcc, etraptl1
@@ -281,15 +234,10 @@ sun4v_iacc_tl1:
 /* Data Access Exception, tl0. */
 sun4v_dacc:
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldx	[%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
 	sllx	%g3, 16, %g3
 	or	%g5, %g3, %g5
 	ba,pt	%xcc, etrap
@@ -302,15 +250,10 @@ sun4v_dacc:
 /* Data Access Exception, tl1. */
 sun4v_dacc_tl1:
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldx	[%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
 	sllx	%g3, 16, %g3
 	or	%g5, %g3, %g5
 	ba,pt	%xcc, etraptl1
@@ -323,15 +266,10 @@ sun4v_dacc_tl1:
 /* Memory Address Unaligned. */
 sun4v_mna:
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
 	mov	HV_FAULT_TYPE_UNALIGNED, %g3
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
 	sllx	%g3, 16, %g3
 	or	%g5, %g3, %g5
@@ -359,15 +297,10 @@ sun4v_privact:
 /* Unaligned ldd float, tl0. */
 sun4v_lddfmna:
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldx	[%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
 	sllx	%g3, 16, %g3
 	or	%g5, %g3, %g5
 	ba,pt	%xcc, etrap
@@ -380,15 +313,10 @@ sun4v_lddfmna:
 /* Unaligned std float, tl0. */
 sun4v_stdfmna:
-	mov	SCRATCHPAD_CPUID, %g1
-	ldxa	[%g1] ASI_SCRATCHPAD, %g3
-	sethi	%hi(trap_block), %g2
-	or	%g2, %lo(trap_block), %g2
-	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
-	add	%g2, %g3, %g2
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
-	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	ldx	[%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5
 	sllx	%g3, 16, %g3
 	or	%g5, %g3, %g5
 	ba,pt	%xcc, etrap
@@ -389,10 +389,35 @@ after_lock_tlb:
 	or	%o1, PSTATE_IE, %o1
 	wrpr	%o1, 0, %pstate
-	call	prom_set_trap_table
+	sethi	%hi(is_sun4v), %o0
+	lduw	[%o0 + %lo(is_sun4v)], %o0
+	brz,pt	%o0, 1f
+	nop
+	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
+	add	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+	stxa	%g2, [%g0] ASI_SCRATCHPAD
+	/* Compute physical address:
+	 *
+	 *	paddr = kern_base + (mmfsa_vaddr - KERNBASE)
+	 */
+	sethi	%hi(KERNBASE), %g3
+	sub	%g2, %g3, %g2
+	sethi	%hi(kern_base), %g3
+	ldx	[%g3 + %lo(kern_base)], %g3
+	add	%g2, %g3, %o1
+	call	prom_set_trap_table_sun4v
+	sethi	%hi(sparc64_ttable_tl0), %o0
+	ba,pt	%xcc, 2f
+	nop
+1:	call	prom_set_trap_table
 	sethi	%hi(sparc64_ttable_tl0), %o0
-	call	smp_callin
+2:	call	smp_callin
 	nop
 	call	cpu_idle
 	mov	0, %o0
@@ -1109,24 +1109,6 @@ static void __init tsb_phys_patch(void)
 	}
 }
-/* Register this cpu's fault status area with the hypervisor. */
-void __cpuinit sun4v_register_fault_status(void)
-{
-	register unsigned long func asm("%o5");
-	register unsigned long arg0 asm("%o0");
-	int cpu = hard_smp_processor_id();
-	struct trap_per_cpu *tb = &trap_block[cpu];
-	unsigned long pa;
-
-	pa = kern_base + ((unsigned long) tb - KERNBASE);
-	func = HV_FAST_MMU_FAULT_AREA_CONF;
-	arg0 = pa;
-	__asm__ __volatile__("ta %4"
-			     : "=&r" (func), "=&r" (arg0)
-			     : "0" (func), "1" (arg0),
-			       "i" (HV_FAST_TRAP));
-}
 /* paging_init() sets up the page tables */
 extern void cheetah_ecache_flush_init(void);

@@ -1147,10 +1129,8 @@ void __init paging_init(void)
 	    tlb_type == hypervisor)
 		tsb_phys_patch();
-	if (tlb_type == hypervisor) {
+	if (tlb_type == hypervisor)
 		sun4v_patch_tlb_handlers();
-		sun4v_register_fault_status();
-	}
 	/* Find available physical memory... */
 	read_obp_memory("available", &pavail[0], &pavail_ents);
@@ -136,6 +136,11 @@ void prom_set_trap_table(unsigned long tba)
 	p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba);
 }
+void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa)
+{
+	p1275_cmd("SUNW,set-trap-table", P1275_INOUT(2, 0), tba, mmfsa);
+}
 int prom_get_mmu_ihandle(void)
 {
 	int node, ret;
@@ -156,13 +156,16 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 	nop; \
 	.previous;
-/* Clobbers TMP, current address space PGD phys address into DEST. */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
 	__GET_CPUID(TMP) \
 	sethi	%hi(trap_block), DEST; \
 	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
 	or	DEST, %lo(trap_block), DEST; \
 	add	DEST, TMP, DEST; \
+/* Clobbers TMP, current address space PGD phys address into DEST. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
 	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
 /* Clobbers TMP, loads local processor's IRQ work area into DEST. */

@@ -175,11 +178,8 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 /* Clobbers TMP, loads DEST with current thread info pointer. */
 #define TRAP_LOAD_THREAD_REG(DEST, TMP) \
-	__GET_CPUID(TMP) \
-	sethi	%hi(trap_block), DEST; \
-	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
-	or	DEST, %lo(trap_block), DEST; \
-	ldx	[DEST + TMP], DEST;
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
 /* Given the current thread info pointer in THR, load the per-cpu
  * area base of the current processor into DEST. REG1, REG2, and REG3 are

@@ -201,13 +201,13 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 #else
-#define __GET_CPUID(REG) \
-	mov	0, REG;
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+	sethi	%hi(trap_block), DEST; \
+	or	DEST, %lo(trap_block), DEST; \
 /* Uniprocessor versions, we know the cpuid is zero. */
 #define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
-	sethi	%hi(trap_block), DEST; \
-	or	DEST, %lo(trap_block), DEST; \
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
 	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
 #define TRAP_LOAD_IRQ_WORK(DEST, TMP) \

@@ -215,8 +215,8 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 	or	DEST, %lo(__irq_work), DEST;
 #define TRAP_LOAD_THREAD_REG(DEST, TMP) \
-	sethi	%hi(trap_block), DEST; \
-	ldx	[DEST + %lo(trap_block)], DEST;
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
 /* No per-cpu areas on uniprocessor, so no need to load DEST. */
 #define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
@@ -338,6 +338,7 @@ int cpu_find_by_mid(int mid, int *prom_node);
 /* Client interface level routines. */
 extern void prom_set_trap_table(unsigned long tba);
+extern void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa);
 extern long p1275_cmd(const char *, long, ...);
@@ -180,25 +180,25 @@
 #define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl)
 #endif
 #define SUN4V_ITSB_MISS \
-	mov	SCRATCHPAD_CPUID, %g1; \
-	ldxa	[%g1] ASI_SCRATCHPAD, %g2; \
-	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1; \
-	sethi	%hi(trap_block), %g5; \
-	sllx	%g2, TRAP_BLOCK_SZ_SHIFT, %g2; \
-	or	%g5, %lo(trap_block), %g5; \
-	ba,pt	%xcc, sun4v_itsb_miss; \
-	add	%g5, %g2, %g5;
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2; \
+	ldx	[%g2 + HV_FAULT_I_ADDR_OFFSET], %g4; \
+	ldx	[%g2 + HV_FAULT_I_CTX_OFFSET], %g5; \
+	srlx	%g4, 22, %g7; \
+	sllx	%g5, 48, %g6; \
+	brz,pn	%g5, kvmap_itlb_4v; \
+	or	%g6, %g7, %g6; \
+	ba,a,pt	%xcc, sun4v_itsb_miss;
 #define SUN4V_DTSB_MISS \
-	mov	SCRATCHPAD_CPUID, %g1; \
-	ldxa	[%g1] ASI_SCRATCHPAD, %g2; \
-	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1; \
-	sethi	%hi(trap_block), %g5; \
-	sllx	%g2, TRAP_BLOCK_SZ_SHIFT, %g2; \
-	or	%g5, %lo(trap_block), %g5; \
-	ba,pt	%xcc, sun4v_dtsb_miss; \
-	add	%g5, %g2, %g5;
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2; \
+	ldx	[%g2 + HV_FAULT_D_ADDR_OFFSET], %g4; \
+	ldx	[%g2 + HV_FAULT_D_CTX_OFFSET], %g5; \
+	srlx	%g4, 22, %g7; \
+	sllx	%g5, 48, %g6; \
+	brz,pn	%g5, kvmap_dtlb_4v; \
+	or	%g6, %g7, %g6; \
+	ba,a,pt	%xcc, sun4v_dtsb_miss;
 /* Before touching these macros, you owe it to yourself to go and
  * see how arch/sparc64/kernel/winfixup.S works... -DaveM