Commit 164c220f authored by David S. Miller

[SPARC64]: Fix hypervisor call arg passing.

Function goes in %o5, args go in %o0 --> %o4.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent dedacf62
...@@ -863,10 +863,10 @@ void init_irqwork_curcpu(void) ...@@ -863,10 +863,10 @@ void init_irqwork_curcpu(void)
static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type) static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type)
{ {
register unsigned long func __asm__("%o0"); register unsigned long func __asm__("%o5");
register unsigned long arg0 __asm__("%o1"); register unsigned long arg0 __asm__("%o0");
register unsigned long arg1 __asm__("%o2"); register unsigned long arg1 __asm__("%o1");
register unsigned long arg2 __asm__("%o3"); register unsigned long arg2 __asm__("%o2");
unsigned long page = get_zeroed_page(GFP_ATOMIC); unsigned long page = get_zeroed_page(GFP_ATOMIC);
if (!page) { if (!page) {
......
...@@ -572,10 +572,10 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t ...@@ -572,10 +572,10 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
retries = 0; retries = 0;
cnt = init_cpu_list(cpu_list, mask); cnt = init_cpu_list(cpu_list, mask);
do { do {
register unsigned long func __asm__("%o0"); register unsigned long func __asm__("%o5");
register unsigned long arg0 __asm__("%o1"); register unsigned long arg0 __asm__("%o0");
register unsigned long arg1 __asm__("%o2"); register unsigned long arg1 __asm__("%o1");
register unsigned long arg2 __asm__("%o3"); register unsigned long arg2 __asm__("%o2");
func = HV_FAST_CPU_MONDO_SEND; func = HV_FAST_CPU_MONDO_SEND;
arg0 = cnt; arg0 = cnt;
...@@ -624,10 +624,10 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t ...@@ -624,10 +624,10 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
int retries = 0; int retries = 0;
do { do {
register unsigned long func __asm__("%o0"); register unsigned long func __asm__("%o5");
register unsigned long arg0 __asm__("%o1"); register unsigned long arg0 __asm__("%o0");
register unsigned long arg1 __asm__("%o2"); register unsigned long arg1 __asm__("%o1");
register unsigned long arg2 __asm__("%o3"); register unsigned long arg2 __asm__("%o2");
cpu_list[0] = i; cpu_list[0] = i;
func = HV_FAST_CPU_MONDO_SEND; func = HV_FAST_CPU_MONDO_SEND;
......
...@@ -265,20 +265,20 @@ do_unlock: ...@@ -265,20 +265,20 @@ do_unlock:
nop nop
niagara_lock_tlb: niagara_lock_tlb:
mov HV_FAST_MMU_MAP_PERM_ADDR, %o0 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
sethi %hi(KERNBASE), %o1 sethi %hi(KERNBASE), %o0
clr %o2 clr %o1
sethi %hi(kern_locked_tte_data), %o3 sethi %hi(kern_locked_tte_data), %o2
ldx [%o3 + %lo(kern_locked_tte_data)], %o3 ldx [%o2 + %lo(kern_locked_tte_data)], %o2
mov HV_MMU_IMMU, %o4 mov HV_MMU_IMMU, %o3
ta HV_FAST_TRAP ta HV_FAST_TRAP
mov HV_FAST_MMU_MAP_PERM_ADDR, %o0 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
sethi %hi(KERNBASE), %o1 sethi %hi(KERNBASE), %o0
clr %o2 clr %o1
sethi %hi(kern_locked_tte_data), %o3 sethi %hi(kern_locked_tte_data), %o2
ldx [%o3 + %lo(kern_locked_tte_data)], %o3 ldx [%o2 + %lo(kern_locked_tte_data)], %o2
mov HV_MMU_DMMU, %o4 mov HV_MMU_DMMU, %o3
ta HV_FAST_TRAP ta HV_FAST_TRAP
sethi %hi(bigkernel), %g2 sethi %hi(bigkernel), %g2
...@@ -286,24 +286,24 @@ niagara_lock_tlb: ...@@ -286,24 +286,24 @@ niagara_lock_tlb:
brz,pt %g2, after_lock_tlb brz,pt %g2, after_lock_tlb
nop nop
mov HV_FAST_MMU_MAP_PERM_ADDR, %o0 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
sethi %hi(KERNBASE + 0x400000), %o1 sethi %hi(KERNBASE + 0x400000), %o0
clr %o2 clr %o1
sethi %hi(kern_locked_tte_data), %o3 sethi %hi(kern_locked_tte_data), %o2
ldx [%o3 + %lo(kern_locked_tte_data)], %o3 ldx [%o2 + %lo(kern_locked_tte_data)], %o2
sethi %hi(0x400000), %o4 sethi %hi(0x400000), %o3
add %o3, %o4, %o3 add %o2, %o3, %o2
mov HV_MMU_IMMU, %o4 mov HV_MMU_IMMU, %o3
ta HV_FAST_TRAP ta HV_FAST_TRAP
mov HV_FAST_MMU_MAP_PERM_ADDR, %o0 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
sethi %hi(KERNBASE + 0x400000), %o1 sethi %hi(KERNBASE + 0x400000), %o0
clr %o2 clr %o1
sethi %hi(kern_locked_tte_data), %o3 sethi %hi(kern_locked_tte_data), %o2
ldx [%o3 + %lo(kern_locked_tte_data)], %o3 ldx [%o2 + %lo(kern_locked_tte_data)], %o2
sethi %hi(0x400000), %o4 sethi %hi(0x400000), %o3
add %o3, %o4, %o3 add %o2, %o3, %o2
mov HV_MMU_DMMU, %o4 mov HV_MMU_DMMU, %o3
ta HV_FAST_TRAP ta HV_FAST_TRAP
after_lock_tlb: after_lock_tlb:
......
...@@ -266,9 +266,9 @@ __tsb_context_switch: ...@@ -266,9 +266,9 @@ __tsb_context_switch:
mov SCRATCHPAD_UTSBREG2, %g1 mov SCRATCHPAD_UTSBREG2, %g1
stxa %g2, [%g1] ASI_SCRATCHPAD stxa %g2, [%g1] ASI_SCRATCHPAD
mov HV_FAST_MMU_TSB_CTXNON0, %o0 mov HV_FAST_MMU_TSB_CTXNON0, %o5
mov 1, %o1 mov 1, %o0
mov %o4, %o2 mov %o4, %o1
ta HV_FAST_TRAP ta HV_FAST_TRAP
ba,pt %xcc, 9f ba,pt %xcc, 9f
......
...@@ -518,11 +518,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr, ...@@ -518,11 +518,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
unsigned long pte, unsigned long pte,
unsigned long mmu) unsigned long mmu)
{ {
register unsigned long func asm("%o0"); register unsigned long func asm("%o5");
register unsigned long arg0 asm("%o1"); register unsigned long arg0 asm("%o0");
register unsigned long arg1 asm("%o2"); register unsigned long arg1 asm("%o1");
register unsigned long arg2 asm("%o3"); register unsigned long arg2 asm("%o2");
register unsigned long arg3 asm("%o4"); register unsigned long arg3 asm("%o3");
func = HV_FAST_MMU_MAP_PERM_ADDR; func = HV_FAST_MMU_MAP_PERM_ADDR;
arg0 = vaddr; arg0 = vaddr;
...@@ -1112,18 +1112,18 @@ static void __init tsb_phys_patch(void) ...@@ -1112,18 +1112,18 @@ static void __init tsb_phys_patch(void)
/* Register this cpu's fault status area with the hypervisor. */ /* Register this cpu's fault status area with the hypervisor. */
void __cpuinit sun4v_register_fault_status(void) void __cpuinit sun4v_register_fault_status(void)
{ {
register unsigned long func asm("%o5");
register unsigned long arg0 asm("%o0"); register unsigned long arg0 asm("%o0");
register unsigned long arg1 asm("%o1");
int cpu = hard_smp_processor_id(); int cpu = hard_smp_processor_id();
struct trap_per_cpu *tb = &trap_block[cpu]; struct trap_per_cpu *tb = &trap_block[cpu];
unsigned long pa; unsigned long pa;
pa = kern_base + ((unsigned long) tb - KERNBASE); pa = kern_base + ((unsigned long) tb - KERNBASE);
arg0 = HV_FAST_MMU_FAULT_AREA_CONF; func = HV_FAST_MMU_FAULT_AREA_CONF;
arg1 = pa; arg0 = pa;
__asm__ __volatile__("ta %4" __asm__ __volatile__("ta %4"
: "=&r" (arg0), "=&r" (arg1) : "=&r" (func), "=&r" (arg0)
: "0" (arg0), "1" (arg1), : "0" (func), "1" (arg0),
"i" (HV_FAST_TRAP)); "i" (HV_FAST_TRAP));
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment