Commit c0ac5dc4 authored by Catalin Marinas

Thumb-2: Implementation of the unified start-up and exceptions code

This patch implements the ARM/Thumb-2 unified kernel start-up and
exception handling code.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 1a74f2d2
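
The new code relies throughout on the ARM()/THUMB()/W() macros pulled in via <asm/unified.h>: ARM() emits its argument only for a classic ARM build, THUMB() only when the kernel is built for Thumb-2, and W() forces the 32-bit (".w") encoding so that jump tables and vector stubs keep a fixed instruction size. As a rough sketch of how that header behaves (reconstructed from the usage in this diff, not copied from the commit), the macros reduce to:

#ifdef CONFIG_THUMB2_KERNEL
#define ARM(x...)
#define THUMB(x...)	x
#define W(instr)	instr.w
#else
#define ARM(x...)	x
#define THUMB(x...)
#define W(instr)	instr
#endif

The "it", "itt", "ite" and "ittt" instructions inserted ahead of conditional instructions are Thumb-2 IT blocks, required by the unified assembler syntax; in an ARM build the assembler accepts them without emitting any code. Similarly, the badr macro replacing plain adr presumably yields the target address with bit 0 set when targeting Thumb, so that a callee's return through lr lands back in the correct instruction set state.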
...@@ -9,6 +9,8 @@ ...@@ -9,6 +9,8 @@
* *
* 32-bit debugging code * 32-bit debugging code
*/ */
#include <asm/unified.h>
#include <linux/linkage.h> #include <linux/linkage.h>
.text .text
...@@ -86,14 +88,17 @@ ...@@ -86,14 +88,17 @@
/* /*
* Useful debugging routines * Useful debugging routines
*/ */
.type printhex8, %function
ENTRY(printhex8) ENTRY(printhex8)
mov r1, #8 mov r1, #8
b printhex b printhex
.type printhex4, %function
ENTRY(printhex4) ENTRY(printhex4)
mov r1, #4 mov r1, #4
b printhex b printhex
.type printhex2, %function
ENTRY(printhex2) ENTRY(printhex2)
mov r1, #2 mov r1, #2
printhex: adr r2, hexbuf printhex: adr r2, hexbuf
...@@ -103,6 +108,7 @@ printhex: adr r2, hexbuf ...@@ -103,6 +108,7 @@ printhex: adr r2, hexbuf
1: and r1, r0, #15 1: and r1, r0, #15
mov r0, r0, lsr #4 mov r0, r0, lsr #4
cmp r1, #10 cmp r1, #10
ite lt
addlt r1, r1, #'0' addlt r1, r1, #'0'
addge r1, r1, #'a' - 10 addge r1, r1, #'a' - 10
strb r1, [r3, #-1]! strb r1, [r3, #-1]!
...@@ -113,6 +119,7 @@ printhex: adr r2, hexbuf ...@@ -113,6 +119,7 @@ printhex: adr r2, hexbuf
.ltorg .ltorg
.type printascii, %function
ENTRY(printascii) ENTRY(printascii)
addruart r3 addruart r3
b 2f b 2f
...@@ -120,14 +127,17 @@ ENTRY(printascii) ...@@ -120,14 +127,17 @@ ENTRY(printascii)
senduart r1, r3 senduart r1, r3
busyuart r2, r3 busyuart r2, r3
teq r1, #'\n' teq r1, #'\n'
itt eq
moveq r1, #'\r' moveq r1, #'\r'
beq 1b beq 1b
2: teq r0, #0 2: teq r0, #0
itt ne
ldrneb r1, [r0], #1 ldrneb r1, [r0], #1
teqne r1, #0 teqne r1, #0
bne 1b bne 1b
mov pc, lr mov pc, lr
.type printch, %function
ENTRY(printch) ENTRY(printch)
addruart r3 addruart r3
mov r1, r0 mov r1, r0
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
* Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
* it to save wrong values... Be aware! * it to save wrong values... Be aware!
*/ */
#include <asm/unified.h>
#include <asm/memory.h> #include <asm/memory.h>
#include <asm/glue.h> #include <asm/glue.h>
...@@ -29,11 +30,12 @@ ...@@ -29,11 +30,12 @@
.macro irq_handler .macro irq_handler
get_irqnr_preamble r5, lr get_irqnr_preamble r5, lr
1: get_irqnr_and_base r0, r6, r5, lr 1: get_irqnr_and_base r0, r6, r5, lr
ittt ne
movne r1, sp movne r1, sp
@ @
@ routine called with r0 = irq number, r1 = struct pt_regs * @ routine called with r0 = irq number, r1 = struct pt_regs *
@ @
adrne lr, 1b badr lr, 1b, ne
bne asm_do_IRQ bne asm_do_IRQ
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -44,14 +46,16 @@ ...@@ -44,14 +46,16 @@
* preserved from get_irqnr_and_base above * preserved from get_irqnr_and_base above
*/ */
test_for_ipi r0, r6, r5, lr test_for_ipi r0, r6, r5, lr
ittt ne
movne r0, sp movne r0, sp
adrne lr, 1b badr lr, 1b, ne
bne do_IPI bne do_IPI
#ifdef CONFIG_LOCAL_TIMERS #ifdef CONFIG_LOCAL_TIMERS
test_for_ltirq r0, r6, r5, lr test_for_ltirq r0, r6, r5, lr
ittt ne
movne r0, sp movne r0, sp
adrne lr, 1b badr lr, 1b, ne
bne do_local_timer bne do_local_timer
#endif #endif
#endif #endif
...@@ -63,22 +67,29 @@ ...@@ -63,22 +67,29 @@
*/ */
.macro inv_entry, reason .macro inv_entry, reason
sub sp, sp, #S_FRAME_SIZE sub sp, sp, #S_FRAME_SIZE
stmib sp, {r1 - lr} ARM( stmib sp, {r1 - lr} )
THUMB( stmia sp, {r0 - r12} )
THUMB( str sp, [sp, #S_SP] )
THUMB( str lr, [sp, #S_LR] )
mov r1, #\reason mov r1, #\reason
.endm .endm
.type __pabt_invalid, %function
__pabt_invalid: __pabt_invalid:
inv_entry BAD_PREFETCH inv_entry BAD_PREFETCH
b common_invalid b common_invalid
.type __dabt_invalid, %function
__dabt_invalid: __dabt_invalid:
inv_entry BAD_DATA inv_entry BAD_DATA
b common_invalid b common_invalid
.type __irq_invalid, %function
__irq_invalid: __irq_invalid:
inv_entry BAD_IRQ inv_entry BAD_IRQ
b common_invalid b common_invalid
.type __und_invalid, %function
__und_invalid: __und_invalid:
inv_entry BAD_UNDEFINSTR inv_entry BAD_UNDEFINSTR
...@@ -115,13 +126,16 @@ common_invalid: ...@@ -115,13 +126,16 @@ common_invalid:
.macro svc_entry .macro svc_entry
sub sp, sp, #S_FRAME_SIZE sub sp, sp, #S_FRAME_SIZE
SPFIX( tst sp, #4 ) SPFIX( tst sp, #4 )
SPFIX( it ne )
SPFIX( bicne sp, sp, #4 ) SPFIX( bicne sp, sp, #4 )
stmib sp, {r1 - r12} ARM( stmib sp, {r1 - r12} )
THUMB( stmia sp, {r0 - r12} )
ldmia r0, {r1 - r3} ldmia r0, {r1 - r3}
add r5, sp, #S_SP @ here for interlock avoidance add r5, sp, #S_SP @ here for interlock avoidance
mov r4, #-1 @ "" "" "" "" mov r4, #-1 @ "" "" "" ""
add r0, sp, #S_FRAME_SIZE @ "" "" "" "" add r0, sp, #S_FRAME_SIZE @ "" "" "" ""
SPFIX( it ne )
SPFIX( addne r0, r0, #4 ) SPFIX( addne r0, r0, #4 )
str r1, [sp] @ save the "real" r0 copied str r1, [sp] @ save the "real" r0 copied
@ from the exception stack @ from the exception stack
...@@ -141,6 +155,7 @@ common_invalid: ...@@ -141,6 +155,7 @@ common_invalid:
.endm .endm
.align 5 .align 5
.type __dabt_svc, %function
__dabt_svc: __dabt_svc:
svc_entry svc_entry
...@@ -149,6 +164,7 @@ __dabt_svc: ...@@ -149,6 +164,7 @@ __dabt_svc:
@ @
mrs r9, cpsr mrs r9, cpsr
tst r3, #PSR_I_BIT tst r3, #PSR_I_BIT
it eq
biceq r9, r9, #PSR_I_BIT biceq r9, r9, #PSR_I_BIT
@ @
...@@ -183,11 +199,11 @@ __dabt_svc: ...@@ -183,11 +199,11 @@ __dabt_svc:
@ @
@ restore SPSR and restart the instruction @ restore SPSR and restart the instruction
@ @
ldr r0, [sp, #S_PSR] ARM( arm_rfe r0 )
msr spsr_cxsf, r0 THUMB( thumb_rfe r0, r1, r2 )
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
.align 5 .align 5
.type __irq_svc, %function
__irq_svc: __irq_svc:
svc_entry svc_entry
...@@ -210,26 +226,30 @@ preempt_return: ...@@ -210,26 +226,30 @@ preempt_return:
ldr r0, [tsk, #TI_PREEMPT] @ read preempt value ldr r0, [tsk, #TI_PREEMPT] @ read preempt value
str r8, [tsk, #TI_PREEMPT] @ restore preempt count str r8, [tsk, #TI_PREEMPT] @ restore preempt count
teq r0, r7 teq r0, r7
it ne
strne r0, [r0, -r0] @ bug() strne r0, [r0, -r0] @ bug()
#endif #endif
ldr r0, [sp, #S_PSR] @ irqs are already disabled ldr r0, [sp, #S_PSR] @ irqs are already disabled
msr spsr_cxsf, r0 ARM( msr spsr_cxsf, r0 )
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
tst r0, #PSR_I_BIT tst r0, #PSR_I_BIT
bleq trace_hardirqs_on bleq trace_hardirqs_on
#endif #endif
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr ARM( ldmia sp, {r0 - pc}^ ) @ load r0 - pc, cpsr
THUMB( thumb_rfe r0, r1, r2 )
.ltorg .ltorg
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
svc_preempt: svc_preempt:
teq r8, #0 @ was preempt count = 0 teq r8, #0 @ was preempt count = 0
ite eq
ldreq r6, .LCirq_stat ldreq r6, .LCirq_stat
movne pc, lr @ no movne pc, lr @ no
ldr r0, [r6, #4] @ local_irq_count ldr r0, [r6, #4] @ local_irq_count
ldr r1, [r6, #8] @ local_bh_count ldr r1, [r6, #8] @ local_bh_count
adds r0, r0, r1 adds r0, r0, r1
it ne
movne pc, lr movne pc, lr
mov r7, #0 @ preempt_schedule_irq mov r7, #0 @ preempt_schedule_irq
str r7, [tsk, #TI_PREEMPT] @ expects preempt_count == 0 str r7, [tsk, #TI_PREEMPT] @ expects preempt_count == 0
...@@ -241,6 +261,7 @@ svc_preempt: ...@@ -241,6 +261,7 @@ svc_preempt:
#endif #endif
.align 5 .align 5
.type __und_svc, %function
__und_svc: __und_svc:
svc_entry svc_entry
...@@ -252,7 +273,7 @@ __und_svc: ...@@ -252,7 +273,7 @@ __und_svc:
@ r0 - instruction @ r0 - instruction
@ @
ldr r0, [r2, #-4] ldr r0, [r2, #-4]
adr r9, 1f badr r9, 1f
bl call_fpe bl call_fpe
mov r0, sp @ struct pt_regs *regs mov r0, sp @ struct pt_regs *regs
...@@ -266,11 +287,11 @@ __und_svc: ...@@ -266,11 +287,11 @@ __und_svc:
@ @
@ restore SPSR and restart the instruction @ restore SPSR and restart the instruction
@ @
ldr lr, [sp, #S_PSR] @ Get SVC cpsr ARM( arm_rfe lr)
msr spsr_cxsf, lr THUMB( thumb_rfe r0, r1, r2 )
ldmia sp, {r0 - pc}^ @ Restore SVC registers
.align 5 .align 5
.type __pabt_svc, %function
__pabt_svc: __pabt_svc:
svc_entry svc_entry
...@@ -279,6 +300,7 @@ __pabt_svc: ...@@ -279,6 +300,7 @@ __pabt_svc:
@ @
mrs r9, cpsr mrs r9, cpsr
tst r3, #PSR_I_BIT tst r3, #PSR_I_BIT
it eq
biceq r9, r9, #PSR_I_BIT biceq r9, r9, #PSR_I_BIT
@ @
...@@ -307,9 +329,8 @@ __pabt_svc: ...@@ -307,9 +329,8 @@ __pabt_svc:
@ @
@ restore SPSR and restart the instruction @ restore SPSR and restart the instruction
@ @
ldr r0, [sp, #S_PSR] ARM( arm_rfe r0 )
msr spsr_cxsf, r0 THUMB( thumb_rfe r0, r1, r2 )
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
.align 5 .align 5
.LCcralign: .LCcralign:
...@@ -337,7 +358,8 @@ __pabt_svc: ...@@ -337,7 +358,8 @@ __pabt_svc:
.macro usr_entry .macro usr_entry
sub sp, sp, #S_FRAME_SIZE sub sp, sp, #S_FRAME_SIZE
stmib sp, {r1 - r12} ARM( stmib sp, {r1 - r12} )
THUMB( stmia sp, {r0 - r12} )
ldmia r0, {r1 - r3} ldmia r0, {r1 - r3}
add r0, sp, #S_PC @ here for interlock avoidance add r0, sp, #S_PC @ here for interlock avoidance
...@@ -366,7 +388,8 @@ __pabt_svc: ...@@ -366,7 +388,8 @@ __pabt_svc:
@ Also, separately save sp_usr and lr_usr @ Also, separately save sp_usr and lr_usr
@ @
stmia r0, {r2 - r4} stmia r0, {r2 - r4}
stmdb r0, {sp, lr}^ ARM( stmdb r0, {sp, lr}^ )
THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
@ @
@ Enable the alignment trap while in kernel mode @ Enable the alignment trap while in kernel mode
...@@ -380,6 +403,7 @@ __pabt_svc: ...@@ -380,6 +403,7 @@ __pabt_svc:
.endm .endm
.align 5 .align 5
.type __dabt_usr, %function
__dabt_usr: __dabt_usr:
usr_entry usr_entry
...@@ -405,10 +429,11 @@ __dabt_usr: ...@@ -405,10 +429,11 @@ __dabt_usr:
@ @
enable_irq enable_irq
mov r2, sp mov r2, sp
adr lr, ret_from_exception badr lr, ret_from_exception
b do_DataAbort b do_DataAbort
.align 5 .align 5
.type __irq_usr, %function
__irq_usr: __irq_usr:
usr_entry usr_entry
...@@ -439,6 +464,7 @@ __irq_usr: ...@@ -439,6 +464,7 @@ __irq_usr:
.ltorg .ltorg
.align 5 .align 5
.type __und_usr, %function
__und_usr: __und_usr:
usr_entry usr_entry
...@@ -449,17 +475,22 @@ __und_usr: ...@@ -449,17 +475,22 @@ __und_usr:
@ @
@ r0 - instruction @ r0 - instruction
@ @
adr r9, ret_from_exception badr r9, ret_from_exception
adr lr, __und_usr_unknown badr lr, __und_usr_unknown
tst r3, #PSR_T_BIT @ Thumb mode? tst r3, #PSR_T_BIT @ Thumb mode?
it ne
addne r2, r2, #2 addne r2, r2, #2
sub r4, r2, #4 sub r4, r2, #4
1: ldreqt r0, [r4] 1: it eq
ldreqt r0, [r4]
beq call_fpe beq call_fpe
@ Thumb instruction @ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7 #if __LINUX_ARM_ARCH__ >= 7
2: ldrht r5, [r4], #2 2:
ARM( ldrht r5, [r4], #2 )
THUMB( ldrht r5, [r4] )
THUMB( add r4, r4, #2 )
@ ignore 16-bit instructions. @ ignore 16-bit instructions.
and r0, r5, #0xf800 and r0, r5, #0xf800
cmp r0, #0xe800 cmp r0, #0xe800
...@@ -550,12 +581,15 @@ call_fpe: ...@@ -550,12 +581,15 @@ call_fpe:
and r8, r0, #0x0f000000 @ mask out op-code bits and r8, r0, #0x0f000000 @ mask out op-code bits
teqne r8, #0x0f000000 @ SWI (ARM6/7 bug)? teqne r8, #0x0f000000 @ SWI (ARM6/7 bug)?
#endif #endif
it eq
moveq pc, lr moveq pc, lr
get_thread_info r10 @ get current thread get_thread_info r10 @ get current thread
and r8, r0, #0x00000f00 @ mask out CP number and r8, r0, #0x00000f00 @ mask out CP number
THUMB( lsr r8, r8, #8 )
mov r7, #1 mov r7, #1
add r6, r10, #TI_USED_CP add r6, r10, #TI_USED_CP
strb r7, [r6, r8, lsr #8] @ set appropriate used_cp[] ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT #ifdef CONFIG_IWMMXT
@ Test if we need to give access to iWMMXt coprocessors @ Test if we need to give access to iWMMXt coprocessors
ldr r5, [r10, #TI_FLAGS] ldr r5, [r10, #TI_FLAGS]
...@@ -563,36 +597,38 @@ call_fpe: ...@@ -563,36 +597,38 @@ call_fpe:
movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1) movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1)
bcs iwmmxt_task_enable bcs iwmmxt_task_enable
#endif #endif
add pc, pc, r8, lsr #6 ARM( add pc, pc, r8, lsr #6 )
mov r0, r0 ARM( mov r0, r0 )
THUMB( lsl r8, r8, #2 )
mov pc, lr @ CP#0 THUMB( add.w pc, pc, r8 )
b do_fpe @ CP#1 (FPE)
b do_fpe @ CP#2 (FPE) W(mov) pc, lr @ CP#0
mov pc, lr @ CP#3 W(b) do_fpe @ CP#1 (FPE)
W(b) do_fpe @ CP#2 (FPE)
W(mov) pc, lr @ CP#3
#ifdef CONFIG_CRUNCH #ifdef CONFIG_CRUNCH
b crunch_task_enable @ CP#4 (MaverickCrunch) b crunch_task_enable @ CP#4 (MaverickCrunch)
b crunch_task_enable @ CP#5 (MaverickCrunch) b crunch_task_enable @ CP#5 (MaverickCrunch)
b crunch_task_enable @ CP#6 (MaverickCrunch) b crunch_task_enable @ CP#6 (MaverickCrunch)
#else #else
mov pc, lr @ CP#4 W(mov) pc, lr @ CP#4
mov pc, lr @ CP#5 W(mov) pc, lr @ CP#5
mov pc, lr @ CP#6 W(mov) pc, lr @ CP#6
#endif #endif
mov pc, lr @ CP#7 W(mov) pc, lr @ CP#7
mov pc, lr @ CP#8 W(mov) pc, lr @ CP#8
mov pc, lr @ CP#9 W(mov) pc, lr @ CP#9
#ifdef CONFIG_VFP #ifdef CONFIG_VFP
b do_vfp @ CP#10 (VFP) W(b) do_vfp @ CP#10 (VFP)
b do_vfp @ CP#11 (VFP) W(b) do_vfp @ CP#11 (VFP)
#else #else
mov pc, lr @ CP#10 (VFP) W(mov) pc, lr @ CP#10 (VFP)
mov pc, lr @ CP#11 (VFP) W(mov) pc, lr @ CP#11 (VFP)
#endif #endif
mov pc, lr @ CP#12 W(mov) pc, lr @ CP#12
mov pc, lr @ CP#13 W(mov) pc, lr @ CP#13
mov pc, lr @ CP#14 (Debug) W(mov) pc, lr @ CP#14 (Debug)
mov pc, lr @ CP#15 (Control) W(mov) pc, lr @ CP#15 (Control)
#ifdef CONFIG_NEON #ifdef CONFIG_NEON
.align 6 .align 6
...@@ -639,12 +675,14 @@ ENTRY(fp_enter) ...@@ -639,12 +675,14 @@ ENTRY(fp_enter)
no_fp: mov pc, lr no_fp: mov pc, lr
.type __und_usr_unknown, %function
__und_usr_unknown: __und_usr_unknown:
mov r0, sp mov r0, sp
adr lr, ret_from_exception badr lr, ret_from_exception
b do_undefinstr b do_undefinstr
.align 5 .align 5
.type __pabt_usr, %function
__pabt_usr: __pabt_usr:
usr_entry usr_entry
...@@ -663,6 +701,7 @@ __pabt_usr: ...@@ -663,6 +701,7 @@ __pabt_usr:
/* /*
* This is the return code to user mode for abort handlers * This is the return code to user mode for abort handlers
*/ */
.type ret_from_exception, %function
ENTRY(ret_from_exception) ENTRY(ret_from_exception)
get_thread_info tsk get_thread_info tsk
mov why, #0 mov why, #0
...@@ -673,10 +712,14 @@ ENTRY(ret_from_exception) ...@@ -673,10 +712,14 @@ ENTRY(ret_from_exception)
* r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
* previous and next are guaranteed not to be the same. * previous and next are guaranteed not to be the same.
*/ */
.type __switch_to, %function
ENTRY(__switch_to) ENTRY(__switch_to)
add ip, r1, #TI_CPU_SAVE add ip, r1, #TI_CPU_SAVE
ldr r3, [r2, #TI_TP_VALUE] ldr r3, [r2, #TI_TP_VALUE]
stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack
THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
THUMB( str sp, [ip], #4 )
THUMB( str lr, [ip], #4 )
#ifdef CONFIG_ARM_XENON #ifdef CONFIG_ARM_XENON
mrc p14, 6, r4, c1, c0, 0 @ current xenon state mrc p14, 6, r4, c1, c0, 0 @ current xenon state
ldr r5, [r2, #TI_XENONSTATE] @ value to restore ldr r5, [r2, #TI_XENONSTATE] @ value to restore
...@@ -707,8 +750,12 @@ ENTRY(__switch_to) ...@@ -707,8 +750,12 @@ ENTRY(__switch_to)
ldr r0, =thread_notify_head ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain bl atomic_notifier_call_chain
THUMB( mov ip, r4 )
mov r0, r5 mov r0, r5
ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
THUMB( ldr sp, [ip], #4 )
THUMB( ldr pc, [ip] )
__INIT __INIT
...@@ -741,6 +788,7 @@ ENTRY(__switch_to) ...@@ -741,6 +788,7 @@ ENTRY(__switch_to)
* if your compiled code is not going to use the new instructions for other * if your compiled code is not going to use the new instructions for other
* purpose. * purpose.
*/ */
THUMB( .arm )
.macro usr_ret, reg .macro usr_ret, reg
#ifdef CONFIG_ARM_THUMB #ifdef CONFIG_ARM_THUMB
...@@ -903,6 +951,7 @@ __kuser_cmpxchg: @ 0xffff0fc0 ...@@ -903,6 +951,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
#endif #endif
ldrex r3, [r2] ldrex r3, [r2]
subs r3, r3, r0 subs r3, r3, r0
it eq
strexeq r3, r1, [r2] strexeq r3, r1, [r2]
rsbs r0, r3, #0 rsbs r0, r3, #0
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -979,6 +1028,7 @@ __kuser_helper_version: @ 0xffff0ffc ...@@ -979,6 +1028,7 @@ __kuser_helper_version: @ 0xffff0ffc
.globl __kuser_helper_end .globl __kuser_helper_end
__kuser_helper_end: __kuser_helper_end:
THUMB( .thumb )
/* /*
* Vector stubs. * Vector stubs.
...@@ -992,36 +1042,42 @@ __kuser_helper_end: ...@@ -992,36 +1042,42 @@ __kuser_helper_end:
* *
* SP points to a minimal amount of processor-private memory, the address * SP points to a minimal amount of processor-private memory, the address
* of which is copied into r0 for the mode specific abort handler. * of which is copied into r0 for the mode specific abort handler.
*
* Any change to this macro might affect the position of the last "ldr"
* relative to the vector entries, as Thumb-2 instructions can be 16-bit.
*/ */
.macro vector_stub, name, mode, correction=0 .macro vector_stub, name, mode, correction=0
.align 5 .align 5
.type vector_\name, %function
vector_\name: vector_\name:
.if \correction .if \correction
sub lr, lr, #\correction W(sub) lr, lr, #\correction
.endif .endif
@ @
@ Save r0, lr_<exception> (parent PC) and spsr_<exception> @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
@ (parent CPSR) @ (parent CPSR)
@ @
stmia sp, {r0, lr} @ save r0, lr W(stmia) sp, {r0, lr} @ save r0, lr
mrs lr, spsr W(mrs) lr, spsr
str lr, [sp, #8] @ save spsr W(str) lr, [sp, #8] @ save spsr
@ @
@ Prepare for SVC32 mode. IRQs remain disabled. @ Prepare for SVC32 mode. IRQs remain disabled.
@ @
mrs r0, cpsr W(mrs) r0, cpsr
eor r0, r0, #(\mode ^ SVC_MODE) W(eor) r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
msr spsr_cxsf, r0 W(msr) spsr_cxsf, r0
@ @
@ the branch table must immediately follow this code @ the branch table must immediately follow this code
@ @
and lr, lr, #0x0f W(and) lr, lr, #0x0f
mov r0, sp W(mov) r0, sp
ldr lr, [pc, lr, lsl #2] ARM( ldr lr, [pc, lr, lsl #2] )
THUMB( add.w lr, pc, lr, lsl #2 )
THUMB( ldr.w lr, [lr, #8] )
movs pc, lr @ branch to handler in SVC mode movs pc, lr @ branch to handler in SVC mode
.endm .endm
...@@ -1160,14 +1216,16 @@ __stubs_end: ...@@ -1160,14 +1216,16 @@ __stubs_end:
.globl __vectors_start .globl __vectors_start
__vectors_start: __vectors_start:
swi SYS_ERROR0 ARM( swi SYS_ERROR0 )
b vector_und + stubs_offset THUMB( svc #0 )
ldr pc, .LCvswi + stubs_offset THUMB( nop )
b vector_pabt + stubs_offset W(b) vector_und + stubs_offset
b vector_dabt + stubs_offset W(ldr) pc, .LCvswi + stubs_offset
b vector_addrexcptn + stubs_offset W(b) vector_pabt + stubs_offset
b vector_irq + stubs_offset W(b) vector_dabt + stubs_offset
b vector_fiq + stubs_offset W(b) vector_addrexcptn + stubs_offset
W(b) vector_irq + stubs_offset
W(b) vector_fiq + stubs_offset
.globl __vectors_end .globl __vectors_end
__vectors_end: __vectors_end:
......
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
* published by the Free Software Foundation. * published by the Free Software Foundation.
*/ */
#include <asm/unified.h>
#include <asm/unistd.h> #include <asm/unistd.h>
#include <asm/arch/entry-macro.S> #include <asm/arch/entry-macro.S>
...@@ -30,12 +32,18 @@ ret_fast_syscall: ...@@ -30,12 +32,18 @@ ret_fast_syscall:
arch_ret_to_user r1, lr arch_ret_to_user r1, lr
@ fast_restore_user_regs @ fast_restore_user_regs
THUMB( mov r2, sp )
THUMB( load_user_sp_lr r2, r3, S_OFF + S_SP ) @ calling sp, lr
ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
ldr lr, [sp, #S_OFF + S_PC]! @ get pc ARM( ldr lr, [sp, #S_OFF + S_PC]! ) @ get pc
THUMB( ldr lr, [sp, #S_OFF + S_PC] ) @ get pc
THUMB( add sp, sp, #S_OFF + S_R1 )
msr spsr_cxsf, r1 @ save in spsr_svc msr spsr_cxsf, r1 @ save in spsr_svc
ldmdb sp, {r1 - lr}^ @ get calling r1 - lr ARM( ldmdb sp, {r1 - lr}^ ) @ get calling r1 - lr
THUMB( ldmia sp, {r1 - r12} ) @ get calling r1 - r12
mov r0, r0 mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC ARM( add sp, sp, #S_FRAME_SIZE - S_PC )
THUMB( add sp, sp, #S_FRAME_SIZE - S_R1 )
movs pc, lr @ return & move spsr_svc into cpsr movs pc, lr @ return & move spsr_svc into cpsr
/* /*
...@@ -58,6 +66,7 @@ work_resched: ...@@ -58,6 +66,7 @@ work_resched:
/* /*
* "slow" syscall return path. "why" tells us if this was a real syscall. * "slow" syscall return path. "why" tells us if this was a real syscall.
*/ */
.type ret_to_user, %function
ENTRY(ret_to_user) ENTRY(ret_to_user)
ret_slow_syscall: ret_slow_syscall:
disable_irq @ disable interrupts disable_irq @ disable interrupts
...@@ -69,17 +78,23 @@ no_work_pending: ...@@ -69,17 +78,23 @@ no_work_pending:
arch_ret_to_user r1, lr arch_ret_to_user r1, lr
@ slow_restore_user_regs @ slow_restore_user_regs
THUMB( mov r2, sp )
THUMB( load_user_sp_lr r2, r3, S_SP ) @ calling sp, lr
ldr r1, [sp, #S_PSR] @ get calling cpsr ldr r1, [sp, #S_PSR] @ get calling cpsr
ldr lr, [sp, #S_PC]! @ get pc ARM( ldr lr, [sp, #S_PC]! ) @ get pc
THUMB( ldr lr, [sp, #S_PC] ) @ get pc
msr spsr_cxsf, r1 @ save in spsr_svc msr spsr_cxsf, r1 @ save in spsr_svc
ldmdb sp, {r0 - lr}^ @ get calling r1 - lr ARM( ldmdb sp, {r0 - lr}^ ) @ get calling r1 - lr
THUMB( ldmia sp, {r0 - r12} ) @ get calling r0 - r12
mov r0, r0 mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC ARM( add sp, sp, #S_FRAME_SIZE - S_PC )
THUMB( add sp, sp, #S_FRAME_SIZE )
movs pc, lr @ return & move spsr_svc into cpsr movs pc, lr @ return & move spsr_svc into cpsr
/* /*
* This is how we return from a fork. * This is how we return from a fork.
*/ */
.type ret_from_fork, %function
ENTRY(ret_from_fork) ENTRY(ret_from_fork)
bl schedule_tail bl schedule_tail
get_thread_info tsk get_thread_info tsk
...@@ -119,11 +134,14 @@ ENTRY(ret_from_fork) ...@@ -119,11 +134,14 @@ ENTRY(ret_from_fork)
#endif #endif
.align 5 .align 5
.type vector_swi, %function
ENTRY(vector_swi) ENTRY(vector_swi)
sub sp, sp, #S_FRAME_SIZE sub sp, sp, #S_FRAME_SIZE
stmia sp, {r0 - r12} @ Calling r0 - r12 stmia sp, {r0 - r12} @ Calling r0 - r12
add r8, sp, #S_PC ARM( add r8, sp, #S_PC )
stmdb r8, {sp, lr}^ @ Calling sp, lr ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr
THUMB( mov r8, sp )
THUMB( store_user_sp_lr r8, r10, S_SP ) @ calling sp, lr
mrs r8, spsr @ called from non-FIQ mode, so ok. mrs r8, spsr @ called from non-FIQ mode, so ok.
str lr, [sp, #S_PC] @ Save calling PC str lr, [sp, #S_PC] @ Save calling PC
str r8, [sp, #S_PSR] @ Save CPSR str r8, [sp, #S_PSR] @ Save CPSR
...@@ -142,6 +160,7 @@ ENTRY(vector_swi) ...@@ -142,6 +160,7 @@ ENTRY(vector_swi)
*/ */
#ifdef CONFIG_ARM_THUMB #ifdef CONFIG_ARM_THUMB
tst r8, #PSR_T_BIT tst r8, #PSR_T_BIT
ite ne
movne r10, #0 @ no thumb OABI emulation movne r10, #0 @ no thumb OABI emulation
ldreq r10, [lr, #-4] @ get SWI instruction ldreq r10, [lr, #-4] @ get SWI instruction
#else #else
...@@ -197,6 +216,7 @@ ENTRY(vector_swi) ...@@ -197,6 +216,7 @@ ENTRY(vector_swi)
* get the old ABI syscall table address. * get the old ABI syscall table address.
*/ */
bics r10, r10, #0xff000000 bics r10, r10, #0xff000000
itt ne
eorne scno, r10, #__NR_OABI_SYSCALL_BASE eorne scno, r10, #__NR_OABI_SYSCALL_BASE
ldrne tbl, =sys_oabi_call_table ldrne tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI) #elif !defined(CONFIG_AEABI)
...@@ -209,7 +229,8 @@ ENTRY(vector_swi) ...@@ -209,7 +229,8 @@ ENTRY(vector_swi)
bne __sys_trace bne __sys_trace
cmp scno, #NR_syscalls @ check upper syscall limit cmp scno, #NR_syscalls @ check upper syscall limit
adr lr, ret_fast_syscall @ return address badr lr, ret_fast_syscall @ return address
it cc
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
add r1, sp, #S_OFF add r1, sp, #S_OFF
...@@ -229,10 +250,11 @@ __sys_trace: ...@@ -229,10 +250,11 @@ __sys_trace:
mov r0, #0 @ trace entry [IP = 0] mov r0, #0 @ trace entry [IP = 0]
bl syscall_trace bl syscall_trace
adr lr, __sys_trace_return @ return address badr lr, __sys_trace_return @ return address
mov scno, r0 @ syscall number (possibly new) mov scno, r0 @ syscall number (possibly new)
add r1, sp, #S_R0 + S_OFF @ pointer to regs add r1, sp, #S_R0 + S_OFF @ pointer to regs
cmp scno, #NR_syscalls @ check upper syscall limit cmp scno, #NR_syscalls @ check upper syscall limit
itt cc
ldmccia r1, {r0 - r3} @ have to reload r0 - r3 ldmccia r1, {r0 - r3} @ have to reload r0 - r3
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
b 2b b 2b
...@@ -279,59 +301,75 @@ ENTRY(sys_call_table) ...@@ -279,59 +301,75 @@ ENTRY(sys_call_table)
sys_syscall: sys_syscall:
bic scno, r0, #__NR_OABI_SYSCALL_BASE bic scno, r0, #__NR_OABI_SYSCALL_BASE
cmp scno, #__NR_syscall - __NR_SYSCALL_BASE cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
it ne
cmpne scno, #NR_syscalls @ check range cmpne scno, #NR_syscalls @ check range
itttt lo
stmloia sp, {r5, r6} @ shuffle args stmloia sp, {r5, r6} @ shuffle args
movlo r0, r1 movlo r0, r1
movlo r1, r2 movlo r1, r2
movlo r2, r3 movlo r2, r3
itt lo
movlo r3, r4 movlo r3, r4
ldrlo pc, [tbl, scno, lsl #2] ldrlo pc, [tbl, scno, lsl #2]
b sys_ni_syscall b sys_ni_syscall
.type sys_fork_wrapper, %function
sys_fork_wrapper: sys_fork_wrapper:
add r0, sp, #S_OFF add r0, sp, #S_OFF
b sys_fork b sys_fork
.type sys_vfork_wrapper, %function
sys_vfork_wrapper: sys_vfork_wrapper:
add r0, sp, #S_OFF add r0, sp, #S_OFF
b sys_vfork b sys_vfork
.type sys_execve_wrapper, %function
sys_execve_wrapper: sys_execve_wrapper:
add r3, sp, #S_OFF add r3, sp, #S_OFF
b sys_execve b sys_execve
.type sys_clone_wrapper, %function
sys_clone_wrapper: sys_clone_wrapper:
add ip, sp, #S_OFF add ip, sp, #S_OFF
str ip, [sp, #4] str ip, [sp, #4]
b sys_clone b sys_clone
.type sys_sigsuspend_wrapper, %function
sys_sigsuspend_wrapper: sys_sigsuspend_wrapper:
add r3, sp, #S_OFF add r3, sp, #S_OFF
b sys_sigsuspend b sys_sigsuspend
.type sys_rt_sigsuspend_wrapper, %function
sys_rt_sigsuspend_wrapper: sys_rt_sigsuspend_wrapper:
add r2, sp, #S_OFF add r2, sp, #S_OFF
b sys_rt_sigsuspend b sys_rt_sigsuspend
.type sys_sigreturn_wrapper, %function
sys_sigreturn_wrapper: sys_sigreturn_wrapper:
add r0, sp, #S_OFF add r0, sp, #S_OFF
b sys_sigreturn b sys_sigreturn
.type sys_rt_sigreturn_wrapper, %function
sys_rt_sigreturn_wrapper: sys_rt_sigreturn_wrapper:
add r0, sp, #S_OFF add r0, sp, #S_OFF
b sys_rt_sigreturn b sys_rt_sigreturn
.type sys_sigaltstack_wrapper, %function
sys_sigaltstack_wrapper: sys_sigaltstack_wrapper:
ldr r2, [sp, #S_OFF + S_SP] ldr r2, [sp, #S_OFF + S_SP]
b do_sigaltstack b do_sigaltstack
.type sys_statfs64_wrapper, %function
sys_statfs64_wrapper: sys_statfs64_wrapper:
teq r1, #88 teq r1, #88
it eq
moveq r1, #84 moveq r1, #84
b sys_statfs64 b sys_statfs64
.type sys_fstatfs64_wrapper, %function
sys_fstatfs64_wrapper: sys_fstatfs64_wrapper:
teq r1, #88 teq r1, #88
it eq
moveq r1, #84 moveq r1, #84
b sys_fstatfs64 b sys_fstatfs64
...@@ -339,9 +377,11 @@ sys_fstatfs64_wrapper: ...@@ -339,9 +377,11 @@ sys_fstatfs64_wrapper:
* Note: off_4k (r5) is always units of 4K. If we can't do the requested * Note: off_4k (r5) is always units of 4K. If we can't do the requested
* offset, we return EINVAL. * offset, we return EINVAL.
*/ */
.type sys_mmap2, %function
sys_mmap2: sys_mmap2:
#if PAGE_SHIFT > 12 #if PAGE_SHIFT > 12
tst r5, #PGOFF_MASK tst r5, #PGOFF_MASK
ittt eq
moveq r5, r5, lsr #PAGE_SHIFT - 12 moveq r5, r5, lsr #PAGE_SHIFT - 12
streq r5, [sp, #4] streq r5, [sp, #4]
beq do_mmap2 beq do_mmap2
...@@ -352,8 +392,10 @@ sys_mmap2: ...@@ -352,8 +392,10 @@ sys_mmap2:
b do_mmap2 b do_mmap2
#endif #endif
.type pabort_ifar, %function
ENTRY(pabort_ifar) ENTRY(pabort_ifar)
mrc p15, 0, r0, cr6, cr0, 2 mrc p15, 0, r0, cr6, cr0, 2
.type pabort_noifar, %function
ENTRY(pabort_noifar) ENTRY(pabort_noifar)
mov pc, lr mov pc, lr
...@@ -363,24 +405,29 @@ ENTRY(pabort_noifar) ...@@ -363,24 +405,29 @@ ENTRY(pabort_noifar)
* These are syscalls with argument register differences * These are syscalls with argument register differences
*/ */
.type sys_oabi_pread64, %function
sys_oabi_pread64: sys_oabi_pread64:
stmia sp, {r3, r4} stmia sp, {r3, r4}
b sys_pread64 b sys_pread64
.type sys_oabi_pwrite64, %function
sys_oabi_pwrite64: sys_oabi_pwrite64:
stmia sp, {r3, r4} stmia sp, {r3, r4}
b sys_pwrite64 b sys_pwrite64
.type sys_oabi_truncate64, %function
sys_oabi_truncate64: sys_oabi_truncate64:
mov r3, r2 mov r3, r2
mov r2, r1 mov r2, r1
b sys_truncate64 b sys_truncate64
.type sys_oabi_ftruncate64, %function
sys_oabi_ftruncate64: sys_oabi_ftruncate64:
mov r3, r2 mov r3, r2
mov r2, r1 mov r2, r1
b sys_ftruncate64 b sys_ftruncate64
.type sys_oabi_readahead, %function
sys_oabi_readahead: sys_oabi_readahead:
str r3, [sp] str r3, [sp]
mov r3, r2 mov r3, r2
......
...@@ -49,7 +49,55 @@ ...@@ -49,7 +49,55 @@
#endif #endif
.endm .endm
@
@ Store/load the USER SP and LR registers by switching to the SYS
@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@ available. Should only be called from SVC mode
@
.macro store_user_sp_lr, rd, rtemp, offset = 0
mrs \rtemp, cpsr
eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
msr cpsr_c, \rtemp @ switch to the SYS mode
str sp, [\rd, #\offset] @ save sp_usr
str lr, [\rd, #\offset + 4] @ save lr_usr
eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
msr cpsr_c, \rtemp @ switch back to the SVC mode
.endm
.macro load_user_sp_lr, rd, rtemp, offset = 0
mrs \rtemp, cpsr
eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
msr cpsr_c, \rtemp @ switch to the SYS mode
ldr sp, [\rd, #\offset] @ load sp_usr
ldr lr, [\rd, #\offset + 4] @ load lr_usr
eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
msr cpsr_c, \rtemp @ switch back to the SVC mode
.endm
.macro arm_rfe, rpsr
ldr \rpsr, [sp, #S_PSR]
msr spsr_cxsf, \rpsr
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
.endm
.macro thumb_rfe, rsp, rpc, rpsr
ldr \rsp, [sp, #S_SP] @ top of the stack
ldr \rpc, [sp, #S_PC] @ return address
ldr \rpsr, [sp, #S_PSR] @ return cpsr
tst \rsp, #4 @ orig stack 8-byte aligned?
stmdb \rsp, {\rpc, \rpsr} @ rfe context
ldmia sp, {r0 - r12}
ldr lr, [sp, #S_LR]
ite eq
addeq sp, sp, #S_FRAME_SIZE - 8 @ aligned
addne sp, sp, #S_FRAME_SIZE - 4 @ not aligned
rfeia sp!
.endm
/* /*
* These are the registers used in the syscall handler, and allow us to * These are the registers used in the syscall handler, and allow us to
* have in theory up to 7 arguments to a function - r0 to r6. * have in theory up to 7 arguments to a function - r0 to r6.
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
* published by the Free Software Foundation. * published by the Free Software Foundation.
* *
*/ */
#include <asm/unified.h>
.type __switch_data, %object .type __switch_data, %object
__switch_data: __switch_data:
...@@ -37,6 +38,7 @@ __mmap_switched: ...@@ -37,6 +38,7 @@ __mmap_switched:
ldmia r3!, {r4, r5, r6, r7} ldmia r3!, {r4, r5, r6, r7}
cmp r4, r5 @ Copy data segment if needed cmp r4, r5 @ Copy data segment if needed
itttt ne
1: cmpne r5, r6 1: cmpne r5, r6
ldrne fp, [r4], #4 ldrne fp, [r4], #4
strne fp, [r5], #4 strne fp, [r5], #4
...@@ -44,10 +46,13 @@ __mmap_switched: ...@@ -44,10 +46,13 @@ __mmap_switched:
mov fp, #0 @ Clear BSS (and zero fp) mov fp, #0 @ Clear BSS (and zero fp)
1: cmp r6, r7 1: cmp r6, r7
itt cc
strcc fp, [r6],#4 strcc fp, [r6],#4
bcc 1b bcc 1b
ldmia r3, {r4, r5, r6, sp} ARM( ldmia r3, {r4, r5, r6, sp} )
THUMB( ldmia r3, {r4, r5, r6} )
THUMB( ldr sp, [r3, #12] )
str r9, [r4] @ Save processor ID str r9, [r4] @ Save processor ID
str r1, [r5] @ Save machine type str r1, [r5] @ Save machine type
bic r4, r0, #CR_A @ Clear 'A' bit bic r4, r0, #CR_A @ Clear 'A' bit
...@@ -144,8 +149,11 @@ __error: ...@@ -144,8 +149,11 @@ __error:
*/ */
.type __lookup_processor_type, %function .type __lookup_processor_type, %function
__lookup_processor_type: __lookup_processor_type:
adr r3, 3f ARM( adr r3, 3f )
ldmda r3, {r5 - r7} ARM( ldmda r3, {r5 - r7} )
THUMB( adr r3, 3f+4 )
THUMB( ldmdb r3, {r5 - r7} )
THUMB( sub r3, r3, #4 )
sub r3, r3, r7 @ get offset between virt&phys sub r3, r3, r7 @ get offset between virt&phys
add r5, r5, r3 @ convert virt addresses to add r5, r5, r3 @ convert virt addresses to
add r6, r6, r3 @ physical address space add r6, r6, r3 @ physical address space
...@@ -162,6 +170,7 @@ __lookup_processor_type: ...@@ -162,6 +170,7 @@ __lookup_processor_type:
/* /*
* This provides a C-API version of the above function. * This provides a C-API version of the above function.
*/ */
.type lookup_processor_type, %function
ENTRY(lookup_processor_type) ENTRY(lookup_processor_type)
stmfd sp!, {r4 - r7, r9, lr} stmfd sp!, {r4 - r7, r9, lr}
mov r9, r0 mov r9, r0
...@@ -209,6 +218,7 @@ __lookup_machine_type: ...@@ -209,6 +218,7 @@ __lookup_machine_type:
/* /*
* This provides a C-API version of the above function. * This provides a C-API version of the above function.
*/ */
.type lookup_machine_type, %function
ENTRY(lookup_machine_type) ENTRY(lookup_machine_type)
stmfd sp!, {r4 - r6, lr} stmfd sp!, {r4 - r6, lr}
mov r1, r0 mov r1, r0
......
...@@ -11,6 +11,8 @@ ...@@ -11,6 +11,8 @@
* Common kernel startup code (non-paged MM) * Common kernel startup code (non-paged MM)
* *
*/ */
#include <asm/unified.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -52,7 +54,7 @@ ENTRY(stext) ...@@ -52,7 +54,7 @@ ENTRY(stext)
ldr r13, __switch_data @ address to jump to after ldr r13, __switch_data @ address to jump to after
@ the initialization is done @ the initialization is done
adr lr, __after_proc_init @ return (PIC) address badr lr, __after_proc_init @ return (PIC) address
add pc, r10, #PROCINFO_INITFUNC add pc, r10, #PROCINFO_INITFUNC
/* /*
......
...@@ -11,6 +11,8 @@ ...@@ -11,6 +11,8 @@
* *
* Kernel startup code for all 32-bit CPUs * Kernel startup code for all 32-bit CPUs
*/ */
#include <asm/unified.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -76,7 +78,9 @@ ...@@ -76,7 +78,9 @@
__INIT __INIT
.type stext, %function .type stext, %function
ENTRY(stext) ENTRY(stext)
msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode ARM( msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE ) @ ensure svc mode
THUMB( mov r9, #PSR_F_BIT | PSR_I_BIT | SVC_MODE )
THUMB( msr cpsr_c, r9 ) @ ensure svc mode
@ and irqs disabled @ and irqs disabled
mrc p15, 0, r9, c0, c0 @ get processor id mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type @ r5=procinfo r9=cpuid bl __lookup_processor_type @ r5=procinfo r9=cpuid
...@@ -96,8 +100,10 @@ ENTRY(stext) ...@@ -96,8 +100,10 @@ ENTRY(stext)
*/ */
ldr r13, __switch_data @ address to jump to after ldr r13, __switch_data @ address to jump to after
@ mmu has been enabled @ mmu has been enabled
adr lr, __enable_mmu @ return (PIC) address badr lr, __enable_mmu @ return (PIC) address
add pc, r10, #PROCINFO_INITFUNC ARM( add pc, r10, #PROCINFO_INITFUNC )
THUMB( add r12, r10, #PROCINFO_INITFUNC )
THUMB( mov pc, r12 )
#if defined(CONFIG_SMP) #if defined(CONFIG_SMP)
.type secondary_startup, #function .type secondary_startup, #function
...@@ -123,13 +129,16 @@ ENTRY(secondary_startup) ...@@ -123,13 +129,16 @@ ENTRY(secondary_startup)
ldmia r4, {r5, r7, r13} @ address to jump to after ldmia r4, {r5, r7, r13} @ address to jump to after
sub r4, r4, r5 @ mmu has been enabled sub r4, r4, r5 @ mmu has been enabled
ldr r4, [r7, r4] @ get secondary_data.pgdir ldr r4, [r7, r4] @ get secondary_data.pgdir
adr lr, __enable_mmu @ return address badr lr, __enable_mmu @ return address
add pc, r10, #PROCINFO_INITFUNC @ initialise processor ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor
@ (return control reg) @ (return control reg)
THUMB( add r12, r10, #PROCINFO_INITFUNC )
THUMB( mov pc, r12 )
/* /*
* r6 = &secondary_data * r6 = &secondary_data
*/ */
.type __secondary_switched, %function
ENTRY(__secondary_switched) ENTRY(__secondary_switched)
ldr sp, [r7, #4] @ get secondary_data.stack ldr sp, [r7, #4] @ get secondary_data.stack
mov fp, #0 mov fp, #0
...@@ -249,6 +258,7 @@ __create_page_tables: ...@@ -249,6 +258,7 @@ __create_page_tables:
add r6, r4, r6, lsr #18 add r6, r4, r6, lsr #18
1: cmp r0, r6 1: cmp r0, r6
add r3, r3, #1 << 20 add r3, r3, #1 << 20
it ls
strls r3, [r0], #4 strls r3, [r0], #4
bls 1b bls 1b
...@@ -288,6 +298,7 @@ __create_page_tables: ...@@ -288,6 +298,7 @@ __create_page_tables:
add r0, r4, r3 add r0, r4, r3
rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long) rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long)
cmp r3, #0x0800 @ limit to 512MB cmp r3, #0x0800 @ limit to 512MB
it hi
movhi r3, #0x0800 movhi r3, #0x0800
add r6, r0, r3 add r6, r0, r3
ldr r3, [r8, #MACHINFO_PHYSIO] ldr r3, [r8, #MACHINFO_PHYSIO]
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include <linux/elfcore.h> #include <linux/elfcore.h>
#include <linux/pm.h> #include <linux/pm.h>
#include <asm/unified.h>
#include <asm/leds.h> #include <asm/leds.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/system.h> #include <asm/system.h>
...@@ -412,7 +413,7 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) ...@@ -412,7 +413,7 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
regs.ARM_r2 = (unsigned long)fn; regs.ARM_r2 = (unsigned long)fn;
regs.ARM_r3 = (unsigned long)do_exit; regs.ARM_r3 = (unsigned long)do_exit;
regs.ARM_pc = (unsigned long)kernel_thread_helper; regs.ARM_pc = (unsigned long)kernel_thread_helper;
regs.ARM_cpsr = SVC_MODE; regs.ARM_cpsr = SVC_MODE | PSR_ISETSTATE;
return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
} }
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <asm/unified.h>
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/elf.h> #include <asm/elf.h>
#include <asm/procinfo.h> #include <asm/procinfo.h>
...@@ -406,13 +407,17 @@ void cpu_init(void) ...@@ -406,13 +407,17 @@ void cpu_init(void)
"msr cpsr_c, %7" "msr cpsr_c, %7"
: :
: "r" (stk), : "r" (stk),
"I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), ARM( "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), )
THUMB("r" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), )
"I" (offsetof(struct stack, irq[0])), "I" (offsetof(struct stack, irq[0])),
"I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE), ARM( "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE), )
THUMB("r" (PSR_F_BIT | PSR_I_BIT | ABT_MODE), )
"I" (offsetof(struct stack, abt[0])), "I" (offsetof(struct stack, abt[0])),
"I" (PSR_F_BIT | PSR_I_BIT | UND_MODE), ARM( "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE), )
THUMB("r" (PSR_F_BIT | PSR_I_BIT | UND_MODE), )
"I" (offsetof(struct stack, und[0])), "I" (offsetof(struct stack, und[0])),
"I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE) ARM( "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE) )
THUMB("r" (PSR_F_BIT | PSR_I_BIT | SVC_MODE) )
: "r14"); : "r14");
} }
......