Commit 459fc9f0 authored by Catalin Marinas

Thumb-2: Implementation of the unified start-up and exceptions code

This patch implements the ARM/Thumb-2 unified kernel start-up and
exception handling code.
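
The conversion relies on the ARM()/THUMB() assembler annotations, the W() width specifier and the badr branch-address macro provided by the new <asm/unified.h> header, whose contents are not part of the hunks below. As a rough sketch of the idea (assuming the Thumb-2 build is selected by a CONFIG_THUMB2_KERNEL option; the real header may differ in detail), the annotations reduce to simple preprocessor macros:

#ifdef CONFIG_THUMB2_KERNEL
/* Thumb-2 kernel: keep THUMB() lines, drop ARM() lines, widen W() insns */
#define ARM(x...)
#define THUMB(x...)	x
#define W(instr)	instr.w		/* force the 32-bit Thumb-2 encoding */
#else
/* Classic ARM kernel: keep ARM() lines, drop THUMB() lines */
#define ARM(x...)	x
#define THUMB(x...)
#define W(instr)	instr
#endif

badr is assumed to behave like adr but with the Thumb bit set in the resulting address when building for Thumb-2, so that addresses later loaded into pc or lr keep the CPU in Thumb state. The it/itt/ite... instructions inserted before conditional instructions are required by Thumb-2; with unified syntax they are checked but generate no code when assembling for ARM.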
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent fdaa1cf0
......@@ -9,6 +9,8 @@
*
* 32-bit debugging code
*/
#include <asm/unified.h>
#include <linux/linkage.h>
.text
......@@ -105,6 +107,7 @@ printhex: adr r2, hexbuf
1: and r1, r0, #15
mov r0, r0, lsr #4
cmp r1, #10
ite lt
addlt r1, r1, #'0'
addge r1, r1, #'a' - 10
strb r1, [r3, #-1]!
......@@ -123,9 +126,11 @@ ENTRY(printascii)
senduart r1, r3
busyuart r2, r3
teq r1, #'\n'
itt eq
moveq r1, #'\r'
beq 1b
2: teq r0, #0
itt ne
ldrneb r1, [r0], #1
teqne r1, #0
bne 1b
......
......@@ -14,6 +14,7 @@
* Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
* it to save wrong values... Be aware!
*/
#include <asm/unified.h>
#include <asm/memory.h>
#include <asm/glue.h>
......@@ -29,11 +30,12 @@
.macro irq_handler
get_irqnr_preamble r5, lr
1: get_irqnr_and_base r0, r6, r5, lr
ittt ne
movne r1, sp
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
adrne lr, 1b
badr lr, 1b, ne
bne asm_do_IRQ
#ifdef CONFIG_SMP
......@@ -44,14 +46,16 @@
* preserved from get_irqnr_and_base above
*/
test_for_ipi r0, r6, r5, lr
ittt ne
movne r0, sp
adrne lr, 1b
badr lr, 1b, ne
bne do_IPI
#ifdef CONFIG_LOCAL_TIMERS
test_for_ltirq r0, r6, r5, lr
ittt ne
movne r0, sp
adrne lr, 1b
badr lr, 1b, ne
bne do_local_timer
#endif
#endif
......@@ -63,7 +67,10 @@
*/
.macro inv_entry, reason
sub sp, sp, #S_FRAME_SIZE
stmib sp, {r1 - lr}
ARM( stmib sp, {r1 - lr} )
THUMB( stmia sp, {r0 - r12} )
THUMB( str sp, [sp, #S_SP] )
THUMB( str lr, [sp, #S_LR] )
mov r1, #\reason
.endm
......@@ -119,13 +126,16 @@ common_invalid:
.macro svc_entry
sub sp, sp, #S_FRAME_SIZE
SPFIX( tst sp, #4 )
SPFIX( it ne )
SPFIX( bicne sp, sp, #4 )
stmib sp, {r1 - r12}
ARM( stmib sp, {r1 - r12} )
THUMB( stmia sp, {r0 - r12} )
ldmia r0, {r1 - r3}
add r5, sp, #S_SP @ here for interlock avoidance
mov r4, #-1 @ "" "" "" ""
add r0, sp, #S_FRAME_SIZE @ "" "" "" ""
SPFIX( it ne )
SPFIX( addne r0, r0, #4 )
str r1, [sp] @ save the "real" r0 copied
@ from the exception stack
......@@ -153,6 +163,7 @@ __dabt_svc:
@
mrs r9, cpsr
tst r3, #PSR_I_BIT
it eq
biceq r9, r9, #PSR_I_BIT
@
......@@ -187,9 +198,8 @@ __dabt_svc:
@
@ restore SPSR and restart the instruction
@
ldr r0, [sp, #S_PSR]
msr spsr_cxsf, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
ARM( arm_rfe r0 )
THUMB( thumb_rfe r0, r1, r2 )
ENDPROC(__dabt_svc)
.align 5
......@@ -215,15 +225,17 @@ preempt_return:
ldr r0, [tsk, #TI_PREEMPT] @ read preempt value
str r8, [tsk, #TI_PREEMPT] @ restore preempt count
teq r0, r7
it ne
strne r0, [r0, -r0] @ bug()
#endif
ldr r0, [sp, #S_PSR] @ irqs are already disabled
msr spsr_cxsf, r0
ARM( msr spsr_cxsf, r0 )
#ifdef CONFIG_TRACE_IRQFLAGS
tst r0, #PSR_I_BIT
bleq trace_hardirqs_on
#endif
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
ARM( ldmia sp, {r0 - pc}^ ) @ load r0 - pc, cpsr
THUMB( thumb_rfe r0, r1, r2 )
ENDPROC(__irq_svc)
.ltorg
......@@ -231,11 +243,13 @@ ENDPROC(__irq_svc)
#ifdef CONFIG_PREEMPT
svc_preempt:
teq r8, #0 @ was preempt count = 0
ite eq
ldreq r6, .LCirq_stat
movne pc, lr @ no
ldr r0, [r6, #4] @ local_irq_count
ldr r1, [r6, #8] @ local_bh_count
adds r0, r0, r1
it ne
movne pc, lr
mov r7, #0 @ preempt_schedule_irq
str r7, [tsk, #TI_PREEMPT] @ expects preempt_count == 0
......@@ -258,7 +272,7 @@ __und_svc:
@ r0 - instruction
@
ldr r0, [r2, #-4]
adr r9, 1f
badr r9, 1f
bl call_fpe
mov r0, sp @ struct pt_regs *regs
......@@ -272,9 +286,8 @@ __und_svc:
@
@ restore SPSR and restart the instruction
@
ldr lr, [sp, #S_PSR] @ Get SVC cpsr
msr spsr_cxsf, lr
ldmia sp, {r0 - pc}^ @ Restore SVC registers
ARM( arm_rfe r0 )
THUMB( thumb_rfe r0, r1, r2 )
ENDPROC(__und_svc)
.align 5
......@@ -286,6 +299,7 @@ __pabt_svc:
@
mrs r9, cpsr
tst r3, #PSR_I_BIT
it eq
biceq r9, r9, #PSR_I_BIT
@
......@@ -314,9 +328,8 @@ __pabt_svc:
@
@ restore SPSR and restart the instruction
@
ldr r0, [sp, #S_PSR]
msr spsr_cxsf, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
ARM( arm_rfe r0 )
THUMB( thumb_rfe r0, r1, r2 )
ENDPROC(__pabt_svc)
.align 5
......@@ -345,7 +358,8 @@ ENDPROC(__pabt_svc)
.macro usr_entry
sub sp, sp, #S_FRAME_SIZE
stmib sp, {r1 - r12}
ARM( stmib sp, {r1 - r12} )
THUMB( stmia sp, {r0 - r12} )
ldmia r0, {r1 - r3}
add r0, sp, #S_PC @ here for interlock avoidance
......@@ -364,7 +378,8 @@ ENDPROC(__pabt_svc)
@ Also, separately save sp_usr and lr_usr
@
stmia r0, {r2 - r4}
stmdb r0, {sp, lr}^
ARM( stmdb r0, {sp, lr}^ )
THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
@
@ Enable the alignment trap while in kernel mode
......@@ -419,7 +434,7 @@ __dabt_usr:
@
enable_irq
mov r2, sp
adr lr, ret_from_exception
badr lr, ret_from_exception
b do_DataAbort
ENDPROC(__dabt_usr)
......@@ -466,16 +481,20 @@ __und_usr:
@
@ r0 - instruction
@
adr r9, ret_from_exception
adr lr, __und_usr_unknown
badr r9, ret_from_exception
badr lr, __und_usr_unknown
tst r3, #PSR_T_BIT @ Thumb mode?
itet eq
subeq r4, r2, #4 @ ARM instr at LR - 4
subne r4, r2, #2 @ Thumb instr at LR - 2
1: ldreqt r0, [r4]
beq call_fpe
@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
2: ldrht r5, [r4], #2
2:
ARM( ldrht r5, [r4], #2 )
THUMB( ldrht r5, [r4] )
THUMB( add r4, r4, #2 )
and r0, r5, #0xf800 @ mask bits 111x x... .... ....
cmp r0, #0xe800 @ 32bit instruction if xx != 0
blo __und_usr_unknown
......@@ -555,6 +574,7 @@ call_fpe:
1:
#endif
tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
ite ne
tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
and r8, r0, #0x0f000000 @ mask out op-code bits
......@@ -563,9 +583,11 @@ call_fpe:
moveq pc, lr
get_thread_info r10 @ get current thread
and r8, r0, #0x00000f00 @ mask out CP number
THUMB( lsr r8, r8, #8 )
mov r7, #1
add r6, r10, #TI_USED_CP
strb r7, [r6, r8, lsr #8] @ set appropriate used_cp[]
ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
@ Test if we need to give access to iWMMXt coprocessors
ldr r5, [r10, #TI_FLAGS]
......@@ -573,36 +595,38 @@ call_fpe:
movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1)
bcs iwmmxt_task_enable
#endif
add pc, pc, r8, lsr #6
mov r0, r0
mov pc, lr @ CP#0
b do_fpe @ CP#1 (FPE)
b do_fpe @ CP#2 (FPE)
mov pc, lr @ CP#3
ARM( add pc, pc, r8, lsr #6 )
THUMB( lsl r8, r8, #2 )
THUMB( add pc, r8 )
nop
W(mov) pc, lr @ CP#0
W(b) do_fpe @ CP#1 (FPE)
W(b) do_fpe @ CP#2 (FPE)
W(mov) pc, lr @ CP#3
#ifdef CONFIG_CRUNCH
b crunch_task_enable @ CP#4 (MaverickCrunch)
b crunch_task_enable @ CP#5 (MaverickCrunch)
b crunch_task_enable @ CP#6 (MaverickCrunch)
#else
mov pc, lr @ CP#4
mov pc, lr @ CP#5
mov pc, lr @ CP#6
W(mov) pc, lr @ CP#4
W(mov) pc, lr @ CP#5
W(mov) pc, lr @ CP#6
#endif
mov pc, lr @ CP#7
mov pc, lr @ CP#8
mov pc, lr @ CP#9
W(mov) pc, lr @ CP#7
W(mov) pc, lr @ CP#8
W(mov) pc, lr @ CP#9
#ifdef CONFIG_VFP
b do_vfp @ CP#10 (VFP)
b do_vfp @ CP#11 (VFP)
W(b) do_vfp @ CP#10 (VFP)
W(b) do_vfp @ CP#11 (VFP)
#else
mov pc, lr @ CP#10 (VFP)
mov pc, lr @ CP#11 (VFP)
W(mov) pc, lr @ CP#10 (VFP)
W(mov) pc, lr @ CP#11 (VFP)
#endif
mov pc, lr @ CP#12
mov pc, lr @ CP#13
mov pc, lr @ CP#14 (Debug)
mov pc, lr @ CP#15 (Control)
W(mov) pc, lr @ CP#12
W(mov) pc, lr @ CP#13
W(mov) pc, lr @ CP#14 (Debug)
W(mov) pc, lr @ CP#15 (Control)
#ifdef CONFIG_NEON
.align 6
......@@ -652,7 +676,7 @@ no_fp: mov pc, lr
__und_usr_unknown:
mov r0, sp
adr lr, ret_from_exception
badr lr, ret_from_exception
b do_undefinstr
ENDPROC(__und_usr_unknown)
......@@ -691,13 +715,10 @@ ENDPROC(ret_from_exception)
ENTRY(__switch_to)
add ip, r1, #TI_CPU_SAVE
ldr r3, [r2, #TI_TP_VALUE]
stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack
#ifdef CONFIG_ARM_XENON
mrc p14, 6, r4, c1, c0, 0 @ current xenon state
ldr r5, [r2, #TI_XENONSTATE] @ value to restore
str r4, [r1, #TI_XENONSTATE] @ save current
mcr p14, 6, r5, c1, c0, 0 @ restore new value
#endif
ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack
THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
THUMB( str sp, [ip], #4 )
THUMB( str lr, [ip], #4 )
#ifdef CONFIG_MMU
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
......@@ -722,8 +743,12 @@ ENTRY(__switch_to)
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
THUMB( mov ip, r4 )
mov r0, r5
ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
THUMB( ldr sp, [ip], #4 )
THUMB( ldr pc, [ip] )
ENDPROC(__switch_to)
__INIT
......@@ -757,6 +782,7 @@ ENDPROC(__switch_to)
* if your compiled code is not going to use the new instructions for other
* purpose.
*/
THUMB( .arm )
.macro usr_ret, reg
#ifdef CONFIG_ARM_THUMB
......@@ -928,6 +954,7 @@ kuser_cmpxchg_fixup:
#endif
1: ldrex r3, [r2]
subs r3, r3, r0
it eq
strexeq r3, r1, [r2]
teqeq r3, #1
beq 1b
......@@ -1008,6 +1035,7 @@ __kuser_helper_version: @ 0xffff0ffc
.globl __kuser_helper_end
__kuser_helper_end:
THUMB( .thumb )
/*
* Vector stubs.
......@@ -1042,17 +1070,23 @@ vector_\name:
@ Prepare for SVC32 mode. IRQs remain disabled.
@
mrs r0, cpsr
eor r0, r0, #(\mode ^ SVC_MODE)
eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
msr spsr_cxsf, r0
@
@ the branch table must immediately follow this code
@
and lr, lr, #0x0f
THUMB( adr r0, 1f )
THUMB( ldr lr, [r0, lr, lsl #2] )
mov r0, sp
ldr lr, [pc, lr, lsl #2]
ARM( ldr lr, [pc, lr, lsl #2] )
movs pc, lr @ branch to handler in SVC mode
ENDPROC(vector_\name)
.align 2
@ handler addresses follow this label
1:
.endm
.globl __stubs_start
......@@ -1190,14 +1224,16 @@ __stubs_end:
.globl __vectors_start
__vectors_start:
swi SYS_ERROR0
b vector_und + stubs_offset
ldr pc, .LCvswi + stubs_offset
b vector_pabt + stubs_offset
b vector_dabt + stubs_offset
b vector_addrexcptn + stubs_offset
b vector_irq + stubs_offset
b vector_fiq + stubs_offset
ARM( swi SYS_ERROR0 )
THUMB( svc #0 )
THUMB( nop )
W(b) vector_und + stubs_offset
W(ldr) pc, .LCvswi + stubs_offset
W(b) vector_pabt + stubs_offset
W(b) vector_dabt + stubs_offset
W(b) vector_addrexcptn + stubs_offset
W(b) vector_irq + stubs_offset
W(b) vector_fiq + stubs_offset
.globl __vectors_end
__vectors_end:
......
......@@ -8,6 +8,8 @@
* published by the Free Software Foundation.
*/
#include <asm/unified.h>
#include <asm/unistd.h>
#include <asm/arch/entry-macro.S>
......@@ -30,12 +32,18 @@ ret_fast_syscall:
arch_ret_to_user r1, lr
@ fast_restore_user_regs
THUMB( mov r2, sp )
THUMB( load_user_sp_lr r2, r3, S_OFF + S_SP ) @ calling sp, lr
ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
ldr lr, [sp, #S_OFF + S_PC]! @ get pc
ARM( ldr lr, [sp, #S_OFF + S_PC]! ) @ get pc
THUMB( ldr lr, [sp, #S_OFF + S_PC] ) @ get pc
THUMB( add sp, sp, #S_OFF + S_R1 )
msr spsr_cxsf, r1 @ save in spsr_svc
ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
ARM( ldmdb sp, {r1 - lr}^ ) @ get calling r1 - lr
THUMB( ldmia sp, {r1 - r12} ) @ get calling r1 - r12
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
ARM( add sp, sp, #S_FRAME_SIZE - S_PC )
THUMB( add sp, sp, #S_FRAME_SIZE - S_R1 )
movs pc, lr @ return & move spsr_svc into cpsr
/*
......@@ -69,12 +77,17 @@ no_work_pending:
arch_ret_to_user r1, lr
@ slow_restore_user_regs
THUMB( mov r2, sp )
THUMB( load_user_sp_lr r2, r3, S_SP ) @ calling sp, lr
ldr r1, [sp, #S_PSR] @ get calling cpsr
ldr lr, [sp, #S_PC]! @ get pc
ARM( ldr lr, [sp, #S_PC]! ) @ get pc
THUMB( ldr lr, [sp, #S_PC] ) @ get pc
msr spsr_cxsf, r1 @ save in spsr_svc
ldmdb sp, {r0 - lr}^ @ get calling r1 - lr
ARM( ldmdb sp, {r0 - lr}^ ) @ get calling r0 - lr
THUMB( ldmia sp, {r0 - r12} ) @ get calling r0 - r12
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
ARM( add sp, sp, #S_FRAME_SIZE - S_PC )
THUMB( add sp, sp, #S_FRAME_SIZE )
movs pc, lr @ return & move spsr_svc into cpsr
ENDPROC(ret_to_user)
......@@ -123,8 +136,10 @@ ENDPROC(ret_from_fork)
ENTRY(vector_swi)
sub sp, sp, #S_FRAME_SIZE
stmia sp, {r0 - r12} @ Calling r0 - r12
add r8, sp, #S_PC
stmdb r8, {sp, lr}^ @ Calling sp, lr
ARM( add r8, sp, #S_PC )
ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr
THUMB( mov r8, sp )
THUMB( store_user_sp_lr r8, r10, S_SP ) @ calling sp, lr
mrs r8, spsr @ called from non-FIQ mode, so ok.
str lr, [sp, #S_PC] @ Save calling PC
str r8, [sp, #S_PSR] @ Save CPSR
......@@ -143,6 +158,7 @@ ENTRY(vector_swi)
*/
#ifdef CONFIG_ARM_THUMB
tst r8, #PSR_T_BIT
ite ne
movne r10, #0 @ no thumb OABI emulation
ldreq r10, [lr, #-4] @ get SWI instruction
#else
......@@ -198,6 +214,7 @@ ENTRY(vector_swi)
* get the old ABI syscall table address.
*/
bics r10, r10, #0xff000000
itt ne
eorne scno, r10, #__NR_OABI_SYSCALL_BASE
ldrne tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
......@@ -210,7 +227,8 @@ ENTRY(vector_swi)
bne __sys_trace
cmp scno, #NR_syscalls @ check upper syscall limit
adr lr, ret_fast_syscall @ return address
badr lr, ret_fast_syscall @ return address
it cc
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
add r1, sp, #S_OFF
......@@ -231,10 +249,11 @@ __sys_trace:
mov r0, #0 @ trace entry [IP = 0]
bl syscall_trace
adr lr, __sys_trace_return @ return address
badr lr, __sys_trace_return @ return address
mov scno, r0 @ syscall number (possibly new)
add r1, sp, #S_R0 + S_OFF @ pointer to regs
cmp scno, #NR_syscalls @ check upper syscall limit
itt cc
ldmccia r1, {r0 - r3} @ have to reload r0 - r3
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
b 2b
......@@ -280,11 +299,14 @@ ENTRY(sys_call_table)
sys_syscall:
bic scno, r0, #__NR_OABI_SYSCALL_BASE
cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
it ne
cmpne scno, #NR_syscalls @ check range
itttt lo
stmloia sp, {r5, r6} @ shuffle args
movlo r0, r1
movlo r1, r2
movlo r2, r3
itt lo
movlo r3, r4
ldrlo pc, [tbl, scno, lsl #2]
b sys_ni_syscall
......@@ -338,12 +360,14 @@ ENDPROC(sys_sigaltstack_wrapper)
sys_statfs64_wrapper:
teq r1, #88
it eq
moveq r1, #84
b sys_statfs64
ENDPROC(sys_statfs64_wrapper)
sys_fstatfs64_wrapper:
teq r1, #88
it eq
moveq r1, #84
b sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
......@@ -355,6 +379,7 @@ ENDPROC(sys_fstatfs64_wrapper)
sys_mmap2:
#if PAGE_SHIFT > 12
tst r5, #PGOFF_MASK
ittt eq
moveq r5, r5, lsr #PAGE_SHIFT - 12
streq r5, [sp, #4]
beq do_mmap2
......
......@@ -37,7 +37,9 @@
.endm
.macro get_thread_info, rd
mov \rd, sp, lsr #13
ARM( mov \rd, sp, lsr #13 )
THUMB( mov \rd, sp )
THUMB( lsr \rd, \rd, #13 )
mov \rd, \rd, lsl #13
.endm
......@@ -49,6 +51,54 @@
#endif
.endm
@
@ Store/load the USER SP and LR registers by switching to the SYS
@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@ available. Should only be called from SVC mode
@
.macro store_user_sp_lr, rd, rtemp, offset = 0
mrs \rtemp, cpsr
eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
msr cpsr_c, \rtemp @ switch to the SYS mode
str sp, [\rd, #\offset] @ save sp_usr
str lr, [\rd, #\offset + 4] @ save lr_usr
eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
msr cpsr_c, \rtemp @ switch back to the SVC mode
.endm
.macro load_user_sp_lr, rd, rtemp, offset = 0
mrs \rtemp, cpsr
eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
msr cpsr_c, \rtemp @ switch to the SYS mode
ldr sp, [\rd, #\offset] @ load sp_usr
ldr lr, [\rd, #\offset + 4] @ load lr_usr
eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
msr cpsr_c, \rtemp @ switch back to the SVC mode
.endm
.macro arm_rfe, rpsr
ldr \rpsr, [sp, #S_PSR]
msr spsr_cxsf, \rpsr
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
.endm
.macro thumb_rfe, rsp, rpc, rpsr
ldr \rsp, [sp, #S_SP] @ top of the stack
ldr \rpc, [sp, #S_PC] @ return address
ldr \rpsr, [sp, #S_PSR] @ return cpsr
tst \rsp, #4 @ orig stack 8-byte aligned?
stmdb \rsp, {\rpc, \rpsr} @ rfe context
ldmia sp, {r0 - r12}
ldr lr, [sp, #S_LR]
ite eq
addeq sp, sp, #S_FRAME_SIZE - 8 @ aligned
addne sp, sp, #S_FRAME_SIZE - 4 @ not aligned
rfeia sp!
.endm
/*
* These are the registers used in the syscall handler, and allow us to
......
......@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*
*/
#include <asm/unified.h>
.type __switch_data, %object
__switch_data:
......@@ -38,6 +39,7 @@ __mmap_switched:
ldmia r3!, {r4, r5, r6, r7}
cmp r4, r5 @ Copy data segment if needed
itttt ne
1: cmpne r5, r6
ldrne fp, [r4], #4
strne fp, [r5], #4
......@@ -45,10 +47,13 @@ __mmap_switched:
mov fp, #0 @ Clear BSS (and zero fp)
1: cmp r6, r7
itt cc
strcc fp, [r6],#4
bcc 1b
ldmia r3, {r4, r5, r6, r7, sp}
ARM( ldmia r3, {r4, r5, r6, r7, sp} )
THUMB( ldmia r3, {r4, r5, r6, r7} )
THUMB( ldr sp, [r3, #16] )
str r9, [r4] @ Save processor ID
str r1, [r5] @ Save machine type
str r2, [r6] @ Save atags pointer
......@@ -146,8 +151,11 @@ ENDPROC(__error)
* r9 = cpuid (preserved)
*/
__lookup_processor_type:
adr r3, 3f
ldmda r3, {r5 - r7}
ARM( adr r3, 3f )
ARM( ldmda r3, {r5 - r7} )
THUMB( adr r3, 3f+4 )
THUMB( ldmdb r3, {r5 - r7} )
THUMB( sub r3, r3, #4 )
sub r3, r3, r7 @ get offset between virt&phys
add r5, r5, r3 @ convert virt addresses to
add r6, r6, r3 @ physical address space
......
......@@ -11,6 +11,8 @@
* Common kernel startup code (non-paged MM)
*
*/
#include <asm/unified.h>
#include <linux/linkage.h>
#include <linux/init.h>
......@@ -38,8 +40,9 @@
*/
.section ".text.head", "ax"
ENTRY(stext)
msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
@ and irqs disabled
ARM( msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE ) @ ensure svc mode
THUMB( mov r9, #PSR_F_BIT | PSR_I_BIT | SVC_MODE )
THUMB( msr cpsr_c, r9 ) @ ensure svc mode
#ifndef CONFIG_CPU_CP15
ldr r9, =CONFIG_PROCESSOR_ID
#else
......@@ -54,8 +57,10 @@ ENTRY(stext)
ldr r13, __switch_data @ address to jump to after
@ the initialization is done
adr lr, __after_proc_init @ return (PIC) address
add pc, r10, #PROCINFO_INITFUNC
badr lr, __after_proc_init @ return (PIC) address
ARM( add pc, r10, #PROCINFO_INITFUNC )
THUMB( add r12, r10, #PROCINFO_INITFUNC )
THUMB( mov pc, r12 )
ENDPROC(stext)
/*
......
......@@ -11,6 +11,8 @@
*
* Kernel startup code for all 32-bit CPUs
*/
#include <asm/unified.h>
#include <linux/linkage.h>
#include <linux/init.h>
......@@ -79,7 +81,9 @@
*/
.section ".text.head", "ax"
ENTRY(stext)
msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
ARM( msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE ) @ ensure svc mode
THUMB( mov r9, #PSR_F_BIT | PSR_I_BIT | SVC_MODE )
THUMB( msr cpsr_c, r9 ) @ ensure svc mode
@ and irqs disabled
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type @ r5=procinfo r9=cpuid
......@@ -100,8 +104,10 @@ ENTRY(stext)
*/
ldr r13, __switch_data @ address to jump to after
@ mmu has been enabled
adr lr, __enable_mmu @ return (PIC) address
add pc, r10, #PROCINFO_INITFUNC
badr lr, __enable_mmu @ return (PIC) address
ARM( add pc, r10, #PROCINFO_INITFUNC )
THUMB( add r12, r10, #PROCINFO_INITFUNC )
THUMB( mov pc, r12 )
ENDPROC(stext)
#if defined(CONFIG_SMP)
......@@ -127,9 +133,11 @@ ENTRY(secondary_startup)
ldmia r4, {r5, r7, r13} @ address to jump to after
sub r4, r4, r5 @ mmu has been enabled
ldr r4, [r7, r4] @ get secondary_data.pgdir
adr lr, __enable_mmu @ return address
add pc, r10, #PROCINFO_INITFUNC @ initialise processor
badr lr, __enable_mmu @ return address
ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor
@ (return control reg)
THUMB( add r12, r10, #PROCINFO_INITFUNC )
THUMB( mov pc, r12 )
ENDPROC(secondary_startup)
/*
......@@ -253,6 +261,7 @@ __create_page_tables:
add r6, r4, r6, lsr #18
1: cmp r0, r6
add r3, r3, #1 << 20
it ls
strls r3, [r0], #4
bls 1b
......@@ -296,6 +305,7 @@ __create_page_tables:
add r0, r4, r3
rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long)
cmp r3, #0x0800 @ limit to 512MB
it hi
movhi r3, #0x0800
add r6, r0, r3
ldr r3, [r8, #MACHINFO_PHYSIO]
......
......@@ -30,6 +30,7 @@
#include <linux/tick.h>
#include <linux/utsname.h>
#include <asm/unified.h>
#include <asm/leds.h>
#include <asm/processor.h>
#include <asm/system.h>
......@@ -425,7 +426,7 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
regs.ARM_r2 = (unsigned long)fn;
regs.ARM_r3 = (unsigned long)do_exit;
regs.ARM_pc = (unsigned long)kernel_thread_helper;
regs.ARM_cpsr = SVC_MODE;
regs.ARM_cpsr = SVC_MODE | PSR_ISETSTATE;
return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
......
......@@ -26,6 +26,7 @@
#include <linux/fs.h>
#include <linux/kexec.h>
#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
......@@ -410,13 +411,17 @@ void cpu_init(void)
"msr cpsr_c, %7"
:
: "r" (stk),
"I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
ARM( "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), )
THUMB("r" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), )
"I" (offsetof(struct stack, irq[0])),
"I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
ARM( "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE), )
THUMB("r" (PSR_F_BIT | PSR_I_BIT | ABT_MODE), )
"I" (offsetof(struct stack, abt[0])),
"I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
ARM( "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE), )
THUMB("r" (PSR_F_BIT | PSR_I_BIT | UND_MODE), )
"I" (offsetof(struct stack, und[0])),
"I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
ARM( "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE) )
THUMB("r" (PSR_F_BIT | PSR_I_BIT | SVC_MODE) )
: "r14");
}
......