Commit d8fba3d4 authored by Catalin Marinas

Thumb-2: Add the ENDPROC declarations to the .S files

This declaration specifies the "function" type for various assembly
functions, needed for generating the correct branch instructions in
Thumb-2.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent a5dfe443
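For reference, a minimal sketch of what the macros used below expand to, assuming the linkage.h definitions of this era (the exact bodies vary by tree, and "my_func" is a made-up example, not part of this commit):

        @ ENTRY(name)   expands roughly to:  .globl name; .align 0; name:
        @ ENDPROC(name) expands roughly to:  .type name, %function; .size name, .-name

        ENTRY(my_func)                  @ exported assembly routine
                mov     pc, lr          @ return to caller
        ENDPROC(my_func)                @ typing the symbol as %function tells the
                                        @ linker to set the Thumb bit on its address
                                        @ and to fix up BL/BLX for interworking

Without the %function type, Thumb-2 code can end up branching to such a symbol in the wrong instruction set state; the .size directive additionally records the symbol's extent for tools such as objdump.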
@@ -421,6 +421,7 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
add r1, r1, #1048576
str r1, [r0]
mov pc, lr
+ENDPROC(__setup_mmu)
__armv4_mmu_cache_on:
mov r12, lr
...
@@ -89,10 +89,12 @@
ENTRY(printhex8)
mov r1, #8
b printhex
+ENDPROC(printhex8)
ENTRY(printhex4)
mov r1, #4
b printhex
+ENDPROC(printhex4)
ENTRY(printhex2)
mov r1, #2
@@ -110,6 +112,7 @@ printhex: adr r2, hexbuf
bne 1b
mov r0, r2
b printascii
+ENDPROC(printhex2)
.ltorg
@@ -127,11 +130,13 @@ ENTRY(printascii)
teqne r1, #0
bne 1b
mov pc, lr
+ENDPROC(printascii)
ENTRY(printch)
addruart r3
mov r1, r0
mov r0, #0
b 1b
+ENDPROC(printch)
hexbuf: .space 16
@@ -70,17 +70,21 @@
__pabt_invalid:
inv_entry BAD_PREFETCH
b common_invalid
+ENDPROC(__pabt_invalid)
__dabt_invalid:
inv_entry BAD_DATA
b common_invalid
+ENDPROC(__dabt_invalid)
__irq_invalid:
inv_entry BAD_IRQ
b common_invalid
+ENDPROC(__irq_invalid)
__und_invalid:
inv_entry BAD_UNDEFINSTR
+ENDPROC(__und_invalid)
@
@ XXX fall through to common_invalid
@@ -186,6 +190,7 @@ __dabt_svc:
ldr r0, [sp, #S_PSR]
msr spsr_cxsf, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ENDPROC(__dabt_svc)
.align 5
__irq_svc:
@@ -219,6 +224,7 @@ preempt_return:
bleq trace_hardirqs_on
#endif
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ENDPROC(__irq_svc)
.ltorg
@@ -269,6 +275,7 @@ __und_svc:
ldr lr, [sp, #S_PSR] @ Get SVC cpsr
msr spsr_cxsf, lr
ldmia sp, {r0 - pc}^ @ Restore SVC registers
+ENDPROC(__und_svc)
.align 5
__pabt_svc:
@@ -310,6 +317,7 @@ __pabt_svc:
ldr r0, [sp, #S_PSR]
msr spsr_cxsf, r0
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ENDPROC(__pabt_svc)
.align 5
.LCcralign:
@@ -413,6 +421,7 @@ __dabt_usr:
mov r2, sp
adr lr, ret_from_exception
b do_DataAbort
+ENDPROC(__dabt_usr)
.align 5
__irq_usr:
@@ -442,6 +451,7 @@ __irq_usr:
mov why, #0
b ret_to_user
+ENDPROC(__irq_usr)
.ltorg
@@ -474,6 +484,7 @@ __und_usr:
#else
b __und_usr_unknown
#endif
+ENDPROC(__und_usr)
@
@ fallthrough to call_fpe
@@ -624,6 +635,7 @@ __und_usr_unknown:
mov r0, sp
adr lr, ret_from_exception
b do_undefinstr
+ENDPROC(__und_usr_unknown)
.align 5
__pabt_usr:
@@ -640,7 +652,9 @@ __pabt_usr:
enable_irq @ Enable interrupts
mov r1, sp @ regs
bl do_PrefetchAbort @ call abort handler
+ENDPROC(__pabt_usr)
/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
@@ -648,6 +662,7 @@ ENTRY(ret_from_exception)
get_thread_info tsk
mov why, #0
b ret_to_user
+ENDPROC(ret_from_exception)
/*
 * Register switch for ARMv3 and ARMv4 processors
@@ -690,6 +705,7 @@ ENTRY(__switch_to)
bl atomic_notifier_call_chain
mov r0, r5
ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
+ENDPROC(__switch_to)
__INIT
@@ -1017,6 +1033,7 @@ vector_\name:
mov r0, sp
ldr lr, [pc, lr, lsl #2]
movs pc, lr @ branch to handler in SVC mode
+ENDPROC(vector_\name)
.endm
.globl __stubs_start
...
@@ -76,6 +76,7 @@ no_work_pending:
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
movs pc, lr @ return & move spsr_svc into cpsr
+ENDPROC(ret_to_user)
/*
 * This is how we return from a fork.
@@ -91,7 +92,7 @@ ENTRY(ret_from_fork)
mov r0, #1 @ trace exit [IP = 1]
bl syscall_trace
b ret_slow_syscall
+ENDPROC(ret_from_fork)
.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
@@ -218,6 +219,7 @@ ENTRY(vector_swi)
eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
bcs arm_syscall
b sys_ni_syscall @ not private func
+ENDPROC(vector_swi)
/*
 * This is the really slow path. We're going to be doing
@@ -275,7 +277,6 @@ ENTRY(sys_call_table)
 */
@ r0 = syscall number
@ r8 = syscall table
-.type sys_syscall, #function
sys_syscall:
bic scno, r0, #__NR_OABI_SYSCALL_BASE
cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
@@ -287,53 +288,65 @@ sys_syscall:
movlo r3, r4
ldrlo pc, [tbl, scno, lsl #2]
b sys_ni_syscall
+ENDPROC(sys_syscall)
sys_fork_wrapper:
add r0, sp, #S_OFF
b sys_fork
+ENDPROC(sys_fork_wrapper)
sys_vfork_wrapper:
add r0, sp, #S_OFF
b sys_vfork
+ENDPROC(sys_vfork_wrapper)
sys_execve_wrapper:
add r3, sp, #S_OFF
b sys_execve
+ENDPROC(sys_execve_wrapper)
sys_clone_wrapper:
add ip, sp, #S_OFF
str ip, [sp, #4]
b sys_clone
+ENDPROC(sys_clone_wrapper)
sys_sigsuspend_wrapper:
add r3, sp, #S_OFF
b sys_sigsuspend
+ENDPROC(sys_sigsuspend_wrapper)
sys_rt_sigsuspend_wrapper:
add r2, sp, #S_OFF
b sys_rt_sigsuspend
+ENDPROC(sys_rt_sigsuspend_wrapper)
sys_sigreturn_wrapper:
add r0, sp, #S_OFF
b sys_sigreturn
+ENDPROC(sys_sigreturn_wrapper)
sys_rt_sigreturn_wrapper:
add r0, sp, #S_OFF
b sys_rt_sigreturn
+ENDPROC(sys_rt_sigreturn_wrapper)
sys_sigaltstack_wrapper:
ldr r2, [sp, #S_OFF + S_SP]
b do_sigaltstack
+ENDPROC(sys_sigaltstack_wrapper)
sys_statfs64_wrapper:
teq r1, #88
moveq r1, #84
b sys_statfs64
+ENDPROC(sys_statfs64_wrapper)
sys_fstatfs64_wrapper:
teq r1, #88
moveq r1, #84
b sys_fstatfs64
+ENDPROC(sys_fstatfs64_wrapper)
/*
 * Note: off_4k (r5) is always units of 4K. If we can't do the requested
@@ -351,11 +364,14 @@ sys_mmap2:
str r5, [sp, #4]
b do_mmap2
#endif
+ENDPROC(sys_mmap2)
ENTRY(pabort_ifar)
mrc p15, 0, r0, cr6, cr0, 2
+ENDPROC(pabort_ifar)
ENTRY(pabort_noifar)
mov pc, lr
+ENDPROC(pabort_noifar)
#ifdef CONFIG_OABI_COMPAT
@@ -366,26 +382,31 @@ ENTRY(pabort_noifar)
sys_oabi_pread64:
stmia sp, {r3, r4}
b sys_pread64
+ENDPROC(sys_oabi_pread64)
sys_oabi_pwrite64:
stmia sp, {r3, r4}
b sys_pwrite64
+ENDPROC(sys_oabi_pwrite64)
sys_oabi_truncate64:
mov r3, r2
mov r2, r1
b sys_truncate64
+ENDPROC(sys_oabi_truncate64)
sys_oabi_ftruncate64:
mov r3, r2
mov r2, r1
b sys_ftruncate64
+ENDPROC(sys_oabi_ftruncate64)
sys_oabi_readahead:
str r3, [sp]
mov r3, r2
mov r2, r1
b sys_readahead
+ENDPROC(sys_oabi_readahead)
/*
 * Let's declare a second syscall table for old ABI binaries
...
@@ -33,7 +33,6 @@ __switch_data:
 * r2 = atags pointer
 * r9 = processor ID
 */
-.type __mmap_switched, %function
__mmap_switched:
adr r3, __switch_data + 4
@@ -56,6 +55,7 @@ __mmap_switched:
bic r4, r0, #CR_A @ Clear 'A' bit
stmia r7, {r0, r4} @ Save control register values
b start_kernel
+ENDPROC(__mmap_switched)
/*
 * Exception handling. Something went wrong and we can't proceed. We
@@ -66,8 +66,6 @@ __mmap_switched:
 * and hope for the best (useful if bootloader fails to pass a proper
 * machine ID for example).
 */
-.type __error_p, %function
__error_p:
#ifdef CONFIG_DEBUG_LL
adr r0, str_p1
@@ -76,8 +74,8 @@ __error_p:
str_p1: .asciz "\nError: unrecognized/unsupported processor variant.\n"
.align
#endif
+ENDPROC(__error_p)
-.type __error_a, %function
__error_a:
#ifdef CONFIG_DEBUG_LL
mov r4, r1 @ preserve machine ID
@@ -107,13 +105,14 @@ __error_a:
adr r0, str_a3
bl printascii
b __error
+ENDPROC(__error_a)
str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
.align
#endif
-.type __error, %function
__error:
#ifdef CONFIG_ARCH_RPC
/*
@@ -130,6 +129,7 @@ __error:
#endif
1: mov r0, r0
b 1b
+ENDPROC(__error)
/*
@@ -145,7 +145,6 @@ __error:
 * r5 = proc_info pointer in physical address space
 * r9 = cpuid (preserved)
 */
-.type __lookup_processor_type, %function
__lookup_processor_type:
adr r3, 3f
ldmda r3, {r5 - r7}
@@ -161,6 +160,7 @@ __lookup_processor_type:
blo 1b
mov r5, #0 @ unknown processor
2: mov pc, lr
+ENDPROC(__lookup_processor_type)
/*
 * This provides a C-API version of the above function.
@@ -171,6 +171,7 @@ ENTRY(lookup_processor_type)
bl __lookup_processor_type
mov r0, r5
ldmfd sp!, {r4 - r7, r9, pc}
+ENDPROC(lookup_processor_type)
/*
 * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
@@ -193,7 +194,6 @@ ENTRY(lookup_processor_type)
 * r3, r4, r6 corrupted
 * r5 = mach_info pointer in physical address space
 */
-.type __lookup_machine_type, %function
__lookup_machine_type:
adr r3, 3b
ldmia r3, {r4, r5, r6}
@@ -208,6 +208,7 @@ __lookup_machine_type:
blo 1b
mov r5, #0 @ unknown machine
2: mov pc, lr
+ENDPROC(__lookup_machine_type)
/*
 * This provides a C-API version of the above function.
@@ -218,6 +219,7 @@ ENTRY(lookup_machine_type)
bl __lookup_machine_type
mov r0, r5
ldmfd sp!, {r4 - r6, pc}
+ENDPROC(lookup_machine_type)
/* Determine validity of the r2 atags pointer. The heuristic requires
 * that the pointer be aligned, in the first 16k of physical RAM and
@@ -231,8 +233,6 @@ ENTRY(lookup_machine_type)
 * r2 either valid atags pointer, or zero
 * r5, r6 corrupted
 */
-.type __vet_atags, %function
__vet_atags:
tst r2, #0x3 @ aligned?
bne 1f
@@ -249,3 +249,4 @@ __vet_atags:
1: mov r2, #0
mov pc, lr
+ENDPROC(__vet_atags)
@@ -37,7 +37,6 @@
 *
 */
.section ".text.head", "ax"
-.type stext, %function
ENTRY(stext)
msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
@ and irqs disabled
@@ -57,11 +56,11 @@ ENTRY(stext)
@ the initialization is done
adr lr, __after_proc_init @ return (PIC) address
add pc, r10, #PROCINFO_INITFUNC
+ENDPROC(stext)
/*
 * Set the Control Register and Read the process ID.
 */
-.type __after_proc_init, %function
__after_proc_init:
#ifdef CONFIG_CPU_CP15
/*
@@ -92,6 +91,7 @@ __after_proc_init:
mov pc, r13 @ clear the BSS and jump
@ to start_kernel
+ENDPROC(__after_proc_init)
.ltorg
#include "head-common.S"
@@ -78,7 +78,6 @@
 * circumstances, zImage) is for.
 */
.section ".text.head", "ax"
-.type stext, %function
ENTRY(stext)
msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
@ and irqs disabled
@@ -103,9 +102,9 @@ ENTRY(stext)
@ mmu has been enabled
adr lr, __enable_mmu @ return (PIC) address
add pc, r10, #PROCINFO_INITFUNC
+ENDPROC(stext)
#if defined(CONFIG_SMP)
-.type secondary_startup, #function
ENTRY(secondary_startup)
/*
 * Common entry point for secondary CPUs.
@@ -131,6 +130,7 @@ ENTRY(secondary_startup)
adr lr, __enable_mmu @ return address
add pc, r10, #PROCINFO_INITFUNC @ initialise processor
@ (return control reg)
+ENDPROC(secondary_startup)
/*
 * r6 = &secondary_data
@@ -139,6 +139,7 @@ ENTRY(__secondary_switched)
ldr sp, [r7, #4] @ get secondary_data.stack
mov fp, #0
b secondary_start_kernel
+ENDPROC(__secondary_switched)
.type __secondary_data, %object
__secondary_data:
@@ -154,7 +155,6 @@ __secondary_data:
 * this is just loading the page table pointer and domain access
 * registers.
 */
-.type __enable_mmu, %function
__enable_mmu:
#ifdef CONFIG_ALIGNMENT_TRAP
orr r0, r0, #CR_A
@@ -177,6 +177,7 @@ __enable_mmu:
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
b __turn_mmu_on
+ENDPROC(__enable_mmu)
/*
 * Enable the MMU. This completely changes the structure of the visible
@@ -190,7 +191,6 @@ __enable_mmu:
 * other registers depend on the function called upon completion
 */
.align 5
-.type __turn_mmu_on, %function
__turn_mmu_on:
mov r0, r0
mcr p15, 0, r0, c1, c0, 0 @ write control reg
@@ -198,7 +198,7 @@ __turn_mmu_on:
mov r3, r3
mov r3, r3
mov pc, r13
+ENDPROC(__turn_mmu_on)
/*
@@ -214,7 +214,6 @@ __turn_mmu_on:
 * r0, r3, r6, r7 corrupted
 * r4 = physical page table address
 */
-.type __create_page_tables, %function
__create_page_tables:
pgtbl r4 @ page table address
@@ -328,6 +327,7 @@ __create_page_tables:
#endif
#endif
mov pc, lr
+ENDPROC(__create_page_tables)
.ltorg
#include "head-common.S"
@@ -47,3 +47,5 @@ ENTRY(__aeabi_llsl)
mov al, al, lsl r2
mov pc, lr
+ENDPROC(__ashldi3)
+ENDPROC(__aeabi_llsl)
@@ -47,3 +47,5 @@ ENTRY(__aeabi_lasr)
mov ah, ah, asr r2
mov pc, lr
+ENDPROC(__ashrdi3)
+ENDPROC(__aeabi_lasr)
@@ -103,6 +103,8 @@ for_each_frame: tst frame, mask @ Check for address exceptions
mov r1, frame
bl printk
no_frame: ldmfd sp!, {r4 - r8, pc}
+ENDPROC(__backtrace)
+ENDPROC(c_backtrace)
.section __ex_table,"a"
.align 3
...
@@ -19,3 +19,5 @@ ENTRY(_change_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_change_bit_le)
bitop eor
+ENDPROC(_change_bit_be)
+ENDPROC(_change_bit_le)
@@ -44,6 +44,7 @@ USER( strnebt r2, [r0], #1)
USER( strnebt r2, [r0], #1)
mov r0, #0
ldmfd sp!, {r1, pc}
+ENDPROC(__clear_user)
.section .fixup,"ax"
.align 0
...
@@ -20,3 +20,5 @@ ENTRY(_clear_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_clear_bit_le)
bitop bic
+ENDPROC(_clear_bit_be)
+ENDPROC(_clear_bit_le)
@@ -87,6 +87,8 @@ ENTRY(__copy_from_user)
#include "copy_template.S"
+ENDPROC(__copy_from_user)
.section .fixup,"ax"
.align 0
copy_abort_preamble
...
@@ -44,3 +44,4 @@ ENTRY(copy_page)
PLD( ldmeqia r1!, {r3, r4, ip, lr} )
PLD( beq 2b )
ldmfd sp!, {r4, pc} @ 3
+ENDPROC(copy_page)
@@ -90,6 +90,8 @@ ENTRY(__copy_to_user)
#include "copy_template.S"
+ENDPROC(__copy_to_user)
.section .fixup,"ax"
.align 0
copy_abort_preamble
...
@@ -29,4 +29,5 @@ ENTRY(__csum_ipv6_magic)
adcs r0, r0, r2
adcs r0, r0, #0
ldmfd sp!, {pc}
+ENDPROC(__csum_ipv6_magic)
@@ -139,3 +139,4 @@ ENTRY(csum_partial)
tst len, #0x1c
bne 4b
b .Lless4
+ENDPROC(csum_partial)
@@ -32,6 +32,8 @@ ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
mul r0, r2, r0 @ max = 2^32-1
movs r0, r0, lsr #6
moveq pc, lr
+ENDPROC(__udelay)
+ENDPROC(__const_udelay) @ 0 <= r0 <= 0x7fffff06
/*
 * loops = r0 * HZ * loops_per_jiffy / 1000000
@@ -60,3 +62,4 @@ ENTRY(__delay)
#endif
bhi __delay
mov pc, lr
+ENDPROC(__delay)
@@ -198,3 +198,4 @@ ENTRY(__do_div64)
mov xh, #0
ldr pc, [sp], #8
+ENDPROC(__do_div64)
@@ -33,6 +33,7 @@ ENTRY(_find_first_zero_bit_le)
blo 1b
3: mov r0, r1 @ no free bits
mov pc, lr
+ENDPROC(_find_first_zero_bit_le)
/*
 * Purpose : Find next 'zero' bit
@@ -50,6 +51,7 @@ ENTRY(_find_next_zero_bit_le)
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
+ENDPROC(_find_next_zero_bit_le)
/*
 * Purpose : Find a 'one' bit
@@ -67,6 +69,7 @@ ENTRY(_find_first_bit_le)
blo 1b
3: mov r0, r1 @ no free bits
mov pc, lr
+ENDPROC(_find_first_bit_le)
/*
 * Purpose : Find next 'one' bit
@@ -83,6 +86,7 @@ ENTRY(_find_next_bit_le)
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
+ENDPROC(_find_next_bit_le)
#ifdef __ARMEB__
@@ -99,6 +103,7 @@ ENTRY(_find_first_zero_bit_be)
blo 1b
3: mov r0, r1 @ no free bits
mov pc, lr
+ENDPROC(_find_first_zero_bit_be)
ENTRY(_find_next_zero_bit_be)
teq r1, #0
@@ -113,6 +118,7 @@ ENTRY(_find_next_zero_bit_be)
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
+ENDPROC(_find_next_zero_bit_be)
ENTRY(_find_first_bit_be)
teq r1, #0
@@ -127,6 +133,7 @@ ENTRY(_find_first_bit_be)
blo 1b
3: mov r0, r1 @ no free bits
mov pc, lr
+ENDPROC(_find_first_bit_be)
ENTRY(_find_next_bit_be)
teq r1, #0
@@ -140,6 +147,7 @@ ENTRY(_find_next_bit_be)
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
+ENDPROC(_find_next_bit_be)
#endif
...
@@ -26,16 +26,16 @@
 * Note that ADDR_LIMIT is either 0 or 0xc0000000.
 * Note also that it is intended that __get_user_bad is not global.
 */
+#include <linux/linkage.h>
#include <asm/errno.h>
-.global __get_user_1
-__get_user_1:
+ENTRY(__get_user_1)
1: ldrbt r2, [r0]
mov r0, #0
mov pc, lr
+ENDPROC(__get_user_1)
-.global __get_user_2
-__get_user_2:
+ENTRY(__get_user_2)
2: ldrbt r2, [r0], #1
3: ldrbt r3, [r0]
#ifndef __ARMEB__
@@ -45,17 +45,19 @@ __get_user_2:
#endif
mov r0, #0
mov pc, lr
+ENDPROC(__get_user_2)
-.global __get_user_4
-__get_user_4:
+ENTRY(__get_user_4)
4: ldrt r2, [r0]
mov r0, #0
mov pc, lr
+ENDPROC(__get_user_4)
__get_user_bad:
mov r2, #0
mov r0, #-EFAULT
mov pc, lr
+ENDPROC(__get_user_bad)
.section __ex_table, "a"
.long 1b, __get_user_bad
...
@@ -120,3 +120,4 @@ ENTRY(__raw_readsb)
strgtb r3, [r1]
ldmfd sp!, {r4 - r6, pc}
+ENDPROC(__raw_readsb)
@@ -76,3 +76,4 @@ ENTRY(__raw_readsl)
8: mov r3, ip, get_byte_0
strb r3, [r1, #0]
mov pc, lr
+ENDPROC(__raw_readsl)
@@ -128,3 +128,4 @@ ENTRY(__raw_readsw)
_BE_ONLY_( movne ip, ip, lsr #24 )
strneb ip, [r1]
ldmfd sp!, {r4, pc}
+ENDPROC(__raw_readsw)
@@ -91,3 +91,4 @@ ENTRY(__raw_writesb)
strgtb r3, [r0]
ldmfd sp!, {r4, r5, pc}
+ENDPROC(__raw_writesb)
@@ -64,3 +64,4 @@ ENTRY(__raw_writesl)
str ip, [r0]
bne 6b
mov pc, lr
+ENDPROC(__raw_writesl)
@@ -94,3 +94,4 @@ ENTRY(__raw_writesw)
3: movne ip, r3, lsr #8
strneh ip, [r0]
mov pc, lr
+ENDPROC(__raw_writesw)
@@ -230,6 +230,8 @@ ENTRY(__aeabi_uidiv)
mov r0, r0, lsr r2
mov pc, lr
+ENDPROC(__udivsi3)
+ENDPROC(__aeabi_uidiv)
ENTRY(__umodsi3)
@@ -245,6 +247,7 @@ ENTRY(__umodsi3)
mov pc, lr
+ENDPROC(__umodsi3)
ENTRY(__divsi3)
ENTRY(__aeabi_idiv)
@@ -284,6 +287,8 @@ ENTRY(__aeabi_idiv)
rsbmi r0, r0, #0
mov pc, lr
+ENDPROC(__divsi3)
+ENDPROC(__aeabi_idiv)
ENTRY(__modsi3)
@@ -305,6 +310,8 @@ ENTRY(__modsi3)
rsbmi r0, r0, #0
mov pc, lr
+ENDPROC(__modsi3)
#ifdef CONFIG_AEABI
ENTRY(__aeabi_uidivmod)
@@ -316,6 +323,8 @@ ENTRY(__aeabi_uidivmod)
sub r1, r1, r3
mov pc, lr
+ENDPROC(__aeabi_uidivmod)
ENTRY(__aeabi_idivmod)
stmfd sp!, {r0, r1, ip, lr}
@@ -325,6 +334,8 @@ ENTRY(__aeabi_idivmod)
sub r1, r1, r3
mov pc, lr
+ENDPROC(__aeabi_idivmod)
#endif
Ldiv0:
...
@@ -47,3 +47,5 @@ ENTRY(__aeabi_llsr)
mov ah, ah, lsr r2
mov pc, lr
+ENDPROC(__lshrdi3)
+ENDPROC(__aeabi_llsr)
@@ -23,3 +23,4 @@ ENTRY(memchr)
sub r0, r0, #1
2: movne r0, #0
mov pc, lr
+ENDPROC(memchr)
@@ -57,3 +57,4 @@ ENTRY(memcpy)
#include "copy_template.S"
+ENDPROC(memcpy)
@@ -204,3 +204,4 @@ ENTRY(memmove)
18: backward_copy_shift push=24 pull=8
+ENDPROC(memmove)
@@ -78,3 +78,4 @@ ENTRY(memset)
tst r2, #1
strneb r1, [r0], #1
mov pc, lr
+ENDPROC(memset)
@@ -78,3 +78,4 @@ ENTRY(__memzero)
tst r1, #1 @ 1 a byte left over
strneb r2, [r0], #1 @ 1
mov pc, lr @ 1
+ENDPROC(__memzero)
@@ -43,3 +43,5 @@ ENTRY(__aeabi_lmul)
adc xh, xh, ip, lsr #16
mov pc, lr
+ENDPROC(__muldi3)
+ENDPROC(__aeabi_lmul)
@@ -26,16 +26,16 @@
 * Note that ADDR_LIMIT is either 0 or 0xc0000000
 * Note also that it is intended that __put_user_bad is not global.
 */
+#include <linux/linkage.h>
#include <asm/errno.h>
-.global __put_user_1
-__put_user_1:
+ENTRY(__put_user_1)
1: strbt r2, [r0]
mov r0, #0
mov pc, lr
+ENDPROC(__put_user_1)
-.global __put_user_2
-__put_user_2:
+ENTRY(__put_user_2)
mov ip, r2, lsr #8
#ifndef __ARMEB__
2: strbt r2, [r0], #1
@@ -46,23 +46,25 @@ __put_user_2:
#endif
mov r0, #0
mov pc, lr
+ENDPROC(__put_user_2)
-.global __put_user_4
-__put_user_4:
+ENTRY(__put_user_4)
4: strt r2, [r0]
mov r0, #0
mov pc, lr
+ENDPROC(__put_user_4)
-.global __put_user_8
-__put_user_8:
+ENTRY(__put_user_8)
5: strt r2, [r0], #4
6: strt r3, [r0]
mov r0, #0
mov pc, lr
+ENDPROC(__put_user_8)
__put_user_bad:
mov r0, #-EFAULT
mov pc, lr
+ENDPROC(__put_user_bad)
.section __ex_table, "a"
.long 1b, __put_user_bad
...
@@ -20,3 +20,5 @@ ENTRY(_set_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_set_bit_le)
bitop orr
+ENDPROC(_set_bit_be)
+ENDPROC(_set_bit_le)
@@ -185,6 +185,8 @@ ENTRY(sha_transform)
ldmfd sp!, {r4 - r8, pc}
+ENDPROC(sha_transform)
.L_sha_K:
.word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
@@ -204,3 +206,4 @@ ENTRY(sha_init)
stmia r0, {r1, r2, r3, ip, lr}
ldr pc, [sp], #4
+ENDPROC(sha_init)
@@ -24,3 +24,4 @@ ENTRY(strchr)
movne r0, #0
subeq r0, r0, #1
mov pc, lr
+ENDPROC(strchr)
@@ -31,6 +31,7 @@ USER( ldrplbt r3, [r1], #1)
sub r1, r1, #1 @ take NUL character out of count
2: sub r0, r1, ip
mov pc, lr
+ENDPROC(__strncpy_from_user)
.section .fixup,"ax"
.align 0
...
@@ -31,6 +31,7 @@ USER( ldrbt r3, [r0], #1)
add r0, r0, #1
2: sub r0, r0, r2
mov pc, lr
+ENDPROC(__strnlen_user)
.section .fixup,"ax"
.align 0
...
@@ -23,3 +23,4 @@ ENTRY(strrchr)
bne 1b
mov r0, r3
mov pc, lr
+ENDPROC(strrchr)
@@ -16,3 +16,5 @@ ENTRY(_test_and_change_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_test_and_change_bit_le)
testop eor, strb
+ENDPROC(_test_and_change_bit_be)
+ENDPROC(_test_and_change_bit_le)
@@ -16,3 +16,5 @@ ENTRY(_test_and_clear_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_test_and_clear_bit_le)
testop bicne, strneb
+ENDPROC(_test_and_clear_bit_be)
+ENDPROC(_test_and_clear_bit_le)
@@ -16,3 +16,5 @@ ENTRY(_test_and_set_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_test_and_set_bit_le)
testop orreq, streqb
+ENDPROC(_test_and_set_bit_be)
+ENDPROC(_test_and_set_bit_le)
@@ -277,6 +277,7 @@ USER( strgebt r3, [r0], #1) @ May fault
ldrgtb r3, [r1], #0
USER( strgtbt r3, [r0], #1) @ May fault
b .Lc2u_finished
+ENDPROC(__copy_to_user)
.section .fixup,"ax"
.align 0
@@ -542,6 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
USER( ldrgtbt r3, [r1], #1) @ May fault
strgtb r3, [r0], #1
b .Lcfu_finished
+ENDPROC(__copy_from_user)
.section .fixup,"ax"
.align 0
...
@@ -33,6 +33,8 @@ ENTRY(__ucmpdi2)
movhi r0, #2
mov pc, lr
+ENDPROC(__ucmpdi2)
#ifdef CONFIG_AEABI
ENTRY(__aeabi_ulcmp)
@@ -44,5 +46,7 @@ ENTRY(__aeabi_ulcmp)
movhi r0, #1
mov pc, lr
+ENDPROC(__aeabi_ulcmp)
#endif
@@ -30,3 +30,4 @@ ENTRY(v7_early_abort)
 * New designs should not need to patch up faults.
 */
mov pc, lr
+ENDPROC(v7_early_abort)
@@ -17,3 +17,4 @@ ENTRY(nommu_early_abort)
mov r0, #0 @ clear r0, r1 (no FSR/FAR)
mov r1, #0
mov pc, lr
+ENDPROC(nommu_early_abort)
@@ -66,6 +66,7 @@ finished:
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
isb
mov pc, lr
+ENDPROC(v7_flush_dcache_all)
/*
 * v7_flush_cache_all()
@@ -85,6 +86,7 @@ ENTRY(v7_flush_kern_cache_all)
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
ldmfd sp!, {r4-r5, r7, r9-r11, lr}
mov pc, lr
+ENDPROC(v7_flush_kern_cache_all)
/*
 * v7_flush_cache_all()
@@ -110,6 +112,8 @@ ENTRY(v7_flush_user_cache_all)
 */
ENTRY(v7_flush_user_cache_range)
mov pc, lr
+ENDPROC(v7_flush_user_cache_all)
+ENDPROC(v7_flush_user_cache_range)
/*
 * v7_coherent_kern_range(start,end)
@@ -155,6 +159,8 @@ ENTRY(v7_coherent_user_range)
dsb
isb
mov pc, lr
+ENDPROC(v7_coherent_kern_range)
+ENDPROC(v7_coherent_user_range)
/*
 * v7_flush_kern_dcache_page(kaddr)
@@ -174,6 +180,7 @@ ENTRY(v7_flush_kern_dcache_page)
blo 1b
dsb
mov pc, lr
+ENDPROC(v7_flush_kern_dcache_page)
/*
 * v7_dma_inv_range(start,end)
@@ -202,6 +209,7 @@ ENTRY(v7_dma_inv_range)
blo 1b
dsb
mov pc, lr
+ENDPROC(v7_dma_inv_range)
/*
 * v7_dma_clean_range(start,end)
@@ -219,6 +227,7 @@ ENTRY(v7_dma_clean_range)
blo 1b
dsb
mov pc, lr
+ENDPROC(v7_dma_clean_range)
/*
 * v7_dma_flush_range(start,end)
@@ -236,6 +245,7 @@ ENTRY(v7_dma_flush_range)
blo 1b
dsb
mov pc, lr
+ENDPROC(v7_dma_flush_range)
__INITDATA
...
@@ -25,9 +25,11 @@
ENTRY(cpu_v7_proc_init)
mov pc, lr
+ENDPROC(cpu_v7_proc_init)
ENTRY(cpu_v7_proc_fin)
mov pc, lr
+ENDPROC(cpu_v7_proc_fin)
/*
 * cpu_v7_reset(loc)
@@ -43,6 +45,7 @@ ENTRY(cpu_v7_proc_fin)
.align 5
ENTRY(cpu_v7_reset)
mov pc, r0
+ENDPROC(cpu_v7_reset)
/*
 * cpu_v7_do_idle()
@@ -54,6 +57,7 @@ ENTRY(cpu_v7_reset)
ENTRY(cpu_v7_do_idle)
.long 0xe320f003 @ ARM V7 WFI instruction
mov pc, lr
+ENDPROC(cpu_v7_do_idle)
ENTRY(cpu_v7_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
@@ -65,6 +69,7 @@ ENTRY(cpu_v7_dcache_clean_area)
dsb
#endif
mov pc, lr
+ENDPROC(cpu_v7_dcache_clean_area)
/*
 * cpu_v7_switch_mm(pgd_phys, tsk)
@@ -89,6 +94,7 @@ ENTRY(cpu_v7_switch_mm)
isb
#endif
mov pc, lr
+ENDPROC(cpu_v7_switch_mm)
/*
 * cpu_v7_set_pte_ext(ptep, pte)
@@ -141,6 +147,7 @@ ENTRY(cpu_v7_set_pte_ext)
mcr p15, 0, r0, c7, c10, 1 @ flush_pte
#endif
mov pc, lr
+ENDPROC(cpu_v7_set_pte_ext)
cpu_v7_name:
.ascii "ARMv7 Processor"
@@ -188,6 +195,7 @@ __v7_setup:
bic r0, r0, r5 @ clear bits them
orr r0, r0, r6 @ set them
mov pc, lr @ return to head.S:__ret
+ENDPROC(__v7_setup)
/*
 * V X F I D LR
...
@@ -51,6 +51,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
dsb
mov pc, lr
+ENDPROC(v7wbi_flush_user_tlb_range)
/*
 * v7wbi_flush_kern_tlb_range(start,end)
@@ -77,6 +78,7 @@ ENTRY(v7wbi_flush_kern_tlb_range)
dsb
isb
mov pc, lr
+ENDPROC(v7wbi_flush_kern_tlb_range)
.section ".text.init", #alloc, #execinstr
...
@@ -21,13 +21,13 @@
#include <asm/assembler.h>
#include <asm/vfpmacros.h>
-.globl do_vfp
-do_vfp:
+ENTRY(do_vfp)
enable_irq
ldr r4, .LCvfp
ldr r11, [r10, #TI_CPU] @ CPU number
add r10, r10, #TI_VFPSTATE @ r10 = workspace
ldr pc, [r4] @ call VFP entry point
+ENDPROC(do_vfp)
ENTRY(vfp_null_entry)
mov pc, lr
@@ -40,11 +40,11 @@ ENDPROC(vfp_null_entry)
@ failure to the VFP initialisation code.
__INIT
-.globl vfp_testing_entry
-vfp_testing_entry:
+ENTRY(vfp_testing_entry)
ldr r0, VFP_arch_address
str r5, [r0] @ known non-zero value
mov pc, r9 @ we have handled the fault
+ENDPROC(vfp_testing_entry)
VFP_arch_address:
.word VFP_arch
...
@@ -68,8 +68,7 @@
@ r11 = CPU number
@ lr = failure return
-.globl vfp_support_entry
-vfp_support_entry:
+ENTRY(vfp_support_entry)
DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
VFPFMRX r1, FPEXC @ Is the VFP enabled?
@@ -165,11 +164,10 @@ process_exception:
@ code will raise an exception if
@ required. If not, the user code will
@ retry the faulted instruction
+ENDPROC(vfp_support_entry)
#ifdef CONFIG_SMP
-.globl vfp_save_state
-.type vfp_save_state, %function
-vfp_save_state:
+ENTRY(vfp_save_state)
@ Save the current VFP state
@ r0 - save location
@ r1 - FPEXC
@@ -182,13 +180,13 @@ vfp_save_state:
VFPFMRX r12, FPINST2, NE @ FPINST2 if needed (and present)
stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2
mov pc, lr
+ENDPROC(vfp_save_state)
#endif
last_VFP_context_address:
.word last_VFP_context
-.globl vfp_get_float
-vfp_get_float:
+ENTRY(vfp_get_float)
add pc, pc, r0, lsl #3
mov r0, r0
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
@@ -197,9 +195,9 @@ vfp_get_float:
mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1
mov pc, lr
.endr
+ENDPROC(vfp_get_float)
-.globl vfp_put_float
-vfp_put_float:
+ENTRY(vfp_put_float)
add pc, pc, r1, lsl #3
mov r0, r0
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
@@ -208,9 +206,9 @@ vfp_put_float:
mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1
mov pc, lr
.endr
+ENDPROC(vfp_put_float)
-.globl vfp_get_double
-vfp_get_double:
+ENTRY(vfp_get_double)
add pc, pc, r0, lsl #3
mov r0, r0
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
@@ -229,9 +227,9 @@ vfp_get_double:
mov r0, #0
mov r1, #0
mov pc, lr
+ENDPROC(vfp_get_double)
-.globl vfp_put_double
-vfp_put_double:
+ENTRY(vfp_put_double)
add pc, pc, r2, lsl #3
mov r0, r0
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
@@ -245,3 +243,4 @@ vfp_put_double:
mov pc, lr
.endr
#endif
+ENDPROC(vfp_put_double)