Commit 60248bb3 authored by Catalin Marinas

Thumb-2: Implement the unified arch/arm/mm support

This patch adds unified ARM/Thumb-2 support to the arch/arm/mm/*
files, so the same source assembles for either an ARM or a Thumb-2 kernel.
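
The conversion relies on the ARM()/THUMB() macros together with the
Thumb-2 IT (If-Then) instructions: "it ne" makes the next instruction
conditional on NE, while "ite ne"/"ittt ne" cover the following two or
three instructions, as Thumb-2 requires before conditional forms such
as mcrne. As a rough sketch (an illustration, not part of this patch;
the real definitions live in <asm/unified.h>), the macros select one of
the two instruction streams at build time:

	#ifdef CONFIG_THUMB2_KERNEL
	#define ARM(x...)		/* dropped in a Thumb-2 build */
	#define THUMB(x...)	x	/* kept in a Thumb-2 build */
	#else
	#define ARM(x...)	x	/* kept in an ARM build */
	#define THUMB(x...)		/* dropped in an ARM build */
	#endif

The split is needed where an ARM addressing mode has no Thumb-2
equivalent: the post-indexed "[%2], #1" accesses in the alignment
fixup macros (also instantiated with the user-mode ldrbt/strbt forms,
which have no post-indexed Thumb-2 encoding) become a plain access
followed by an explicit add. The Thumb-2 variant of v7_crval
additionally sets bit 30 of the control register (the TE bit) so that
exceptions are taken in Thumb state.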
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 1ec3bc29
arch/arm/mm/alignment.c
@@ -18,6 +18,7 @@
 #include <linux/proc_fs.h>
 #include <linux/init.h>
 
+#include <asm/unified.h>
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
@@ -148,7 +149,9 @@ union offset_union {
 #define __get8_unaligned_check(ins,val,addr,err)	\
 	__asm__(					\
-	"1:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"2:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
@@ -204,7 +207,9 @@ union offset_union {
 	do {							\
 		unsigned int err = 0, v = val, a = addr;	\
 		__asm__( FIRST_BYTE_16				\
-	"1:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"	mov	%1, %1, "NEXT_BYTE"\n"		\
 	"2:	"ins"	%1, [%2]\n"			\
 	"3:\n"						\
@@ -234,11 +239,17 @@ union offset_union {
 	do {							\
 		unsigned int err = 0, v = val, a = addr;	\
 		__asm__( FIRST_BYTE_32				\
-	"1:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"	mov	%1, %1, "NEXT_BYTE"\n"		\
-	"2:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"2:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"2:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"	mov	%1, %1, "NEXT_BYTE"\n"		\
-	"3:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"3:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"3:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"	mov	%1, %1, "NEXT_BYTE"\n"		\
 	"4:	"ins"	%1, [%2]\n"			\
 	"5:\n"						\
......
arch/arm/mm/cache-v7.S
@@ -10,6 +10,8 @@
  *
  * This is the "shell" of the ARMv7 processor support.
  */
+#include <asm/unified.h>
+
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
@@ -21,7 +23,7 @@
  *
  *	Flush the whole D-cache.
  *
- *	Corrupted registers: r0-r5, r7, r9-r11
+ *	Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
  *
  *	- mm	- mm_struct describing address space
  */
@@ -50,8 +52,12 @@ loop1:
 loop2:
 	mov	r9, r4				@ create working copy of max way size
 loop3:
-	orr	r11, r10, r9, lsl r5		@ factor way and cache number into r11
-	orr	r11, r11, r7, lsl r2		@ factor index number into r11
+ ARM(	orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
+ THUMB(	lsl	r6, r9, r5		)
+ THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
+ ARM(	orr	r11, r11, r7, lsl r2	)	@ factor index number into r11
+ THUMB(	lsl	r6, r7, r2		)
+ THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
 	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
 	subs	r9, r9, #1			@ decrement the way
 	bge	loop3
@@ -80,11 +86,13 @@ ENDPROC(v7_flush_dcache_all)
  *
  */
 ENTRY(v7_flush_kern_cache_all)
-	stmfd	sp!, {r4-r5, r7, r9-r11, lr}
+ ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
 	bl	v7_flush_dcache_all
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
-	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}
+ ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
 	mov	pc, lr
 ENDPROC(v7_flush_kern_cache_all)
@@ -197,10 +205,12 @@ ENTRY(v7_dma_inv_range)
 	sub	r3, r2, #1
 	tst	r0, r3
 	bic	r0, r0, r3
+	it	ne
 	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
 	tst	r1, r3
 	bic	r1, r1, r3
+	it	ne
 	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D / U line
 1:
 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D / U line
......
arch/arm/mm/proc-macros.S
@@ -4,6 +4,7 @@
  *  VMA_VM_FLAGS
  *  VM_EXEC
  */
+#include <asm/unified.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
......
arch/arm/mm/proc-v7.S
@@ -9,6 +9,8 @@
  *
  * This is the "shell" of the ARMv7 processor support.
  */
+#include <asm/unified.h>
+
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
@@ -55,7 +57,7 @@ ENDPROC(cpu_v7_reset)
  *	IRQs are already disabled.
  */
 ENTRY(cpu_v7_do_idle)
-	.long	0xe320f003			@ ARM V7 WFI instruction
+	wfi
 	mov	pc, lr
 ENDPROC(cpu_v7_do_idle)
@@ -118,7 +120,9 @@ ENDPROC(cpu_v7_switch_mm)
  */
 ENTRY(cpu_v7_set_pte_ext)
 #ifdef CONFIG_MMU
-	str	r1, [r0], #-2048		@ linux version
+ ARM(	str	r1, [r0], #-2048	)	@ linux version
+ THUMB(	str	r1, [r0]	)		@ linux version
+ THUMB(	sub	r0, r0, #2048	)
 	bic	r3, r1, #0x000003f0
 	bic	r3, r3, #0x00000003
@@ -126,21 +130,26 @@ ENTRY(cpu_v7_set_pte_ext)
 	orr	r3, r3, #PTE_EXT_AP0 | 2
 
 	tst	r1, #L_PTE_WRITE
+	ite	ne
 	tstne	r1, #L_PTE_DIRTY
 	orreq	r3, r3, #PTE_EXT_APX
 
 	tst	r1, #L_PTE_USER
+	ittt	ne
 	orrne	r3, r3, #PTE_EXT_AP1
 	tstne	r3, #PTE_EXT_APX
 	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
 
 	tst	r1, #L_PTE_YOUNG
+	it	eq
 	biceq	r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK
 
 	tst	r1, #L_PTE_EXEC
+	it	eq
 	orreq	r3, r3, #PTE_EXT_XN
 
 	tst	r1, #L_PTE_PRESENT
+	it	eq
 	moveq	r3, #0
 
 	str	r3, [r0]
@@ -197,15 +206,17 @@ __v7_setup:
 	mov	pc, lr				@ return to head.S:__ret
 ENDPROC(__v7_setup)
 
+	.align	2
 	/*
-	 *          V X F   I D LR
-	 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
+	 *  T       V X F   I D LR
+	 * .E.. ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
 	 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
 	 *    0 110       0011 1.00 .111 1101 < we want
 	 */
 	.type	v7_crval, #object
 v7_crval:
-	crval	clear=0x0120c302, mmuset=0x00c0387d, ucset=0x00c0187c
+ ARM(	crval	clear=0x0120c302, mmuset=0x00c0387d, ucset=0x00c0187c	)
+ THUMB(	crval	clear=0x0120c302, mmuset=0x40c0387d, ucset=0x00c0187c	)
 
 __v7_setup_stack:
 	.space	4 * 11				@ 11 registers
......
arch/arm/mm/tlb-v7.S
@@ -11,6 +11,8 @@
  * ARM architecture version 6 TLB handling functions.
  * These assume a split I/D TLB.
  */
+#include <asm/unified.h>
+
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
@@ -43,6 +45,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
 1:
 	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA (was 1)
 	tst	r2, #VM_EXEC			@ Executable area ?
+	it	ne
 	mcrne	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA (was 1)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
......