Commit 6ae3983b authored by Russell King - ARM Linux, committed by Tony Lindgren

Fix ARMv7 memory typing

* Russell King - ARM Linux <linux@arm.linux.org.uk> [081103 07:09]:
> Solving ARMv7 seems to be fairly simple, at the expense of making
> build_mem_types_table() slightly more complex.  If that was the only
> problem, then I wouldn't be mentioning the idea of dropping the
> patchset.

Well, this is the fix for ARMv7 (and a few others).  In making these
changes, I went back to DDI0100I (ARMv6 ARM), DDI0406A (ARMv7 ARM)
and the Marvell Xscale3 documentation.
...

Note that this version has a bug in the XScale handling, but works
for OMAP. This patch will get reverted once the final patch is
available.
Signed-off-by: Tony Lindgren <tony@atomide.com>
parent a634e507
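
As background for the hunks below: the patch keys the device memory encodings off the CPU architecture and the SCTLR TEX-remap bit. The following standalone sketch (not kernel code; the constants are simplified stand-ins for the real PMD_SECT_* and CR_* definitions, and the extended-page-table check is omitted) shows the decision it makes for the write-combine (ioremap_wc) type: TEX(1) on XScale3/ARMv6+ without TEX remap, the Bufferable bit alone once ARMv7 TEX remap is enabled, and plain uncached/buffered on older CPUs.

    /* Illustrative only: simplified stand-ins for the kernel's PMD/CR bits. */
    #include <stdio.h>

    #define PMD_SECT_BUFFERABLE (1u << 2)   /* B bit */
    #define PMD_SECT_TEX_1      (1u << 12)  /* TEX[0] */
    #define CR_TRE              (1u << 28)  /* SCTLR.TRE, TEX remap enable */

    enum { ARCH_V5 = 5, ARCH_V6 = 6, ARCH_V7 = 7 };

    /* Extra section bits for an ioremap_wc()-style device mapping. */
    static unsigned int wc_section_bits(int cpu_arch, int is_xsc3, unsigned int cr)
    {
        if (is_xsc3 || cpu_arch >= ARCH_V6) {
            if (cpu_arch >= ARCH_V7 && (cr & CR_TRE))
                return PMD_SECT_BUFFERABLE;  /* SXCB=0001 under TEX remap */
            return PMD_SECT_TEX_1;           /* TEXCB=00100: uncached normal */
        }
        return PMD_SECT_BUFFERABLE;          /* pre-v6: uncached/buffered */
    }

    int main(void)
    {
        printf("v5:             %#x\n", wc_section_bits(ARCH_V5, 0, 0));
        printf("v6, no remap:   %#x\n", wc_section_bits(ARCH_V6, 0, 0));
        printf("v7 + TEX remap: %#x\n", wc_section_bits(ARCH_V7, 0, CR_TRE));
        return 0;
    }
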
@@ -42,6 +42,10 @@
 #define CR_U (1 << 22) /* Unaligned access operation */
 #define CR_XP (1 << 23) /* Extended page tables */
 #define CR_VE (1 << 24) /* Vectored interrupts */
+#define CR_EE (1 << 25) /* Exception (Big) Endian */
+#define CR_TRE (1 << 28) /* TEX remap enable */
+#define CR_AFE (1 << 29) /* Access flag enable */
+#define CR_TE (1 << 30) /* Thumb exception enable */
 
 /*
  * This is used to ensure the compiler did actually allocate the register we
...
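The CR_* additions above are plain SCTLR bit masks, so they compose with control register helpers such as the adjust_cr() seen in the next hunk. As a hedged, host-side illustration (not kernel code), a raw control register value could be decoded against the new bits like this:

    /* Host-side decoder for a raw SCTLR value, using the bit positions added above. */
    #include <stdio.h>

    #define CR_EE  (1u << 25)  /* Exception (Big) Endian */
    #define CR_TRE (1u << 28)  /* TEX remap enable */
    #define CR_AFE (1u << 29)  /* Access flag enable */
    #define CR_TE  (1u << 30)  /* Thumb exception enable */

    static void decode_sctlr(unsigned int cr)
    {
        printf("EE=%u TRE=%u AFE=%u TE=%u\n",
               !!(cr & CR_EE), !!(cr & CR_TRE), !!(cr & CR_AFE), !!(cr & CR_TE));
    }

    int main(void)
    {
        decode_sctlr(CR_TRE);         /* only TEX remap enabled */
        decode_sctlr(CR_EE | CR_TE);  /* big-endian exceptions, Thumb exception entry */
        return 0;
    }
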
@@ -180,20 +180,20 @@ void adjust_cr(unsigned long mask, unsigned long set)
 #endif
 
 #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
-#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE
+#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
         [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
                 .prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
                              L_PTE_SHARED,
                 .prot_l1   = PMD_TYPE_TABLE,
-                .prot_sect = PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+                .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
                 .domain    = DOMAIN_IO,
         },
         [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
                 .prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
                 .prot_l1   = PMD_TYPE_TABLE,
-                .prot_sect = PROT_SECT_DEVICE | PMD_SECT_TEX(2),
+                .prot_sect = PROT_SECT_DEVICE,
                 .domain    = DOMAIN_IO,
         },
         [MT_DEVICE_CACHED] = {    /* ioremap_cached */
@@ -205,7 +205,7 @@ static struct mem_type mem_types[] = {
         [MT_DEVICE_WC] = {        /* ioremap_wc */
                 .prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
                 .prot_l1   = PMD_TYPE_TABLE,
-                .prot_sect = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
+                .prot_sect = PROT_SECT_DEVICE,
                 .domain    = DOMAIN_IO,
         },
         [MT_CACHECLEAN] = {
@@ -277,22 +277,23 @@ static void __init build_mem_type_table(void)
 #endif
 
         /*
-         * On non-Xscale3 ARMv5-and-older systems, use CB=01
-         * (Uncached/Buffered) for ioremap_wc() mappings. On XScale3
-         * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
-         * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+         * Strip out features not present on earlier architectures.
+         * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those
+         * without extended page tables don't have the 'Shared' bit.
          */
-        if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
-                mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
-                mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
-        }
+        if (cpu_arch < CPU_ARCH_ARMv5)
+                for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+                        mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
+        if (cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP))
+                for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+                        mem_types[i].prot_sect &= ~PMD_SECT_S;
 
         /*
-         * ARMv5 and lower, bit 4 must be set for page tables.
-         * (was: cache "update-able on write" bit on ARM610)
-         * However, Xscale cores require this bit to be cleared.
+         * ARMv5 and lower, bit 4 must be set for page tables (was: cache
+         * "update-able on write" bit on ARM610). However, Xscale and
+         * Xscale3 require this bit to be cleared.
          */
-        if (cpu_is_xscale()) {
+        if (cpu_is_xscale() || cpu_is_xsc3()) {
                 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                         mem_types[i].prot_sect &= ~PMD_BIT4;
                         mem_types[i].prot_l1 &= ~PMD_BIT4;
@@ -306,6 +307,54 @@ static void __init build_mem_type_table(void)
                 }
         }
 
+        /*
+         * Mark the device areas according to the CPU/architecture.
+         */
+        if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
+                if (!cpu_is_xsc3()) {
+                        /*
+                         * Mark device regions on ARMv6+ as execute-never
+                         * to prevent speculative instruction fetches.
+                         */
+                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
+                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
+                        mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
+                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+                }
+                if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+                        /*
+                         * For ARMv7 with TEX remapping,
+                         * - shared device is SXCB=1100
+                         * - nonshared device is SXCB=0100
+                         * - write combine device mem is SXCB=0001
+                         * (Uncached Normal memory)
+                         */
+                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
+                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
+                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+                } else {
+                        /*
+                         * For Xscale3, ARMv6 and ARMv7 without TEX remapping,
+                         * - shared device is TEXCB=00001
+                         * - nonshared device is TEXCB=01000
+                         * - write combine device mem is TEXCB=00100
+                         * (Inner/Outer Uncacheable in xsc3 parlance, Uncached
+                         * Normal in ARMv6 parlance).
+                         */
+                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
+                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+                }
+        } else {
+                /*
+                 * On others, write combining is "Uncached/Buffered"
+                 */
+                mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+        }
+
+        /*
+         * Now deal with the memory-type mappings
+         */
         cp = &cache_policies[cachepolicy];
         vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
@@ -321,12 +370,8 @@ static void __init build_mem_type_table(void)
          * Enable CPU-specific coherency if supported.
          * (Only available on XSC3 at the moment.)
          */
-        if (arch_is_coherent()) {
-                if (cpu_is_xsc3()) {
-                        mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-                        mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-                }
-        }
+        if (arch_is_coherent() && cpu_is_xsc3())
+                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 
         /*
          * ARMv6 and above have extended page tables.
@@ -340,11 +385,6 @@ static void __init build_mem_type_table(void)
                 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-                /*
-                 * Mark the device area as "shared device"
-                 */
-                mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
-
 #ifdef CONFIG_SMP
                 /*
                  * Mark memory with the "shared" attribute for SMP systems
@@ -364,9 +404,6 @@ static void __init build_mem_type_table(void)
         mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
         mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
-        if (cpu_arch < CPU_ARCH_ARMv5)
-                mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-
         pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
         pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
                                  L_PTE_DIRTY | L_PTE_WRITE |
@@ -391,6 +428,22 @@ static void __init build_mem_type_table(void)
         for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                 struct mem_type *t = &mem_types[i];
+                const char *s;
 
+#define T(n) if (i == (n)) s = #n;
+                s = "???";
+                T(MT_DEVICE);
+                T(MT_DEVICE_NONSHARED);
+                T(MT_DEVICE_CACHED);
+                T(MT_DEVICE_WC);
+                T(MT_CACHECLEAN);
+                T(MT_MINICLEAN);
+                T(MT_LOW_VECTORS);
+                T(MT_HIGH_VECTORS);
+                T(MT_MEMORY);
+                T(MT_ROM);
+                printk(KERN_INFO "%-19s: DOM=%#3x S=%#010x L1=%#010x P=%#010x\n",
+                       s, t->domain, t->prot_sect, t->prot_l1, t->prot_pte);
+
                 if (t->prot_l1)
                         t->prot_l1 |= PMD_DOMAIN(t->domain);
                 if (t->prot_sect)
...
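The temporary debug dump added to build_mem_type_table() above names each mem_types index with a small stringifying macro. A standalone sketch of that pattern (hypothetical enum values mirroring a few of the kernel's names, not its actual table) is:

    /* Stringify-an-index debug pattern, mirroring the T() macro in the hunk above. */
    #include <stdio.h>

    enum mem_type_idx { MT_DEVICE, MT_DEVICE_WC, MT_MEMORY, MT_ROM, NR_TYPES };

    static const char *mem_type_name(int i)
    {
        const char *s = "???";
    #define T(n) if (i == (n)) s = #n;
        T(MT_DEVICE);
        T(MT_DEVICE_WC);
        T(MT_MEMORY);
        T(MT_ROM);
    #undef T
        return s;
    }

    int main(void)
    {
        for (int i = 0; i < NR_TYPES; i++)
            printf("%-12s = %d\n", mem_type_name(i), i);
        return 0;
    }
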
@@ -192,11 +192,11 @@ __v7_setup:
         mov     pc, lr                  @ return to head.S:__ret
 ENDPROC(__v7_setup)
 
-        /*
-         *          V X F   I D LR
-         * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
-         * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx  < forced
-         *           0 110       0011 1.00 .111 1101  < we want
+        /*   AT
+         *  TFR   EV X F   I D LR
+         * .EEE ..EE PUI. .T.T 4RVI ZFRS BLDP WCAM
+         * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx  < forced
+         *    1    0 110       0011 1.00 .111 1101  < we want
          */
         .type   v7_crval, #object
 v7_crval:
...