Commit 9ef79635 authored by Russell King, committed by Russell King

[ARM] mm 8: define mem_types table L1 bit 4 to be for ARMv6

Change the memory types table to define L1 descriptor bit 4 in terms of
the ARMv6 definition: execute never.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 0058ca32
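
The crux of the change: in an L1 section descriptor, the ARMv6 XN ("execute never") bit occupies bit 4, the same position that ARMv5 and earlier require to be set (historically the ARM610 cache "update-able on write" bit) and that Xscale requires to be clear. Below is a minimal standalone sketch of that overlap; the macro values mirror the kernel's pgtable-hwdef.h, but the program itself is illustrative, not kernel code.

#include <stdio.h>

#define PMD_TYPE_SECT	(2 << 0)
#define PMD_BIT4	(1 << 4)	/* pre-v6: must be set; Xscale: must be clear */
#define PMD_SECT_XN	(1 << 4)	/* ARMv6+: execute never */

int main(void)
{
	unsigned int sect = PMD_TYPE_SECT | PMD_SECT_XN;

	/* One bit pattern, two meanings: XN on ARMv6, plain
	 * "bit 4 set" on ARMv5 and earlier. */
	printf("section descriptor %#x: bit 4 is %s\n",
	       sect, (sect & PMD_BIT4) ? "set" : "clear");
	return 0;
}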
@@ -181,16 +181,16 @@ static struct mem_type mem_types[] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 			     L_PTE_WRITE,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_UNCACHED |
 			     PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MINICLEAN] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_LOW_VECTORS] = {
@@ -206,25 +206,25 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_ROM] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
+		.prot_sect = PMD_TYPE_SECT,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 			     L_PTE_WRITE,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_UNCACHED |
 			     PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
 			     PMD_SECT_TEX(1),
 		.domain    = DOMAIN_IO,
 	},
 	[MT_NONSHARED_DEVICE] = {
 		.prot_l1   = PMD_TYPE_TABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_NONSHARED_DEV |
 			     PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_IO,
 	}
@@ -260,20 +260,23 @@ static void __init build_mem_type_table(void)
 	}
 
 	/*
-	 * Xscale must not have PMD bit 4 set for section mappings.
+	 * ARMv5 and lower, bit 4 must be set for page tables.
+	 * (was: cache "update-able on write" bit on ARM610)
+	 * However, Xscale cores require this bit to be cleared.
 	 */
-	if (cpu_is_xscale())
-		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+	if (cpu_is_xscale()) {
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			mem_types[i].prot_sect &= ~PMD_BIT4;
-
-	/*
-	 * ARMv5 and lower, excluding Xscale, bit 4 must be set for
-	 * page tables.
-	 */
-	if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
-		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_l1 &= ~PMD_BIT4;
+		}
+	} else if (cpu_arch < CPU_ARCH_ARMv6) {
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			if (mem_types[i].prot_l1)
 				mem_types[i].prot_l1 |= PMD_BIT4;
+			if (mem_types[i].prot_sect)
+				mem_types[i].prot_sect |= PMD_BIT4;
+		}
+	}
 
 	cp = &cache_policies[cachepolicy];
 	kern_pgprot = user_pgprot = cp->pte;
@@ -293,13 +296,6 @@ static void __init build_mem_type_table(void)
 	 * ARMv6 and above have extended page tables.
 	 */
 	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
-		/*
-		 * bit 4 becomes XN which we must clear for the
-		 * kernel memory mapping.
-		 */
-		mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
-		mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;
-
 		/*
 		 * Mark cache clean areas and XIP ROM read only
 		 * from SVC mode and no access from userspace.
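
To see what the reworked fixup does at boot, here is a self-contained toy model of build_mem_type_table()'s bit-4 pass. The two-entry table, the fixup() helper, and its flag arguments are hypothetical stand-ins for the kernel's structures; only the bit manipulation mirrors the patch.

#include <stdio.h>

#define PMD_TYPE_TABLE	(1 << 0)
#define PMD_TYPE_SECT	(2 << 0)
#define PMD_BIT4	(1 << 4)	/* same bit position as PMD_SECT_XN */
#define PMD_SECT_XN	(1 << 4)

struct mem_type { unsigned int prot_l1, prot_sect; };

/* Toy table: the device entry carries XN, kernel memory does not. */
static struct mem_type mem_types[] = {
	{ PMD_TYPE_TABLE, PMD_TYPE_SECT | PMD_SECT_XN },	/* device */
	{ PMD_TYPE_TABLE, PMD_TYPE_SECT },			/* memory */
};

static void fixup(int is_xscale, int pre_v6)
{
	unsigned int i;

	if (is_xscale) {
		/* Xscale: bit 4 must be clear at both descriptor levels. */
		for (i = 0; i < 2; i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (pre_v6) {
		/* ARMv5 and lower: bit 4 must be set, so the XN
		 * annotation is simply overwritten by the mandatory bit. */
		for (i = 0; i < 2; i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}
	/* ARMv6+ with CR_XP set: the table is used as-is and bit 4
	 * genuinely means execute never. */
}

int main(void)
{
	fixup(0, 1);	/* pretend we booted on an ARMv5 core */
	printf("device %#x, memory %#x\n",
	       mem_types[0].prot_sect, mem_types[1].prot_sect);
	return 0;
}

On the ARMv5 path both entries come out with bit 4 set; on an ARMv6 CPU the table would be left untouched, so only the device entry keeps bit 4, where it now really does mean execute never.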