Commit 24e6c699 authored by Russell King, committed by Russell King

[ARM] mm 4: make create_mapping() more conventional

Rather than using three separate loops to set up mappings (page
mappings up to a section boundary, then section mappings, and the
remainder by page mappings), convert this to the more conventional
Linux style of a loop over each page table level.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 4a56c1e4
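The "loop over each page table level" mentioned above is the standard Linux pgd_addr_end() walk: advance one top-level entry at a time, clamping each step to the end of the range, and hand each chunk to the next level down. A minimal user-space sketch of the idiom (PGDIR_SHIFT = 21 as on classic ARM, where a Linux pgd spans two 1 MiB sections; pgd_addr_end() is re-implemented here for illustration, not taken from the kernel):

#include <stdio.h>

#define PGDIR_SHIFT 21UL                     /* 2 MiB per Linux pgd on classic ARM */
#define PGDIR_SIZE  (1UL << PGDIR_SHIFT)
#define PGDIR_MASK  (~(PGDIR_SIZE - 1))

/* Next pgd boundary, clamped to 'end' -- mirrors the kernel's pgd_addr_end(). */
static unsigned long pgd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long next = (addr + PGDIR_SIZE) & PGDIR_MASK;
	return (next - 1 < end - 1) ? next : end;
}

int main(void)
{
	unsigned long addr = 0xc0100000UL, end = 0xc0500000UL;

	/* One iteration per top-level entry, regardless of alignment. */
	do {
		unsigned long next = pgd_addr_end(addr, end);
		printf("map [%#lx, %#lx)\n", addr, next);
		addr = next;
	} while (addr != end);
	return 0;
}

For the example range this prints a short first chunk up to the 2 MiB boundary, full 2 MiB chunks in the middle, and a short tail, which is exactly the shape the patch exploits: each chunk either qualifies for section mappings or falls through to page mappings.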
@@ -380,45 +380,55 @@ static void __init build_mem_type_table(void)
 
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
-/*
- * Create a SECTION PGD between VIRT and PHYS in domain
- * DOMAIN with protection PROT.  This operates on half-
- * pgdir entry increments.
- */
-static inline void
-alloc_init_section(unsigned long virt, unsigned long phys, int prot)
+static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+				  unsigned long end, unsigned long pfn,
+				  const struct mem_type *type)
 {
-	pmd_t *pmdp = pmd_off_k(virt);
+	pte_t *pte;
 
-	if (virt & (1 << 20))
-		pmdp++;
+	if (pmd_none(*pmd)) {
+		pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
+		__pmd_populate(pmd, __pa(pte) | type->prot_l1);
+	}
 
-	*pmdp = __pmd(phys | prot);
-	flush_pmd_entry(pmdp);
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
+		pfn++;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-/*
- * Add a PAGE mapping between VIRT and PHYS in domain
- * DOMAIN with protection PROT.  Note that due to the
- * way we map the PTEs, we must allocate two PTE_SIZE'd
- * blocks - one for the Linux pte table, and one for
- * the hardware pte table.
- */
-static inline void
-alloc_init_page(unsigned long virt, unsigned long phys, const struct mem_type *type)
+static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
+				      unsigned long end, unsigned long phys,
+				      const struct mem_type *type)
 {
-	pmd_t *pmdp = pmd_off_k(virt);
-	pte_t *ptep;
+	pmd_t *pmd = pmd_offset(pgd, addr);
 
-	if (pmd_none(*pmdp)) {
-		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
-					       sizeof(pte_t));
+	/*
+	 * Try a section mapping - end, addr and phys must all be aligned
+	 * to a section boundary.  Note that PMDs refer to the individual
+	 * L1 entries, whereas PGDs refer to a group of L1 entries making
+	 * up one logical pointer to an L2 table.
+	 */
+	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+		pmd_t *p = pmd;
 
-		__pmd_populate(pmdp, __pa(ptep) | type->prot_l1);
-	}
-	ptep = pte_offset_kernel(pmdp, virt);
+		if (addr & SECTION_SIZE)
+			pmd++;
 
-	set_pte_ext(ptep, pfn_pte(phys >> PAGE_SHIFT, __pgprot(type->prot_pte)), 0);
+		do {
+			*pmd = __pmd(phys | type->prot_sect);
+			phys += SECTION_SIZE;
+		} while (pmd++, addr += SECTION_SIZE, addr != end);
+
+		flush_pmd_entry(p);
+	} else {
+		/*
+		 * No need to loop; pte's aren't interested in the
+		 * individual L1 entries.
+		 */
+		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
+	}
 }
 
 static void __init create_36bit_mapping(struct map_desc *md,
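The comment in the new alloc_init_section() captures the key ARM quirk: the hardware L1 table uses 1 MiB section entries, but Linux folds pairs of them into one 2 MiB pgd/pmd, which is why the code advances pmd when addr has the SECTION_SIZE bit set. A toy sketch of that index arithmetic (illustrative user-space code, not from the kernel):

#include <stdio.h>

#define SECTION_SHIFT 20UL                 /* 1 MiB hardware L1 sections */
#define SECTION_SIZE  (1UL << SECTION_SHIFT)
#define PGDIR_SHIFT   21UL                 /* a Linux pgd covers two sections */

int main(void)
{
	unsigned long addr = 0xc0100000UL;  /* second 1 MiB within its 2 MiB pgd */

	unsigned long pgd_idx = addr >> PGDIR_SHIFT;  /* which Linux pgd */
	unsigned long l1_idx  = pgd_idx * 2;          /* first hw L1 entry of the pair */

	/* Equivalent of "if (addr & SECTION_SIZE) pmd++" in the patch. */
	if (addr & SECTION_SIZE)
		l1_idx++;

	printf("addr %#lx -> pgd %lu, hw L1 entry %lu\n", addr, pgd_idx, l1_idx);
	return 0;
}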
@@ -488,9 +498,9 @@ static void __init create_36bit_mapping(struct map_desc *md,
  */
 void __init create_mapping(struct map_desc *md)
 {
-	unsigned long virt, length;
-	unsigned long off = (u32)__pfn_to_phys(md->pfn);
+	unsigned long phys, addr, length, end;
 	const struct mem_type *type;
+	pgd_t *pgd;
 
 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
 		printk(KERN_WARNING "BUG: not creating mapping for "
@@ -516,41 +526,27 @@ void __init create_mapping(struct map_desc *md)
 		return;
 	}
 
-	virt   = md->virtual;
-	off   -= virt;
-	length = md->length;
+	addr = md->virtual;
+	phys = (unsigned long)__pfn_to_phys(md->pfn);
+	length = PAGE_ALIGN(md->length);
 
-	if (type->prot_l1 == 0 &&
-	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
+	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
 		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
 		       "be mapped using pages, ignoring.\n",
-		       __pfn_to_phys(md->pfn), md->virtual);
+		       __pfn_to_phys(md->pfn), addr);
 		return;
 	}
 
-	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
-		alloc_init_page(virt, virt + off, type);
-
-		virt   += PAGE_SIZE;
-		length -= PAGE_SIZE;
-	}
-
-	/*
-	 * A section mapping covers half a "pgdir" entry.
-	 */
-	while (length >= (PGDIR_SIZE / 2)) {
-		alloc_init_section(virt, virt + off, type->prot_sect);
-
-		virt   += (PGDIR_SIZE / 2);
-		length -= (PGDIR_SIZE / 2);
-	}
-
-	while (length >= PAGE_SIZE) {
-		alloc_init_page(virt, virt + off, type);
-
-		virt   += PAGE_SIZE;
-		length -= PAGE_SIZE;
-	}
+	pgd = pgd_offset_k(addr);
+	end = addr + length;
+	do {
+		unsigned long next = pgd_addr_end(addr, end);
+
+		alloc_init_section(pgd, addr, next, phys, type);
+
+		phys += next - addr;
+		addr = next;
+	} while (pgd++, addr != end);
 }
 
 /*
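One detail worth noting in the create_mapping() hunk: the three separate "& 0xfffff" alignment tests collapse into a single OR-and-mask, since OR-ing addr, phys and length together leaves a non-zero low bit iff at least one of them is misaligned. A standalone sketch of the check (assuming ARM's 1 MiB sections, SECTION_SHIFT = 20):

#include <assert.h>

#define SECTION_SHIFT 20UL                   /* 1 MiB sections on classic ARM */
#define SECTION_SIZE  (1UL << SECTION_SHIFT)
#define SECTION_MASK  (~(SECTION_SIZE - 1))

/*
 * OR the values together first: any misaligned low bit in any of them
 * survives the OR, so one mask test replaces three.
 */
static int all_section_aligned(unsigned long addr, unsigned long phys,
			       unsigned long length)
{
	return ((addr | phys | length) & ~SECTION_MASK) == 0;
}

int main(void)
{
	assert(all_section_aligned(0xc0000000UL, 0x80000000UL, 0x00200000UL));
	assert(!all_section_aligned(0xc0000000UL, 0x80001000UL, 0x00200000UL));
	return 0;
}

The same trick appears inside alloc_init_section() as ((addr | end | phys) & ~SECTION_MASK) == 0, deciding per 2 MiB chunk whether section mappings can be used or the chunk must fall through to alloc_init_pte().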