Commit 240d3a7c authored by Thomas Gleixner, committed by Ingo Molnar

x86: unify ioremap

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent e4c1b977
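For orientation, a minimal sketch of the caller-side pattern that the ioremap()/iounmap() paths touched below serve; the physical base, length and register offset are made-up placeholder values, not taken from this commit:

#include <linux/errno.h>
#include <linux/io.h>

#define MY_DEV_PHYS	0xfebf0000UL	/* hypothetical MMIO base */
#define MY_DEV_LEN	0x1000UL	/* hypothetical region size */

static void __iomem *my_regs;

static int my_dev_map(void)
{
	/* Map the MMIO range into kernel virtual space */
	my_regs = ioremap(MY_DEV_PHYS, MY_DEV_LEN);
	if (!my_regs)
		return -ENOMEM;

	/* Always go through the MMIO accessors, never plain pointer loads */
	writel(0x1, my_regs + 0x04);
	return 0;
}

static void my_dev_unmap(void)
{
	iounmap(my_regs);
	my_regs = NULL;
}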
@@ -19,6 +19,18 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_X86_64
unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
@@ -49,6 +61,7 @@ static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
	 * memmap entry.
	 */
	err = change_page_attr_addr(vaddr, npages, prot);

	if (!err)
		global_flush_tlb();
@@ -83,6 +96,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_X86_32
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
@@ -98,6 +112,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			if (!PageReserved(page))
				return NULL;
	}
#endif

	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
@@ -211,6 +226,7 @@ void iounmap(volatile void __iomem *addr)
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32
int __initdata early_ioremap_debug;
@@ -443,3 +459,5 @@ void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */
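As a side note on the __phys_addr() helper added above, a minimal standalone sketch of its two translation cases; PAGE_OFFSET and __START_KERNEL_map use the typical x86_64 values of this era, and phys_base is assumed to be 0 (non-relocated kernel), none of which is stated in the diff itself:

#include <stdio.h>

/* Assumed constants: common x86_64 layout of this era, not from the diff */
#define PAGE_OFFSET		0xffff810000000000UL	/* direct mapping of all RAM */
#define __START_KERNEL_map	0xffffffff80000000UL	/* kernel text mapping */

static unsigned long phys_base;		/* 0 unless the kernel was relocated */

static unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}

int main(void)
{
	/* kernel-text address: translated via __START_KERNEL_map + phys_base */
	printf("%#lx\n", __phys_addr(0xffffffff80200000UL));	/* 0x200000 */
	/* direct-map address: translated via PAGE_OFFSET */
	printf("%#lx\n", __phys_addr(0xffff810001234000UL));	/* 0x1234000 */
	return 0;
}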
@@ -6,6 +6,7 @@
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -18,6 +19,8 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_X86_64
unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
@@ -26,6 +29,8 @@ unsigned long __phys_addr(unsigned long x)
}
EXPORT_SYMBOL(__phys_addr);
#endif

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
@@ -33,28 +38,33 @@ EXPORT_SYMBOL(__phys_addr);
 static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 			       pgprot_t prot)
 {
-	int err = 0;
-	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
-		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-		unsigned long vaddr = (unsigned long) __va(phys_addr);
-		int level;
-
-		/*
-		 * If there is no identity map,
-		 * change_page_attr_addr is unnecessary
-		 */
-		if (!lookup_address(vaddr, &level))
-			return err;
-		/*
-		 * Must use a address here and not struct page because
-		 * the phys addr can be a in hole between nodes and
-		 * not have an memmap entry.
-		 */
-		err = change_page_attr_addr(vaddr, npages, prot);
-
-		if (!err)
-			global_flush_tlb();
-	}
+	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
+	int err, level;
+
+	/* No change for pages after the last mapping */
+	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
+		return 0;
+
+	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	vaddr = (unsigned long) __va(phys_addr);
+
+	/*
+	 * If there is no identity map for this address,
+	 * change_page_attr_addr is unnecessary
+	 */
+	if (!lookup_address(vaddr, &level))
+		return 0;
+
+	/*
+	 * Must use an address here and not struct page because the
+	 * phys addr can be a in hole between nodes and not have a
+	 * memmap entry.
+	 */
+	err = change_page_attr_addr(vaddr, npages, prot);
+
+	if (!err)
+		global_flush_tlb();
+
 	return err;
 }
@@ -86,7 +96,26 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_X86_32
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
#endif

	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
@@ -107,10 +136,12 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}

	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
@@ -154,12 +185,19 @@ void iounmap(volatile void __iomem *addr)
	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
			(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
@@ -188,3 +226,238 @@ void iounmap(volatile void __iomem *addr)
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));

	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (!*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */
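To make the fixmap arithmetic in early_ioremap()/early_iounmap() above easier to follow, a small standalone sketch of how the slot window is chosen per nesting level; NR_FIX_BTMAPS, FIX_BTMAPS_NESTING and FIX_BTMAP_BEGIN below are assumed demo values (the real ones live in the fixmap header), not taken from this diff:

#include <stdio.h>

/* Assumed demo values, not from this commit */
#define NR_FIX_BTMAPS		16
#define FIX_BTMAPS_NESTING	4
#define FIX_BTMAP_BEGIN		200

int main(void)
{
	unsigned int nesting;

	/* Each nested early_ioremap() gets its own window of NR_FIX_BTMAPS
	 * fixmap slots, filled toward decreasing indices, just like the
	 * idx0/idx loop above. */
	for (nesting = 0; nesting < FIX_BTMAPS_NESTING; nesting++) {
		unsigned int idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS * nesting;

		printf("nesting %u: first slot %u, last slot %u\n",
		       nesting, idx0, idx0 - (NR_FIX_BTMAPS - 1));
	}
	return 0;
}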