Commit 91eebf40 authored by Thomas Gleixner, committed by Ingo Molnar

x86: style cleanup of ioremap code

Fix the coding style before going further.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1aaf74e9
 /*
- * arch/i386/mm/ioremap.c
- *
  * Re-map IO memory to kernel address space so that we can access it.
  * This is needed for high PCI addresses that aren't mapped in the
  * 640k-1MB IO memory area on PC's
@@ -21,10 +19,6 @@
 #define ISA_START_ADDRESS      0xa0000
 #define ISA_END_ADDRESS        0x100000
 
-/*
- * Generic mapping function (not visible outside):
- */
-
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -34,10 +28,11 @@
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+                        unsigned long flags)
 {
-        void __iomem * addr;
-        struct vm_struct * area;
+        void __iomem *addr;
+        struct vm_struct *area;
         unsigned long offset, last_addr;
         pgprot_t prot;
@@ -61,9 +56,10 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
                 t_addr = __va(phys_addr);
                 t_end = t_addr + (size - 1);
 
-                for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-                        if(!PageReserved(page))
+                for (page = virt_to_page(t_addr);
+                     page <= virt_to_page(t_end); page++)
+                        if (!PageReserved(page))
                                 return NULL;
         }
@@ -85,7 +81,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
         area->phys_addr = phys_addr;
         addr = (void __iomem *) area->addr;
         if (ioremap_page_range((unsigned long) addr,
-                        (unsigned long) addr + size, phys_addr, prot)) {
+                               (unsigned long) addr + size, phys_addr, prot)) {
                 vunmap((void __force *) addr);
                 return NULL;
         }
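A note on the comment above __ioremap(): callers may pass a physical address that is not page aligned, and the function hides the rounding from them. The arithmetic sits in a part of the file that is collapsed out of these hunks; a minimal sketch of it, assuming the standard PAGE_MASK and PAGE_ALIGN macros, looks like this:

        /* Illustration only; not the literal body of __ioremap(). */
        unsigned long offset, last_addr;

        last_addr = phys_addr + size - 1;               /* inclusive end of the request */
        offset = phys_addr & ~PAGE_MASK;                /* caller's offset inside the first page */
        phys_addr &= PAGE_MASK;                         /* round the start down to a page boundary */
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;   /* round the length up to whole pages */
        /* ... map 'size' bytes at the aligned phys_addr, then return mapping base + offset ... */
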
@@ -102,31 +98,31 @@ EXPORT_SYMBOL(__ioremap);
  * make bus memory CPU accessible via the readb/readw/readl/writeb/
  * writew/writel functions and the other mmio helpers. The returned
  * address is not guaranteed to be usable directly as a virtual
  * address.
  *
  * This version of ioremap ensures that the memory is marked uncachable
  * on the CPU as well as honouring existing caching rules from things like
  * the PCI bus. Note that there are other caches and buffers on many
  * busses. In particular driver authors should read up on PCI writes
  *
  * It's useful if some control registers are in such an area and
  * write combining or read caching is not desirable:
  *
  * Must be freed with iounmap.
  */
-
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
         unsigned long last_addr;
         void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
-        if (!p)
-                return p;
+
+        if (!p)
+                return p;
 
         /* Guaranteed to be > phys_addr, as per __ioremap() */
         last_addr = phys_addr + size - 1;
 
         if (last_addr < virt_to_phys(high_memory) - 1) {
                 struct page *ppage = virt_to_page(__va(phys_addr));
                 unsigned long npages;
 
                 phys_addr &= PAGE_MASK;
@@ -135,18 +131,18 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
                 last_addr = PAGE_ALIGN(last_addr);
 
                 /* .. but that's ok, because modulo-2**n arithmetic will make
                  * the page-aligned "last - first" come out right.
                  */
                 npages = (last_addr - phys_addr) >> PAGE_SHIFT;
 
                 if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
                         iounmap(p);
                         p = NULL;
                 }
                 global_flush_tlb();
         }
 
         return p;
 }
 EXPORT_SYMBOL(ioremap_nocache);
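The block comment above spells out the ioremap_nocache() contract: the returned cookie is only meant for the readb/readw/readl/writeb/writew/writel helpers and must be released with iounmap(). A hedged usage sketch from a driver's point of view follows; the BAR address, length and register offset are illustrative placeholders, not values taken from this patch:

        #include <linux/io.h>

        /* Hypothetical driver snippet; addresses and offsets are made up. */
        static int example_toggle_ctrl_bit(void)
        {
                void __iomem *regs;
                u32 status;

                regs = ioremap_nocache(0xfebf0000UL, 0x1000);   /* map a 4 KiB MMIO BAR uncached */
                if (!regs)
                        return -ENOMEM;

                status = readl(regs + 0x04);    /* always go through the mmio helpers */
                writel(status | 0x1, regs + 0x04);

                iounmap(regs);                  /* every ioremap_nocache() needs an iounmap() */
                return 0;
        }

The _PAGE_PCD | _PAGE_PWT flags passed to __ioremap() in the function above are what mark the mapping uncached on the CPU side.
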
@@ -169,10 +165,11 @@ void iounmap(volatile void __iomem *addr)
          * of ISA space. So handle that here.
          */
         if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
             addr < phys_to_virt(ISA_END_ADDRESS))
                 return;
 
-        addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+        addr = (volatile void __iomem *)
+                        (PAGE_MASK & (unsigned long __force)addr);
 
         /* Use the vm area unlocked, assuming the caller
            ensures there isn't another iounmap for the same address
@@ -187,7 +184,7 @@ void iounmap(volatile void __iomem *addr)
         read_unlock(&vmlist_lock);
 
         if (!p) {
-                printk("iounmap: bad address %p\n", addr);
+                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                 dump_stack();
                 return;
         }
@@ -198,12 +195,12 @@ void iounmap(volatile void __iomem *addr)
                                  get_vm_area_size(p) >> PAGE_SHIFT,
                                  PAGE_KERNEL);
                 global_flush_tlb();
         }
 
         /* Finally remove it */
         o = remove_vm_area((void *)addr);
         BUG_ON(p != o || o == NULL);
         kfree(p);
 }
 EXPORT_SYMBOL(iounmap);
@@ -237,7 +234,7 @@ void __init early_ioremap_init(void)
         unsigned long *pgd;
 
         if (early_ioremap_debug)
-                printk("early_ioremap_init()\n");
+                printk(KERN_DEBUG "early_ioremap_init()\n");
 
         pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
         *pgd = __pa(bm_pte) | _PAGE_TABLE;
@@ -248,15 +245,16 @@ void __init early_ioremap_init(void)
          */
         if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
                 WARN_ON(1);
-                printk("pgd %p != %p\n",
+                printk(KERN_WARNING "pgd %p != %p\n",
                        pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
-                printk("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
-                printk("fix_to_virt(FIX_BTMAP_END): %08lx\n",
+                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));
-                printk("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
-                printk("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
+                printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
+                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
+                       FIX_BTMAP_BEGIN);
         }
 }
@@ -265,7 +263,7 @@ void __init early_ioremap_clear(void)
         unsigned long *pgd;
 
         if (early_ioremap_debug)
-                printk("early_ioremap_clear()\n");
+                printk(KERN_DEBUG "early_ioremap_clear()\n");
 
         pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
         *pgd = 0;
@@ -331,10 +329,10 @@ static int __init check_early_ioremap_leak(void)
                 return 0;
 
         printk(KERN_WARNING
                "Debug warning: early ioremap leak of %d areas detected.\n",
                early_ioremap_nested);
         printk(KERN_WARNING
                "please boot with early_ioremap_debug and report the dmesg.\n");
         WARN_ON(1);
 
         return 1;
@@ -351,8 +349,8 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
         nesting = early_ioremap_nested;
         if (early_ioremap_debug) {
-                printk("early_ioremap(%08lx, %08lx) [%d] => ",
-                       phys_addr, size, nesting);
+                printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
+                       phys_addr, size, nesting);
                 dump_stack();
         }
@@ -398,7 +396,7 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
         if (early_ioremap_debug)
                 printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
 
-        return (void*) (offset + fix_to_virt(idx0));
+        return (void *) (offset + fix_to_virt(idx0));
 }
 
 void __init early_iounmap(void *addr, unsigned long size)
@@ -413,7 +411,8 @@ void __init early_iounmap(void *addr, unsigned long size)
         WARN_ON(nesting < 0);
         if (early_ioremap_debug) {
-                printk("early_iounmap(%p, %08lx) [%d]\n", addr, size, nesting);
+                printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
+                       size, nesting);
                 dump_stack();
         }
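The early_ioremap()/early_iounmap() pair whose debug printks are cleaned up above serves boot code that needs a temporary mapping before the regular ioremap() machinery is available: mappings come from a small fixmap window, only a few may be nested, and each must be undone with early_iounmap() (the leak check earlier in this file warns when that does not happen). A rough sketch of the intended call pattern, with a placeholder address and length:

        /* Illustrative early-boot use; the physical address and length are placeholders. */
        void *map;

        map = early_ioremap(0x000f5a10UL, 64);  /* temporary mapping, e.g. of a firmware table */
        if (map) {
                /* ... read or copy the data while the temporary mapping exists ... */
                early_iounmap(map, 64);         /* release it with the same length */
        }
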
......
 /*
- * arch/x86_64/mm/ioremap.c
- *
  * Re-map IO memory to kernel address space so that we can access it.
  * This is needed for high PCI addresses that aren't mapped in the
  * 640k-1MB IO memory area on PC's
@@ -33,9 +31,8 @@ EXPORT_SYMBOL(__phys_addr);
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
  */
-static int
-ioremap_change_attr(unsigned long phys_addr, unsigned long size,
-                    unsigned long flags)
+static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
+                               unsigned long flags)
 {
         int err = 0;
         if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
@@ -50,20 +47,18 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
                 if (!lookup_address(vaddr, &level))
                         return err;
                 /*
-                 * Must use a address here and not struct page because the phys addr
-                 * can be a in hole between nodes and not have an memmap entry.
+                 * Must use a address here and not struct page because
+                 * the phys addr can be a in hole between nodes and
+                 * not have an memmap entry.
                  */
-                err = change_page_attr_addr(vaddr,npages,MAKE_GLOBAL(__PAGE_KERNEL|flags));
+                err = change_page_attr_addr(vaddr,npages,
+                                        MAKE_GLOBAL(__PAGE_KERNEL|flags));
                 if (!err)
                         global_flush_tlb();
         }
         return err;
 }
 
-/*
- * Generic mapping function
- */
-
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -73,10 +68,11 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+                        unsigned long flags)
 {
-        void * addr;
-        struct vm_struct * area;
+        void *addr;
+        struct vm_struct *area;
         unsigned long offset, last_addr;
         pgprot_t pgprot;
@@ -130,20 +126,19 @@ EXPORT_SYMBOL(__ioremap);
  * make bus memory CPU accessible via the readb/readw/readl/writeb/
  * writew/writel functions and the other mmio helpers. The returned
  * address is not guaranteed to be usable directly as a virtual
  * address.
  *
  * This version of ioremap ensures that the memory is marked uncachable
  * on the CPU as well as honouring existing caching rules from things like
  * the PCI bus. Note that there are other caches and buffers on many
  * busses. In particular driver authors should read up on PCI writes
  *
  * It's useful if some control registers are in such an area and
  * write combining or read caching is not desirable:
  *
  * Must be freed with iounmap.
  */
-
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
         return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
 }
@@ -159,13 +154,14 @@ void iounmap(volatile void __iomem *addr)
 {
         struct vm_struct *p, *o;
 
         if (addr <= high_memory)
                 return;
         if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
             addr < phys_to_virt(ISA_END_ADDRESS))
                 return;
 
-        addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+        addr = (volatile void __iomem *)
+                        (PAGE_MASK & (unsigned long __force)addr);
         /* Use the vm area unlocked, assuming the caller
            ensures there isn't another iounmap for the same address
            in parallel. Reuse of the virtual address is prevented by
@@ -179,7 +175,7 @@ void iounmap(volatile void __iomem *addr)
         read_unlock(&vmlist_lock);
         if (!p) {
-                printk("iounmap: bad address %p\n", addr);
+                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                 dump_stack();
                 return;
         }
@@ -191,7 +187,7 @@ void iounmap(volatile void __iomem *addr)
         /* Finally remove it */
         o = remove_vm_area((void *)addr);
         BUG_ON(p != o || o == NULL);
         kfree(p);
 }
 EXPORT_SYMBOL(iounmap);