Commit b3bdda02 authored by Christoph Lameter, committed by Linus Torvalds

vmalloc: add const to void* parameters

Make vmalloc functions work the same way as kfree() and friends that
take a const void * argument.

[akpm@linux-foundation.org: fix consts, coding-style]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 48667e7a
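To illustrate what the const-qualified prototypes buy callers, here is a minimal, hypothetical sketch (example_release() is not part of this commit): a function that only reads through a vmalloc()ed buffer can keep its pointer const and still hand it to vmalloc_to_page() and vfree() without a cast, just as kfree() already permits.

/* Hypothetical caller, for illustration only -- not part of the patch. */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void example_release(const void *buf)
{
	struct page *page;

	if (!buf)
		return;

	/* Look up the backing page; takes const void * after this patch. */
	page = vmalloc_to_page(buf);
	(void)page;	/* a read-only consumer would inspect the page here */

	/* Release the buffer; no (void *) cast needed, matching kfree(). */
	vfree(buf);
}

Before this change, such a caller would have had to cast away the const qualifier (or keep the pointer non-const) just to free the buffer, which is exactly the asymmetry with kfree() that the patch removes.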
@@ -232,8 +232,8 @@ static inline int get_page_unless_zero(struct page *page)
 }
 
 /* Support for virtually mapped pages */
-struct page *vmalloc_to_page(void *addr);
-unsigned long vmalloc_to_pfn(void *addr);
+struct page *vmalloc_to_page(const void *addr);
+unsigned long vmalloc_to_pfn(const void *addr);
 
 static inline struct page *compound_head(struct page *page)
 {
......
@@ -45,11 +45,11 @@ extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
 				pgprot_t prot);
-extern void vfree(void *addr);
+extern void vfree(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
-extern void vunmap(void *addr);
+extern void vunmap(const void *addr);
 
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 							unsigned long pgoff);
@@ -71,7 +71,7 @@ extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 extern struct vm_struct *get_vm_area_node(unsigned long size,
 					unsigned long flags, int node,
 					gfp_t gfp_mask);
-extern struct vm_struct *remove_vm_area(void *addr);
+extern struct vm_struct *remove_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
......
@@ -167,7 +167,7 @@ EXPORT_SYMBOL(get_user_pages);
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
-void vfree(void *addr)
+void vfree(const void *addr)
 {
 	kfree(addr);
 }
@@ -183,13 +183,13 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 }
 EXPORT_SYMBOL(__vmalloc);
 
-struct page * vmalloc_to_page(void *addr)
+struct page *vmalloc_to_page(const void *addr)
 {
 	return virt_to_page(addr);
 }
 EXPORT_SYMBOL(vmalloc_to_page);
 
-unsigned long vmalloc_to_pfn(void *addr)
+unsigned long vmalloc_to_pfn(const void *addr)
 {
 	return page_to_pfn(virt_to_page(addr));
 }
@@ -267,7 +267,7 @@ void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 }
 EXPORT_SYMBOL(vmap);
 
-void vunmap(void *addr)
+void vunmap(const void *addr)
 {
 	BUG();
 }
......
@@ -169,7 +169,7 @@ EXPORT_SYMBOL_GPL(map_vm_area);
 /*
  * Map a vmalloc()-space virtual address to the physical page.
  */
-struct page *vmalloc_to_page(void *vmalloc_addr)
+struct page *vmalloc_to_page(const void *vmalloc_addr)
 {
 	unsigned long addr = (unsigned long) vmalloc_addr;
 	struct page *page = NULL;
@@ -198,7 +198,7 @@ EXPORT_SYMBOL(vmalloc_to_page);
 /*
  * Map a vmalloc()-space virtual address to the physical page frame number.
  */
-unsigned long vmalloc_to_pfn(void *vmalloc_addr)
+unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 {
 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
 }
@@ -306,7 +306,7 @@ struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
 }
 
 /* Caller must hold vmlist_lock */
-static struct vm_struct *__find_vm_area(void *addr)
+static struct vm_struct *__find_vm_area(const void *addr)
 {
 	struct vm_struct *tmp;
 
@@ -319,7 +319,7 @@ static struct vm_struct *__find_vm_area(void *addr)
 }
 
 /* Caller must hold vmlist_lock */
-static struct vm_struct *__remove_vm_area(void *addr)
+static struct vm_struct *__remove_vm_area(const void *addr)
 {
 	struct vm_struct **p, *tmp;
 
@@ -348,7 +348,7 @@ found:
  * This function returns the found VM area, but using it is NOT safe
  * on SMP machines, except for its size or flags.
  */
-struct vm_struct *remove_vm_area(void *addr)
+struct vm_struct *remove_vm_area(const void *addr)
 {
 	struct vm_struct *v;
 	write_lock(&vmlist_lock);
@@ -357,7 +357,7 @@ struct vm_struct *remove_vm_area(void *addr)
 	return v;
 }
 
-static void __vunmap(void *addr, int deallocate_pages)
+static void __vunmap(const void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
 
@@ -408,7 +408,7 @@ static void __vunmap(void *addr, int deallocate_pages)
  *
  * Must not be called in interrupt context.
  */
-void vfree(void *addr)
+void vfree(const void *addr)
 {
 	BUG_ON(in_interrupt());
 	__vunmap(addr, 1);
@@ -424,7 +424,7 @@ EXPORT_SYMBOL(vfree);
  *
  * Must not be called in interrupt context.
  */
-void vunmap(void *addr)
+void vunmap(const void *addr)
 {
 	BUG_ON(in_interrupt());
 	__vunmap(addr, 0);
......