Commit 08ef4729 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] get_user_pages: kill get_page_map

Since its birth, get_user_pages has been calling a misguided get_page_map
function.  follow_page has already returned NULL if the pfn is invalid; we
cannot reach an invalid pfn from a validated struct page.

Remove get_page_map, and the messy rewind in get_user_pages to cope with
its failure.  Oh, and could we please call that "struct page *page" like
everywhere else, instead of "struct page *map"?
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 7c2f3fda
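
For context on the commit message's claim: by this point in 2.6 development, follow_page (via __follow_page) only ever handed back a struct page whose pfn had already passed pfn_valid, and returned NULL otherwise. A minimal sketch of that tail logic, abridged (the real function also handles pte flags, dirtying, and locking), illustrating why get_page_map's repeated check could never fail:

	/*
	 * Sketch (not the verbatim source) of the tail of the era's
	 * __follow_page(): a struct page is returned only after its pfn
	 * passes pfn_valid(), and NULL otherwise -- so get_page_map()'s
	 * repeat of the same check was dead code.
	 */
	static struct page *follow_page_tail_sketch(pte_t pte)
	{
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			return pfn_to_page(pfn);	/* validated page */
		return NULL;		/* invalid pfn: caller sees NULL */
	}
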
@@ -840,23 +840,8 @@ check_user_page_readable(struct mm_struct *mm, unsigned long address)
 {
 	return __follow_page(mm, address, /*read*/1, /*write*/0) != NULL;
 }
 EXPORT_SYMBOL(check_user_page_readable);
 
-/*
- * Given a physical address, is there a useful struct page pointing to
- * it? This may become more complex in the future if we start dealing
- * with IO-aperture pages for direct-IO.
- */
-static inline struct page *get_page_map(struct page *page)
-{
-	if (!pfn_valid(page_to_pfn(page)))
-		return NULL;
-	return page;
-}
-
 static inline int
 untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
 			 unsigned long address)
@@ -887,7 +872,6 @@ untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
 	return 0;
 }
 
-
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, int len, int write, int force,
 		struct page **pages, struct vm_area_struct **vmas)
@@ -951,21 +935,21 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		}
 		spin_lock(&mm->page_table_lock);
 		do {
-			struct page *map;
+			struct page *page;
 			int lookup_write = write;
 
 			cond_resched_lock(&mm->page_table_lock);
-			while (!(map = follow_page(mm, start, lookup_write))) {
+			while (!(page = follow_page(mm, start, lookup_write))) {
 				/*
 				 * Shortcut for anonymous pages. We don't want
 				 * to force the creation of pages tables for
-				 * insanly big anonymously mapped areas that
+				 * insanely big anonymously mapped areas that
 				 * nobody touched so far. This is important
 				 * for doing a core dump for these mappings.
 				 */
 				if (!lookup_write &&
 				    untouched_anonymous_page(mm,vma,start)) {
-					map = ZERO_PAGE(start);
+					page = ZERO_PAGE(start);
 					break;
 				}
 				spin_unlock(&mm->page_table_lock);
@@ -994,30 +978,21 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			spin_lock(&mm->page_table_lock);
 		}
 		if (pages) {
-			pages[i] = get_page_map(map);
-			if (!pages[i]) {
-				spin_unlock(&mm->page_table_lock);
-				while (i--)
-					page_cache_release(pages[i]);
-				i = -EFAULT;
-				goto out;
-			}
-			flush_dcache_page(pages[i]);
-			if (!PageReserved(pages[i]))
-				page_cache_get(pages[i]);
+			pages[i] = page;
+			flush_dcache_page(page);
+			if (!PageReserved(page))
+				page_cache_get(page);
 		}
 		if (vmas)
 			vmas[i] = vma;
 		i++;
 		start += PAGE_SIZE;
 		len--;
-		} while(len && start < vma->vm_end);
+		} while (len && start < vma->vm_end);
 		spin_unlock(&mm->page_table_lock);
-	} while(len);
-out:
+	} while (len);
 	return i;
 }
 EXPORT_SYMBOL(get_user_pages);
 
 static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
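
For callers, nothing changes except that the -EFAULT rewind path is gone from the pin loop itself. A hypothetical caller of this 2005-era, eight-argument interface might look like the sketch below; the helper name, buffer, and parameter choices are illustrative and not from the commit:

	/*
	 * Hypothetical example of pinning a user buffer with the era's
	 * get_user_pages(); 'nr_pages' counts pages, not bytes, and
	 * mmap_sem must be held across the call.
	 */
	static int pin_user_buffer_sketch(unsigned long uaddr, int nr_pages,
					  struct page **pages)
	{
		int got;

		down_read(&current->mm->mmap_sem);
		got = get_user_pages(current, current->mm,
				     uaddr & PAGE_MASK,	/* page-aligned start */
				     nr_pages,		/* pages, not bytes */
				     1,			/* write access wanted */
				     0,			/* no force */
				     pages, NULL);
		up_read(&current->mm->mmap_sem);
		return got;	/* pages pinned, or -errno */
	}

Each successfully pinned page would later be dropped with page_cache_release(), the era's put_page equivalent seen in the removed rewind code.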