Commit 6bda666a authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] hugepages: fold find_or_alloc_huge_page() into hugetlb_no_page()

The number of parameters for find_or_alloc_huge_page() increases significantly
after policy support is added to huge pages.  Simplify the code by folding
find_or_alloc_huge_page() into hugetlb_no_page().

Adam Litke objected to this piece in an earlier patch, but I think this is a
good simplification.  The diffstat shows that we can get rid of almost half of
the lines of find_or_alloc_huge_page().  If we can find no consensus then let's
simply drop this patch.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@muc.de>
Acked-by: William Lee Irwin III <wli@holomorphy.com>
Cc: Adam Litke <agl@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 21abb147
@@ -368,43 +368,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	flush_tlb_range(vma, start, end);
 }
 
-static struct page *find_or_alloc_huge_page(struct vm_area_struct *vma,
-			unsigned long addr, struct address_space *mapping,
-			unsigned long idx, int shared)
-{
-	struct page *page;
-	int err;
-
-retry:
-	page = find_lock_page(mapping, idx);
-	if (page)
-		goto out;
-
-	if (hugetlb_get_quota(mapping))
-		goto out;
-	page = alloc_huge_page(vma, addr);
-	if (!page) {
-		hugetlb_put_quota(mapping);
-		goto out;
-	}
-
-	if (shared) {
-		err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
-		if (err) {
-			put_page(page);
-			hugetlb_put_quota(mapping);
-			if (err == -EEXIST)
-				goto retry;
-			page = NULL;
-		}
-	} else {
-		/* Caller expects a locked page */
-		lock_page(page);
-	}
-out:
-	return page;
-}
-
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, pte_t *ptep, pte_t pte)
 {
@@ -471,12 +434,31 @@ int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * Use page lock to guard against racing truncation
 	 * before we get page_table_lock.
 	 */
-	page = find_or_alloc_huge_page(vma, address, mapping, idx,
-			vma->vm_flags & VM_SHARED);
-	if (!page)
-		goto out;
-
-	BUG_ON(!PageLocked(page));
+retry:
+	page = find_lock_page(mapping, idx);
+	if (!page) {
+		if (hugetlb_get_quota(mapping))
+			goto out;
+		page = alloc_huge_page(vma, address);
+		if (!page) {
+			hugetlb_put_quota(mapping);
+			goto out;
+		}
+
+		if (vma->vm_flags & VM_SHARED) {
+			int err;
+
+			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+			if (err) {
+				put_page(page);
+				hugetlb_put_quota(mapping);
+				if (err == -EEXIST)
+					goto retry;
+				goto out;
+			}
+		} else
+			lock_page(page);
+	}
 
 	spin_lock(&mm->page_table_lock);
 	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
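
For reference, here is roughly how the folded lookup/allocation path of
hugetlb_no_page() reads after this patch, with comments added to spell out the
locking and race handling.  This is a sketch assembled from the hunk above, not
the complete function; the out label and the surrounding fault-handling code
belong to the existing function body.

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		/* Not in the page cache: charge the quota, then allocate. */
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			goto out;
		}

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				/*
				 * -EEXIST means another task raced us into the
				 * page cache; retry the lookup so we find and
				 * lock that task's page instead.
				 */
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			/* Private mapping: lock the freshly allocated page. */
			lock_page(page);
	}

Every exit from this block leaves the page locked: find_lock_page() returns a
locked page, add_to_page_cache() inserts the page in the locked state, and the
private path takes the lock explicitly.  That invariant, previously documented
by the "Caller expects a locked page" comment, is presumably why the old
BUG_ON(!PageLocked(page)) check could be dropped along with the helper.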