Commit 021740dc authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] freepgt: hugetlb area is clean

Once we're strict about clearing away page tables, hugetlb_prefault can assume
there are no page tables left within its range.  Since the other arches
continue if !pte_none here, let i386 do the same.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 146425a3
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -249,15 +249,8 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 			goto out;
 		}
 
-		if (!pte_none(*pte)) {
-			pmd_t *pmd = (pmd_t *) pte;
-
-			page = pmd_page(*pmd);
-			pmd_clear(pmd);
-			mm->nr_ptes--;
-			dec_page_state(nr_page_table_pages);
-			page_cache_release(page);
-		}
+		if (!pte_none(*pte))
+			continue;
 
 		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
 			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
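With the cleanup gone, the i386 loop follows the same pattern as the other architectures. For orientation, a minimal sketch of the loop's shape after this patch; identifiers match the hunk above, but the surrounding lines are paraphrased from 2.6-era sources rather than quoted verbatim:

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		pte_t *pte = huge_pte_alloc(mm, addr);

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}

		/* With page tables strictly freed, a populated entry here
		 * can only be a huge pte instantiated by a racing thread,
		 * never a leftover page table: skip it, as other arches do. */
		if (!pte_none(*pte))
			continue;

		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		/* ... find or allocate the huge page and set the pte ... */
	}
out:
	spin_unlock(&mm->page_table_lock);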
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -203,8 +203,6 @@ static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
 	unsigned long start = seg << SID_SHIFT;
 	unsigned long end = (seg+1) << SID_SHIFT;
 	struct vm_area_struct *vma;
-	unsigned long addr;
-	struct mmu_gather *tlb;
 
 	BUG_ON(seg >= 16);
@@ -213,41 +211,6 @@ static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
 	if (vma && (vma->vm_start < end))
 		return -EBUSY;
 
-	/* Clean up any leftover PTE pages in the region */
-	spin_lock(&mm->page_table_lock);
-	tlb = tlb_gather_mmu(mm, 0);
-	for (addr = start; addr < end; addr += PMD_SIZE) {
-		pgd_t *pgd = pgd_offset(mm, addr);
-		pmd_t *pmd;
-		struct page *page;
-		pte_t *pte;
-		int i;
-
-		if (pgd_none(*pgd))
-			continue;
-		pmd = pmd_offset(pgd, addr);
-		if (!pmd || pmd_none(*pmd))
-			continue;
-		if (pmd_bad(*pmd)) {
-			pmd_ERROR(*pmd);
-			pmd_clear(pmd);
-			continue;
-		}
-		pte = (pte_t *)pmd_page_kernel(*pmd);
-		/* No VMAs, so there should be no PTEs, check just in case. */
-		for (i = 0; i < PTRS_PER_PTE; i++) {
-			BUG_ON(!pte_none(*pte));
-			pte++;
-		}
-		page = pmd_page(*pmd);
-		pmd_clear(pmd);
-		mm->nr_ptes--;
-		dec_page_state(nr_page_table_pages);
-		pte_free_tlb(tlb, page);
-	}
-	tlb_finish_mmu(tlb, start, end);
-	spin_unlock(&mm->page_table_lock);
-
 	return 0;
 }
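Taken together, the two ppc64 hunks reduce prepare_low_seg_for_htlb() to a pure VMA check. A sketch of the resulting function; the find_vma() call comes from the collapsed context between the hunks, so treat it as a reconstruction:

static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
{
	unsigned long start = seg << SID_SHIFT;
	unsigned long end = (seg+1) << SID_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(seg >= 16);

	/* The freepgt changes guarantee the segment holds no stray
	 * page tables, so refusing busy VMAs is all that remains. */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}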