Commit c97a9e10 authored by Nick Piggin, committed by Linus Torvalds

mm: more rmap checking

Re-introduce rmap verification patches that Hugh removed when he removed
PG_map_lock. PG_map_lock actually isn't needed to synchronise access to
anonymous pages, because PG_locked and PTL together already do.

These checks were important in discovering and fixing a rare rmap corruption
in SLES9.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ea125892
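Background for the checks below, not part of the patch: an anonymous page stores its anon_vma in page->mapping, with the low bit set as a PAGE_MAPPING_ANON type tag. A self-contained sketch of that convention, modelled on include/linux/mm.h of this era; the struct here is a toy stand-in for the kernel's struct page, and page_is_anon/page_anon_vma are illustrative names:

#include <stdio.h>

#define PAGE_MAPPING_ANON	1UL

struct anon_vma;			/* opaque for this sketch */
struct page { void *mapping; };		/* toy stand-in for struct page */

/* Anon pages are recognised by the tag bit in ->mapping. */
static int page_is_anon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/* Strip the tag to recover the anon_vma; __page_check_anon_rmap below
 * does the reverse, re-applying the tag before comparing. */
static struct anon_vma *page_anon_vma(struct page *page)
{
	return (struct anon_vma *)
		((unsigned long)page->mapping & ~PAGE_MAPPING_ANON);
}

int main(void)
{
	static long vma_storage;	/* pretend this is an anon_vma */
	struct page page = {
		.mapping = (void *)((unsigned long)&vma_storage | PAGE_MAPPING_ANON),
	};

	printf("anon=%d vma_ok=%d\n", page_is_anon(&page),
	       page_anon_vma(&page) == (struct anon_vma *)&vma_storage);
	return 0;
}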
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -74,17 +74,14 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
 void page_remove_rmap(struct page *, struct vm_area_struct *);
 
-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page:	the page to add the mapping to
- *
- * For copy_page_range only: minimal extract from page_add_rmap,
- * avoiding unnecessary tests (already checked) so it's quicker.
- */
-static inline void page_dup_rmap(struct page *page)
+#ifdef CONFIG_DEBUG_VM
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
+#else
+static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
 	atomic_inc(&page->_mapcount);
 }
+#endif
 
 /*
  * Called from mm/vmscan.c to handle paging out
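The CONFIG_DEBUG_VM gate above also drives VM_BUG_ON, which the mm/rmap.c hunks below rely on. In kernels of this vintage it reduces to BUG_ON when the option is set and compiles to nothing otherwise, roughly as follows (recalled from include/linux/mm.h; verify against your tree):

#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond)	BUG_ON(cond)
#else
#define VM_BUG_ON(cond)	do { } while (0)
#endif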
...@@ -481,7 +481,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, ...@@ -481,7 +481,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
page = vm_normal_page(vma, addr, pte); page = vm_normal_page(vma, addr, pte);
if (page) { if (page) {
get_page(page); get_page(page);
page_dup_rmap(page); page_dup_rmap(page, vma, addr);
rss[!!PageAnon(page)]++; rss[!!PageAnon(page)]++;
} }
......
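One context line here is worth unpacking: rss[!!PageAnon(page)]++ keeps separate fork-time counters for file-backed and anonymous pages by using !! to collapse the nonzero tag test to exactly 0 or 1. A standalone illustration of the idiom:

#include <stdio.h>

int main(void)
{
	int rss[2] = { 0, 0 };	/* [0] = file-backed, [1] = anonymous */
	int page_is_anon = 4;	/* any nonzero "PageAnon" result */

	rss[!!page_is_anon]++;	/* !! normalises nonzero to exactly 1 */
	printf("file=%d anon=%d\n", rss[0], rss[1]);
	return 0;
}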
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -529,20 +529,52 @@ static void __page_set_anon_rmap(struct page *page,
 	__inc_zone_page_state(page, NR_ANON_PAGES);
 }
 
+/**
+ * __page_check_anon_rmap - sanity check anonymous rmap addition
+ * @page:	the page to add the mapping to
+ * @vma:	the vm area in which the mapping is added
+ * @address:	the user virtual address mapped
+ */
+static void __page_check_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * The page's anon-rmap details (mapping and index) are guaranteed to
+	 * be set up correctly at this point.
+	 *
+	 * We have exclusion against page_add_anon_rmap because the caller
+	 * always holds the page locked, except if called from page_dup_rmap,
+	 * in which case the page is already known to be set up.
+	 *
+	 * We have exclusion against page_add_new_anon_rmap because those pages
+	 * are initially only visible via the pagetables, and the pte is locked
+	 * over the call to page_add_new_anon_rmap.
+	 */
+	struct anon_vma *anon_vma = vma->anon_vma;
+
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	BUG_ON(page->mapping != (struct address_space *)anon_vma);
+	BUG_ON(page->index != linear_page_index(vma, address));
+#endif
+}
+
 /**
  * page_add_anon_rmap - add pte mapping to an anonymous page
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  *
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the pte lock and the page must be locked.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (atomic_inc_and_test(&page->_mapcount))
 		__page_set_anon_rmap(page, vma, address);
-	/* else checking page index and mapping is racy */
+	else
+		__page_check_anon_rmap(page, vma, address);
 }
 
 /*
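The second BUG_ON above compares page->index against linear_page_index(vma, address): for a given vma, each user virtual address maps to one well-defined page offset. A runnable model of that arithmetic (simplified; the real helper lives in include/linux/pagemap.h and additionally folds in the page-cache shift):

#include <stdio.h>

#define PAGE_SHIFT	12UL

/* Simplified model of linear_page_index(): the page offset that a
 * user virtual address corresponds to within its vma. */
static unsigned long linear_page_index(unsigned long vm_start,
				       unsigned long vm_pgoff,
				       unsigned long address)
{
	return ((address - vm_start) >> PAGE_SHIFT) + vm_pgoff;
}

int main(void)
{
	/* A vma spanning 0x10000-0x20000 with vm_pgoff 3: the page at
	 * 0x12000 is 2 pages in, so its expected index is 3 + 2 = 5. */
	printf("index=%lu\n", linear_page_index(0x10000, 3, 0x12000));
	return 0;
}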
@@ -553,10 +585,12 @@ void page_add_anon_rmap(struct page *page,
  *
  * Same as page_add_anon_rmap but must only be called on *new* pages.
  * This means the inc-and-test can be bypassed.
+ * Page does not have to be locked.
  */
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
 	__page_set_anon_rmap(page, vma, address);
 }
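The reason page_add_new_anon_rmap may skip the test: _mapcount uses an off-by-one convention, sitting at -1 while a page is unmapped, so atomic_inc_and_test() (increment, then test for zero) fires exactly on the first mapping, and a brand-new page can simply be set to 0. A standalone model of the branch taken in page_add_anon_rmap above:

#include <stdbool.h>
#include <stdio.h>

/* Model of atomic_inc_and_test(): increment, return true iff result is 0. */
static bool inc_and_test(int *v)
{
	return ++*v == 0;
}

int main(void)
{
	int mapcount = -1;	/* _mapcount convention: -1 == unmapped */

	/* First mapping: -1 -> 0, test fires, rmap fields get set up. */
	printf("first:  set_anon_rmap=%d\n", inc_and_test(&mapcount));
	/* Later mappings: 0 -> 1, test is false, fields are only checked. */
	printf("second: set_anon_rmap=%d\n", inc_and_test(&mapcount));
	return 0;
}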
@@ -573,6 +607,26 @@ void page_add_file_rmap(struct page *page)
 	__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
+#ifdef CONFIG_DEBUG_VM
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page:	the page to add the mapping to
+ *
+ * For copy_page_range only: minimal extract from page_add_file_rmap /
+ * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
+ * quicker.
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+{
+	BUG_ON(page_mapcount(page) == 0);
+	if (PageAnon(page))
+		__page_check_anon_rmap(page, vma, address);
+	atomic_inc(&page->_mapcount);
+}
+#endif
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page:	page to remove mapping from
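Note that every assertion added by this patch vanishes from a production build: __page_check_anon_rmap's body, the out-of-line page_dup_rmap, and VM_BUG_ON all compile away unless the kernel is configured with CONFIG_DEBUG_VM, so the !DEBUG_VM fast paths are effectively unchanged (the inline page_dup_rmap merely gains two unused arguments, which the compiler discards).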