Commit 816d8b98 authored by Hugh Dickins, committed by James Toy

KSM swapping will know where page_referenced_one() and try_to_unmap_one()
should look.  It could hack page->index to get them to do what it wants,
but it seems cleaner now to pass the address down to them.

Make the same change to page_mkclean_one(), since it follows the same
pattern; but there's no real need in its case.
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 3c40c0f6
...@@ -336,21 +336,15 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) ...@@ -336,21 +336,15 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
* Subfunctions of page_referenced: page_referenced_one called * Subfunctions of page_referenced: page_referenced_one called
* repeatedly from either page_referenced_anon or page_referenced_file. * repeatedly from either page_referenced_anon or page_referenced_file.
*/ */
static int page_referenced_one(struct page *page, static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
struct vm_area_struct *vma, unsigned long address, unsigned int *mapcount,
unsigned int *mapcount,
unsigned long *vm_flags) unsigned long *vm_flags)
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
unsigned long address;
pte_t *pte; pte_t *pte;
spinlock_t *ptl; spinlock_t *ptl;
int referenced = 0; int referenced = 0;
address = vma_address(page, vma);
if (address == -EFAULT)
goto out;
pte = page_check_address(page, mm, address, &ptl, 0); pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte) if (!pte)
goto out; goto out;
...@@ -409,6 +403,9 @@ static int page_referenced_anon(struct page *page, ...@@ -409,6 +403,9 @@ static int page_referenced_anon(struct page *page,
mapcount = page_mapcount(page); mapcount = page_mapcount(page);
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
unsigned long address = vma_address(page, vma);
if (address == -EFAULT)
continue;
/* /*
* If we are reclaiming on behalf of a cgroup, skip * If we are reclaiming on behalf of a cgroup, skip
* counting on behalf of references from different * counting on behalf of references from different
...@@ -416,7 +413,7 @@ static int page_referenced_anon(struct page *page, ...@@ -416,7 +413,7 @@ static int page_referenced_anon(struct page *page,
*/ */
if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
continue; continue;
referenced += page_referenced_one(page, vma, referenced += page_referenced_one(page, vma, address,
&mapcount, vm_flags); &mapcount, vm_flags);
if (!mapcount) if (!mapcount)
break; break;
...@@ -474,6 +471,9 @@ static int page_referenced_file(struct page *page, ...@@ -474,6 +471,9 @@ static int page_referenced_file(struct page *page,
mapcount = page_mapcount(page); mapcount = page_mapcount(page);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
unsigned long address = vma_address(page, vma);
if (address == -EFAULT)
continue;
/* /*
* If we are reclaiming on behalf of a cgroup, skip * If we are reclaiming on behalf of a cgroup, skip
* counting on behalf of references from different * counting on behalf of references from different
...@@ -481,7 +481,7 @@ static int page_referenced_file(struct page *page, ...@@ -481,7 +481,7 @@ static int page_referenced_file(struct page *page,
*/ */
if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
continue; continue;
referenced += page_referenced_one(page, vma, referenced += page_referenced_one(page, vma, address,
&mapcount, vm_flags); &mapcount, vm_flags);
if (!mapcount) if (!mapcount)
break; break;
...@@ -535,18 +535,14 @@ int page_referenced(struct page *page, ...@@ -535,18 +535,14 @@ int page_referenced(struct page *page,
return referenced; return referenced;
} }
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma) static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
unsigned long address)
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
unsigned long address;
pte_t *pte; pte_t *pte;
spinlock_t *ptl; spinlock_t *ptl;
int ret = 0; int ret = 0;
address = vma_address(page, vma);
if (address == -EFAULT)
goto out;
pte = page_check_address(page, mm, address, &ptl, 1); pte = page_check_address(page, mm, address, &ptl, 1);
if (!pte) if (!pte)
goto out; goto out;
...@@ -578,8 +574,12 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page) ...@@ -578,8 +574,12 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
spin_lock(&mapping->i_mmap_lock); spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
if (vma->vm_flags & VM_SHARED) if (vma->vm_flags & VM_SHARED) {
ret += page_mkclean_one(page, vma); unsigned long address = vma_address(page, vma);
if (address == -EFAULT)
continue;
ret += page_mkclean_one(page, vma, address);
}
} }
spin_unlock(&mapping->i_mmap_lock); spin_unlock(&mapping->i_mmap_lock);
return ret; return ret;
...@@ -761,19 +761,14 @@ void page_remove_rmap(struct page *page) ...@@ -761,19 +761,14 @@ void page_remove_rmap(struct page *page)
* repeatedly from either try_to_unmap_anon or try_to_unmap_file. * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
*/ */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
enum ttu_flags flags) unsigned long address, enum ttu_flags flags)
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
unsigned long address;
pte_t *pte; pte_t *pte;
pte_t pteval; pte_t pteval;
spinlock_t *ptl; spinlock_t *ptl;
int ret = SWAP_AGAIN; int ret = SWAP_AGAIN;
address = vma_address(page, vma);
if (address == -EFAULT)
goto out;
pte = page_check_address(page, mm, address, &ptl, 0); pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte) if (!pte)
goto out; goto out;
...@@ -1018,7 +1013,10 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) ...@@ -1018,7 +1013,10 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
return ret; return ret;
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
ret = try_to_unmap_one(page, vma, flags); unsigned long address = vma_address(page, vma);
if (address == -EFAULT)
continue;
ret = try_to_unmap_one(page, vma, address, flags);
if (ret != SWAP_AGAIN || !page_mapped(page)) if (ret != SWAP_AGAIN || !page_mapped(page))
break; break;
} }
...@@ -1056,7 +1054,10 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) ...@@ -1056,7 +1054,10 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
spin_lock(&mapping->i_mmap_lock); spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
ret = try_to_unmap_one(page, vma, flags); unsigned long address = vma_address(page, vma);
if (address == -EFAULT)
continue;
ret = try_to_unmap_one(page, vma, address, flags);
if (ret != SWAP_AGAIN || !page_mapped(page)) if (ret != SWAP_AGAIN || !page_mapped(page))
goto out; goto out;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment