Commit 981cbd03 authored by Hugh Dickins, committed by James Toy

There's contorted mlock/munlock handling in try_to_unmap_anon() and
try_to_unmap_file(), which we'd prefer not to repeat for KSM swapping.
Simplify it by moving it all down into try_to_unmap_one().

One thing is then lost: try_to_munlock()'s distinction between the case
when no vma holds the page mlocked, and the case when a vma does mlock it
but we could not get mmap_sem to set the page flag.  But its only caller
takes no interest in that distinction (and is better off testing SWAP_MLOCK
anyway), so let's keep the code simple and return SWAP_AGAIN for both cases.
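
In other words, the caller's check collapses to a single SWAP_MLOCK test.
A condensed sketch of the munlock_vma_page() hunk below (the call to
try_to_munlock() comes from the elided context just above that hunk, so
treat that line as an assumption):

	ret = try_to_munlock(page);
	/*
	 * did try_to_unlock() succeed or punt?
	 */
	if (ret != SWAP_MLOCK)	/* no mlocked vma, or couldn't get mmap_sem */
		count_vm_event(UNEVICTABLE_PGMUNLOCKED);
	putback_lru_page(page);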

try_to_unmap_file()'s TTU_MUNLOCK nonlinear handling was particularly
amusing: once unravelled, it turns out to have been choosing between two
different ways of doing the same nothing.  Ah, no, one way was actually
returning SWAP_FAIL when it meant to return SWAP_SUCCESS.
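
Taken together, the scheme now concentrated in try_to_unmap_one() has
roughly this shape (a condensed sketch of the rmap.c hunks below, not the
verbatim code; the unmapping work between the two fragments is elided):

	if (vma->vm_flags & VM_LOCKED) {
		ret = SWAP_MLOCK;
		goto out_unmap;		/* fix up PG_mlocked below */
	}
	if (MLOCK_PAGES && TTU_ACTION(flags) == TTU_MUNLOCK)
		goto out_unmap;		/* munlock only inspects, never unmaps */
	...
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (MLOCK_PAGES && ret == SWAP_MLOCK) {
		ret = SWAP_AGAIN;	/* punt if we can't get mmap_sem */
		if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
			if (vma->vm_flags & VM_LOCKED) {
				mlock_vma_page(page);
				ret = SWAP_MLOCK;	/* really mlocked it */
			}
			up_read(&vma->vm_mm->mmap_sem);
		}
	}
	return ret;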
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent ed4047fa
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -117,7 +117,7 @@ static void munlock_vma_page(struct page *page)
 		/*
 		 * did try_to_unlock() succeed or punt?
 		 */
-		if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
+		if (ret != SWAP_MLOCK)
 			count_vm_event(UNEVICTABLE_PGMUNLOCKED);
 
 		putback_lru_page(page);
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -788,6 +788,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			ret = SWAP_MLOCK;
 			goto out_unmap;
 		}
+		if (MLOCK_PAGES && TTU_ACTION(flags) == TTU_MUNLOCK)
+			goto out_unmap;
 	}
 	if (!(flags & TTU_IGNORE_ACCESS)) {
 		if (ptep_clear_flush_young_notify(vma, address, pte)) {
@@ -853,12 +855,22 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	} else
 		dec_mm_counter(mm, file_rss);
 
 	page_remove_rmap(page);
 	page_cache_release(page);
 
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
+	if (MLOCK_PAGES && ret == SWAP_MLOCK) {
+		ret = SWAP_AGAIN;
+		if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+			if (vma->vm_flags & VM_LOCKED) {
+				mlock_vma_page(page);
+				ret = SWAP_MLOCK;
+			}
+			up_read(&vma->vm_mm->mmap_sem);
+		}
+	}
 out:
 	return ret;
 }
@@ -980,23 +992,6 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 	return ret;
 }
 
-/*
- * common handling for pages mapped in VM_LOCKED vmas
- */
-static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
-{
-	int mlocked = 0;
-
-	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
-		if (vma->vm_flags & VM_LOCKED) {
-			mlock_vma_page(page);
-			mlocked++;	/* really mlocked the page */
-		}
-		up_read(&vma->vm_mm->mmap_sem);
-	}
-	return mlocked;
-}
-
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
  * rmap method
@@ -1017,42 +1012,19 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 {
 	struct anon_vma *anon_vma;
 	struct vm_area_struct *vma;
-	unsigned int mlocked = 0;
 	int ret = SWAP_AGAIN;
-	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
-	if (MLOCK_PAGES && unlikely(unlock))
-		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
 
 	anon_vma = page_lock_anon_vma(page);
 	if (!anon_vma)
 		return ret;
 
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!((vma->vm_flags & VM_LOCKED) &&
-			      page_mapped_in_vma(page, vma)))
-				continue;  /* must visit all unlocked vmas */
-			ret = SWAP_MLOCK;  /* saw at least one mlocked vma */
-		} else {
-			ret = try_to_unmap_one(page, vma, flags);
-			if (ret == SWAP_FAIL || !page_mapped(page))
-				break;
-		}
-		if (ret == SWAP_MLOCK) {
-			mlocked = try_to_mlock_page(page, vma);
-			if (mlocked)
-				break;	/* stop if actually mlocked page */
-		}
+		ret = try_to_unmap_one(page, vma, flags);
+		if (ret != SWAP_AGAIN || !page_mapped(page))
+			break;
 	}
 
 	page_unlock_anon_vma(anon_vma);
-
-	if (mlocked)
-		ret = SWAP_MLOCK;	/* actually mlocked the page */
-	else if (ret == SWAP_MLOCK)
-		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
-
 	return ret;
 }
@@ -1082,42 +1054,23 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	unsigned long max_nl_cursor = 0;
 	unsigned long max_nl_size = 0;
 	unsigned int mapcount;
-	unsigned int mlocked = 0;
-	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
-	if (MLOCK_PAGES && unlikely(unlock))
-		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */
 
 	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!((vma->vm_flags & VM_LOCKED) &&
-			      page_mapped_in_vma(page, vma)))
-				continue;	/* must visit all vmas */
-			ret = SWAP_MLOCK;
-		} else {
-			ret = try_to_unmap_one(page, vma, flags);
-			if (ret == SWAP_FAIL || !page_mapped(page))
-				goto out;
-		}
-		if (ret == SWAP_MLOCK) {
-			mlocked = try_to_mlock_page(page, vma);
-			if (mlocked)
-				goto out;	/* stop if actually mlocked page */
-		}
+		ret = try_to_unmap_one(page, vma, flags);
+		if (ret != SWAP_AGAIN || !page_mapped(page))
+			goto out;
 	}
 
 	if (list_empty(&mapping->i_mmap_nonlinear))
 		goto out;
 
+	/* We don't bother to try to find the munlocked page in nonlinears */
+	if (MLOCK_PAGES && TTU_ACTION(flags) == TTU_MUNLOCK)
+		goto out;
+
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if (MLOCK_PAGES && unlikely(unlock)) {
-			if (!(vma->vm_flags & VM_LOCKED))
-				continue;	/* must visit all vmas */
-			ret = SWAP_MLOCK;	/* leave mlocked == 0 */
-			goto out;		/* no need to look further */
-		}
 		if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
 		    (vma->vm_flags & VM_LOCKED))
 			continue;
@@ -1159,10 +1112,9 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 		cursor = (unsigned long) vma->vm_private_data;
 		while ( cursor < max_nl_cursor &&
 			cursor < vma->vm_end - vma->vm_start) {
-			ret = try_to_unmap_cluster(cursor, &mapcount,
-							vma, page);
-			if (ret == SWAP_MLOCK)
-				mlocked = 2;	/* to return below */
+			if (try_to_unmap_cluster(cursor, &mapcount,
+						vma, page) == SWAP_MLOCK)
+				ret = SWAP_MLOCK;
 			cursor += CLUSTER_SIZE;
 			vma->vm_private_data = (void *) cursor;
 			if ((int)mapcount <= 0)
@@ -1183,10 +1135,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 		vma->vm_private_data = NULL;
 out:
 	spin_unlock(&mapping->i_mmap_lock);
-	if (mlocked)
-		ret = SWAP_MLOCK;	/* actually mlocked the page */
-	else if (ret == SWAP_MLOCK)
-		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
 	return ret;
 }
@@ -1229,7 +1177,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
  *
  * Return values are:
  *
- * SWAP_SUCCESS	- no vma's holding page mlocked.
+ * SWAP_AGAIN	- no vma is holding page mlocked, or,
  * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
  * SWAP_MLOCK	- page is now mlocked.
  */