Commit 63eb6b93 authored by Hugh Dickins, committed by Linus Torvalds

vmscan: let GFP_NOFS go to swap again

In the past, GFP_NOFS (but of course not GFP_NOIO) was allowed to reclaim
by writing to swap.  That got partially broken in 2.6.23, when may_enter_fs
initialization was moved up before the allocation of swap, so its
PageSwapCache test was failing the first time around: GFP_NOFS reclaim
could no longer write anonymous pages out to swap.

Fix it by setting may_enter_fs when add_to_swap() succeeds with
__GFP_IO.  In fact, check __GFP_IO before calling add_to_swap():
allocating swap we're not ready to use just increases disk seeking.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bda8550d
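
For context, the initialization the message refers to sits near the top of
shrink_page_list()'s per-page loop and at that time read roughly as follows
(quoted from memory of the era's mm/vmscan.c, so treat the exact form as
approximate):

	may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
		(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

Because this runs before add_to_swap(), PageSwapCache(page) is still false
for an anonymous page that is only about to be given swap space, so a
GFP_NOFS caller leaves may_enter_fs clear and the later !may_enter_fs test
keeps the dirty page on the keep_locked path instead of letting it be
written to swap.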
@@ -623,6 +623,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * Try to allocate it some swap space here.
 		 */
 		if (PageAnon(page) && !PageSwapCache(page)) {
+			if (!(sc->gfp_mask & __GFP_IO))
+				goto keep_locked;
 			switch (try_to_munlock(page)) {
 			case SWAP_FAIL:		/* shouldn't happen */
 			case SWAP_AGAIN:
@@ -634,6 +636,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			}
 			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
+			may_enter_fs = 1;
 		}
 #endif /* CONFIG_SWAP */
...
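
To make the ordering problem concrete, here is a minimal user-space model of
the may_enter_fs decision. It is a sketch only: the gfp bit values are
illustrative (__GFP_WAIT and the other reclaim bits are omitted), and
may_enter_fs_broken()/may_enter_fs_fixed() are hypothetical helpers standing
in for the two versions of shrink_page_list(), not kernel functions.

#include <stdbool.h>
#include <stdio.h>

/* illustrative bit values, not the kernel's definitions */
#define __GFP_IO	0x40u
#define __GFP_FS	0x80u
#define GFP_NOFS	(__GFP_IO)		/* block I/O ok, fs recursion not */
#define GFP_KERNEL	(__GFP_IO | __GFP_FS)

/* 2.6.23 up to this fix: may_enter_fs was computed once, before
 * add_to_swap(), so the swap-cache test was still false for the anon
 * page being reclaimed. */
static bool may_enter_fs_broken(unsigned gfp, bool in_swapcache)
{
	return (gfp & __GFP_FS) ||
	       (in_swapcache && (gfp & __GFP_IO));
}

/* With this patch: bail out early without __GFP_IO, and set may_enter_fs
 * once add_to_swap() has succeeded. */
static bool may_enter_fs_fixed(unsigned gfp, bool anon, bool in_swapcache)
{
	bool may_enter_fs = (gfp & __GFP_FS) ||
			    (in_swapcache && (gfp & __GFP_IO));

	if (anon && !in_swapcache) {
		if (!(gfp & __GFP_IO))
			return false;	/* keep_locked: don't even allocate swap */
		/* add_to_swap() succeeded here */
		may_enter_fs = true;	/* the one-line fix */
	}
	return may_enter_fs;
}

int main(void)
{
	/* GFP_NOFS reclaim of an anonymous page not yet in swap cache */
	printf("broken: may_enter_fs=%d\n", (int)may_enter_fs_broken(GFP_NOFS, false));
	printf("fixed:  may_enter_fs=%d\n", (int)may_enter_fs_fixed(GFP_NOFS, true, false));
	return 0;
}

With GFP_NOFS the broken variant leaves may_enter_fs false, so the dirty
anonymous page takes the keep_locked path and is never handed to pageout();
the fixed variant flips it to true as soon as the swap slot is allocated,
restoring the pre-2.6.23 behaviour of letting GFP_NOFS reclaim write to swap.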