Commit 327c0e96 authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

vmscan: fix it to take care of nodemask

try_to_free_pages() is used for the direct reclaim of up to
SWAP_CLUSTER_MAX pages when watermarks are low.  The caller of
alloc_pages_nodemask() can specify a nodemask of nodes that are allowed to
be used, but this mask is not passed down to try_to_free_pages().  This can
lead to unnecessary reclaim of pages that are unusable by the caller and,
in the worst case, to allocation failure because progress was not made
where it was needed.

This patch passes the nodemask used for alloc_pages_nodemask() to
try_to_free_pages().
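
To make the effect concrete, here is a small userspace toy (not kernel
code: nodemask_t, node_isset() and struct zone below are simplified
stand-ins for the kernel's versions) showing how a reclaim walk that
honours the caller's nodemask skips zones the allocation could never use:

#include <stdio.h>

typedef unsigned int nodemask_t;		/* toy: one bit per node */
#define node_isset(nid, mask)	(((mask) >> (nid)) & 1u)

struct zone { int node; const char *name; };

/* Walk the zonelist; with a non-NULL nodemask, skip zones on disallowed
 * nodes instead of wasting reclaim effort on them. */
static unsigned long shrink_zones(const struct zone *zonelist, int nr,
				  const nodemask_t *nodemask)
{
	unsigned long reclaimed = 0;
	for (int i = 0; i < nr; i++) {
		if (nodemask && !node_isset(zonelist[i].node, *nodemask))
			continue;	/* unusable by the caller */
		printf("  reclaiming from %s (node %d)\n",
		       zonelist[i].name, zonelist[i].node);
		reclaimed += 32;	/* pretend one SWAP_CLUSTER_MAX batch */
	}
	return reclaimed;
}

int main(void)
{
	const struct zone zonelist[] = {
		{ 0, "Normal-0" }, { 1, "Normal-1" }, { 2, "Normal-2" },
	};
	nodemask_t allowed = (1u << 1) | (1u << 2);	/* caller limited to nodes 1-2 */

	puts("before the patch (mask never reaches reclaim):");
	shrink_zones(zonelist, 3, NULL);
	puts("after the patch (caller's mask honoured):");
	shrink_zones(zonelist, 3, &allowed);
	return 0;
}

The diff below threads the real mask the same way: from the allocator,
through try_to_free_pages(), into struct scan_control, and finally into
the zonelist walk in shrink_zones().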

Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2678958e
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -290,7 +290,7 @@ static void free_more_memory(void)
 						&zone);
 		if (zone)
 			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
-						GFP_NOFS);
+						GFP_NOFS, NULL);
 	}
 }
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -212,7 +212,7 @@ static inline void lru_cache_add_active_file(struct page *page)
 /* linux/mm/vmscan.c */
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-					gfp_t gfp_mask);
+					gfp_t gfp_mask, nodemask_t *mask);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
 						  gfp_t gfp_mask, bool noswap,
 						  unsigned int swappiness);
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1582,7 +1582,8 @@ nofail_alloc:
 		reclaim_state.reclaimed_slab = 0;
 		p->reclaim_state = &reclaim_state;
-		did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
+		did_some_progress = try_to_free_pages(zonelist, order,
+						      gfp_mask, nodemask);
 		p->reclaim_state = NULL;
 		lockdep_clear_current_reclaim_state();
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -78,6 +78,12 @@ struct scan_control {
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
 
+	/*
+	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
+	 * are scanned.
+	 */
+	nodemask_t	*nodemask;
+
 	/* Pluggable isolate pages callback */
 	unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
 			unsigned long *scanned, int order, int mode,
@@ -1538,7 +1544,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
 	struct zone *zone;
 
 	sc->all_unreclaimable = 1;
-	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+					sc->nodemask) {
 		if (!populated_zone(zone))
 			continue;
 		/*
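
for_each_zone_zonelist_nodemask() used above is the nodemask-aware variant
of for_each_zone_zonelist(). A minimal standalone sketch of the filtering
it performs (again a toy with simplified types; the kernel's real zoneref
helpers take more parameters, such as a highest-zone index, and live in
include/linux/mmzone.h):

#include <stdio.h>

typedef unsigned int nodemask_t;
#define node_isset(nid, mask)	(((mask) >> (nid)) & 1u)

struct zone { int node; };
struct zoneref { struct zone *zone; };

/* Skip ahead to the next zone whose node the mask allows (any zone if
 * nodemask is NULL); the NULL-zone entry terminates the list. */
static struct zoneref *next_zones_zonelist(struct zoneref *z,
					   const nodemask_t *nodemask,
					   struct zone **zonep)
{
	while (z->zone && nodemask && !node_isset(z->zone->node, *nodemask))
		z++;
	*zonep = z->zone;
	return z;
}

int main(void)
{
	struct zone zones[] = { {0}, {1}, {2} };
	struct zoneref zonelist[] = {
		{ &zones[0] }, { &zones[1] }, { &zones[2] }, { NULL },
	};
	nodemask_t allowed = 1u << 2;	/* only node 2 is usable */
	struct zone *zone;

	/* Same shape as the for_each_zone_zonelist_nodemask() loop. */
	for (struct zoneref *z = next_zones_zonelist(zonelist, &allowed, &zone);
	     zone;
	     z = next_zones_zonelist(++z, &allowed, &zone))
		printf("visiting zone on node %d\n", zone->node);
	return 0;
}

Passing sc->nodemask == NULL keeps the old behaviour: no zone is filtered
out, matching the "all nodes are scanned" comment added to scan_control.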
@@ -1683,7 +1690,7 @@ out:
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-				gfp_t gfp_mask)
+				gfp_t gfp_mask, nodemask_t *nodemask)
 {
 	struct scan_control sc = {
 		.gfp_mask = gfp_mask,
@@ -1694,6 +1701,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.order = order,
 		.mem_cgroup = NULL,
 		.isolate_pages = isolate_pages_global,
+		.nodemask = nodemask,
 	};
 
 	return do_try_to_free_pages(zonelist, &sc);
@@ -1714,6 +1722,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.order = 0,
 		.mem_cgroup = mem_cont,
 		.isolate_pages = mem_cgroup_isolate_pages,
+		.nodemask = NULL,	/* we don't care the placement */
 	};
 	struct zonelist *zonelist;