Commit b85a96c0 authored by Daisuke Nishimura, committed by Linus Torvalds

memcg: memory swap controller: fix limit check

There are scattered calls of res_counter_check_under_limit(), and most of
them don't take mem+swap accounting into account.

Define mem_cgroup_check_under_limit() and avoid direct use of
res_counter_check_under_limit().
Reported-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f9717d28
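A minimal standalone sketch of the problem the patch fixes (not kernel code: the struct layouts, the hard-coded usage/limit values and the do_swap_account flag below are simplified stand-ins for the real res_counter machinery). With swap accounting enabled, a cgroup is only really under its limits when both the memory counter and the mem+swap counter are under their limits, so a caller that checks mem->res alone can wrongly conclude there is room left:

/* Simplified model only; real definitions live in the memcg/res_counter code. */
#include <stdbool.h>
#include <stdio.h>

struct res_counter { unsigned long usage, limit; };
struct mem_cgroup { struct res_counter res, memsw; };

static bool do_swap_account = true;	/* stand-in for the real global */

static bool res_counter_check_under_limit(struct res_counter *cnt)
{
	return cnt->usage < cnt->limit;
}

/* Consolidated check: with swap accounting, both counters must be under limit. */
static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
{
	if (do_swap_account)
		return res_counter_check_under_limit(&mem->res) &&
		       res_counter_check_under_limit(&mem->memsw);
	return res_counter_check_under_limit(&mem->res);
}

int main(void)
{
	/* memory usage is under its limit, but mem+swap usage is not */
	struct mem_cgroup mem = {
		.res   = { .usage = 100, .limit = 200 },
		.memsw = { .usage = 300, .limit = 250 },
	};

	printf("old check (res only):      %d\n",
	       res_counter_check_under_limit(&mem.res));	/* prints 1 */
	printf("new check (res and memsw): %d\n",
	       mem_cgroup_check_under_limit(&mem));		/* prints 0 */
	return 0;
}

The hunks below switch every caller to the consolidated helper, so reclaim and charge paths stop reporting success while the mem+swap counter is still over its limit.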
@@ -571,6 +571,18 @@ done:
 	return ret;
 }
 
+static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
+{
+	if (do_swap_account) {
+		if (res_counter_check_under_limit(&mem->res) &&
+			res_counter_check_under_limit(&mem->memsw))
+			return true;
+	} else
+		if (res_counter_check_under_limit(&mem->res))
+			return true;
+	return false;
+}
+
 /*
  * Dance down the hierarchy if needed to reclaim memory. We remember the
  * last child we reclaimed from, so that we don't end up penalizing
@@ -592,7 +604,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 	 * have left.
 	 */
 	ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap);
-	if (res_counter_check_under_limit(&root_mem->res))
+	if (mem_cgroup_check_under_limit(root_mem))
 		return 0;
 
 	next_mem = mem_cgroup_get_first_node(root_mem);
@@ -606,7 +618,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 			continue;
 		}
 		ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap);
-		if (res_counter_check_under_limit(&root_mem->res))
+		if (mem_cgroup_check_under_limit(root_mem))
 			return 0;
 		cgroup_lock();
 		next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
@@ -709,12 +721,8 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 		 * current usage of the cgroup before giving up
 		 *
 		 */
-		if (do_swap_account) {
-			if (res_counter_check_under_limit(&mem_over_limit->res) &&
-				res_counter_check_under_limit(&mem_over_limit->memsw))
-				continue;
-		} else if (res_counter_check_under_limit(&mem_over_limit->res))
-			continue;
+		if (mem_cgroup_check_under_limit(mem_over_limit))
+			continue;
 
 		if (!nr_retries--) {
 			if (oom) {
@@ -1334,7 +1342,7 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 
 	do {
 		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
-		progress += res_counter_check_under_limit(&mem->res);
+		progress += mem_cgroup_check_under_limit(mem);
 	} while (!progress && --retry);
 
 	css_put(&mem->css);