Commit 62ee9018 authored by Hugh Dickins, committed by James Toy

CONFIG_CGROUP_MEM_RES_CTLR=y CONFIG_PREEMPT=y mmotm fails to boot:

Kernel panic - not syncing: No init found; after lots of scheduling
while atomics, starting from when async_thread does sd_probe_async.

mem_cgroup_soft_limit_check() was doing an unbalanced get_cpu(): don't
get_cpu if we won't need it, and put_cpu if we did get_cpu.

And fix the silliness of passing it an "over_soft_limit" argument
that just tells it to return false when false.
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Tested-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Reviewed-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reported-by: Jiri Slaby <jirislaby@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent fd9213af
...@@ -334,23 +334,21 @@ mem_cgroup_remove_exceeded(struct mem_cgroup *mem, ...@@ -334,23 +334,21 @@ mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
spin_unlock(&mctz->lock); spin_unlock(&mctz->lock);
} }
static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem, static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
bool over_soft_limit)
{ {
bool ret = false; bool ret = false;
int cpu = get_cpu(); int cpu;
s64 val; s64 val;
struct mem_cgroup_stat_cpu *cpustat; struct mem_cgroup_stat_cpu *cpustat;
if (!over_soft_limit) cpu = get_cpu();
return ret;
cpustat = &mem->stat.cpustat[cpu]; cpustat = &mem->stat.cpustat[cpu];
val = __mem_cgroup_stat_read_local(cpustat, MEM_CGROUP_STAT_EVENTS); val = __mem_cgroup_stat_read_local(cpustat, MEM_CGROUP_STAT_EVENTS);
if (unlikely(val > SOFTLIMIT_EVENTS_THRESH)) { if (unlikely(val > SOFTLIMIT_EVENTS_THRESH)) {
__mem_cgroup_stat_reset_safe(cpustat, MEM_CGROUP_STAT_EVENTS); __mem_cgroup_stat_reset_safe(cpustat, MEM_CGROUP_STAT_EVENTS);
ret = true; ret = true;
} }
put_cpu();
return ret; return ret;
} }
...@@ -1231,7 +1229,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, ...@@ -1231,7 +1229,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
if (soft_fail_res) { if (soft_fail_res) {
mem_over_soft_limit = mem_over_soft_limit =
mem_cgroup_from_res_counter(soft_fail_res, res); mem_cgroup_from_res_counter(soft_fail_res, res);
if (mem_cgroup_soft_limit_check(mem_over_soft_limit, true)) if (mem_cgroup_soft_limit_check(mem_over_soft_limit))
mem_cgroup_update_tree(mem_over_soft_limit, page); mem_cgroup_update_tree(mem_over_soft_limit, page);
} }
return 0; return 0;
...@@ -1762,7 +1760,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) ...@@ -1762,7 +1760,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
mz = page_cgroup_zoneinfo(pc); mz = page_cgroup_zoneinfo(pc);
unlock_page_cgroup(pc); unlock_page_cgroup(pc);
if (mem_cgroup_soft_limit_check(mem, soft_limit_excess)) if (soft_limit_excess && mem_cgroup_soft_limit_check(mem))
mem_cgroup_update_tree(mem, page); mem_cgroup_update_tree(mem, page);
/* at swapout, this memcg will be accessed to record to swap */ /* at swapout, this memcg will be accessed to record to swap */
if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT) if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment