Commit 83aae4c7 authored by Daisuke Nishimura, committed by Linus Torvalds

memcg: cleanup cache_charge

Current mem_cgroup_cache_charge() is a bit complicated, especially
in the case of shmem's swap-in.

This patch cleans it up by using try_charge_swapin and commit_charge_swapin.
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 627991a2
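
The core of the cleanup is memcg's two-step charging protocol for swapcache pages: a try step that reserves the charge against the cgroup that owns the swap entry, and a commit step that binds the reserved charge to the page. A simplified sketch of the resulting shmem branch of mem_cgroup_cache_charge() (all names as in the diff below; locking, error paths, and the surrounding function body are elided):

	/* Shmem pages coming back from swap take the try/commit path. */
	if (PageSwapCache(page)) {
		/* Reserve one page of charge against the cgroup recorded
		 * for this swap entry (falling back to mm's cgroup). */
		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
		if (!ret)
			/* Bind the reserved charge to the page as a shmem
			 * charge; the commit path also handles the LRU
			 * fixup and the duplicated memsw accounting. */
			__mem_cgroup_commit_charge_swapin(page, mem,
					MEM_CGROUP_CHARGE_TYPE_SHMEM);
	} else
		/* Not in swapcache: charge mm directly, as before. */
		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
					MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);

This removes the open-coded swap-account fixup from mem_cgroup_cache_charge(), which duplicated logic already living in the commit path.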
mm/memcontrol.c
@@ -1238,6 +1238,10 @@ int mem_cgroup_newpage_charge(struct page *page,
 					MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
 }
 
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+					enum charge_type ctype);
+
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
@@ -1274,16 +1278,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 		unlock_page_cgroup(pc);
 	}
 
-	if (do_swap_account && PageSwapCache(page)) {
-		mem = try_get_mem_cgroup_from_swapcache(page);
-		if (mem)
-			mm = NULL;
-		else
-			mem = NULL;
-		/* SwapCache may be still linked to LRU now. */
-		mem_cgroup_lru_del_before_commit_swapcache(page);
-	}
-
 	if (unlikely(!mm && !mem))
 		mm = &init_mm;
 
@@ -1291,32 +1285,16 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 		return mem_cgroup_charge_common(page, mm, gfp_mask,
 				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
 
-	ret = mem_cgroup_charge_common(page, mm, gfp_mask,
-				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
-	if (mem)
-		css_put(&mem->css);
-	if (PageSwapCache(page))
-		mem_cgroup_lru_add_after_commit_swapcache(page);
-
-	if (do_swap_account && !ret && PageSwapCache(page)) {
-		swp_entry_t ent = {.val = page_private(page)};
-		unsigned short id;
-		/* avoid double counting */
-		id = swap_cgroup_record(ent, 0);
-		rcu_read_lock();
-		mem = mem_cgroup_lookup(id);
-		if (mem) {
-			/*
-			 * We did swap-in. Then, this entry is doubly counted
-			 * both in mem and memsw. We uncharge it, here.
-			 * Recorded ID can be obsolete. We avoid calling
-			 * css_tryget()
-			 */
-			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-			mem_cgroup_put(mem);
-		}
-		rcu_read_unlock();
-	}
+	/* shmem */
+	if (PageSwapCache(page)) {
+		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
+		if (!ret)
+			__mem_cgroup_commit_charge_swapin(page, mem,
+					MEM_CGROUP_CHARGE_TYPE_SHMEM);
+	} else
+		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+					MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
+
 	return ret;
 }
 
@@ -1359,7 +1337,9 @@ charge_cur_mm:
 	return __mem_cgroup_try_charge(mm, mask, ptr, true);
 }
 
-void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+static void
+__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
+					enum charge_type ctype)
 {
 	struct page_cgroup *pc;
 
@@ -1369,7 +1349,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 		return;
 	pc = lookup_page_cgroup(page);
 	mem_cgroup_lru_del_before_commit_swapcache(page);
-	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+	__mem_cgroup_commit_charge(ptr, pc, ctype);
 	mem_cgroup_lru_add_after_commit_swapcache(page);
 	/*
 	 * Now swap is on-memory. This means this page may be
@@ -1400,6 +1380,12 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 	}
 }
 
+void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+{
+	__mem_cgroup_commit_charge_swapin(page, ptr,
+					MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
+
 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
 {
 	if (mem_cgroup_disabled())
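
Note that the exported mem_cgroup_commit_charge_swapin() keeps its original contract (a MAPPED-type commit), so callers outside this file are untouched. A hedged sketch of the usual caller-side pattern for an anonymous swap-in fault (not part of this diff; fault_succeeded is a placeholder and the GFP flag is illustrative):

	struct mem_cgroup *ptr = NULL;

	/* Try step: may fail against the cgroup's limit. */
	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
		return VM_FAULT_OOM;

	/* ... lock the page, install the PTE, etc. ... */

	if (fault_succeeded)
		/* Commit: the charge sticks to the page (MAPPED type). */
		mem_cgroup_commit_charge_swapin(page, ptr);
	else
		/* Cancel: release the reservation taken by the try step. */
		mem_cgroup_cancel_charge_swapin(ptr);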