Commit 427d5416 authored by Hugh Dickins, committed by Linus Torvalds

memcg: move_lists on page not page_cgroup

Each caller of mem_cgroup_move_lists is having to use page_get_page_cgroup:
it's more convenient if it acts upon the page itself not the page_cgroup; and
in a later patch this becomes important to handle within memcontrol.c.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bd845e38
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -36,7 +36,7 @@ extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
 extern void mem_cgroup_uncharge(struct page_cgroup *pc);
 extern void mem_cgroup_uncharge_page(struct page *page);
-extern void mem_cgroup_move_lists(struct page_cgroup *pc, bool active);
+extern void mem_cgroup_move_lists(struct page *page, bool active);
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
@@ -106,8 +106,7 @@ static inline void mem_cgroup_uncharge_page(struct page *page)
 {
 }
 
-static inline void mem_cgroup_move_lists(struct page_cgroup *pc,
-					bool active)
+static inline void mem_cgroup_move_lists(struct page *page, bool active)
 {
 }
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -407,11 +407,13 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 
 /*
  * This routine assumes that the appropriate zone's lru lock is already held
  */
-void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
+void mem_cgroup_move_lists(struct page *page, bool active)
 {
+	struct page_cgroup *pc;
 	struct mem_cgroup_per_zone *mz;
 	unsigned long flags;
 
+	pc = page_get_page_cgroup(page);
 	if (!pc)
 		return;
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -176,7 +176,7 @@ void activate_page(struct page *page)
 		SetPageActive(page);
 		add_page_to_active_list(zone, page);
 		__count_vm_event(PGACTIVATE);
-		mem_cgroup_move_lists(page_get_page_cgroup(page), true);
+		mem_cgroup_move_lists(page, true);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1128,7 +1128,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		ClearPageActive(page);
 
 		list_move(&page->lru, &zone->inactive_list);
-		mem_cgroup_move_lists(page_get_page_cgroup(page), false);
+		mem_cgroup_move_lists(page, false);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
 			__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
@@ -1156,8 +1156,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 		VM_BUG_ON(!PageActive(page));
+
 		list_move(&page->lru, &zone->active_list);
-		mem_cgroup_move_lists(page_get_page_cgroup(page), true);
+		mem_cgroup_move_lists(page, true);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
 			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.