Commit 4294621f authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] mm: rss = file_rss + anon_rss

I was lazy when we added anon_rss, and chose to change as few places as
possible.  So currently each anonymous page has to be counted twice, in rss
and in anon_rss.  Which won't be so good if those are atomic counts in some
configurations.

Change that around: keep file_rss and anon_rss separately, and add them
together (with get_mm_rss macro) when the total is needed - reading two
atomics is much cheaper than updating two atomics.  And update anon_rss
upfront, typically in memory.c, not tucked away in page_add_anon_rmap.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 404351e6
...@@ -330,7 +330,7 @@ void install_arg_page(struct vm_area_struct *vma, ...@@ -330,7 +330,7 @@ void install_arg_page(struct vm_area_struct *vma,
pte_unmap(pte); pte_unmap(pte);
goto out; goto out;
} }
inc_mm_counter(mm, rss); inc_mm_counter(mm, anon_rss);
lru_cache_add_active(page); lru_cache_add_active(page);
set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte( set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
page, vma->vm_page_prot)))); page, vma->vm_page_prot))));
......
...@@ -438,7 +438,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole) ...@@ -438,7 +438,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
jiffies_to_clock_t(it_real_value), jiffies_to_clock_t(it_real_value),
start_time, start_time,
vsize, vsize,
mm ? get_mm_counter(mm, rss) : 0, /* you might want to shift this left 3 */ mm ? get_mm_rss(mm) : 0,
rsslim, rsslim,
mm ? mm->start_code : 0, mm ? mm->start_code : 0,
mm ? mm->end_code : 0, mm ? mm->end_code : 0,
......
...@@ -29,7 +29,7 @@ char *task_mem(struct mm_struct *mm, char *buffer) ...@@ -29,7 +29,7 @@ char *task_mem(struct mm_struct *mm, char *buffer)
"VmPTE:\t%8lu kB\n", "VmPTE:\t%8lu kB\n",
(mm->total_vm - mm->reserved_vm) << (PAGE_SHIFT-10), (mm->total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
mm->locked_vm << (PAGE_SHIFT-10), mm->locked_vm << (PAGE_SHIFT-10),
get_mm_counter(mm, rss) << (PAGE_SHIFT-10), get_mm_rss(mm) << (PAGE_SHIFT-10),
data << (PAGE_SHIFT-10), data << (PAGE_SHIFT-10),
mm->stack_vm << (PAGE_SHIFT-10), text, lib, mm->stack_vm << (PAGE_SHIFT-10), text, lib,
(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10); (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
...@@ -44,13 +44,11 @@ unsigned long task_vsize(struct mm_struct *mm) ...@@ -44,13 +44,11 @@ unsigned long task_vsize(struct mm_struct *mm)
int task_statm(struct mm_struct *mm, int *shared, int *text, int task_statm(struct mm_struct *mm, int *shared, int *text,
int *data, int *resident) int *data, int *resident)
{ {
int rss = get_mm_counter(mm, rss); *shared = get_mm_counter(mm, file_rss);
*shared = rss - get_mm_counter(mm, anon_rss);
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT; >> PAGE_SHIFT;
*data = mm->total_vm - mm->shared_vm; *data = mm->total_vm - mm->shared_vm;
*resident = rss; *resident = *shared + get_mm_counter(mm, anon_rss);
return mm->total_vm; return mm->total_vm;
} }
......
...@@ -254,6 +254,8 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); ...@@ -254,6 +254,8 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#define add_mm_counter(mm, member, value) (mm)->_##member += (value) #define add_mm_counter(mm, member, value) (mm)->_##member += (value)
#define inc_mm_counter(mm, member) (mm)->_##member++ #define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member-- #define dec_mm_counter(mm, member) (mm)->_##member--
#define get_mm_rss(mm) ((mm)->_file_rss + (mm)->_anon_rss)
typedef unsigned long mm_counter_t; typedef unsigned long mm_counter_t;
struct mm_struct { struct mm_struct {
...@@ -286,7 +288,7 @@ struct mm_struct { ...@@ -286,7 +288,7 @@ struct mm_struct {
unsigned long exec_vm, stack_vm, reserved_vm, def_flags, nr_ptes; unsigned long exec_vm, stack_vm, reserved_vm, def_flags, nr_ptes;
/* Special counters protected by the page_table_lock */ /* Special counters protected by the page_table_lock */
mm_counter_t _rss; mm_counter_t _file_rss;
mm_counter_t _anon_rss; mm_counter_t _anon_rss;
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */ unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
......
...@@ -553,7 +553,7 @@ void acct_update_integrals(struct task_struct *tsk) ...@@ -553,7 +553,7 @@ void acct_update_integrals(struct task_struct *tsk)
if (delta == 0) if (delta == 0)
return; return;
tsk->acct_stimexpd = tsk->stime; tsk->acct_stimexpd = tsk->stime;
tsk->acct_rss_mem1 += delta * get_mm_counter(tsk->mm, rss); tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
} }
} }
......
...@@ -321,7 +321,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm) ...@@ -321,7 +321,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
INIT_LIST_HEAD(&mm->mmlist); INIT_LIST_HEAD(&mm->mmlist);
mm->core_waiters = 0; mm->core_waiters = 0;
mm->nr_ptes = 0; mm->nr_ptes = 0;
set_mm_counter(mm, rss, 0); set_mm_counter(mm, file_rss, 0);
set_mm_counter(mm, anon_rss, 0); set_mm_counter(mm, anon_rss, 0);
spin_lock_init(&mm->page_table_lock); spin_lock_init(&mm->page_table_lock);
rwlock_init(&mm->ioctx_list_lock); rwlock_init(&mm->ioctx_list_lock);
...@@ -499,7 +499,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk) ...@@ -499,7 +499,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
if (retval) if (retval)
goto free_pt; goto free_pt;
mm->hiwater_rss = get_mm_counter(mm,rss); mm->hiwater_rss = get_mm_rss(mm);
mm->hiwater_vm = mm->total_vm; mm->hiwater_vm = mm->total_vm;
good_mm: good_mm:
......
...@@ -39,7 +39,7 @@ static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -39,7 +39,7 @@ static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
set_page_dirty(page); set_page_dirty(page);
page_remove_rmap(page); page_remove_rmap(page);
page_cache_release(page); page_cache_release(page);
dec_mm_counter(mm, rss); dec_mm_counter(mm, file_rss);
} }
} }
} else { } else {
...@@ -95,7 +95,7 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -95,7 +95,7 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
zap_pte(mm, vma, addr, pte); zap_pte(mm, vma, addr, pte);
inc_mm_counter(mm,rss); inc_mm_counter(mm, file_rss);
flush_icache_page(vma, page); flush_icache_page(vma, page);
set_pte_at(mm, addr, pte, mk_pte(page, prot)); set_pte_at(mm, addr, pte, mk_pte(page, prot));
page_add_file_rmap(page); page_add_file_rmap(page);
......
...@@ -286,7 +286,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, ...@@ -286,7 +286,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
entry = *src_pte; entry = *src_pte;
ptepage = pte_page(entry); ptepage = pte_page(entry);
get_page(ptepage); get_page(ptepage);
add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
set_huge_pte_at(dst, addr, dst_pte, entry); set_huge_pte_at(dst, addr, dst_pte, entry);
} }
spin_unlock(&src->page_table_lock); spin_unlock(&src->page_table_lock);
...@@ -324,7 +324,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, ...@@ -324,7 +324,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
page = pte_page(pte); page = pte_page(pte);
put_page(page); put_page(page);
add_mm_counter(mm, rss, - (HPAGE_SIZE / PAGE_SIZE)); add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
} }
flush_tlb_range(vma, start, end); flush_tlb_range(vma, start, end);
} }
...@@ -386,7 +386,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) ...@@ -386,7 +386,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
goto out; goto out;
} }
} }
add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE); add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
set_huge_pte_at(mm, addr, pte, make_huge_pte(vma, page)); set_huge_pte_at(mm, addr, pte, make_huge_pte(vma, page));
} }
out: out:
......
...@@ -397,9 +397,10 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, ...@@ -397,9 +397,10 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte = pte_mkclean(pte); pte = pte_mkclean(pte);
pte = pte_mkold(pte); pte = pte_mkold(pte);
get_page(page); get_page(page);
inc_mm_counter(dst_mm, rss);
if (PageAnon(page)) if (PageAnon(page))
inc_mm_counter(dst_mm, anon_rss); inc_mm_counter(dst_mm, anon_rss);
else
inc_mm_counter(dst_mm, file_rss);
set_pte_at(dst_mm, addr, dst_pte, pte); set_pte_at(dst_mm, addr, dst_pte, pte);
page_dup_rmap(page); page_dup_rmap(page);
} }
...@@ -581,8 +582,8 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd, ...@@ -581,8 +582,8 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
set_page_dirty(page); set_page_dirty(page);
if (pte_young(ptent)) if (pte_young(ptent))
mark_page_accessed(page); mark_page_accessed(page);
dec_mm_counter(tlb->mm, file_rss);
} }
dec_mm_counter(tlb->mm, rss);
page_remove_rmap(page); page_remove_rmap(page);
tlb_remove_page(tlb, page); tlb_remove_page(tlb, page);
continue; continue;
...@@ -1290,13 +1291,15 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -1290,13 +1291,15 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
page_table = pte_offset_map(pmd, address); page_table = pte_offset_map(pmd, address);
if (likely(pte_same(*page_table, orig_pte))) { if (likely(pte_same(*page_table, orig_pte))) {
if (PageAnon(old_page))
dec_mm_counter(mm, anon_rss);
if (PageReserved(old_page)) if (PageReserved(old_page))
inc_mm_counter(mm, rss); inc_mm_counter(mm, anon_rss);
else else {
page_remove_rmap(old_page); page_remove_rmap(old_page);
if (!PageAnon(old_page)) {
inc_mm_counter(mm, anon_rss);
dec_mm_counter(mm, file_rss);
}
}
flush_cache_page(vma, address, pfn); flush_cache_page(vma, address, pfn);
entry = mk_pte(new_page, vma->vm_page_prot); entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma); entry = maybe_mkwrite(pte_mkdirty(entry), vma);
...@@ -1701,7 +1704,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -1701,7 +1704,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* The page isn't present yet, go ahead with the fault. */ /* The page isn't present yet, go ahead with the fault. */
inc_mm_counter(mm, rss); inc_mm_counter(mm, anon_rss);
pte = mk_pte(page, vma->vm_page_prot); pte = mk_pte(page, vma->vm_page_prot);
if (write_access && can_share_swap_page(page)) { if (write_access && can_share_swap_page(page)) {
pte = maybe_mkwrite(pte_mkdirty(pte), vma); pte = maybe_mkwrite(pte_mkdirty(pte), vma);
...@@ -1774,7 +1777,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -1774,7 +1777,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
page_cache_release(page); page_cache_release(page);
goto unlock; goto unlock;
} }
inc_mm_counter(mm, rss); inc_mm_counter(mm, anon_rss);
entry = mk_pte(page, vma->vm_page_prot); entry = mk_pte(page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma); entry = maybe_mkwrite(pte_mkdirty(entry), vma);
lru_cache_add_active(page); lru_cache_add_active(page);
...@@ -1887,19 +1890,19 @@ retry: ...@@ -1887,19 +1890,19 @@ retry:
*/ */
/* Only go through if we didn't race with anybody else... */ /* Only go through if we didn't race with anybody else... */
if (pte_none(*page_table)) { if (pte_none(*page_table)) {
if (!PageReserved(new_page))
inc_mm_counter(mm, rss);
flush_icache_page(vma, new_page); flush_icache_page(vma, new_page);
entry = mk_pte(new_page, vma->vm_page_prot); entry = mk_pte(new_page, vma->vm_page_prot);
if (write_access) if (write_access)
entry = maybe_mkwrite(pte_mkdirty(entry), vma); entry = maybe_mkwrite(pte_mkdirty(entry), vma);
set_pte_at(mm, address, page_table, entry); set_pte_at(mm, address, page_table, entry);
if (anon) { if (anon) {
inc_mm_counter(mm, anon_rss);
lru_cache_add_active(new_page); lru_cache_add_active(new_page);
page_add_anon_rmap(new_page, vma, address); page_add_anon_rmap(new_page, vma, address);
} else } else if (!PageReserved(new_page)) {
inc_mm_counter(mm, file_rss);
page_add_file_rmap(new_page); page_add_file_rmap(new_page);
}
} else { } else {
/* One of our sibling threads was faster, back out. */ /* One of our sibling threads was faster, back out. */
page_cache_release(new_page); page_cache_release(new_page);
...@@ -2192,7 +2195,7 @@ EXPORT_SYMBOL(vmalloc_to_pfn); ...@@ -2192,7 +2195,7 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
void update_mem_hiwater(struct task_struct *tsk) void update_mem_hiwater(struct task_struct *tsk)
{ {
if (tsk->mm) { if (tsk->mm) {
unsigned long rss = get_mm_counter(tsk->mm, rss); unsigned long rss = get_mm_rss(tsk->mm);
if (tsk->mm->hiwater_rss < rss) if (tsk->mm->hiwater_rss < rss)
tsk->mm->hiwater_rss = rss; tsk->mm->hiwater_rss = rss;
......
...@@ -1083,7 +1083,7 @@ void update_mem_hiwater(struct task_struct *tsk) ...@@ -1083,7 +1083,7 @@ void update_mem_hiwater(struct task_struct *tsk)
unsigned long rss; unsigned long rss;
if (likely(tsk->mm)) { if (likely(tsk->mm)) {
rss = get_mm_counter(tsk->mm, rss); rss = get_mm_rss(tsk->mm);
if (tsk->mm->hiwater_rss < rss) if (tsk->mm->hiwater_rss < rss)
tsk->mm->hiwater_rss = rss; tsk->mm->hiwater_rss = rss;
if (tsk->mm->hiwater_vm < tsk->mm->total_vm) if (tsk->mm->hiwater_vm < tsk->mm->total_vm)
......
...@@ -445,8 +445,6 @@ void page_add_anon_rmap(struct page *page, ...@@ -445,8 +445,6 @@ void page_add_anon_rmap(struct page *page,
{ {
BUG_ON(PageReserved(page)); BUG_ON(PageReserved(page));
inc_mm_counter(vma->vm_mm, anon_rss);
if (atomic_inc_and_test(&page->_mapcount)) { if (atomic_inc_and_test(&page->_mapcount)) {
struct anon_vma *anon_vma = vma->anon_vma; struct anon_vma *anon_vma = vma->anon_vma;
...@@ -561,9 +559,9 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma) ...@@ -561,9 +559,9 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
BUG_ON(pte_file(*pte)); BUG_ON(pte_file(*pte));
dec_mm_counter(mm, anon_rss); dec_mm_counter(mm, anon_rss);
} } else
dec_mm_counter(mm, file_rss);
dec_mm_counter(mm, rss);
page_remove_rmap(page); page_remove_rmap(page);
page_cache_release(page); page_cache_release(page);
...@@ -667,7 +665,7 @@ static void try_to_unmap_cluster(unsigned long cursor, ...@@ -667,7 +665,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
page_remove_rmap(page); page_remove_rmap(page);
page_cache_release(page); page_cache_release(page);
dec_mm_counter(mm, rss); dec_mm_counter(mm, file_rss);
(*mapcount)--; (*mapcount)--;
} }
......
...@@ -407,7 +407,7 @@ void free_swap_and_cache(swp_entry_t entry) ...@@ -407,7 +407,7 @@ void free_swap_and_cache(swp_entry_t entry)
static void unuse_pte(struct vm_area_struct *vma, pte_t *pte, static void unuse_pte(struct vm_area_struct *vma, pte_t *pte,
unsigned long addr, swp_entry_t entry, struct page *page) unsigned long addr, swp_entry_t entry, struct page *page)
{ {
inc_mm_counter(vma->vm_mm, rss); inc_mm_counter(vma->vm_mm, anon_rss);
get_page(page); get_page(page);
set_pte_at(vma->vm_mm, addr, pte, set_pte_at(vma->vm_mm, addr, pte,
pte_mkold(mk_pte(page, vma->vm_page_prot))); pte_mkold(mk_pte(page, vma->vm_page_prot)));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment