Commit 2903fb16 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] vmscan: skip reclaim_mapped determination if we do not swap

This puts the variables and the logic for determining reclaim_mapped into one
block, and allows zone_reclaim (or other callers) to skip the determination.
(Maybe this whole block of code does not belong in refill_inactive_zone()?)
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 072eaa5d
@@ -1195,9 +1195,47 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 	struct page *page;
 	struct pagevec pvec;
 	int reclaim_mapped = 0;
-	long mapped_ratio;
-	long distress;
-	long swap_tendency;
+
+	if (unlikely(sc->may_swap)) {
+		long mapped_ratio;
+		long distress;
+		long swap_tendency;
+
+		/*
+		 * `distress' is a measure of how much trouble we're having
+		 * reclaiming pages. 0 -> no problems. 100 -> great trouble.
+		 */
+		distress = 100 >> zone->prev_priority;
+
+		/*
+		 * The point of this algorithm is to decide when to start
+		 * reclaiming mapped memory instead of just pagecache. Work out
+		 * how much memory
+		 * is mapped.
+		 */
+		mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+
+		/*
+		 * Now decide how much we really want to unmap some pages. The
+		 * mapped ratio is downgraded - just because there's a lot of
+		 * mapped memory doesn't necessarily mean that page reclaim
+		 * isn't succeeding.
+		 *
+		 * The distress ratio is important - we don't want to start
+		 * going oom.
+		 *
+		 * A 100% value of vm_swappiness overrides this algorithm
+		 * altogether.
+		 */
+		swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
+
+		/*
+		 * Now use this metric to decide whether to start moving mapped
+		 * memory onto the inactive list.
+		 */
+		if (swap_tendency >= 100)
+			reclaim_mapped = 1;
+	}
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
@@ -1207,37 +1245,6 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 	zone->nr_active -= pgmoved;
 	spin_unlock_irq(&zone->lru_lock);
 
-	/*
-	 * `distress' is a measure of how much trouble we're having reclaiming
-	 * pages. 0 -> no problems. 100 -> great trouble.
-	 */
-	distress = 100 >> zone->prev_priority;
-
-	/*
-	 * The point of this algorithm is to decide when to start reclaiming
-	 * mapped memory instead of just pagecache. Work out how much memory
-	 * is mapped.
-	 */
-	mapped_ratio = (sc->nr_mapped * 100) / total_memory;
-
-	/*
-	 * Now decide how much we really want to unmap some pages. The mapped
-	 * ratio is downgraded - just because there's a lot of mapped memory
-	 * doesn't necessarily mean that page reclaim isn't succeeding.
-	 *
-	 * The distress ratio is important - we don't want to start going oom.
-	 *
-	 * A 100% value of vm_swappiness overrides this algorithm altogether.
-	 */
-	swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
-
-	/*
-	 * Now use this metric to decide whether to start moving mapped memory
-	 * onto the inactive list.
-	 */
-	if (swap_tendency >= 100 && sc->may_swap)
-		reclaim_mapped = 1;
-
 	while (!list_empty(&l_hold)) {
 		cond_resched();
 		page = lru_to_page(&l_hold);
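The comments in the first hunk spell out the heuristic; the user-space sketch below restates it in isolation to show what the new may_swap guard buys. It is only an illustration: the struct scan_control stand-in, the decide_reclaim_mapped() helper, and all of the sample numbers are hypothetical, vm_swappiness is passed as a plain parameter rather than read from the sysctl, and the unlikely() annotation is dropped. Only the arithmetic (distress = 100 >> prev_priority, mapped_ratio = nr_mapped * 100 / total_memory, swap_tendency = mapped_ratio / 2 + distress + vm_swappiness, threshold 100) comes from the patch.

/* Standalone, user-space sketch of the reclaim_mapped heuristic after this
 * patch.  Everything here is a toy model; none of it is kernel code. */
#include <stdio.h>

struct scan_control {			/* hypothetical stand-in for the kernel struct */
	int may_swap;			/* 0 -> skip the whole determination */
	unsigned long nr_mapped;	/* pages mapped into pagetables */
};

static int decide_reclaim_mapped(const struct scan_control *sc,
				 int prev_priority,
				 unsigned long total_memory,
				 long vm_swappiness)
{
	int reclaim_mapped = 0;

	if (sc->may_swap) {		/* the patch guards the whole block with this */
		long distress = 100 >> prev_priority;	/* 0 = no trouble, 100 = great trouble */
		long mapped_ratio = (sc->nr_mapped * 100) / total_memory;
		long swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

		if (swap_tendency >= 100)
			reclaim_mapped = 1;
	}
	return reclaim_mapped;
}

int main(void)
{
	/* 60% of memory mapped, heavy pressure (prev_priority 1), swappiness 60:
	 * swap_tendency = 30 + 50 + 60 = 140 >= 100, so mapped pages are eligible. */
	struct scan_control sc = { .may_swap = 1, .nr_mapped = 60000 };
	printf("may_swap=1: reclaim_mapped=%d\n",
	       decide_reclaim_mapped(&sc, 1, 100000, 60));

	/* Same load, but the caller cleared may_swap: the computation is
	 * skipped entirely and mapped memory is left alone. */
	sc.may_swap = 0;
	printf("may_swap=0: reclaim_mapped=%d\n",
	       decide_reclaim_mapped(&sc, 1, 100000, 60));
	return 0;
}

With these made-up numbers the first call reports reclaim_mapped = 1, while the second call, with may_swap cleared as the commit message says zone_reclaim can now do, never evaluates the heuristic at all.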