Commit 8fe23e05 authored by David Rientjes, committed by Linus Torvalds

mm: clear node in N_HIGH_MEMORY and stop kswapd when all memory is offlined

When memory is hot-removed, its node must be cleared in N_HIGH_MEMORY if
there are no present pages left.

In such a situation, kswapd must also be stopped since it has nothing left
to do.
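
As background, much of the VM only iterates over nodes that are set in the N_HIGH_MEMORY mask, so leaving an empty node in that mask would keep it visible to allocation and reclaim policy code. A minimal, hypothetical sketch of such an iteration (the walk_memory_nodes() helper below is illustrative only, not part of this patch):

#include <linux/kernel.h>
#include <linux/nodemask.h>

/*
 * Illustrative helper, not part of this patch: visit only the nodes the
 * VM still believes have (high) memory.  Once the hotplug offline path
 * calls node_clear_state(node, N_HIGH_MEMORY), as done below, the node
 * stops showing up here.
 */
static void walk_memory_nodes(void)
{
        int nid;

        for_each_node_state(nid, N_HIGH_MEMORY)
                printk(KERN_INFO "node %d still has memory\n", nid);
}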
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Rik van Riel <riel@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Andy Whitcroft <apw@canonical.com>
Cc: Eric Whitney <eric.whitney@hp.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9b5e5d0f
include/linux/swap.h
@@ -273,6 +273,7 @@ extern int scan_unevictable_register_node(struct node *node);
 extern void scan_unevictable_unregister_node(struct node *node);
 
 extern int kswapd_run(int nid);
+extern void kswapd_stop(int nid);
 
 #ifdef CONFIG_MMU
 /* linux/mm/shmem.c */
mm/memory_hotplug.c
@@ -853,6 +853,10 @@ repeat:
 
         setup_per_zone_wmarks();
         calculate_zone_inactive_ratio(zone);
+        if (!node_present_pages(node)) {
+                node_clear_state(node, N_HIGH_MEMORY);
+                kswapd_stop(node);
+        }
 
         vm_total_pages = nr_free_pagecache_pages();
         writeback_set_ratelimit();
mm/vmscan.c
@@ -2173,6 +2173,7 @@ static int kswapd(void *p)
         order = 0;
         for ( ; ; ) {
                 unsigned long new_order;
+                int ret;
 
                 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
                 new_order = pgdat->kswapd_max_order;
@@ -2184,20 +2185,24 @@ static int kswapd(void *p)
                          */
                         order = new_order;
                 } else {
-                        if (!freezing(current))
+                        if (!freezing(current) && !kthread_should_stop())
                                 schedule();
 
                         order = pgdat->kswapd_max_order;
                 }
                 finish_wait(&pgdat->kswapd_wait, &wait);
 
-                if (!try_to_freeze()) {
-                        /* We can speed up thawing tasks if we don't call
-                         * balance_pgdat after returning from the refrigerator
-                         */
+                ret = try_to_freeze();
+                if (kthread_should_stop())
+                        break;
+
+                /*
+                 * We can speed up thawing tasks if we don't call balance_pgdat
+                 * after returning from the refrigerator
+                 */
+                if (!ret)
                         balance_pgdat(pgdat, order);
-                }
         }
         return 0;
 }
@@ -2451,6 +2456,17 @@ int kswapd_run(int nid)
         return ret;
 }
 
+/*
+ * Called by memory hotplug when all memory in a node is offlined.
+ */
+void kswapd_stop(int nid)
+{
+        struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
+
+        if (kswapd)
+                kthread_stop(kswapd);
+}
+
 static int __init kswapd_init(void)
 {
         int nid;
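
For context on the kswapd() changes above: kswapd_stop() relies on the standard kthread lifecycle, in which kthread_stop() wakes the target thread and blocks until that thread returns, while the thread is expected to poll kthread_should_stop(). That is why the main loop now checks kthread_should_stop() both before schedule() and right after try_to_freeze(), so a stop request can neither be slept through nor followed by another balance_pgdat() pass. A minimal, self-contained sketch of that pattern (the example_worker names are hypothetical, not from this patch):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/delay.h>

static struct task_struct *example_worker;      /* hypothetical worker thread */

static int example_worker_fn(void *data)
{
        /*
         * The loop must poll kthread_should_stop() and return, otherwise
         * kthread_stop() would block forever waiting for the thread to exit.
         */
        while (!kthread_should_stop()) {
                /* ... do periodic work here ... */
                msleep_interruptible(1000);
        }
        return 0;       /* return value is handed back to kthread_stop() */
}

static int example_worker_start(void)
{
        example_worker = kthread_run(example_worker_fn, NULL, "example_worker");
        return IS_ERR(example_worker) ? PTR_ERR(example_worker) : 0;
}

static void example_worker_stop(void)
{
        if (example_worker)
                kthread_stop(example_worker);   /* wake the thread, wait for its exit */
}

kswapd_stop() in the diff is the stop half of this pattern: once kthread_stop() is called for a node's kswapd, the extra checks in the loop let the thread break out and exit instead of going back to sleep or reclaiming again.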