Commit f85a4f2e authored by Hugh Dickins, committed by James Toy

Just as the swapoff system call allocates many pages of RAM to various
processes, perhaps triggering OOM, so "echo 2 >/sys/kernel/mm/ksm/run"
(unmerge) is liable to allocate many pages of RAM to various processes,
perhaps triggering OOM; and each is normally run from a modest admin
process (swapoff or shell), easily repeated until it succeeds.

So treat unmerge_and_remove_all_rmap_items() in the same way that we treat
try_to_unuse(): generalize PF_SWAPOFF to PF_OOM_ORIGIN, and bracket both
with that, to ask the OOM killer to kill them first, to prevent them from
spawning more and more OOM kills.
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 7a26d623
@@ -1723,7 +1723,7 @@ extern cputime_t task_gtime(struct task_struct *p);
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
 #define PF_KSWAPD	0x00040000	/* I am kswapd */
-#define PF_SWAPOFF	0x00080000	/* I am in swapoff */
+#define PF_OOM_ORIGIN	0x00080000	/* Allocating much memory to others */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
 #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
@@ -1564,7 +1564,9 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
 	if (ksm_run != flags) {
 		ksm_run = flags;
 		if (flags & KSM_RUN_UNMERGE) {
+			current->flags |= PF_OOM_ORIGIN;
 			err = unmerge_and_remove_all_rmap_items();
+			current->flags &= ~PF_OOM_ORIGIN;
 			if (err) {
 				ksm_run = KSM_RUN_STOP;
 				count = err;
@@ -79,7 +79,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	/*
 	 * swapoff can easily use up all memory, so kill those first.
 	 */
-	if (p->flags & PF_SWAPOFF)
+	if (p->flags & PF_OOM_ORIGIN)
 		return ULONG_MAX;

 	/*
@@ -1573,9 +1573,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 	p->flags &= ~SWP_WRITEOK;
 	spin_unlock(&swap_lock);
-	current->flags |= PF_SWAPOFF;
+	current->flags |= PF_OOM_ORIGIN;
 	err = try_to_unuse(type);
-	current->flags &= ~PF_SWAPOFF;
+	current->flags &= ~PF_OOM_ORIGIN;
 	if (err) {
 		/* re-insert swap space back into swap_list */
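Note: the sketch below is illustrative only and is not part of the commit. It shows the pattern both call sites above now share: a task that is about to fault large amounts of memory into other processes marks itself as the OOM origin for the duration of the operation, so that badness() returns ULONG_MAX for it and the OOM killer targets it first. The helper names do_expensive_population() and run_memory_hungry_operation() are hypothetical.

#include <linux/sched.h>

/* Hypothetical caller: bracket a memory-hungry operation with PF_OOM_ORIGIN */
static int run_memory_hungry_operation(void)
{
	int err;

	current->flags |= PF_OOM_ORIGIN;	/* ask the OOM killer to pick us first */
	err = do_expensive_population();	/* hypothetical; e.g. try_to_unuse() */
	current->flags &= ~PF_OOM_ORIGIN;	/* restore normal OOM badness */

	return err;
}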