Commit e43c3afb authored by Wu Fengguang, committed by Andi Kleen

HWPOISON: return early on non-LRU pages

Right now we have some trouble with non-atomic access
to page flags when locking the page. To plug this hole
for now, limit error recovery to LRU pages.

This could be better fixed by defining a suitable protocol,
but let's go this simple way for now.

This avoids unnecessary races with __set_page_locked() and
__SetPageSlab*() and maybe more non-atomic page flag operations.

This loses isolated pages that are currently in page reclaim, but those
are few relative to total memory.
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
[AK: new description, bug fixes, cleanups]
parent f58ee00f
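
The race the message refers to is the classic lost update on a shared flags word: page flags live in a single unsigned long, and the double-underscore setters (__set_page_locked(), __SetPageSlab()) use a plain, non-atomic read-modify-write, so they can silently clobber a concurrent atomic bit operation on the same word. Below is a minimal userspace sketch of that pattern, assuming nothing beyond POSIX threads and a GCC atomic builtin; the PG_* values and thread bodies are illustrative stand-ins, not kernel code.

#include <pthread.h>
#include <stdio.h>

#define PG_locked (1UL << 0)    /* stand-in for the kernel's PG_locked */
#define PG_slab   (1UL << 7)    /* stand-in for PG_slab */

static unsigned long flags;     /* plays the role of page->flags */

/* Like __SetPageSlab(): a plain, non-atomic read-modify-write. */
static void *set_slab_nonatomic(void *arg)
{
        unsigned long tmp = flags;
        tmp |= PG_slab;
        flags = tmp;            /* may overwrite a concurrent PG_locked store */
        return NULL;
}

/* Like lock_page() taking PG_locked: an atomic OR on the same word. */
static void *lock_atomic(void *arg)
{
        __sync_fetch_and_or(&flags, PG_locked);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, set_slab_nonatomic, NULL);
        pthread_create(&b, NULL, lock_atomic, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* If the plain store lands last, the atomic bit set is lost. */
        printf("PG_locked %s\n", (flags & PG_locked) ? "held" : "LOST");
        return 0;
}

A single run will almost always print "held" because the window is tiny; the point is that no ordering rule prevents the losing interleaving, which is why the patch confines recovery to LRU pages, where taking PG_locked is well defined.
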
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -370,9 +370,6 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
 	int ret = FAILED;
 	struct address_space *mapping;
 
-	if (!isolate_lru_page(p))
-		page_cache_release(p);
-
 	/*
 	 * For anonymous pages we're done the only reference left
 	 * should be the one m_f() holds.
@@ -498,30 +495,18 @@ static int me_pagecache_dirty(struct page *p, unsigned long pfn)
  */
 static int me_swapcache_dirty(struct page *p, unsigned long pfn)
 {
-	int ret = FAILED;
-
 	ClearPageDirty(p);
 	/* Trigger EIO in shmem: */
 	ClearPageUptodate(p);
 
-	if (!isolate_lru_page(p)) {
-		page_cache_release(p);
-		ret = DELAYED;
-	}
-
-	return ret;
+	return DELAYED;
 }
 
 static int me_swapcache_clean(struct page *p, unsigned long pfn)
 {
-	int ret = FAILED;
-
-	if (!isolate_lru_page(p)) {
-		page_cache_release(p);
-		ret = RECOVERED;
-	}
 	delete_from_swap_cache(p);
-	return ret;
+
+	return RECOVERED;
 }
 
 /*
@@ -611,8 +596,6 @@ static struct page_state {
	{ 0,		0,		"unknown page state",	me_unknown },
 };
 
-#undef lru
-
 static void action_result(unsigned long pfn, char *msg, int result)
 {
 	struct page *page = NULL;
@@ -664,9 +647,6 @@ static void hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	if (PageReserved(p) || PageCompound(p) || PageSlab(p))
 		return;
 
-	if (!PageLRU(p))
-		lru_add_drain_all();
-
 	/*
 	 * This check implies we don't kill processes if their pages
 	 * are in the swap cache early. Those are always late kills.
@@ -738,6 +718,7 @@ static void hwpoison_user_mappings(struct page *p, unsigned long pfn,
 
 int __memory_failure(unsigned long pfn, int trapno, int ref)
 {
+	unsigned long lru_flag;
 	struct page_state *ps;
 	struct page *p;
 	int res;
@@ -774,6 +755,24 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
 		return PageBuddy(compound_head(p)) ? 0 : -EBUSY;
 	}
 
+	/*
+	 * We ignore non-LRU pages for good reasons.
+	 * - PG_locked is only well defined for LRU pages and a few others
+	 * - to avoid races with __set_page_locked()
+	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
+	 * The check (unnecessarily) ignores LRU pages being isolated and
+	 * walked by the page reclaim code, however that's not a big loss.
+	 */
+	if (!PageLRU(p))
+		lru_add_drain_all();
+	lru_flag = p->flags & lru;
+	if (isolate_lru_page(p)) {
+		action_result(pfn, "non LRU", IGNORED);
+		put_page(p);
+		return -EBUSY;
+	}
+	page_cache_release(p);
+
 	/*
 	 * Lock the page and wait for writeback to finish.
 	 * It's very difficult to mess with pages currently under IO
@@ -790,7 +789,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
 	/*
 	 * Torn down by someone else?
 	 */
-	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
+	if ((lru_flag & lru) && !PageSwapCache(p) && p->mapping == NULL) {
 		action_result(pfn, "already truncated LRU", IGNORED);
 		res = 0;
 		goto out;
@@ -798,7 +797,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
 
 	res = -EBUSY;
 	for (ps = error_states;; ps++) {
-		if ((p->flags & ps->mask) == ps->res) {
+		if (((p->flags | lru_flag)& ps->mask) == ps->res) {
 			res = page_action(ps, p, pfn, ref);
 			break;
 		}
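
A subtlety in the new __memory_failure() code above: isolate_lru_page() clears PG_lru as a side effect, yet both the "already truncated LRU" check and the error_states matching still need to know whether the page was on the LRU when the error was taken. That is what the lru_flag snapshot preserves, and why the patch drops the #undef lru, keeping the file's earlier #define lru (1UL << PG_lru) shorthand in scope at this point. Here is a standalone sketch of the snapshot-then-restore pattern, with a made-up bit position standing in for PG_lru:

#include <stdio.h>

#define lru (1UL << 5)  /* illustrative bit; the kernel derives it from PG_lru */

int main(void)
{
        unsigned long flags = lru | 0x1;        /* page on the LRU, plus other bits */
        unsigned long lru_flag = flags & lru;   /* snapshot, as __memory_failure() does */

        flags &= ~lru;          /* isolate_lru_page() clears PG_lru */

        /* State matching still sees the page as it was on entry: */
        if ((flags | lru_flag) & lru)
                printf("matched as an LRU page\n");
        return 0;
}

OR-ing the snapshot back in only for matching leaves p->flags itself untouched, so the page really is off the LRU while the error is being handled.
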