Commit 8dc04efb authored by Nick Piggin, committed by Linus Torvalds

[PATCH] mm: de-skew page refcounting

atomic_add_unless (atomic_inc_not_zero) no longer requires an offset refcount
to function correctly.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 7c8ee9a8
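For context: before this patch a free page stored ->_count == -1, so the true reference count was always the stored value plus one ("skewed" by -1), and the last put was detected with atomic_add_negative(-1, ...). With a working atomic_inc_not_zero() the offset is unnecessary: a free page can simply store 0 and the last put becomes atomic_dec_and_test(). A minimal userspace sketch contrasting the two conventions (hypothetical helper names; C11 atomics stand in for the kernel's atomic_t API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Old scheme: the stored count is skewed by -1.  A page with one
 * reference stores 0 and a free page stores -1, so dropping the last
 * reference is the transition to a negative value, which is what
 * atomic_add_negative(-1, ...) tests for. */
static bool put_testzero_skewed(atomic_int *count)
{
	return atomic_fetch_add(count, -1) - 1 < 0;
}

/* New scheme: the stored count is the true count.  A page with one
 * reference stores 1 and a free page stores 0, so the last put is the
 * transition to zero, which is what atomic_dec_and_test() tests for. */
static bool put_testzero_direct(atomic_int *count)
{
	return atomic_fetch_sub(count, 1) - 1 == 0;
}

int main(void)
{
	atomic_int skewed = 0;	/* one reference, old convention */
	atomic_int direct = 1;	/* one reference, new convention */

	/* Both report that the last reference was just dropped. */
	printf("skewed: %d\n", put_testzero_skewed(&skewed));	/* 1 */
	printf("direct: %d\n", put_testzero_direct(&direct));	/* 1 */
	return 0;
}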
@@ -286,15 +286,6 @@ struct page {
  *
  * Also, many kernel routines increase the page count before a critical
  * routine so they can be sure the page doesn't go away from under them.
- *
- * Since 2.6.6 (approx), a free page has ->_count = -1. This is so that we
- * can use atomic_add_negative(-1, page->_count) to detect when the page
- * becomes free and so that we can also use atomic_inc_and_test to atomically
- * detect when we just tried to grab a ref on a page which some other CPU has
- * already deemed to be freeable.
- *
- * NO code should make assumptions about this internal detail! Use the provided
- * macros which retain the old rules: page_count(page) == 0 is a free page.
  */

 /*
@@ -303,8 +294,8 @@ struct page {
  */
 static inline int put_page_testzero(struct page *page)
 {
-	BUG_ON(atomic_read(&page->_count) == -1);
-	return atomic_add_negative(-1, &page->_count);
+	BUG_ON(atomic_read(&page->_count) == 0);
+	return atomic_dec_and_test(&page->_count);
 }

 /*
@@ -313,10 +304,10 @@ static inline int put_page_testzero(struct page *page)
  */
 static inline int get_page_unless_zero(struct page *page)
 {
-	return atomic_add_unless(&page->_count, 1, -1);
+	return atomic_inc_not_zero(&page->_count);
 }

-#define set_page_count(p,v)	atomic_set(&(p)->_count, (v) - 1)
+#define set_page_count(p,v)	atomic_set(&(p)->_count, (v))
 #define __put_page(p)		atomic_dec(&(p)->_count)

 extern void FASTCALL(__page_cache_release(struct page *));

@@ -325,7 +316,7 @@ static inline int page_count(struct page *page)
 {
 	if (PageCompound(page))
 		page = (struct page *)page_private(page);
-	return atomic_read(&page->_count) + 1;
+	return atomic_read(&page->_count);
 }

 static inline void get_page(struct page *page)
...
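The primitive that makes the un-skewed scheme safe is atomic_inc_not_zero(): a reader must be able to take a reference speculatively while refusing to resurrect a page whose count another CPU has already dropped to zero. A sketch of such a primitive as a compare-and-swap loop (again in C11 atomics; the kernel implements it per architecture, here only to illustrate the semantics):

#include <stdatomic.h>
#include <stdbool.h>

/* Sketch of an inc-not-zero primitive as a CAS loop: take a reference
 * only while the count is non-zero.  If another CPU has already dropped
 * the count to 0 (the page is free or being freed), refuse rather than
 * resurrect it.  Returns true when a reference was taken. */
static bool inc_not_zero(atomic_int *count)
{
	int old = atomic_load(count);

	do {
		if (old == 0)
			return false;	/* already freeable: hands off */
	} while (!atomic_compare_exchange_weak(count, &old, old + 1));

	return true;
}

A lockless lookup would pair inc_not_zero() with put_testzero_direct() from the earlier sketch: success means the page cannot be freed under the reader; failure means the page was already on its way out and the lookup should be retried.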