Commit 28ecb609 authored by Nick Piggin, committed by Chris Mason

Btrfs: use add_to_page_cache_lru, use __page_cache_alloc

Pagecache pages should be allocated with __page_cache_alloc, so they
obey pagecache memory policies.

add_to_page_cache_lru is exported, so it should be used. Benefits over
using a private pagevec: neater code, 128 bytes fewer stack used, percpu
lru ordering is preserved, and finally don't need to flush pagevec
before returning so batching may be shared with other LRU insertions.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 0cad8a11
...@@ -31,7 +31,6 @@ ...@@ -31,7 +31,6 @@
#include <linux/swap.h> #include <linux/swap.h>
#include <linux/writeback.h> #include <linux/writeback.h>
#include <linux/bit_spinlock.h> #include <linux/bit_spinlock.h>
#include <linux/pagevec.h>
#include "compat.h" #include "compat.h"
#include "ctree.h" #include "ctree.h"
#include "disk-io.h" #include "disk-io.h"
...@@ -445,7 +444,6 @@ static noinline int add_ra_bio_pages(struct inode *inode, ...@@ -445,7 +444,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
unsigned long nr_pages = 0; unsigned long nr_pages = 0;
struct extent_map *em; struct extent_map *em;
struct address_space *mapping = inode->i_mapping; struct address_space *mapping = inode->i_mapping;
struct pagevec pvec;
struct extent_map_tree *em_tree; struct extent_map_tree *em_tree;
struct extent_io_tree *tree; struct extent_io_tree *tree;
u64 end; u64 end;
...@@ -461,7 +459,6 @@ static noinline int add_ra_bio_pages(struct inode *inode, ...@@ -461,7 +459,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
pagevec_init(&pvec, 0);
while (last_offset < compressed_end) { while (last_offset < compressed_end) {
page_index = last_offset >> PAGE_CACHE_SHIFT; page_index = last_offset >> PAGE_CACHE_SHIFT;
...@@ -478,26 +475,17 @@ static noinline int add_ra_bio_pages(struct inode *inode, ...@@ -478,26 +475,17 @@ static noinline int add_ra_bio_pages(struct inode *inode,
goto next; goto next;
} }
page = alloc_page(mapping_gfp_mask(mapping) & ~__GFP_FS); page = __page_cache_alloc(mapping_gfp_mask(mapping) &
~__GFP_FS);
if (!page) if (!page)
break; break;
page->index = page_index; if (add_to_page_cache_lru(page, mapping, page_index,
/* GFP_NOFS)) {
* what we want to do here is call add_to_page_cache_lru,
* but that isn't exported, so we reproduce it here
*/
if (add_to_page_cache(page, mapping,
page->index, GFP_NOFS)) {
page_cache_release(page); page_cache_release(page);
goto next; goto next;
} }
/* open coding of lru_cache_add, also not exported */
page_cache_get(page);
if (!pagevec_add(&pvec, page))
__pagevec_lru_add_file(&pvec);
end = last_offset + PAGE_CACHE_SIZE - 1; end = last_offset + PAGE_CACHE_SIZE - 1;
/* /*
* at this point, we have a locked page in the page cache * at this point, we have a locked page in the page cache
...@@ -551,8 +539,6 @@ static noinline int add_ra_bio_pages(struct inode *inode, ...@@ -551,8 +539,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
next: next:
last_offset += PAGE_CACHE_SIZE; last_offset += PAGE_CACHE_SIZE;
} }
if (pagevec_count(&pvec))
__pagevec_lru_add_file(&pvec);
return 0; return 0;
} }
......
...@@ -2679,33 +2679,20 @@ int extent_readpages(struct extent_io_tree *tree, ...@@ -2679,33 +2679,20 @@ int extent_readpages(struct extent_io_tree *tree,
{ {
struct bio *bio = NULL; struct bio *bio = NULL;
unsigned page_idx; unsigned page_idx;
struct pagevec pvec;
unsigned long bio_flags = 0; unsigned long bio_flags = 0;
pagevec_init(&pvec, 0);
for (page_idx = 0; page_idx < nr_pages; page_idx++) { for (page_idx = 0; page_idx < nr_pages; page_idx++) {
struct page *page = list_entry(pages->prev, struct page, lru); struct page *page = list_entry(pages->prev, struct page, lru);
prefetchw(&page->flags); prefetchw(&page->flags);
list_del(&page->lru); list_del(&page->lru);
/* if (!add_to_page_cache_lru(page, mapping,
* what we want to do here is call add_to_page_cache_lru,
* but that isn't exported, so we reproduce it here
*/
if (!add_to_page_cache(page, mapping,
page->index, GFP_KERNEL)) { page->index, GFP_KERNEL)) {
/* open coding of lru_cache_add, also not exported */
page_cache_get(page);
if (!pagevec_add(&pvec, page))
__pagevec_lru_add_file(&pvec);
__extent_read_full_page(tree, page, get_extent, __extent_read_full_page(tree, page, get_extent,
&bio, 0, &bio_flags); &bio, 0, &bio_flags);
} }
page_cache_release(page); page_cache_release(page);
} }
if (pagevec_count(&pvec))
__pagevec_lru_add_file(&pvec);
BUG_ON(!list_empty(pages)); BUG_ON(!list_empty(pages));
if (bio) if (bio)
submit_one_bio(READ, bio, 0, bio_flags); submit_one_bio(READ, bio, 0, bio_flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment