Commit 2ae88149 authored by Nick Piggin, committed by Linus Torvalds

[PATCH] mm: clean up pagecache allocation

- Consolidate page_cache_alloc

- Fix splice: only the pagecache pages and filesystem data need to use
  mapping_gfp_mask.

- Fix grab_cache_page_nowait: same as splice, also honour NUMA placement.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 858cbcdd
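
For orientation before the hunks: the consolidation named in the first bullet funnels every pagecache page allocation through a single __page_cache_alloc(gfp_t), with page_cache_alloc() and page_cache_alloc_cold() reduced to wrappers that only supply mapping_gfp_mask(). The sketch below models that shape as stand-alone user-space C with stub types and allocators; it is an illustration of the pattern, not the kernel source (the real code is in the hunks that follow).

/* Stand-alone model of the consolidation; the types and allocator here are
 * stubs for illustration, not the real kernel API. */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int gfp_t;
#define GFP_KERNEL  0x01u
#define __GFP_COLD  0x02u
#define __GFP_FS    0x04u

struct page { gfp_t gfp_used; };
struct address_space { gfp_t gfp_mask; };

static gfp_t mapping_gfp_mask(struct address_space *m) { return m->gfp_mask; }

/* One place that knows how to allocate a pagecache page (in the kernel the
 * NUMA/cpuset placement policy lives here). */
static struct page *__page_cache_alloc(gfp_t gfp)
{
        struct page *p = malloc(sizeof(*p));
        if (p)
                p->gfp_used = gfp;
        return p;
}

/* The old entry points become thin wrappers that merely add the mapping's mask. */
static struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x) | __GFP_COLD);
}

int main(void)
{
        struct address_space as = { .gfp_mask = GFP_KERNEL | __GFP_FS };

        /* A normal pagecache allocation uses the mapping's mask ... */
        struct page *p1 = page_cache_alloc(&as);
        /* ... while a caller such as grab_cache_page_nowait() can strip bits
         * (here __GFP_FS) and still go through the same helper. */
        struct page *p2 = __page_cache_alloc(mapping_gfp_mask(&as) & ~__GFP_FS);

        printf("p1 mask=%#x, p2 mask=%#x\n",
               p1 ? p1->gfp_used : 0u, p2 ? p2->gfp_used : 0u);
        free(p1);
        free(p2);
        return 0;
}

The splice and grab_cache_page_nowait hunks rely on the other half of this split: per the commit message, only the pagecache pages and filesystem data need mapping_gfp_mask(), so the mask passed to add_to_page_cache{,_lru}() for the cache's own bookkeeping can simply be GFP_KERNEL.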
fs/splice.c
@@ -74,7 +74,7 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
 		wait_on_page_writeback(page);
 
 		if (PagePrivate(page))
-			try_to_release_page(page, mapping_gfp_mask(mapping));
+			try_to_release_page(page, GFP_KERNEL);
 
 		/*
 		 * If we succeeded in removing the mapping, set LRU flag
@@ -333,7 +333,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 				break;
 
 			error = add_to_page_cache_lru(page, mapping, index,
-					mapping_gfp_mask(mapping));
+					GFP_KERNEL);
 			if (unlikely(error)) {
 				page_cache_release(page);
 				if (error == -EEXIST)
@@ -557,7 +557,6 @@ static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 {
 	struct file *file = sd->file;
 	struct address_space *mapping = file->f_mapping;
-	gfp_t gfp_mask = mapping_gfp_mask(mapping);
 	unsigned int offset, this_len;
 	struct page *page;
 	pgoff_t index;
@@ -591,7 +590,7 @@ static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 			goto find_page;
 
 		page = buf->page;
-		if (add_to_page_cache(page, mapping, index, gfp_mask)) {
+		if (add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
 			unlock_page(page);
 			goto find_page;
 		}
@@ -613,7 +612,7 @@ find_page:
 		 * This will also lock the page
 		 */
 		ret = add_to_page_cache_lru(page, mapping, index,
-						gfp_mask);
+						GFP_KERNEL);
 		if (unlikely(ret))
 			goto out;
 	}
include/linux/pagemap.h
@@ -52,19 +52,23 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 void release_pages(struct page **pages, int nr, int cold);
 
 #ifdef CONFIG_NUMA
-extern struct page *page_cache_alloc(struct address_space *x);
-extern struct page *page_cache_alloc_cold(struct address_space *x);
+extern struct page *__page_cache_alloc(gfp_t gfp);
 #else
+static inline struct page *__page_cache_alloc(gfp_t gfp)
+{
+	return alloc_pages(gfp, 0);
+}
+#endif
+
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
-	return alloc_pages(mapping_gfp_mask(x), 0);
+	return __page_cache_alloc(mapping_gfp_mask(x));
 }
 
 static inline struct page *page_cache_alloc_cold(struct address_space *x)
 {
-	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
+	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
 }
-#endif
 
 typedef int filler_t(void *, struct page *);
mm/filemap.c
@@ -467,25 +467,15 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 }
 
 #ifdef CONFIG_NUMA
-struct page *page_cache_alloc(struct address_space *x)
+struct page *__page_cache_alloc(gfp_t gfp)
 {
 	if (cpuset_do_page_mem_spread()) {
 		int n = cpuset_mem_spread_node();
-		return alloc_pages_node(n, mapping_gfp_mask(x), 0);
+		return alloc_pages_node(n, gfp, 0);
 	}
-	return alloc_pages(mapping_gfp_mask(x), 0);
+	return alloc_pages(gfp, 0);
 }
-EXPORT_SYMBOL(page_cache_alloc);
-
-struct page *page_cache_alloc_cold(struct address_space *x)
-{
-	if (cpuset_do_page_mem_spread()) {
-		int n = cpuset_mem_spread_node();
-		return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0);
-	}
-	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
-}
-EXPORT_SYMBOL(page_cache_alloc_cold);
+EXPORT_SYMBOL(__page_cache_alloc);
 #endif
 
 static int __sleep_on_page_lock(void *word)
@@ -826,7 +816,6 @@ struct page *
 grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 {
 	struct page *page = find_get_page(mapping, index);
-	gfp_t gfp_mask;
 
 	if (page) {
 		if (!TestSetPageLocked(page))
@@ -834,9 +823,8 @@ grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 		page_cache_release(page);
 		return NULL;
 	}
-	gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS;
-	page = alloc_pages(gfp_mask, 0);
-	if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
+	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
+	if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
 		page_cache_release(page);
 		page = NULL;
 	}
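
A note on the last hunk above: before this change grab_cache_page_nowait() called alloc_pages() directly, bypassing the cpuset memory-spread policy that the NUMA build of the pagecache allocator applies; routing it through __page_cache_alloc() is what the "also honour NUMA placement" bullet refers to. Below is a stand-alone model of that decision, with the cpuset helpers replaced by stubs whose behaviour is assumed for illustration only, not the kernel implementation.

/* Stand-alone model of the NUMA-spread branch inside __page_cache_alloc();
 * the cpuset helpers and allocators here are stubs, not the kernel API. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

/* Stubs standing in for the cpuset interface. */
static bool cpuset_do_page_mem_spread(void) { return true; }
static int cpuset_mem_spread_node(void)     { return 1;    }

static int alloc_node = -1;	/* records where the "page" came from */

static void *alloc_pages_node(int nid, gfp_t gfp, unsigned order)
{
        (void)gfp; (void)order;
        alloc_node = nid;
        return &alloc_node;	/* dummy non-NULL "page" */
}

static void *alloc_pages(gfp_t gfp, unsigned order)
{
        return alloc_pages_node(-1 /* local node */, gfp, order);
}

/* Shape of the NUMA variant after the patch: any caller that goes through
 * this helper gets cpuset page spreading, including grab_cache_page_nowait(). */
static void *__page_cache_alloc(gfp_t gfp)
{
        if (cpuset_do_page_mem_spread()) {
                int n = cpuset_mem_spread_node();
                return alloc_pages_node(n, gfp, 0);
        }
        return alloc_pages(gfp, 0);
}

int main(void)
{
        __page_cache_alloc(0);
        printf("allocated on node %d\n", alloc_node);	/* 1 with the stubs above */
        return 0;
}

With the stubs as written the allocation is directed to node 1; in the kernel the node is chosen from the task's cpuset when page spreading is enabled.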