Commit 0531b2aa authored by Linus Torvalds

mm: add new 'read_cache_page_gfp()' helper function

It's a simplified 'read_cache_page()' which takes a page allocation flag,
so that different code paths can control how aggressive the memory
allocations that populate an address space are allowed to be.

In particular, the Intel GPU object mapping code wants to be able to do
a certain amount of its own internal memory management by automatically
shrinking the address space when memory starts getting tight.  This
allows it to dynamically use different memory allocation policies on a
per-allocation basis, rather than depending on the (static) address
space gfp policy.

The actual new function is a one-liner, but re-organizing the helper
functions to the point where you can do this with a single line of code
is what most of the patch is all about.
Tested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent caf0801e
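
As a rough usage sketch (not part of this patch; the wrapper function and the
exact flag combination below are illustrative assumptions), a driver that does
its own memory management could read an object page with a per-call allocation
policy instead of the address space's static one:

#include <linux/pagemap.h>
#include <linux/gfp.h>

/* Hypothetical caller: fetch one page of a GEM-style object, allowing
 * highmem pages and reclaim rather than inheriting the mapping's
 * static mapping_gfp_mask(). */
static struct page *example_read_object_page(struct address_space *mapping,
					     pgoff_t index)
{
	return read_cache_page_gfp(mapping, index,
				   GFP_HIGHUSER | __GFP_RECLAIMABLE);
}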
include/linux/pagemap.h
@@ -253,6 +253,8 @@ extern struct page * read_cache_page_async(struct address_space *mapping,
 extern struct page * read_cache_page(struct address_space *mapping,
 				pgoff_t index, filler_t *filler,
 				void *data);
+extern struct page * read_cache_page_gfp(struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
 extern int read_cache_pages(struct address_space *mapping,
 		struct list_head *pages, filler_t *filler, void *data);
mm/filemap.c
@@ -1634,14 +1634,15 @@ EXPORT_SYMBOL(generic_file_readonly_mmap);
 static struct page *__read_cache_page(struct address_space *mapping,
 				pgoff_t index,
 				int (*filler)(void *,struct page*),
-				void *data)
+				void *data,
+				gfp_t gfp)
 {
 	struct page *page;
 	int err;
 repeat:
 	page = find_get_page(mapping, index);
 	if (!page) {
-		page = page_cache_alloc_cold(mapping);
+		page = __page_cache_alloc(gfp | __GFP_COLD);
 		if (!page)
 			return ERR_PTR(-ENOMEM);
 		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
@@ -1661,31 +1662,18 @@ repeat:
 	return page;
 }
 
-/**
- * read_cache_page_async - read into page cache, fill it if needed
- * @mapping: the page's address_space
- * @index: the page index
- * @filler: function to perform the read
- * @data: destination for read data
- *
- * Same as read_cache_page, but don't wait for page to become unlocked
- * after submitting it to the filler.
- *
- * Read into the page cache. If a page already exists, and PageUptodate() is
- * not set, try to fill the page but don't wait for it to become unlocked.
- *
- * If the page does not get brought uptodate, return -EIO.
- */
-struct page *read_cache_page_async(struct address_space *mapping,
+static struct page *do_read_cache_page(struct address_space *mapping,
 				pgoff_t index,
 				int (*filler)(void *,struct page*),
-				void *data)
+				void *data,
+				gfp_t gfp)
+
 {
 	struct page *page;
 	int err;
 
 retry:
-	page = __read_cache_page(mapping, index, filler, data);
+	page = __read_cache_page(mapping, index, filler, data, gfp);
 	if (IS_ERR(page))
 		return page;
 	if (PageUptodate(page))
@@ -1710,38 +1698,86 @@ out:
 	mark_page_accessed(page);
 	return page;
 }
-EXPORT_SYMBOL(read_cache_page_async);
 
 /**
- * read_cache_page - read into page cache, fill it if needed
+ * read_cache_page_async - read into page cache, fill it if needed
  * @mapping: the page's address_space
  * @index: the page index
  * @filler: function to perform the read
  * @data: destination for read data
  *
+ * Same as read_cache_page, but don't wait for page to become unlocked
+ * after submitting it to the filler.
+ *
  * Read into the page cache. If a page already exists, and PageUptodate() is
- * not set, try to fill the page then wait for it to become unlocked.
+ * not set, try to fill the page but don't wait for it to become unlocked.
  *
  * If the page does not get brought uptodate, return -EIO.
  */
-struct page *read_cache_page(struct address_space *mapping,
+struct page *read_cache_page_async(struct address_space *mapping,
 				pgoff_t index,
 				int (*filler)(void *,struct page*),
 				void *data)
 {
-	struct page *page;
-
-	page = read_cache_page_async(mapping, index, filler, data);
-	if (IS_ERR(page))
-		goto out;
-	wait_on_page_locked(page);
-	if (!PageUptodate(page)) {
-		page_cache_release(page);
-		page = ERR_PTR(-EIO);
-	}
- out:
-	return page;
+	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
+}
+EXPORT_SYMBOL(read_cache_page_async);
+
+static struct page *wait_on_page_read(struct page *page)
+{
+	if (!IS_ERR(page)) {
+		wait_on_page_locked(page);
+		if (!PageUptodate(page)) {
+			page_cache_release(page);
+			page = ERR_PTR(-EIO);
+		}
+	}
+	return page;
+}
+
+/**
+ * read_cache_page_gfp - read into page cache, using specified page allocation flags.
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @gfp: the page allocator flags to use if allocating
+ *
+ * This is the same as "read_mapping_page(mapping, index, NULL)", but with
+ * any new page allocations done using the specified allocation flags. Note
+ * that the Radix tree operations will still use GFP_KERNEL, so you can't
+ * expect to do this atomically or anything like that - but you can pass in
+ * other page requirements.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+struct page *read_cache_page_gfp(struct address_space *mapping,
+				pgoff_t index,
+				gfp_t gfp)
+{
+	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+
+	return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
+}
+EXPORT_SYMBOL(read_cache_page_gfp);
+
+/**
+ * read_cache_page - read into page cache, fill it if needed
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @filler: function to perform the read
+ * @data: destination for read data
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page then wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
+ */
+struct page *read_cache_page(struct address_space *mapping,
+				pgoff_t index,
+				int (*filler)(void *,struct page*),
+				void *data)
+{
+	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
 }
 EXPORT_SYMBOL(read_cache_page);
 
 /*
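
As a footnote to the read_cache_page_gfp() kerneldoc above, a minimal sketch
(the wrapper name is invented for illustration): calling the new helper with
the mapping's own gfp mask behaves like read_mapping_page(mapping, index, NULL),
while any other mask only changes how freshly allocated page-cache pages are
obtained; the radix-tree insertion still uses GFP_KERNEL.

#include <linux/pagemap.h>

/* Illustrative only: equivalent to read_mapping_page(mapping, index, NULL),
 * since the default filler is mapping->a_ops->readpage and the gfp mask
 * used here is the mapping's own. */
static struct page *example_read_default_gfp(struct address_space *mapping,
					     pgoff_t index)
{
	return read_cache_page_gfp(mapping, index, mapping_gfp_mask(mapping));
}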