Commit 10ce4444 authored by Christoph Hellwig, committed by Nathan Scott

[XFS] use pagevec lookups

This reduces the time spent in radix tree lookups and avoids
unnecessary lock roundtrips.

SGI-PV: 947118
SGI-Modid: xfs-linux-melb:xfs-kern:203823a
Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>
parent 78539fdf
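For context, the lookup pattern this patch replaces called find_trylock_page() once per index, taking a radix tree walk (and mapping->tree_lock) for every page probed. The sketch below is not part of the patch; the function name scan_range_batched and the process_page callback are illustrative, and it is written against the ~2.6-era pagecache API (pagevec_init with a cold argument, pagevec_lookup taking an explicit count). It shows the batched pattern the patch adopts: gather up to PAGEVEC_SIZE pages per tree walk, process them, then drop the references.

#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

/*
 * Illustrative only: scan the pagecache of @mapping from @start to @end
 * in PAGEVEC_SIZE batches instead of one radix-tree lookup per index.
 * process_page() stands in for whatever per-page work the caller does;
 * a non-zero return stops the scan.
 */
static int scan_range_batched(struct address_space *mapping,
			      pgoff_t start, pgoff_t end,
			      int (*process_page)(struct page *))
{
	struct pagevec pvec;
	pgoff_t index = start;
	int done = 0, i;

	pagevec_init(&pvec, 0);		/* 0: not a cold-page pagevec */
	while (!done && index <= end) {
		unsigned nr = min_t(pgoff_t, PAGEVEC_SIZE, end - index + 1);

		/* One tree walk returns up to @nr pages with refs held. */
		if (!pagevec_lookup(&pvec, mapping, index, nr))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* Lookups can return pages past a hole; recheck. */
			if (page->index > end) {
				done = 1;
				break;
			}
			index = page->index + 1;
			if (process_page(page)) {
				done = 1;
				break;
			}
		}

		pagevec_release(&pvec);	/* drop the batched references */
		cond_resched();
	}
	return done;
}

The win is that the tree lock is taken once per batch rather than once per page; the patch below additionally requires the returned pages to be contiguous (it bails out when a page is missing or out of sequence), which this generic sketch does not.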
@@ -40,6 +40,7 @@
 #include "xfs_rw.h"
 #include "xfs_iomap.h"
 #include <linux/mpage.h>
+#include <linux/pagevec.h>
 #include <linux/writeback.h>
 
 STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
@@ -501,18 +502,13 @@ xfs_map_at_offset(
  */
 STATIC unsigned int
 xfs_probe_unmapped_page(
-	struct address_space	*mapping,
-	pgoff_t			index,
+	struct page		*page,
 	unsigned int		pg_offset)
 {
-	struct page		*page;
 	int			ret = 0;
 
-	page = find_trylock_page(mapping, index);
-	if (!page)
-		return 0;
 	if (PageWriteback(page))
-		goto out;
+		return 0;
 
 	if (page->mapping && PageDirty(page)) {
 		if (page_has_buffers(page)) {
@@ -530,8 +526,6 @@ xfs_probe_unmapped_page(
 			ret = PAGE_CACHE_SIZE;
 	}
 
-out:
-	unlock_page(page);
 	return ret;
 }
 
@@ -542,59 +536,75 @@ xfs_probe_unmapped_cluster(
 	struct buffer_head	*bh,
 	struct buffer_head	*head)
 {
-	size_t			len, total = 0;
+	struct pagevec		pvec;
 	pgoff_t			tindex, tlast, tloff;
-	unsigned int		pg_offset;
-	struct address_space	*mapping = inode->i_mapping;
+	size_t			total = 0;
+	int			done = 0, i;
 
 	/* First sum forwards in this page */
 	do {
 		if (buffer_mapped(bh))
-			break;
+			return total;
 		total += bh->b_size;
 	} while ((bh = bh->b_this_page) != head);
 
-	/* If we reached the end of the page, sum forwards in
-	 * following pages.
-	 */
-	if (bh == head) {
-		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-		/* Prune this back to avoid pathological behavior */
-		tloff = min(tlast, startpage->index + 64);
-		for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
-			len = xfs_probe_unmapped_page(mapping, tindex,
-							PAGE_CACHE_SIZE);
-			if (!len)
-				return total;
+	/* if we reached the end of the page, sum forwards in following pages */
+	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+	tindex = startpage->index + 1;
+
+	/* Prune this back to avoid pathological behavior */
+	tloff = min(tlast, startpage->index + 64);
+
+	pagevec_init(&pvec, 0);
+	while (!done && tindex <= tloff) {
+		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
+			break;
+
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
+			size_t pg_offset, len = 0;
+
+			if (tindex == tlast) {
+				pg_offset =
+				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
+				if (!pg_offset)
+					break;
+			} else
+				pg_offset = PAGE_CACHE_SIZE;
+
+			if (page->index == tindex && !TestSetPageLocked(page)) {
+				len = xfs_probe_unmapped_page(page, pg_offset);
+				unlock_page(page);
+			}
+
+			if (!len) {
+				done = 1;
+				break;
+			}
+
 			total += len;
 		}
-		if (tindex == tlast &&
-		    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
-			total += xfs_probe_unmapped_page(mapping,
-					tindex, pg_offset);
-		}
+
+		pagevec_release(&pvec);
+		cond_resched();
 	}
+
 	return total;
 }
 
 /*
- * Probe for a given page (index) in the inode and test if it is suitable
- * for writing as part of an unwritten or delayed allocate extent.
- * Returns page locked and with an extra reference count if so, else NULL.
+ * Test if a given page is suitable for writing as part of an unwritten
+ * or delayed allocate extent.
  */
-STATIC struct page *
-xfs_probe_delayed_page(
-	struct inode		*inode,
-	pgoff_t			index,
+STATIC int
+xfs_is_delayed_page(
+	struct page		*page,
 	unsigned int		type)
 {
-	struct page		*page;
-
-	page = find_trylock_page(inode->i_mapping, index);
-	if (!page)
-		return NULL;
 	if (PageWriteback(page))
-		goto out;
+		return 0;
 
 	if (page->mapping && page_has_buffers(page)) {
 		struct buffer_head	*bh, *head;
@@ -611,12 +621,10 @@ xfs_probe_delayed_page(
 		} while ((bh = bh->b_this_page) != head);
 
 		if (acceptable)
-			return page;
+			return 1;
 	}
 
-out:
-	unlock_page(page);
-	return NULL;
+	return 0;
 }
 
 /*
@@ -629,10 +637,10 @@ STATIC int
 xfs_convert_page(
 	struct inode		*inode,
 	struct page		*page,
+	loff_t			tindex,
 	xfs_iomap_t		*iomapp,
 	xfs_ioend_t		**ioendp,
 	struct writeback_control *wbc,
-	void			*private,
 	int			startio,
 	int			all_bh)
 {
@@ -644,6 +652,17 @@ xfs_convert_page(
 	int			len, page_dirty;
 	int			count = 0, done = 0, uptodate = 1;
 
+	if (page->index != tindex)
+		goto fail;
+	if (TestSetPageLocked(page))
+		goto fail;
+	if (PageWriteback(page))
+		goto fail_unlock_page;
+	if (page->mapping != inode->i_mapping)
+		goto fail_unlock_page;
+	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
+		goto fail_unlock_page;
+
 	end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));
 
 	/*
@@ -715,6 +734,10 @@ xfs_convert_page(
 	}
 
 	return done;
+ fail_unlock_page:
+	unlock_page(page);
+ fail:
+	return 1;
 }
 
 /*
@@ -732,16 +755,25 @@ xfs_cluster_write(
 	int			all_bh,
 	pgoff_t			tlast)
 {
-	struct page		*page;
-	unsigned int		type = (*ioendp)->io_type;
-	int			done;
+	struct pagevec		pvec;
+	int			done = 0, i;
 
-	for (done = 0; tindex <= tlast && !done; tindex++) {
-		page = xfs_probe_delayed_page(inode, tindex, type);
-		if (!page)
+	pagevec_init(&pvec, 0);
+	while (!done && tindex <= tlast) {
+		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
 			break;
-		done = xfs_convert_page(inode, page, iomapp, ioendp,
-					wbc, NULL, startio, all_bh);
+
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
+					iomapp, ioendp, wbc, startio, all_bh);
+			if (done)
+				break;
+		}
+
+		pagevec_release(&pvec);
+		cond_resched();
 	}
 }
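Note on the locking change: the probe helpers no longer receive pages already locked by find_trylock_page(), so callers such as xfs_convert_page() must take the page lock themselves and then revalidate the page, because a page returned by pagevec_lookup() may have been truncated or reused between the lookup and the trylock. A minimal sketch of that check sequence, with an illustrative helper name and the ~2.6-era TestSetPageLocked() primitive the patch uses, might look like:

#include <linux/pagemap.h>

/*
 * Illustrative only: try to lock a page obtained from a pagevec lookup
 * and confirm it is still the page that was asked for.  Returns the
 * locked page on success, NULL if it was truncated, reused, already
 * locked, or under writeback.
 */
static struct page *trylock_and_revalidate(struct page *page,
					   struct address_space *mapping,
					   pgoff_t expected_index)
{
	/* Non-blocking lock attempt; the writeback path must not sleep here. */
	if (TestSetPageLocked(page))
		return NULL;

	/* Re-check identity now that the lock pins the page state. */
	if (page->mapping != mapping || page->index != expected_index ||
	    PageWriteback(page)) {
		unlock_page(page);
		return NULL;
	}
	return page;
}

The same checks appear inline in xfs_convert_page() above (index, trylock, writeback, and mapping), just ordered to do the cheap unlocked index test first.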