Commit 7d46a49f authored by Trond Myklebust

NFS: Clean up nfs_flush_list()

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent deb7d638
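The cleanup moves the flush helpers to an (inode, head) argument order and replaces the wsize check inside nfs_flush_one() with a function pointer that nfs_flush_list() selects once before its flush loop. Below is a minimal standalone sketch of that dispatch pattern; the fake_inode/fake_list types and the *_sketch helper names are invented stand-ins for illustration, not the real kernel structures.

/*
 * Standalone sketch (not kernel code): the dispatch pattern used by the
 * patched nfs_flush_list().  The types below are simplified stand-ins;
 * the real code operates on struct inode and a list of struct nfs_page.
 */
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096

struct fake_inode { int wsize; };   /* stand-in for NFS_SERVER(inode)->wsize */
struct fake_list  { int nreqs; };   /* stand-in for the list of dirty requests */

/* One write RPC per coalesced request list (the common, large-wsize case). */
static int flush_one_sketch(struct fake_inode *inode, struct fake_list *head, int how)
{
	printf("wsize=%d: one write RPC for %d request(s), how=%#x\n",
	       inode->wsize, head->nreqs, how);
	return 0;
}

/* Several sub-page RPCs when the server's wsize is smaller than a page. */
static int flush_multi_sketch(struct fake_inode *inode, struct fake_list *head, int how)
{
	printf("wsize=%d: split %d request(s) into sub-page RPCs, how=%#x\n",
	       inode->wsize, head->nreqs, how);
	return 0;
}

static int flush_list_sketch(struct fake_inode *inode, struct fake_list *head, int how)
{
	/* Choose the strategy once, up front, instead of having the
	 * single-RPC path re-check wsize and call back into the multi path. */
	int (*flush_one)(struct fake_inode *, struct fake_list *, int);

	flush_one = flush_one_sketch;
	if (inode->wsize < PAGE_CACHE_SIZE)
		flush_one = flush_multi_sketch;

	return flush_one(inode, head, how);
}

int main(void)
{
	struct fake_inode big   = { .wsize = 32768 };
	struct fake_inode small = { .wsize = 1024 };
	struct fake_list  reqs  = { .nreqs = 3 };

	flush_list_sketch(&big, &reqs, 0);
	flush_list_sketch(&small, &reqs, 0);
	return 0;
}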
@@ -963,7 +963,7 @@ static void nfs_execute_write(struct nfs_write_data *data)
  * Generate multiple small requests to write out a single
  * contiguous dirty area on one page.
  */
-static int nfs_flush_multi(struct list_head *head, struct inode *inode, int how)
+static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
 {
 	struct nfs_page *req = nfs_list_entry(head->next);
 	struct page *page = req->wb_page;
@@ -1032,16 +1032,13 @@ out_bad:
  * This is the case if nfs_updatepage detects a conflicting request
  * that has been written but not committed.
  */
-static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
+static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
 {
 	struct nfs_page *req;
 	struct page **pages;
 	struct nfs_write_data *data;
 	unsigned int count;
 
-	if (NFS_SERVER(inode)->wsize < PAGE_CACHE_SIZE)
-		return nfs_flush_multi(head, inode, how);
-
 	data = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
 	if (!data)
 		goto out_bad;
@@ -1074,24 +1071,32 @@ static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
 	return -ENOMEM;
 }
 
-static int
-nfs_flush_list(struct list_head *head, int wpages, int how)
+static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
 {
 	LIST_HEAD(one_request);
+	int (*flush_one)(struct inode *, struct list_head *, int);
 	struct nfs_page *req;
-	int error = 0;
-	unsigned int pages = 0;
+	int wpages = NFS_SERVER(inode)->wpages;
+	int wsize = NFS_SERVER(inode)->wsize;
+	int error;
 
-	while (!list_empty(head)) {
-		pages += nfs_coalesce_requests(head, &one_request, wpages);
+	flush_one = nfs_flush_one;
+	if (wsize < PAGE_CACHE_SIZE)
+		flush_one = nfs_flush_multi;
+
+	/* For single writes, FLUSH_STABLE is more efficient */
+	if (npages <= wpages && npages == NFS_I(inode)->npages
+			&& nfs_list_entry(head->next)->wb_bytes <= wsize)
+		how |= FLUSH_STABLE;
+	do {
+		nfs_coalesce_requests(head, &one_request, wpages);
 		req = nfs_list_entry(one_request.next);
-		error = nfs_flush_one(&one_request, req->wb_context->dentry->d_inode, how);
+		error = flush_one(inode, &one_request, how);
 		if (error < 0)
-			break;
-	}
-	if (error >= 0)
-		return pages;
-
+			goto out_err;
+	} while (!list_empty(head));
+	return 0;
+out_err:
 	while (!list_empty(head)) {
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
@@ -1423,24 +1428,16 @@ static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
 	LIST_HEAD(head);
-	int res,
-		error = 0;
+	int res;
 
 	spin_lock(&nfsi->req_lock);
 	res = nfs_scan_dirty(inode, &head, idx_start, npages);
 	spin_unlock(&nfsi->req_lock);
 	if (res) {
-		struct nfs_server *server = NFS_SERVER(inode);
-
-		/* For single writes, FLUSH_STABLE is more efficient */
-		if (res == nfsi->npages && nfsi->npages <= server->wpages) {
-			if (res > 1 || nfs_list_entry(head.next)->wb_bytes <= server->wsize)
-				how |= FLUSH_STABLE;
-		}
-		error = nfs_flush_list(&head, server->wpages, how);
+		int error = nfs_flush_list(inode, &head, res, how);
+		if (error < 0)
+			return error;
 	}
-	if (error < 0)
-		return error;
 	return res;
 }
 
@@ -1449,14 +1446,13 @@ int nfs_commit_inode(struct inode *inode, int how)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
 	LIST_HEAD(head);
-	int res,
-		error = 0;
+	int res;
 
 	spin_lock(&nfsi->req_lock);
 	res = nfs_scan_commit(inode, &head, 0, 0);
 	spin_unlock(&nfsi->req_lock);
 	if (res) {
-		error = nfs_commit_list(inode, &head, how);
+		int error = nfs_commit_list(inode, &head, how);
 		if (error < 0)
 			return error;
 	}