Commit 01f2705d authored by Nate Diller, committed by Linus Torvalds

fs: convert core functions to zero_user_page

It's very common for file systems to need to zero part or all of a page;
the simplest way is just to use kmap_atomic() and memset().  There's
actually a library function in include/linux/highmem.h that does exactly
that, but it's confusingly named memclear_highpage_flush(), which is
descriptive of *how* it does the work rather than of what the *purpose*
is.  So this patchset renames the function to zero_user_page(), and calls
it from the various places that currently open code it.

This first patch introduces the new function call, and converts all the
core kernel callsites, both the open-coded ones and the old
memclear_highpage_flush() ones.  Following this patch is a series of
conversions for each file system individually, per AKPM, and finally a
patch deprecating the old call.  The diffstat below shows the entire
patchset.

[akpm@linux-foundation.org: fix a few things]
Signed-off-by: Nate Diller <nate.diller@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 38a23e31
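To make the mechanical conversion below easy to follow, here is a minimal
sketch of the before/after pattern at a typical call site (a hedged
illustration only; "page", "offset" and "len" stand in for each caller's
own variables, not names taken from the patch):

	/* Before: each call site open-coded the map/zero/flush/unmap dance. */
	void *kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	/* After: one helper call with the same semantics. */
	zero_user_page(page, offset, len, KM_USER0);

The diff below applies exactly this transformation throughout the core
kernel.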
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -243,17 +243,13 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
 		transfer_result = lo_do_transfer(lo, WRITE, page, offset,
 				bvec->bv_page, bv_offs, size, IV);
 		if (unlikely(transfer_result)) {
-			char *kaddr;
-
 			/*
 			 * The transfer failed, but we still write the data to
 			 * keep prepare/commit calls balanced.
 			 */
 			printk(KERN_ERR "loop: transfer error block %llu\n",
			       (unsigned long long)index);
-			kaddr = kmap_atomic(page, KM_USER0);
-			memset(kaddr + offset, 0, size);
-			kunmap_atomic(kaddr, KM_USER0);
+			zero_user_page(page, offset, size, KM_USER0);
 		}
 		flush_dcache_page(page);
 		ret = aops->commit_write(file, page, offset,
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1846,13 +1846,8 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 			if (block_start >= to)
 				break;
 			if (buffer_new(bh)) {
-				void *kaddr;
-
 				clear_buffer_new(bh);
-				kaddr = kmap_atomic(page, KM_USER0);
-				memset(kaddr+block_start, 0, bh->b_size);
-				flush_dcache_page(page);
-				kunmap_atomic(kaddr, KM_USER0);
+				zero_user_page(page, block_start, bh->b_size, KM_USER0);
 				set_buffer_uptodate(bh);
 				mark_buffer_dirty(bh);
 			}
@@ -1940,10 +1935,8 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 				SetPageError(page);
 			}
 			if (!buffer_mapped(bh)) {
-				void *kaddr = kmap_atomic(page, KM_USER0);
-				memset(kaddr + i * blocksize, 0, blocksize);
-				flush_dcache_page(page);
-				kunmap_atomic(kaddr, KM_USER0);
+				zero_user_page(page, i * blocksize, blocksize,
+						KM_USER0);
 				if (!err)
 					set_buffer_uptodate(bh);
 				continue;
@@ -2086,7 +2079,6 @@ int cont_prepare_write(struct page *page, unsigned offset,
 	long status;
 	unsigned zerofrom;
 	unsigned blocksize = 1 << inode->i_blkbits;
-	void *kaddr;
 
 	while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
 		status = -ENOMEM;
@@ -2108,10 +2100,8 @@ int cont_prepare_write(struct page *page, unsigned offset,
 						PAGE_CACHE_SIZE, get_block);
 		if (status)
 			goto out_unmap;
-		kaddr = kmap_atomic(new_page, KM_USER0);
-		memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
-		flush_dcache_page(new_page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user_page(page, zerofrom, PAGE_CACHE_SIZE - zerofrom,
+				KM_USER0);
 		generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
 		unlock_page(new_page);
 		page_cache_release(new_page);
@@ -2138,10 +2128,7 @@ int cont_prepare_write(struct page *page, unsigned offset,
 	if (status)
 		goto out1;
 	if (zerofrom < offset) {
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr+zerofrom, 0, offset-zerofrom);
-		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
 		__block_commit_write(inode, page, zerofrom, offset);
 	}
 	return 0;
@@ -2340,10 +2327,7 @@ failed:
 	 * Error recovery is pretty slack.  Clear the page and mark it dirty
 	 * so we'll later zero out any blocks which _were_ allocated.
 	 */
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr, 0, PAGE_CACHE_SIZE);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
 	SetPageUptodate(page);
 	set_page_dirty(page);
 	return ret;
@@ -2382,7 +2366,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 	loff_t i_size = i_size_read(inode);
 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
 	unsigned offset;
-	void *kaddr;
 	int ret;
 
 	/* Is the page fully inside i_size? */
@@ -2413,10 +2396,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
 out:
 	ret = mpage_writepage(page, get_block, wbc);
 	if (ret == -EAGAIN)
@@ -2437,7 +2417,6 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from)
 	unsigned to;
 	struct page *page;
 	const struct address_space_operations *a_ops = mapping->a_ops;
-	char *kaddr;
 	int ret = 0;
 
 	if ((offset & (blocksize - 1)) == 0)
@@ -2451,10 +2430,8 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from)
 	to = (offset + blocksize) & ~(blocksize - 1);
 	ret = a_ops->prepare_write(NULL, page, offset, to);
 	if (ret == 0) {
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
+				KM_USER0);
 		/*
 		 * It would be more correct to call aops->commit_write()
 		 * here, but this is more efficient.
@@ -2480,7 +2457,6 @@ int block_truncate_page(struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	struct page *page;
 	struct buffer_head *bh;
-	void *kaddr;
 	int err;
 
 	blocksize = 1 << inode->i_blkbits;
@@ -2534,11 +2510,7 @@ int block_truncate_page(struct address_space *mapping,
 			goto unlock;
 	}
 
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr + offset, 0, length);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
-
+	zero_user_page(page, offset, length, KM_USER0);
 	mark_buffer_dirty(bh);
 	err = 0;
@@ -2559,7 +2531,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 	loff_t i_size = i_size_read(inode);
 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
 	unsigned offset;
-	void *kaddr;
 
 	/* Is the page fully inside i_size? */
 	if (page->index < end_index)
@@ -2585,10 +2556,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
 	return __block_write_full_page(inode, page, get_block, wbc);
 }
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -867,7 +867,6 @@ static int do_direct_IO(struct dio *dio)
 do_holes:
 			/* Handle holes */
 			if (!buffer_mapped(map_bh)) {
-				char *kaddr;
 				loff_t i_size_aligned;
 
 				/* AKPM: eargh, -ENOTBLK is a hack */
@@ -888,11 +887,8 @@ do_holes:
 					page_cache_release(page);
 					goto out;
 				}
-				kaddr = kmap_atomic(page, KM_USER0);
-				memset(kaddr + (block_in_page << blkbits),
-						0, 1 << blkbits);
-				flush_dcache_page(page);
-				kunmap_atomic(kaddr, KM_USER0);
+				zero_user_page(page, block_in_page << blkbits,
+						1 << blkbits, KM_USER0);
 				dio->block_in_file++;
 				block_in_page++;
 				goto next_block;
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -284,11 +284,9 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 	}
 
 	if (first_hole != blocks_per_page) {
-		char *kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr + (first_hole << blkbits), 0,
-				PAGE_CACHE_SIZE - (first_hole << blkbits));
-		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user_page(page, first_hole << blkbits,
+				PAGE_CACHE_SIZE - (first_hole << blkbits),
+				KM_USER0);
 		if (first_hole == 0) {
 			SetPageUptodate(page);
 			unlock_page(page);
@@ -576,14 +574,11 @@ page_is_mapped:
 		 * written out to the file."
 		 */
 		unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
-		char *kaddr;
 
 		if (page->index > end_index || !offset)
 			goto confused;
-		kaddr = kmap_atomic(page, KM_USER0);
-		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
+				KM_USER0);
 	}
 
 	/*
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -94,17 +94,27 @@ static inline void clear_highpage(struct page *page)
 
 /*
  * Same but also flushes aliased cache contents to RAM.
+ *
+ * This must be a macro because KM_USER0 and friends aren't defined if
+ * !CONFIG_HIGHMEM
  */
-static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size)
+#define zero_user_page(page, offset, size, km_type)		\
+	do {							\
+		void *kaddr;					\
+								\
+		BUG_ON((offset) + (size) > PAGE_SIZE);		\
+								\
+		kaddr = kmap_atomic(page, km_type);		\
+		memset((char *)kaddr + (offset), 0, (size));	\
+		flush_dcache_page(page);			\
+		kunmap_atomic(kaddr, (km_type));		\
+	} while (0)
+
+static inline void memclear_highpage_flush(struct page *page,
+			unsigned int offset, unsigned int size)
 {
-	void *kaddr;
-
-	BUG_ON(offset + size > PAGE_SIZE);
-
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset((char *)kaddr + offset, 0, size);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	zero_user_page(page, offset, size, KM_USER0);
 }
 
 #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
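Note the design choice in the hunk above: zero_user_page() is a macro
rather than a static inline precisely so that the km_type argument
(KM_USER0 and friends) need not be defined when !CONFIG_HIGHMEM. A
minimal usage sketch follows; the function name and locals here are
hypothetical, for illustration only, not part of the patch:

	/* Zero the tail of a partially used page, e.g. when truncating. */
	static void zero_page_tail(struct page *page, unsigned offset)
	{
		/* Zeroes [offset, PAGE_SIZE) and flushes aliased cache lines. */
		zero_user_page(page, offset, PAGE_SIZE - offset, KM_USER0);
	}

Existing callers of memclear_highpage_flush() keep working unchanged,
since it is now a thin wrapper around the new helper.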
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -434,7 +434,6 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
 	unsigned blocksize;
 	unsigned length;
 	struct page *page;
-	void *kaddr;
 
 	BUG_ON(!mapping->a_ops->get_xip_page);
@@ -458,11 +457,7 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
 		else
 			return PTR_ERR(page);
 	}
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr + offset, 0, length);
-	kunmap_atomic(kaddr, KM_USER0);
-
-	flush_dcache_page(page);
+	zero_user_page(page, offset, length, KM_USER0);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xip_truncate_page);
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -12,6 +12,7 @@
 #include <linux/swap.h>
 #include <linux/module.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 #include <linux/pagevec.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/buffer_head.h>	/* grr. try_to_release_page,
@@ -46,7 +47,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
 
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
-	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
+	zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0);
 	if (PagePrivate(page))
 		do_invalidatepage(page, partial);
 }