Commit cf9a2ae8 authored by David Howells, committed by Jens Axboe

[PATCH] BLOCK: Move functions out of buffer code [try #6]

Move some functions out of the buffering code that aren't strictly buffering
specific.  This is a precursor to being able to disable the block layer.

 (*) Moved some stuff out of fs/buffer.c:

     (*) The file sync and general sync stuff moved to fs/sync.c.

     (*) The superblock sync stuff moved to fs/super.c.

     (*) do_invalidatepage() moved to mm/truncate.c.

     (*) try_to_release_page() moved to mm/filemap.c.

 (*) Moved some related declarations between header files:

     (*) Declarations for do_invalidatepage() and try_to_release_page() moved
         to linux/mm.h.

     (*) __set_page_dirty_buffers() moved to linux/buffer_head.h.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4090959a
fs/buffer.c
@@ -159,31 +159,6 @@ int sync_blockdev(struct block_device *bdev)
}
EXPORT_SYMBOL(sync_blockdev);

static void __fsync_super(struct super_block *sb)
{
        sync_inodes_sb(sb, 0);
        DQUOT_SYNC(sb);
        lock_super(sb);
        if (sb->s_dirt && sb->s_op->write_super)
                sb->s_op->write_super(sb);
        unlock_super(sb);
        if (sb->s_op->sync_fs)
                sb->s_op->sync_fs(sb, 1);
        sync_blockdev(sb->s_bdev);
        sync_inodes_sb(sb, 1);
}

/*
 * Write out and wait upon all dirty data associated with this
 * superblock.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_super(struct super_block *sb)
{
        __fsync_super(sb);
        return sync_blockdev(sb->s_bdev);
}

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block

@@ -259,118 +234,6 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
}
EXPORT_SYMBOL(thaw_bdev);

/*
 * sync everything.  Start out by waking pdflush, because that writes back
 * all queues in parallel.
 */
static void do_sync(unsigned long wait)
{
        wakeup_pdflush(0);
        sync_inodes(0);         /* All mappings, inodes and their blockdevs */
        DQUOT_SYNC(NULL);
        sync_supers();          /* Write the superblocks */
        sync_filesystems(0);    /* Start syncing the filesystems */
        sync_filesystems(wait); /* Waitingly sync the filesystems */
        sync_inodes(wait);      /* Mappings, inodes and blockdevs, again. */
        if (!wait)
                printk("Emergency Sync complete\n");
        if (unlikely(laptop_mode))
                laptop_sync_completion();
}

asmlinkage long sys_sync(void)
{
        do_sync(1);
        return 0;
}

void emergency_sync(void)
{
        pdflush_operation(do_sync, 0);
}

/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
        struct inode * inode = dentry->d_inode;
        struct super_block * sb;
        int ret, err;

        /* sync the inode to buffers */
        ret = write_inode_now(inode, 0);

        /* sync the superblock to buffers */
        sb = inode->i_sb;
        lock_super(sb);
        if (sb->s_op->write_super)
                sb->s_op->write_super(sb);
        unlock_super(sb);

        /* .. finally sync the buffers to disk */
        err = sync_blockdev(sb->s_bdev);
        if (!ret)
                ret = err;
        return ret;
}

long do_fsync(struct file *file, int datasync)
{
        int ret;
        int err;
        struct address_space *mapping = file->f_mapping;

        if (!file->f_op || !file->f_op->fsync) {
                /* Why?  We can still call filemap_fdatawrite */
                ret = -EINVAL;
                goto out;
        }

        ret = filemap_fdatawrite(mapping);

        /*
         * We need to protect against concurrent writers, which could cause
         * livelocks in fsync_buffers_list().
         */
        mutex_lock(&mapping->host->i_mutex);
        err = file->f_op->fsync(file, file->f_dentry, datasync);
        if (!ret)
                ret = err;
        mutex_unlock(&mapping->host->i_mutex);
        err = filemap_fdatawait(mapping);
        if (!ret)
                ret = err;
out:
        return ret;
}

static long __do_fsync(unsigned int fd, int datasync)
{
        struct file *file;
        int ret = -EBADF;

        file = fget(fd);
        if (file) {
                ret = do_fsync(file, datasync);
                fput(file);
        }
        return ret;
}

asmlinkage long sys_fsync(unsigned int fd)
{
        return __do_fsync(fd, 0);
}

asmlinkage long sys_fdatasync(unsigned int fd)
{
        return __do_fsync(fd, 1);
}

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,

@@ -1550,35 +1413,6 @@ static void discard_buffer(struct buffer_head * bh)
        unlock_buffer(bh);
}

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is asked to try to release any data held against the
 * page (presumably at page->private).  If the release was successful,
 * return `1'.  Otherwise return zero.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
 *
 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
        struct address_space * const mapping = page->mapping;

        BUG_ON(!PageLocked(page));
        if (PageWriteback(page))
                return 0;

        if (mapping && mapping->a_ops->releasepage)
                return mapping->a_ops->releasepage(page, gfp_mask);
        return try_to_free_buffers(page);
}
EXPORT_SYMBOL(try_to_release_page);

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *

@@ -1630,14 +1464,6 @@ out:
}
EXPORT_SYMBOL(block_invalidatepage);

void do_invalidatepage(struct page *page, unsigned long offset)
{
        void (*invalidatepage)(struct page *, unsigned long);
        invalidatepage = page->mapping->a_ops->invalidatepage ? :
                block_invalidatepage;
        (*invalidatepage)(page, offset);
}

/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
...

fs/super.c
@@ -220,6 +220,37 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
        return 0;
}

/*
 * Write out and wait upon all dirty data associated with this
 * superblock.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.  Requires a second blkdev
 * flush by the caller to complete the operation.
 */
void __fsync_super(struct super_block *sb)
{
        sync_inodes_sb(sb, 0);
        DQUOT_SYNC(sb);
        lock_super(sb);
        if (sb->s_dirt && sb->s_op->write_super)
                sb->s_op->write_super(sb);
        unlock_super(sb);
        if (sb->s_op->sync_fs)
                sb->s_op->sync_fs(sb, 1);
        sync_blockdev(sb->s_bdev);
        sync_inodes_sb(sb, 1);
}

/*
 * Write out and wait upon all dirty data associated with this
 * superblock.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_super(struct super_block *sb)
{
        __fsync_super(sb);
        return sync_blockdev(sb->s_bdev);
}

/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
...
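
The comment on __fsync_super() above says it "requires a second blkdev flush by the caller"; fsync_super() is the caller that supplies it. A minimal userspace model of that two-step calling convention, with stub helpers standing in for the kernel ones (the _stub names are illustrative, not kernel API):

#include <stdio.h>

/* Stubs standing in for the kernel helpers used by __fsync_super(). */
static void sync_inodes_sb_stub(int wait)  { printf("sync inodes, wait=%d\n", wait); }
static void write_super_stub(void)         { printf("write superblock\n"); }
static int  sync_blockdev_stub(void)       { printf("flush block device\n"); return 0; }

/* Model of __fsync_super(): pushes everything out, including one blockdev
 * flush, but deliberately leaves the final flush to the caller. */
static void fsync_super_prepare(void)
{
        sync_inodes_sb_stub(0);         /* start inode writeback */
        write_super_stub();             /* write the superblock if dirty */
        sync_blockdev_stub();           /* first blockdev flush */
        sync_inodes_sb_stub(1);         /* wait for inode writeback */
}

/* Model of fsync_super(): completes the operation. */
static int fsync_super_stub(void)
{
        fsync_super_prepare();
        return sync_blockdev_stub();    /* the required second flush */
}

int main(void)
{
        return fsync_super_stub();
}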
fs/sync.c
@@ -10,10 +10,123 @@
#include <linux/syscalls.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>

#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
                        SYNC_FILE_RANGE_WAIT_AFTER)

/*
 * sync everything.  Start out by waking pdflush, because that writes back
 * all queues in parallel.
 */
static void do_sync(unsigned long wait)
{
        wakeup_pdflush(0);
        sync_inodes(0);         /* All mappings, inodes and their blockdevs */
        DQUOT_SYNC(NULL);
        sync_supers();          /* Write the superblocks */
        sync_filesystems(0);    /* Start syncing the filesystems */
        sync_filesystems(wait); /* Waitingly sync the filesystems */
        sync_inodes(wait);      /* Mappings, inodes and blockdevs, again. */
        if (!wait)
                printk("Emergency Sync complete\n");
        if (unlikely(laptop_mode))
                laptop_sync_completion();
}

asmlinkage long sys_sync(void)
{
        do_sync(1);
        return 0;
}

void emergency_sync(void)
{
        pdflush_operation(do_sync, 0);
}

/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
        struct inode * inode = dentry->d_inode;
        struct super_block * sb;
        int ret, err;

        /* sync the inode to buffers */
        ret = write_inode_now(inode, 0);

        /* sync the superblock to buffers */
        sb = inode->i_sb;
        lock_super(sb);
        if (sb->s_op->write_super)
                sb->s_op->write_super(sb);
        unlock_super(sb);

        /* .. finally sync the buffers to disk */
        err = sync_blockdev(sb->s_bdev);
        if (!ret)
                ret = err;
        return ret;
}

long do_fsync(struct file *file, int datasync)
{
        int ret;
        int err;
        struct address_space *mapping = file->f_mapping;

        if (!file->f_op || !file->f_op->fsync) {
                /* Why?  We can still call filemap_fdatawrite */
                ret = -EINVAL;
                goto out;
        }

        ret = filemap_fdatawrite(mapping);

        /*
         * We need to protect against concurrent writers, which could cause
         * livelocks in fsync_buffers_list().
         */
        mutex_lock(&mapping->host->i_mutex);
        err = file->f_op->fsync(file, file->f_dentry, datasync);
        if (!ret)
                ret = err;
        mutex_unlock(&mapping->host->i_mutex);
        err = filemap_fdatawait(mapping);
        if (!ret)
                ret = err;
out:
        return ret;
}

static long __do_fsync(unsigned int fd, int datasync)
{
        struct file *file;
        int ret = -EBADF;

        file = fget(fd);
        if (file) {
                ret = do_fsync(file, datasync);
                fput(file);
        }
        return ret;
}

asmlinkage long sys_fsync(unsigned int fd)
{
        return __do_fsync(fd, 0);
}

asmlinkage long sys_fdatasync(unsigned int fd)
{
        return __do_fsync(fd, 1);
}

/*
 * sys_sync_file_range() permits finely controlled syncing over a segment of
 * a file in the range offset .. (offset+nbytes-1) inclusive.  If nbytes is
...
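
For reference, these are the kernel entry points behind sync(2), fsync(2) and fdatasync(2); the latter two funnel through __do_fsync() with the datasync flag selecting how much metadata must reach disk. A small runnable userspace demonstration of all three (plain POSIX C, error handling abbreviated):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("demo.dat", O_CREAT | O_WRONLY | O_TRUNC, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "payload\n", 8) != 8)
                perror("write");

        /* sys_fdatasync path: data, plus only the metadata needed to reach it. */
        if (fdatasync(fd) < 0)
                perror("fdatasync");

        /* sys_fsync path: data plus all inode metadata (timestamps etc.). */
        if (fsync(fd) < 0)
                perror("fsync");

        close(fd);

        /* sys_sync path: flush every filesystem, as do_sync(1) does. */
        sync();
        return 0;
}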
include/linux/buffer_head.h
@@ -190,9 +190,7 @@ extern int buffer_heads_over_limit;
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
int try_to_release_page(struct page * page, gfp_t gfp_mask);
void block_invalidatepage(struct page *page, unsigned long offset);
void do_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
                        struct writeback_control *wbc);
int block_read_full_page(struct page*, get_block_t*);

@@ -302,4 +300,5 @@ static inline void lock_buffer(struct buffer_head *bh)
                __lock_buffer(bh);
}

extern int __set_page_dirty_buffers(struct page *page);
#endif /* _LINUX_BUFFER_HEAD_H */

include/linux/fs.h
@@ -1546,6 +1546,7 @@ extern int __filemap_fdatawrite_range(struct address_space *mapping,
extern long do_fsync(struct file *file, int datasync);
extern void sync_supers(void);
extern void sync_filesystems(int wait);
extern void __fsync_super(struct super_block *sb);
extern void emergency_sync(void);
extern void emergency_remount(void);
extern int do_remount_sb(struct super_block *sb, int flags,
...
include/linux/mm.h
@@ -743,7 +743,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
                int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);

int __set_page_dirty_buffers(struct page *page);
extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);
int __set_page_dirty_nobuffers(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
                        struct page *page);
...
mm/filemap.c
@@ -2491,3 +2491,33 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        }
        return retval;
}

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is asked to try to release any data held against the
 * page (presumably at page->private).  If the release was successful,
 * return `1'.  Otherwise return zero.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
 *
 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
        struct address_space * const mapping = page->mapping;

        BUG_ON(!PageLocked(page));
        if (PageWriteback(page))
                return 0;

        if (mapping && mapping->a_ops->releasepage)
                return mapping->a_ops->releasepage(page, gfp_mask);
        return try_to_free_buffers(page);
}
EXPORT_SYMBOL(try_to_release_page);
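
try_to_release_page() is a guard/hook/fallback dispatch: refuse any page still under writeback, prefer the owning filesystem's ->releasepage() hook when one exists, and otherwise fall back to the generic buffer stripper. A compilable userspace model of just that control flow, using mock types and hypothetical names rather than the real kernel structures:

#include <stdbool.h>
#include <stdio.h>

struct mock_page;

/* Mock of address_space_operations: an optional per-fs release hook. */
struct mock_aops {
        int (*releasepage)(struct mock_page *page, unsigned gfp_mask);
};

struct mock_page {
        bool locked;
        bool writeback;
        const struct mock_aops *aops;   /* NULL models a page with no mapping */
};

static int try_to_free_buffers_stub(struct mock_page *page)
{
        (void)page;
        return 1;       /* generic fallback: pretend the buffers were freed */
}

/* Mirrors the dispatch order above: guard, hook, fallback. */
static int try_to_release_page_model(struct mock_page *page, unsigned gfp_mask)
{
        if (!page->locked)
                return 0;       /* the kernel BUG_ON(!PageLocked(page)) */
        if (page->writeback)
                return 0;       /* never release a page with I/O in flight */
        if (page->aops && page->aops->releasepage)
                return page->aops->releasepage(page, gfp_mask);
        return try_to_free_buffers_stub(page);
}

int main(void)
{
        struct mock_page page = { .locked = true, .writeback = false, .aops = NULL };
        printf("released: %d\n", try_to_release_page_model(&page, 0));
        return 0;
}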
mm/page-writeback.c
@@ -30,6 +30,7 @@
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>

/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
...
mm/truncate.c
@@ -17,6 +17,30 @@
                                   do_invalidatepage */

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
        void (*invalidatepage)(struct page *, unsigned long);

        invalidatepage = page->mapping->a_ops->invalidatepage;
        if (!invalidatepage)
                invalidatepage = block_invalidatepage;
        if (invalidatepage)
                (*invalidatepage)(page, offset);
}

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
        memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
...
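
In practice do_invalidatepage() is reached through truncation: shrinking a file to a non-page-aligned size forces the kernel to invalidate the tail of the last remaining page, through the filesystem's ->invalidatepage() hook or the block_invalidatepage() fallback, whenever the page carries private (buffer) data. A small runnable way to exercise that path from userspace (POSIX C):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char buf[8192];
        int fd = open("trunc.dat", O_CREAT | O_RDWR | O_TRUNC, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(buf, 'x', sizeof(buf));
        if (write(fd, buf, sizeof(buf)) != sizeof(buf))
                perror("write");

        /* Truncate to 100 bytes: the kernel must clear the tail of the first
         * page and invalidate everything past it, which is where
         * do_invalidatepage() comes in for buffer-backed pages. */
        if (ftruncate(fd, 100) < 0)
                perror("ftruncate");

        close(fd);
        return 0;
}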