Commit 2eb79076 authored by Stephen Rothwell

Merge commit 'block/for-next'

parents 04a01191 b96fb440
@@ -1600,6 +1600,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
         sb->s_blocksize = 4096;
         sb->s_blocksize_bits = blksize_bits(4096);
+        sb->s_bdi = &fs_info->bdi;
         /*
          * we set the i_size on the btree inode to the max possible int.
@@ -75,13 +75,6 @@ static inline void bdi_work_init(struct bdi_work *work,
         work->state = WS_USED;
 }
 
-static inline void bdi_work_init_on_stack(struct bdi_work *work,
-                                          struct writeback_control *wbc)
-{
-        bdi_work_init(work, wbc);
-        work->state |= WS_ONSTACK;
-}
-
 /**
  * writeback_in_progress - determine whether there is writeback in progress
  * @bdi: the device's backing_dev_info structure.
@@ -146,21 +139,19 @@ static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
 static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
 {
-        if (work) {
-                work->seen = bdi->wb_mask;
-                BUG_ON(!work->seen);
-                atomic_set(&work->pending, bdi->wb_cnt);
-                BUG_ON(!bdi->wb_cnt);
+        work->seen = bdi->wb_mask;
+        BUG_ON(!work->seen);
+        atomic_set(&work->pending, bdi->wb_cnt);
+        BUG_ON(!bdi->wb_cnt);
 
-                /*
-                 * Make sure stores are seen before it appears on the list
-                 */
-                smp_mb();
+        /*
+         * Make sure stores are seen before it appears on the list
+         */
+        smp_mb();
 
-                spin_lock(&bdi->wb_lock);
-                list_add_tail_rcu(&work->list, &bdi->work_list);
-                spin_unlock(&bdi->wb_lock);
-        }
+        spin_lock(&bdi->wb_lock);
+        list_add_tail_rcu(&work->list, &bdi->work_list);
+        spin_unlock(&bdi->wb_lock);
 
         /*
          * If the default thread isn't there, make sure we add it. When
@@ -172,14 +163,12 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
                 struct bdi_writeback *wb = &bdi->wb;
 
                 /*
-                 * If we failed allocating the bdi work item, wake up the wb
-                 * thread always. As a safety precaution, it'll flush out
-                 * everything
+                 * End work now if this wb has no dirty IO pending. Otherwise
+                 * wakeup the handling thread
                  */
-                if (!wb_has_dirty_io(wb)) {
-                        if (work)
-                                wb_clear_pending(wb, work);
-                } else if (wb->task)
+                if (!wb_has_dirty_io(wb))
+                        wb_clear_pending(wb, work);
+                else if (wb->task)
                         wake_up_process(wb->task);
         }
 }
@@ -194,48 +183,63 @@ static void bdi_wait_on_work_clear(struct bdi_work *work)
                     TASK_UNINTERRUPTIBLE);
 }
 
-static struct bdi_work *bdi_alloc_work(struct writeback_control *wbc)
+static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
+                                 struct writeback_control *wbc)
 {
         struct bdi_work *work;
 
+        /*
+         * This is WB_SYNC_NONE writeback, so if allocation fails just
+         * wakeup the thread for old dirty data writeback
+         */
         work = kmalloc(sizeof(*work), GFP_ATOMIC);
-        if (work)
+        if (work) {
                 bdi_work_init(work, wbc);
+                bdi_queue_work(bdi, work);
+        } else {
+                struct bdi_writeback *wb = &bdi->wb;
 
-        return work;
+                if (wb->task)
+                        wake_up_process(wb->task);
+        }
 }
 
-void bdi_start_writeback(struct writeback_control *wbc)
+/**
+ * bdi_sync_writeback - start and wait for writeback
+ * @wbc: writeback parameters
+ *
+ * Description:
+ *   This does WB_SYNC_ALL data integrity writeback and waits for the
+ *   IO to complete. Callers must hold the sb s_umount semaphore for
+ *   reading, to avoid having the super disappear before we are done.
+ */
+static void bdi_sync_writeback(struct writeback_control *wbc)
 {
-        const bool must_wait = wbc->sync_mode == WB_SYNC_ALL;
-        struct bdi_work work_stack, *work = NULL;
+        struct bdi_work work;
 
-        if (!must_wait)
-                work = bdi_alloc_work(wbc);
+        wbc->sync_mode = WB_SYNC_ALL;
+        bdi_work_init(&work, wbc);
+        work.state |= WS_ONSTACK;
 
-        if (!work) {
-                work = &work_stack;
-                bdi_work_init_on_stack(work, wbc);
-        }
+        bdi_queue_work(wbc->bdi, &work);
+        bdi_wait_on_work_clear(&work);
+}
 
-        bdi_queue_work(wbc->bdi, work);
-
-        /*
-         * If the sync mode is WB_SYNC_ALL, block waiting for the work to
-         * complete. If not, we only need to wait for the work to be started,
-         * if we allocated it on-stack. We use the same mechanism, if the
-         * wait bit is set in the bdi_work struct, then threads will not
-         * clear pending until after they are done.
-         *
-         * Note that work == &work_stack if must_wait is true, so we don't
-         * need to do call_rcu() here ever, since the completion path will
-         * have done that for us.
-         */
-        if (must_wait || work == &work_stack) {
-                bdi_wait_on_work_clear(work);
-                if (work != &work_stack)
-                        call_rcu(&work->rcu_head, bdi_work_free);
-        }
+/**
+ * bdi_start_writeback - start writeback
+ * @wbc: writeback parameters
+ *
+ * Description:
+ *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
+ *   started when this function returns, we make no guarantees on
+ *   completion. Caller need not hold sb s_umount semaphore.
+ */
+void bdi_start_writeback(struct writeback_control *wbc)
+{
+        wbc->sync_mode = WB_SYNC_NONE;
+        bdi_alloc_queue_work(wbc->bdi, wbc);
 }
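For orientation, here is a hypothetical caller sketch (not part of this commit) showing how the two entry points above are meant to be used. It mirrors the sync_inodes_sb() hunk further down in this diff; the function names example_kick_bdi and example_sync_sb are invented for illustration, and bdi_sync_writeback() is static to fs/fs-writeback.c, so only callers in that file can reach it.

/* Illustrative sketch only -- not part of the patch. */
static void example_kick_bdi(struct backing_dev_info *bdi, long nr_pages)
{
        struct writeback_control wbc = {
                .bdi            = bdi,
                .nr_to_write    = nr_pages,
        };

        /* Opportunistic writeback: sync_mode is forced to WB_SYNC_NONE inside. */
        bdi_start_writeback(&wbc);
}

static void example_sync_sb(struct super_block *sb)
{
        struct writeback_control wbc = {
                .sb             = sb,
                .bdi            = sb->s_bdi,
                .range_start    = 0,
                .range_end      = LLONG_MAX,
                .nr_to_write    = LONG_MAX,
        };

        /*
         * Data integrity: queues an on-stack work item and blocks until it
         * completes. The caller must hold sb->s_umount for reading.
         */
        bdi_sync_writeback(&wbc);
}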
@@ -858,67 +862,24 @@ int bdi_writeback_task(struct bdi_writeback *wb)
 }
 
 /*
- * Schedule writeback for all backing devices. Expensive! If this is a data
- * integrity operation, writeback will be complete when this returns. If
- * we are simply called for WB_SYNC_NONE, then writeback will merely be
- * scheduled to run.
+ * Schedule writeback for all backing devices. Can only be used for
+ * WB_SYNC_NONE writeback, WB_SYNC_ALL should use bdi_start_writeback()
+ * and pass in the superblock.
  */
 static void bdi_writeback_all(struct writeback_control *wbc)
 {
-        const bool must_wait = wbc->sync_mode == WB_SYNC_ALL;
         struct backing_dev_info *bdi;
-        struct bdi_work *work;
-        LIST_HEAD(list);
 
-restart:
-        spin_lock(&bdi_lock);
+        WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
 
-        list_for_each_entry(bdi, &bdi_list, bdi_list) {
-                struct bdi_work *work;
-
-                if (!bdi_has_dirty_io(bdi))
-                        continue;
-
-                /*
-                 * If work allocation fails, do the writes inline. We drop
-                 * the lock and restart the list writeout. This should be OK,
-                 * since this happens rarely and because the writeout should
-                 * eventually make more free memory available.
-                 */
-                work = bdi_alloc_work(wbc);
-                if (!work) {
-                        struct writeback_control __wbc;
-
-                        /*
-                         * Not a data integrity writeout, just continue
-                         */
-                        if (!must_wait)
-                                continue;
-
-                        spin_unlock(&bdi_lock);
-                        __wbc = *wbc;
-                        __wbc.bdi = bdi;
-                        writeback_inodes_wbc(&__wbc);
-                        goto restart;
-                }
-                if (must_wait)
-                        list_add_tail(&work->wait_list, &list);
-
-                bdi_queue_work(bdi, work);
+        rcu_read_lock();
+        list_for_each_entry(bdi, &bdi_list, bdi_list) {
+                if (bdi_has_dirty_io(bdi))
+                        bdi_alloc_queue_work(bdi, wbc);
         }
-
-        spin_unlock(&bdi_lock);
-
-        /*
-         * If this is for WB_SYNC_ALL, wait for pending work to complete
-         * before returning.
-         */
-        while (!list_empty(&list)) {
-                work = list_entry(list.next, struct bdi_work, wait_list);
-                list_del(&work->wait_list);
-                bdi_wait_on_work_clear(work);
-                call_rcu(&work->rcu_head, bdi_work_free);
-        }
+        rcu_read_unlock();
 }
 
 /*
@@ -1175,14 +1136,14 @@ long sync_inodes_sb(struct super_block *sb)
 {
         struct writeback_control wbc = {
                 .sb             = sb,
-                .sync_mode      = WB_SYNC_ALL,
+                .bdi            = sb->s_bdi,
                 .range_start    = 0,
                 .range_end      = LLONG_MAX,
         };
         long nr_to_write = LONG_MAX; /* doesn't actually matter */
 
         wbc.nr_to_write = nr_to_write;
-        bdi_writeback_all(&wbc);
+        bdi_sync_writeback(&wbc);
         wait_sb_inodes(&wbc);
         return nr_to_write - wbc.nr_to_write;
 }
@@ -908,6 +908,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
         if (err)
                 goto err_put_conn;
 
+        sb->s_bdi = &fc->bdi;
+
         /* Handle umasking inside the fuse code */
         if (sb->s_flags & MS_POSIXACL)
                 fc->dont_mask = 1;
@@ -707,6 +707,12 @@ static int set_bdev_super(struct super_block *s, void *data)
 {
         s->s_bdev = data;
         s->s_dev = s->s_bdev->bd_dev;
+
+        /*
+         * We set the bdi here to the queue backing, file systems can
+         * overwrite this in ->fill_super()
+         */
+        s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
         return 0;
 }
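To illustrate the override mentioned in the comment above, here is a minimal hypothetical ->fill_super() sketch (not from this commit; the fuse and ubifs hunks in this same merge do the real thing). It assumes a made-up filesystem whose per-fs info structure embeds its own backing_dev_info:

/* Hypothetical example filesystem, for illustration only. */
struct example_fs_info {
        struct backing_dev_info bdi;    /* registered by the filesystem itself */
};

static int example_fill_super(struct super_block *sb, void *data, int silent)
{
        struct example_fs_info *fsi = sb->s_fs_info;

        /*
         * set_bdev_super() already pointed sb->s_bdi at the block queue's
         * backing_dev_info; a filesystem with its own bdi overrides it here.
         */
        sb->s_bdi = &fsi->bdi;
        return 0;
}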
@@ -27,6 +27,13 @@
  */
 static int __sync_filesystem(struct super_block *sb, int wait)
 {
+        /*
+         * This should be safe, as we require bdi backing to actually
+         * write out data in the first place
+         */
+        if (!sb->s_bdi)
+                return 0;
+
         /* Avoid doing twice syncing and cache pruning for quota sync */
         if (!wait) {
                 writeout_quota_sb(sb, -1);
@@ -101,7 +108,7 @@ restart:
                 spin_unlock(&sb_lock);
 
                 down_read(&sb->s_umount);
-                if (!(sb->s_flags & MS_RDONLY) && sb->s_root)
+                if (!(sb->s_flags & MS_RDONLY) && sb->s_root && sb->s_bdi)
                         __sync_filesystem(sb, wait);
                 up_read(&sb->s_umount);
@@ -1967,6 +1967,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
         if (err)
                 goto out_bdi;
 
+        sb->s_bdi = &c->bdi;
         sb->s_fs_info = c;
         sb->s_magic = UBIFS_SUPER_MAGIC;
         sb->s_blocksize = UBIFS_BLOCK_SIZE;
@@ -59,6 +59,7 @@ struct bdi_writeback {
 struct backing_dev_info {
         struct list_head bdi_list;
+        struct rcu_head rcu_head;
         unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
         unsigned long state;    /* Always use atomic bitops on this */
         unsigned int capabilities; /* Device capabilities */
@@ -1343,6 +1343,7 @@ struct super_block {
         int                     s_nr_dentry_unused;     /* # of dentry on lru */
 
         struct block_device     *s_bdev;
+        struct backing_dev_info *s_bdi;
         struct mtd_info         *s_mtd;
         struct list_head        s_instances;
         struct quota_info       s_dquot;        /* Diskquota specific options */
@@ -26,6 +26,12 @@ struct backing_dev_info default_backing_dev_info = {
 EXPORT_SYMBOL_GPL(default_backing_dev_info);
 
 static struct class *bdi_class;
+
+/*
+ * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
+ * reader side protection for bdi_pending_list. bdi_list has RCU reader side
+ * locking.
+ */
 DEFINE_SPINLOCK(bdi_lock);
 LIST_HEAD(bdi_list);
 LIST_HEAD(bdi_pending_list);
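As a rough sketch of the protocol spelled out in the comment above (illustrative only, not part of the patch): updaters take bdi_lock with bottom halves disabled and use the RCU-safe list helpers, while readers of bdi_list only need an RCU read-side section, as bdi_writeback_all() does earlier in this diff. The function name example_bdi_list_usage is invented for illustration.

/* Illustrative sketch only -- not part of the patch. */
static void example_bdi_list_usage(struct backing_dev_info *new_bdi)
{
        struct backing_dev_info *bdi;

        /* Writer side: mutate bdi_list under bdi_lock, using RCU list helpers. */
        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&new_bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        /* Reader side: traversal only needs an RCU read-side critical section. */
        rcu_read_lock();
        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
                if (bdi_has_dirty_io(bdi)) {
                        /* ... inspect or queue work for this bdi ... */
                }
        }
        rcu_read_unlock();
}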
@@ -284,9 +290,9 @@ static int bdi_start_fn(void *ptr)
         /*
          * Add us to the active bdi_list
          */
-        spin_lock(&bdi_lock);
-        list_add(&bdi->bdi_list, &bdi_list);
-        spin_unlock(&bdi_lock);
+        spin_lock_bh(&bdi_lock);
+        list_add_rcu(&bdi->bdi_list, &bdi_list);
+        spin_unlock_bh(&bdi_lock);
 
         bdi_task_init(bdi, wb);
@@ -389,7 +395,7 @@ static int bdi_forker_task(void *ptr)
                 if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
                         wb_do_writeback(me, 0);
 
-                spin_lock(&bdi_lock);
+                spin_lock_bh(&bdi_lock);
 
                 /*
                  * Check if any existing bdi's have dirty data without
@@ -410,7 +416,7 @@ static int bdi_forker_task(void *ptr)
                 if (list_empty(&bdi_pending_list)) {
                         unsigned long wait;
 
-                        spin_unlock(&bdi_lock);
+                        spin_unlock_bh(&bdi_lock);
                         wait = msecs_to_jiffies(dirty_writeback_interval * 10);
                         schedule_timeout(wait);
                         try_to_freeze();
@@ -426,7 +432,7 @@ static int bdi_forker_task(void *ptr)
                 bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
                                  bdi_list);
                 list_del_init(&bdi->bdi_list);
-                spin_unlock(&bdi_lock);
+                spin_unlock_bh(&bdi_lock);
 
                 wb = &bdi->wb;
                 wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
@@ -445,9 +451,9 @@ static int bdi_forker_task(void *ptr)
                          * a chance to flush other bdi's to free
                          * memory.
                          */
-                        spin_lock(&bdi_lock);
+                        spin_lock_bh(&bdi_lock);
                         list_add_tail(&bdi->bdi_list, &bdi_pending_list);
-                        spin_unlock(&bdi_lock);
+                        spin_unlock_bh(&bdi_lock);
 
                         bdi_flush_io(bdi);
                 }
@@ -456,6 +462,24 @@ static int bdi_forker_task(void *ptr)
         return 0;
 }
 
+static void bdi_add_to_pending(struct rcu_head *head)
+{
+        struct backing_dev_info *bdi;
+
+        bdi = container_of(head, struct backing_dev_info, rcu_head);
+        INIT_LIST_HEAD(&bdi->bdi_list);
+
+        spin_lock(&bdi_lock);
+        list_add_tail(&bdi->bdi_list, &bdi_pending_list);
+        spin_unlock(&bdi_lock);
+
+        /*
+         * We are now on the pending list, wake up bdi_forker_task()
+         * to finish the job and add us back to the active bdi_list
+         */
+        wake_up_process(default_backing_dev_info.wb.task);
+}
+
 /*
  * Add the default flusher task that gets created for any bdi
  * that has dirty data pending writeout
@@ -478,16 +502,29 @@ void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
          * waiting for previous additions to finish.
          */
         if (!test_and_set_bit(BDI_pending, &bdi->state)) {
-                list_move_tail(&bdi->bdi_list, &bdi_pending_list);
+                list_del_rcu(&bdi->bdi_list);
 
                 /*
-                 * We are now on the pending list, wake up bdi_forker_task()
-                 * to finish the job and add us back to the active bdi_list
+                 * We must wait for the current RCU period to end before
+                 * moving to the pending list. So schedule that operation
+                 * from an RCU callback.
                  */
-                wake_up_process(default_backing_dev_info.wb.task);
+                call_rcu(&bdi->rcu_head, bdi_add_to_pending);
         }
 }
 
+/*
+ * Remove bdi from bdi_list, and ensure that it is no longer visible
+ */
+static void bdi_remove_from_list(struct backing_dev_info *bdi)
+{
+        spin_lock_bh(&bdi_lock);
+        list_del_rcu(&bdi->bdi_list);
+        spin_unlock_bh(&bdi_lock);
+
+        synchronize_rcu();
+}
+
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                 const char *fmt, ...)
 {
@@ -506,9 +543,9 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                 goto exit;
         }
 
-        spin_lock(&bdi_lock);
-        list_add_tail(&bdi->bdi_list, &bdi_list);
-        spin_unlock(&bdi_lock);
+        spin_lock_bh(&bdi_lock);
+        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
+        spin_unlock_bh(&bdi_lock);
 
         bdi->dev = dev;
@@ -526,9 +563,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                         wb->task = NULL;
                         ret = -ENOMEM;
 
-                        spin_lock(&bdi_lock);
-                        list_del(&bdi->bdi_list);
-                        spin_unlock(&bdi_lock);
+                        bdi_remove_from_list(bdi);
                         goto exit;
                 }
         }
@@ -565,9 +600,7 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
         /*
          * Make sure nobody finds us on the bdi_list anymore
          */
-        spin_lock(&bdi_lock);
-        list_del(&bdi->bdi_list);
-        spin_unlock(&bdi_lock);
+        bdi_remove_from_list(bdi);
 
         /*
          * Finally, kill the kernel threads. We don't need to be RCU
@@ -599,6 +632,7 @@ int bdi_init(struct backing_dev_info *bdi)
         bdi->max_ratio = 100;
         bdi->max_prop_frac = PROP_FRAC_BASE;
         spin_lock_init(&bdi->wb_lock);
+        INIT_RCU_HEAD(&bdi->rcu_head);
         INIT_LIST_HEAD(&bdi->bdi_list);
         INIT_LIST_HEAD(&bdi->wb_list);
         INIT_LIST_HEAD(&bdi->work_list);
@@ -315,7 +315,7 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 {
         int ret = 0;
 
-        spin_lock(&bdi_lock);
+        spin_lock_bh(&bdi_lock);
         if (min_ratio > bdi->max_ratio) {
                 ret = -EINVAL;
         } else {
@@ -327,7 +327,7 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
                         ret = -EINVAL;
                 }
         }
-        spin_unlock(&bdi_lock);
+        spin_unlock_bh(&bdi_lock);
 
         return ret;
 }
@@ -339,14 +339,14 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
         if (max_ratio > 100)
                 return -EINVAL;
 
-        spin_lock(&bdi_lock);
+        spin_lock_bh(&bdi_lock);
         if (bdi->min_ratio > max_ratio) {
                 ret = -EINVAL;
         } else {
                 bdi->max_ratio = max_ratio;
                 bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
         }
-        spin_unlock(&bdi_lock);
+        spin_unlock_bh(&bdi_lock);
 
         return ret;
 }
@@ -585,7 +585,6 @@ static void balance_dirty_pages(struct address_space *mapping)
                                           > background_thresh))) {
                         struct writeback_control wbc = {
                                 .bdi            = bdi,
-                                .sync_mode      = WB_SYNC_NONE,
                                 .nr_to_write    = nr_writeback,
                         };