Commit 2eb79076 authored Sep 16, 2009 by Stephen Rothwell
Merge commit 'block/for-next'

Parents: 04a01191 b96fb440
Showing 10 changed files with 150 additions and 137 deletions (+150 -137)
fs/btrfs/disk-io.c            +1   -0
fs/fs-writeback.c             +71  -110
fs/fuse/inode.c               +2   -0
fs/super.c                    +6   -0
fs/sync.c                     +8   -1
fs/ubifs/super.c              +1   -0
include/linux/backing-dev.h   +1   -0
include/linux/fs.h            +1   -0
mm/backing-dev.c              +55  -21
mm/page-writeback.c           +4   -5

fs/btrfs/disk-io.c

@@ -1600,6 +1600,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	sb->s_blocksize = 4096;
 	sb->s_blocksize_bits = blksize_bits(4096);
+	sb->s_bdi = &fs_info->bdi;
 
 	/*
 	 * we set the i_size on the btree inode to the max possible int.
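
Note: the one-line change above is the btrfs side of the new super_block->s_bdi plumbing in this merge (see fs/super.c, include/linux/fs.h and the fuse/ubifs hunks below). A filesystem that owns a private backing_dev_info points sb->s_bdi at it from its ->fill_super(). The following is a minimal, hypothetical sketch of that pattern; the myfs_* names are not part of this commit, and it assumes the bdi_init()/bdi_register() API visible elsewhere in this diff.

/* Hypothetical example only; myfs_* names are illustrative. */
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct myfs_info {
	struct backing_dev_info bdi;	/* the filesystem's private bdi */
	/* ... other per-superblock state ... */
};

static atomic_t myfs_bdi_seq = ATOMIC_INIT(0);

static int myfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct myfs_info *fsi;
	int err;

	fsi = kzalloc(sizeof(*fsi), GFP_KERNEL);
	if (!fsi)
		return -ENOMEM;

	err = bdi_init(&fsi->bdi);
	if (err)
		goto out_free;
	err = bdi_register(&fsi->bdi, NULL, "myfs-%d",
			   atomic_inc_return(&myfs_bdi_seq));
	if (err)
		goto out_bdi;

	/*
	 * Same idea as the btrfs/fuse/ubifs hunks in this merge: make
	 * writeback use the filesystem's own bdi instead of the default
	 * one assigned in set_bdev_super().
	 */
	sb->s_bdi = &fsi->bdi;
	sb->s_fs_info = fsi;
	return 0;

out_bdi:
	bdi_destroy(&fsi->bdi);
out_free:
	kfree(fsi);
	return err;
}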

fs/fs-writeback.c
@@ -75,13 +75,6 @@ static inline void bdi_work_init(struct bdi_work *work,
 	work->state = WS_USED;
 }
 
-static inline void bdi_work_init_on_stack(struct bdi_work *work,
-					  struct writeback_control *wbc)
-{
-	bdi_work_init(work, wbc);
-	work->state |= WS_ONSTACK;
-}
-
 /**
  * writeback_in_progress - determine whether there is writeback in progress
  * @bdi: the device's backing_dev_info structure.
@@ -146,7 +139,6 @@ static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
 
 static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
 {
-	if (work) {
 		work->seen = bdi->wb_mask;
 		BUG_ON(!work->seen);
 		atomic_set(&work->pending, bdi->wb_cnt);
@@ -160,7 +152,6 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
 		spin_lock(&bdi->wb_lock);
 		list_add_tail_rcu(&work->list, &bdi->work_list);
 		spin_unlock(&bdi->wb_lock);
-	}
 
 	/*
 	 * If the default thread isn't there, make sure we add it. When
@@ -172,14 +163,12 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
 		struct bdi_writeback *wb = &bdi->wb;
 
 		/*
-		 * If we failed allocating the bdi work item, wake up the wb
-		 * thread always. As a safety precaution, it'll flush out
-		 * everything
+		 * End work now if this wb has no dirty IO pending. Otherwise
+		 * wakeup the handling thread
 		 */
-		if (!wb_has_dirty_io(wb)) {
-			if (work)
+		if (!wb_has_dirty_io(wb))
 			wb_clear_pending(wb, work);
-		} else if (wb->task)
+		else if (wb->task)
 			wake_up_process(wb->task);
 	}
 }
@@ -194,48 +183,63 @@ static void bdi_wait_on_work_clear(struct bdi_work *work)
 		    TASK_UNINTERRUPTIBLE);
 }
 
-static struct bdi_work *bdi_alloc_work(struct writeback_control *wbc)
+static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
+				 struct writeback_control *wbc)
 {
 	struct bdi_work *work;
 
+	/*
+	 * This is WB_SYNC_NONE writeback, so if allocation fails just
+	 * wakeup the thread for old dirty data writeback
+	 */
 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (work)
+	if (work) {
 		bdi_work_init(work, wbc);
+		bdi_queue_work(bdi, work);
+	} else {
+		struct bdi_writeback *wb = &bdi->wb;
 
-	return work;
+		if (wb->task)
+			wake_up_process(wb->task);
+	}
 }
 
-void bdi_start_writeback(struct writeback_control *wbc)
+/**
+ * bdi_sync_writeback - start and wait for writeback
+ * @wbc: writeback parameters
+ *
+ * Description:
+ *   This does WB_SYNC_ALL data integrity writeback and waits for the
+ *   IO to complete. Callers must hold the sb s_umount semaphore for
+ *   reading, to avoid having the super disappear before we are done.
+ */
+static void bdi_sync_writeback(struct writeback_control *wbc)
 {
-	const bool must_wait = wbc->sync_mode == WB_SYNC_ALL;
-	struct bdi_work work_stack, *work = NULL;
+	struct bdi_work work;
 
-	if (!must_wait)
-		work = bdi_alloc_work(wbc);
+	wbc->sync_mode = WB_SYNC_ALL;
 
-	if (!work) {
-		work = &work_stack;
-		bdi_work_init_on_stack(work, wbc);
-	}
+	bdi_work_init(&work, wbc);
+	work.state |= WS_ONSTACK;
 
-	bdi_queue_work(wbc->bdi, work);
+	bdi_queue_work(wbc->bdi, &work);
+	bdi_wait_on_work_clear(&work);
+}
 
-	/*
-	 * If the sync mode is WB_SYNC_ALL, block waiting for the work to
-	 * complete. If not, we only need to wait for the work to be started,
-	 * if we allocated it on-stack. We use the same mechanism, if the
-	 * wait bit is set in the bdi_work struct, then threads will not
-	 * clear pending until after they are done.
-	 *
-	 * Note that work == &work_stack if must_wait is true, so we don't
-	 * need to do call_rcu() here ever, since the completion path will
-	 * have done that for us.
-	 */
-	if (must_wait || work == &work_stack) {
-		bdi_wait_on_work_clear(work);
-		if (work != &work_stack)
-			call_rcu(&work->rcu_head, bdi_work_free);
-	}
+/**
+ * bdi_start_writeback - start writeback
+ * @wbc: writeback parameters
+ *
+ * Description:
+ *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
+ *   started when this function returns, we make no guarentees on
+ *   completion. Caller need not hold sb s_umount semaphore.
+ */
+void bdi_start_writeback(struct writeback_control *wbc)
+{
+	wbc->sync_mode = WB_SYNC_NONE;
+	bdi_alloc_queue_work(wbc->bdi, wbc);
 }
@@ -858,67 +862,24 @@ int bdi_writeback_task(struct bdi_writeback *wb)
 }
 
 /*
- * Schedule writeback for all backing devices. Expensive! If this is a data
- * integrity operation, writeback will be complete when this returns. If
- * we are simply called for WB_SYNC_NONE, then writeback will merely be
- * scheduled to run.
+ * Schedule writeback for all backing devices. Can only be used for
+ * WB_SYNC_NONE writeback, WB_SYNC_ALL should use bdi_start_writeback()
+ * and pass in the superblock.
  */
 static void bdi_writeback_all(struct writeback_control *wbc)
 {
-	const bool must_wait = wbc->sync_mode == WB_SYNC_ALL;
 	struct backing_dev_info *bdi;
-	struct bdi_work *work;
-	LIST_HEAD(list);
 
-restart:
-	spin_lock(&bdi_lock);
+	WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
 
-	list_for_each_entry(bdi, &bdi_list, bdi_list) {
-		struct bdi_work *work;
-
-		if (!bdi_has_dirty_io(bdi))
-			continue;
-
-		/*
-		 * If work allocation fails, do the writes inline. We drop
-		 * the lock and restart the list writeout. This should be OK,
-		 * since this happens rarely and because the writeout should
-		 * eventually make more free memory available.
-		 */
-		work = bdi_alloc_work(wbc);
-		if (!work) {
-			struct writeback_control __wbc;
-
-			/*
-			 * Not a data integrity writeout, just continue
-			 */
-			if (!must_wait)
-				continue;
-
-			spin_unlock(&bdi_lock);
-			__wbc = *wbc;
-			__wbc.bdi = bdi;
-			writeback_inodes_wbc(&__wbc);
-			goto restart;
-		}
-		if (must_wait)
-			list_add_tail(&work->wait_list, &list);
-
-		bdi_queue_work(bdi, work);
+	rcu_read_lock();
+
+	list_for_each_entry(bdi, &bdi_list, bdi_list) {
+		if (bdi_has_dirty_io(bdi))
+			bdi_alloc_queue_work(bdi, wbc);
 	}
 
-	spin_unlock(&bdi_lock);
-
-	/*
-	 * If this is for WB_SYNC_ALL, wait for pending work to complete
-	 * before returning.
-	 */
-	while (!list_empty(&list)) {
-		work = list_entry(list.next, struct bdi_work, wait_list);
-		list_del(&work->wait_list);
-		bdi_wait_on_work_clear(work);
-		call_rcu(&work->rcu_head, bdi_work_free);
-	}
+	rcu_read_unlock();
 }
@@ -1175,14 +1136,14 @@ long sync_inodes_sb(struct super_block *sb)
 {
 	struct writeback_control wbc = {
 		.sb		= sb,
 		.sync_mode	= WB_SYNC_ALL,
+		.bdi		= sb->s_bdi,
 		.range_start	= 0,
 		.range_end	= LLONG_MAX,
 	};
 	long nr_to_write = LONG_MAX; /* doesn't actually matter */
 
 	wbc.nr_to_write = nr_to_write;
-	bdi_writeback_all(&wbc);
+	bdi_sync_writeback(&wbc);
 	wait_sb_inodes(&wbc);
 	return nr_to_write - wbc.nr_to_write;
 }
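
Note: after this file's changes there are two entry points with distinct semantics, as the new kernel-doc comments above describe. bdi_start_writeback() queues WB_SYNC_NONE work (or just wakes the flusher thread if the kmalloc fails), while the file-local bdi_sync_writeback() queues an on-stack WB_SYNC_ALL work item and waits for it, which is why sync_inodes_sb() callers must hold s_umount. The sketch below is illustrative only, modeled on sync_inodes_sb() from the hunk above; bdi_sync_writeback() and wait_sb_inodes() are static to fs/fs-writeback.c, so real callers outside that file go through sync_inodes_sb(), and the myfs_writeback() wrapper is hypothetical.

/* Illustrative sketch; mirrors sync_inodes_sb() from the hunk above. */
static long myfs_writeback(struct super_block *sb, int data_integrity)
{
	struct writeback_control wbc = {
		.sb		= sb,
		.bdi		= sb->s_bdi,	/* field added by this merge */
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	long nr_to_write = LONG_MAX;

	wbc.nr_to_write = nr_to_write;

	if (data_integrity) {
		/* sets wbc.sync_mode = WB_SYNC_ALL, queues an on-stack
		 * bdi_work and blocks in bdi_wait_on_work_clear() */
		bdi_sync_writeback(&wbc);
		wait_sb_inodes(&wbc);
	} else {
		/* sets wbc.sync_mode = WB_SYNC_NONE; returns as soon as the
		 * work is queued (or the flusher thread has been woken) */
		bdi_start_writeback(&wbc);
	}

	return nr_to_write - wbc.nr_to_write;
}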

fs/fuse/inode.c

@@ -908,6 +908,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 	if (err)
 		goto err_put_conn;
 
+	sb->s_bdi = &fc->bdi;
+
 	/* Handle umasking inside the fuse code */
 	if (sb->s_flags & MS_POSIXACL)
 		fc->dont_mask = 1;

fs/super.c

@@ -707,6 +707,12 @@ static int set_bdev_super(struct super_block *s, void *data)
 {
 	s->s_bdev = data;
 	s->s_dev = s->s_bdev->bd_dev;
+
+	/*
+	 * We set the bdi here to the queue backing, file systems can
+	 * overwrite this in ->fill_super()
+	 */
+	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
 	return 0;
 }

fs/sync.c

@@ -27,6 +27,13 @@
  */
 static int __sync_filesystem(struct super_block *sb, int wait)
 {
+	/*
+	 * This should be safe, as we require bdi backing to actually
+	 * write out data in the first place
+	 */
+	if (!sb->s_bdi)
+		return 0;
+
 	/* Avoid doing twice syncing and cache pruning for quota sync */
 	if (!wait) {
 		writeout_quota_sb(sb, -1);

@@ -101,7 +108,7 @@ restart:
 		spin_unlock(&sb_lock);
 
 		down_read(&sb->s_umount);
-		if (!(sb->s_flags & MS_RDONLY) && sb->s_root)
+		if (!(sb->s_flags & MS_RDONLY) && sb->s_root && sb->s_bdi)
 			__sync_filesystem(sb, wait);
 		up_read(&sb->s_umount);

fs/ubifs/super.c

@@ -1967,6 +1967,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
 	if (err)
 		goto out_bdi;
 
+	sb->s_bdi = &c->bdi;
 	sb->s_fs_info = c;
 	sb->s_magic = UBIFS_SUPER_MAGIC;
 	sb->s_blocksize = UBIFS_BLOCK_SIZE;

include/linux/backing-dev.h

@@ -59,6 +59,7 @@ struct bdi_writeback {
 
 struct backing_dev_info {
 	struct list_head bdi_list;
+	struct rcu_head rcu_head;
 	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
 	unsigned long state;	/* Always use atomic bitops on this */
 	unsigned int capabilities; /* Device capabilities */
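
Note: the new rcu_head field exists so mm/backing-dev.c (below) can defer work on a backing_dev_info with call_rcu(); the callback gets the rcu_head pointer back and recovers the containing object with container_of(). The sketch below shows the general embedded-rcu_head pattern with hypothetical item/item_reclaim_rcu names; the bdi code itself uses its rcu_head to run bdi_add_to_pending() rather than to free the object.

/* Hypothetical example of the embedded-rcu_head pattern. */
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
	struct rcu_head rcu_head;	/* storage call_rcu() needs inside the object */
	int payload;
};

/* Runs after a grace period, once no RCU reader can still see the item. */
static void item_reclaim_rcu(struct rcu_head *head)
{
	struct item *it = container_of(head, struct item, rcu_head);

	kfree(it);
}

static void item_unlink(struct item *it)
{
	/* assume the caller already did list_del_rcu(&it->list) under its lock */
	call_rcu(&it->rcu_head, item_reclaim_rcu);
}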

include/linux/fs.h

@@ -1343,6 +1343,7 @@ struct super_block {
 	int			s_nr_dentry_unused;	/* # of dentry on lru */
 
 	struct block_device	*s_bdev;
+	struct backing_dev_info *s_bdi;
 	struct mtd_info		*s_mtd;
 	struct list_head	s_instances;
 	struct quota_info	s_dquot;	/* Diskquota specific options */

mm/backing-dev.c
@@ -26,6 +26,12 @@ struct backing_dev_info default_backing_dev_info = {
 EXPORT_SYMBOL_GPL(default_backing_dev_info);
 
 static struct class *bdi_class;
+
+/*
+ * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
+ * reader side protection for bdi_pending_list. bdi_list has RCU reader side
+ * locking.
+ */
 DEFINE_SPINLOCK(bdi_lock);
 LIST_HEAD(bdi_list);
 LIST_HEAD(bdi_pending_list);
@@ -284,9 +290,9 @@ static int bdi_start_fn(void *ptr)
 	/*
 	 * Add us to the active bdi_list
 	 */
-	spin_lock(&bdi_lock);
-	list_add(&bdi->bdi_list, &bdi_list);
-	spin_unlock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
+	list_add_rcu(&bdi->bdi_list, &bdi_list);
+	spin_unlock_bh(&bdi_lock);
 
 	bdi_task_init(bdi, wb);
@@ -389,7 +395,7 @@ static int bdi_forker_task(void *ptr)
 		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
 			wb_do_writeback(me, 0);
 
-		spin_lock(&bdi_lock);
+		spin_lock_bh(&bdi_lock);
 
 		/*
 		 * Check if any existing bdi's have dirty data without
@@ -410,7 +416,7 @@ static int bdi_forker_task(void *ptr)
 		if (list_empty(&bdi_pending_list)) {
 			unsigned long wait;
 
-			spin_unlock(&bdi_lock);
+			spin_unlock_bh(&bdi_lock);
 			wait = msecs_to_jiffies(dirty_writeback_interval * 10);
 			schedule_timeout(wait);
 			try_to_freeze();
@@ -426,7 +432,7 @@ static int bdi_forker_task(void *ptr)
 		bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
 				 bdi_list);
 		list_del_init(&bdi->bdi_list);
-		spin_unlock(&bdi_lock);
+		spin_unlock_bh(&bdi_lock);
 
 		wb = &bdi->wb;
 		wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
@@ -445,9 +451,9 @@ static int bdi_forker_task(void *ptr)
 			 * a chance to flush other bdi's to free
 			 * memory.
 			 */
-			spin_lock(&bdi_lock);
+			spin_lock_bh(&bdi_lock);
 			list_add_tail(&bdi->bdi_list, &bdi_pending_list);
-			spin_unlock(&bdi_lock);
+			spin_unlock_bh(&bdi_lock);
 
 			bdi_flush_io(bdi);
 		}
@@ -456,6 +462,24 @@ static int bdi_forker_task(void *ptr)
 	return 0;
 }
 
+static void bdi_add_to_pending(struct rcu_head *head)
+{
+	struct backing_dev_info *bdi;
+
+	bdi = container_of(head, struct backing_dev_info, rcu_head);
+	INIT_LIST_HEAD(&bdi->bdi_list);
+
+	spin_lock(&bdi_lock);
+	list_add_tail(&bdi->bdi_list, &bdi_pending_list);
+	spin_unlock(&bdi_lock);
+
+	/*
+	 * We are now on the pending list, wake up bdi_forker_task()
+	 * to finish the job and add us back to the active bdi_list
+	 */
+	wake_up_process(default_backing_dev_info.wb.task);
+}
+
 /*
  * Add the default flusher task that gets created for any bdi
  * that has dirty data pending writeout
@@ -478,16 +502,29 @@ void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
 	 * waiting for previous additions to finish.
 	 */
 	if (!test_and_set_bit(BDI_pending, &bdi->state)) {
-		list_move_tail(&bdi->bdi_list, &bdi_pending_list);
+		list_del_rcu(&bdi->bdi_list);
 
 		/*
-		 * We are now on the pending list, wake up bdi_forker_task()
-		 * to finish the job and add us back to the active bdi_list
+		 * We must wait for the current RCU period to end before
+		 * moving to the pending list. So schedule that operation
+		 * from an RCU callback.
 		 */
-		wake_up_process(default_backing_dev_info.wb.task);
+		call_rcu(&bdi->rcu_head, bdi_add_to_pending);
 	}
 }
 
+/*
+ * Remove bdi from bdi_list, and ensure that it is no longer visible
+ */
+static void bdi_remove_from_list(struct backing_dev_info *bdi)
+{
+	spin_lock_bh(&bdi_lock);
+	list_del_rcu(&bdi->bdi_list);
+	spin_unlock_bh(&bdi_lock);
+
+	synchronize_rcu();
+}
+
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...)
 {
@@ -506,9 +543,9 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		goto exit;
 	}
 
-	spin_lock(&bdi_lock);
-	list_add_tail(&bdi->bdi_list, &bdi_list);
-	spin_unlock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
+	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
+	spin_unlock_bh(&bdi_lock);
 
 	bdi->dev = dev;
@@ -526,9 +563,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 			wb->task = NULL;
 			ret = -ENOMEM;
 
-			spin_lock(&bdi_lock);
-			list_del(&bdi->bdi_list);
-			spin_unlock(&bdi_lock);
+			bdi_remove_from_list(bdi);
 			goto exit;
 		}
 	}
@@ -565,9 +600,7 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	/*
 	 * Make sure nobody finds us on the bdi_list anymore
 	 */
-	spin_lock(&bdi_lock);
-	list_del(&bdi->bdi_list);
-	spin_unlock(&bdi_lock);
+	bdi_remove_from_list(bdi);
 
 	/*
 	 * Finally, kill the kernel threads. We don't need to be RCU
@@ -599,6 +632,7 @@ int bdi_init(struct backing_dev_info *bdi)
 	bdi->max_ratio = 100;
 	bdi->max_prop_frac = PROP_FRAC_BASE;
 	spin_lock_init(&bdi->wb_lock);
+	INIT_RCU_HEAD(&bdi->rcu_head);
 	INIT_LIST_HEAD(&bdi->bdi_list);
 	INIT_LIST_HEAD(&bdi->wb_list);
 	INIT_LIST_HEAD(&bdi->work_list);
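
Note: taken together, the hunks in this merge convert bdi_list to the classic RCU-protected list scheme: writers still serialize on bdi_lock (now taken with the _bh variants) and use the _rcu list helpers, removal waits out readers with synchronize_rcu() or defers via call_rcu(), and the reader in fs/fs-writeback.c walks the list inside rcu_read_lock() without the lock. Below is a condensed, hypothetical sketch of that scheme; the example_* names are not from this commit.

/* Hypothetical sketch of the RCU-protected list pattern used for bdi_list. */
#include <linux/spinlock.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct example_node {
	struct list_head link;
};

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

static void example_add(struct example_node *node)
{
	spin_lock_bh(&example_lock);		/* writers still take the spinlock */
	list_add_tail_rcu(&node->link, &example_list);
	spin_unlock_bh(&example_lock);
}

static void example_remove(struct example_node *node)
{
	spin_lock_bh(&example_lock);
	list_del_rcu(&node->link);
	spin_unlock_bh(&example_lock);

	synchronize_rcu();			/* wait for readers, as bdi_remove_from_list() does */
}

static int example_count(void)
{
	struct example_node *node;
	int n = 0;

	rcu_read_lock();			/* readers no longer need the lock */
	list_for_each_entry_rcu(node, &example_list, link)
		n++;
	rcu_read_unlock();

	return n;
}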

mm/page-writeback.c
@@ -315,7 +315,7 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 {
 	int ret = 0;
 
-	spin_lock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
 	if (min_ratio > bdi->max_ratio) {
 		ret = -EINVAL;
 	} else {
@@ -327,7 +327,7 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 			ret = -EINVAL;
 		}
 	}
-	spin_unlock(&bdi_lock);
+	spin_unlock_bh(&bdi_lock);
 
 	return ret;
 }
@@ -339,14 +339,14 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 	if (max_ratio > 100)
 		return -EINVAL;
 
-	spin_lock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
 	if (bdi->min_ratio > max_ratio) {
 		ret = -EINVAL;
 	} else {
 		bdi->max_ratio = max_ratio;
 		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
 	}
-	spin_unlock(&bdi_lock);
+	spin_unlock_bh(&bdi_lock);
 
 	return ret;
 }
@@ -585,7 +585,6 @@ static void balance_dirty_pages(struct address_space *mapping)
 				 > background_thresh))) {
 			struct writeback_control wbc = {
 				.bdi		= bdi,
 				.sync_mode	= WB_SYNC_NONE,
 				.nr_to_write	= nr_writeback,
 			};
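
Note: the bdi_lock acquisitions in bdi_set_min_ratio()/bdi_set_max_ratio() move to the _bh variants here, matching most of the conversions in mm/backing-dev.c above. spin_lock_bh() takes the lock and additionally disables softirq (bottom half) processing on the local CPU for the duration of the critical section. A minimal, hypothetical sketch of the pattern; the example_* names are not from this commit.

/* Hypothetical example of the _bh locking pattern. */
#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

struct example_node {
	struct list_head link;
};

static void example_insert(struct example_node *node)
{
	/* _bh variant: take the lock and block local softirqs, so softirq
	 * code on this CPU cannot deadlock against this critical section */
	spin_lock_bh(&example_lock);
	list_add_tail(&node->link, &example_list);
	spin_unlock_bh(&example_lock);
}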