Commit d6065f7b authored by Suzanne Wood, committed by Linus Torvalds

[PATCH] md: provide proper rcu_dereference / rcu_assign_pointer annotations in md

Acked-by: <paulmck@us.ibm.com>
Signed-off-by: Suzanne Wood <suzannew@cs.pdx.edu>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 9d88883e
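
The pattern applied throughout the hunks below is the standard RCU publish/subscribe idiom: readers fetch the per-slot rdev pointer through rcu_dereference() inside an rcu_read_lock()/rcu_read_unlock() section (pinning the device with nr_pending before leaving the section), while writers publish a newly configured rdev with rcu_assign_pointer(). A minimal sketch of that idiom, using illustrative mirror_info / my_rdev names rather than the actual md structures:

#include <linux/rcupdate.h>
#include <linux/atomic.h>

struct my_rdev {
	int in_sync;
	int faulty;
	atomic_t nr_pending;
};

struct mirror_info {
	struct my_rdev *rdev;		/* read and published under RCU */
};

/* Reader side: load the pointer once via rcu_dereference() and pin the
 * device with nr_pending before dropping the RCU read lock. */
static struct my_rdev *get_active_rdev(struct mirror_info *m)
{
	struct my_rdev *rdev;

	rcu_read_lock();
	rdev = rcu_dereference(m->rdev);
	if (rdev && rdev->in_sync && !rdev->faulty)
		atomic_inc(&rdev->nr_pending);
	else
		rdev = NULL;
	rcu_read_unlock();
	return rdev;
}

/* Writer side: initialize the device fully, then publish it with
 * rcu_assign_pointer() so readers never see a half-built rdev. */
static void install_rdev(struct mirror_info *m, struct my_rdev *rdev)
{
	rdev->in_sync = 1;
	rdev->faulty = 0;
	atomic_set(&rdev->nr_pending, 0);
	rcu_assign_pointer(m->rdev, rdev);
}
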
drivers/md/multipath.c

@@ -63,7 +63,7 @@ static int multipath_map (multipath_conf_t *conf)
 	rcu_read_lock();
 	for (i = 0; i < disks; i++) {
-		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && rdev->in_sync) {
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
@@ -139,7 +139,7 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
@@ -224,7 +224,7 @@ static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !rdev->faulty) {
 			struct block_device *bdev = rdev->bdev;
 			request_queue_t *r_queue = bdev_get_queue(bdev);
@@ -331,7 +331,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			conf->working_disks++;
 			rdev->raid_disk = path;
 			rdev->in_sync = 1;
-			p->rdev = rdev;
+			rcu_assign_pointer(p->rdev, rdev);
 			found = 1;
 		}
drivers/md/raid1.c

@@ -416,10 +416,10 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 		/* Choose the first operation device, for consistancy */
 		new_disk = 0;
-		for (rdev = conf->mirrors[new_disk].rdev;
+		for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
 		     !rdev || !rdev->in_sync
 			     || test_bit(WriteMostly, &rdev->flags);
-		     rdev = conf->mirrors[++new_disk].rdev) {
+		     rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {
 			if (rdev && rdev->in_sync)
 				wonly_disk = new_disk;
@@ -434,10 +434,10 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 	/* make sure the disk is operational */
-	for (rdev = conf->mirrors[new_disk].rdev;
+	for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
 	     !rdev || !rdev->in_sync ||
 	     test_bit(WriteMostly, &rdev->flags);
-	     rdev = conf->mirrors[new_disk].rdev) {
+	     rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {
 		if (rdev && rdev->in_sync)
 			wonly_disk = new_disk;
@@ -474,7 +474,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 			disk = conf->raid_disks;
 		disk--;
-		rdev = conf->mirrors[disk].rdev;
+		rdev = rcu_dereference(conf->mirrors[disk].rdev);
 		if (!rdev ||
 		    !rdev->in_sync ||
@@ -496,7 +496,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 	if (new_disk >= 0) {
-		rdev = conf->mirrors[new_disk].rdev;
+		rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
 		if (!rdev)
 			goto retry;
 		atomic_inc(&rdev->nr_pending);
@@ -522,7 +522,7 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
@@ -556,7 +556,7 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !rdev->faulty) {
 			struct block_device *bdev = rdev->bdev;
 			request_queue_t *r_queue = bdev_get_queue(bdev);
@@ -728,7 +728,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
 #endif
 	rcu_read_lock();
 	for (i = 0; i < disks; i++) {
-		if ((rdev=conf->mirrors[i].rdev) != NULL &&
+		if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL &&
 		    !rdev->faulty) {
 			atomic_inc(&rdev->nr_pending);
 			if (rdev->faulty) {
@@ -954,7 +954,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			found = 1;
 			if (rdev->saved_raid_disk != mirror)
 				conf->fullsync = 1;
-			p->rdev = rdev;
+			rcu_assign_pointer(p->rdev, rdev);
 			break;
 		}
drivers/md/raid10.c

@@ -496,6 +496,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
 	int disk, slot, nslot;
 	const int sectors = r10_bio->sectors;
 	sector_t new_distance, current_distance;
+	mdk_rdev_t *rdev;
 	raid10_find_phys(conf, r10_bio);
 	rcu_read_lock();
@@ -510,8 +511,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
 		slot = 0;
 		disk = r10_bio->devs[slot].devnum;
-		while (!conf->mirrors[disk].rdev ||
-		       !conf->mirrors[disk].rdev->in_sync) {
+		while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
+		       !rdev->in_sync) {
 			slot++;
 			if (slot == conf->copies) {
 				slot = 0;
@@ -527,8 +528,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
 	/* make sure the disk is operational */
 	slot = 0;
 	disk = r10_bio->devs[slot].devnum;
-	while (!conf->mirrors[disk].rdev ||
-	       !conf->mirrors[disk].rdev->in_sync) {
+	while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
+	       !rdev->in_sync) {
 		slot ++;
 		if (slot == conf->copies) {
 			disk = -1;
@@ -547,11 +548,11 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
 		int ndisk = r10_bio->devs[nslot].devnum;
-		if (!conf->mirrors[ndisk].rdev ||
-		    !conf->mirrors[ndisk].rdev->in_sync)
+		if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
+		    !rdev->in_sync)
 			continue;
-		if (!atomic_read(&conf->mirrors[ndisk].rdev->nr_pending)) {
+		if (!atomic_read(&rdev->nr_pending)) {
 			disk = ndisk;
 			slot = nslot;
 			break;
@@ -569,7 +570,7 @@ rb_out:
 	r10_bio->read_slot = slot;
 /*	conf->next_seq_sect = this_sector + sectors;*/
-	if (disk >= 0 && conf->mirrors[disk].rdev)
+	if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
 		atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
 	rcu_read_unlock();
@@ -583,7 +584,7 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
@@ -614,7 +615,7 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !rdev->faulty) {
 			struct block_device *bdev = rdev->bdev;
 			request_queue_t *r_queue = bdev_get_queue(bdev);
@@ -768,9 +769,10 @@ static int make_request(request_queue_t *q, struct bio * bio)
 	rcu_read_lock();
 	for (i = 0; i < conf->copies; i++) {
 		int d = r10_bio->devs[i].devnum;
-		if (conf->mirrors[d].rdev &&
-		    !conf->mirrors[d].rdev->faulty) {
-			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
+		if (rdev &&
+		    !rdev->faulty) {
+			atomic_inc(&rdev->nr_pending);
 			r10_bio->devs[i].bio = bio;
 		} else
 			r10_bio->devs[i].bio = NULL;
@@ -980,7 +982,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			p->head_position = 0;
 			rdev->raid_disk = mirror;
 			found = 1;
-			p->rdev = rdev;
+			rcu_assign_pointer(p->rdev, rdev);
 			break;
 		}
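
One detail worth noting in the raid10 read_balance() hunks above: besides wrapping each load in rcu_dereference(), the change caches the result in the new local rdev variable, so the in_sync test and the later nr_pending access use the same snapshot of the pointer instead of re-reading an RCU-protected pointer that may change between accesses. A hedged sketch of that shape, reusing the illustrative mirror_info / my_rdev types from the earlier sketch (not the actual md code):

/* Pick the first usable slot, dereferencing each RCU-protected pointer
 * exactly once per iteration and testing that same snapshot. */
static int pick_in_sync_slot(struct mirror_info *mirrors, int nslots)
{
	int slot;

	rcu_read_lock();
	for (slot = 0; slot < nslots; slot++) {
		struct my_rdev *rdev = rcu_dereference(mirrors[slot].rdev);

		if (rdev && rdev->in_sync) {
			/* Pin the device before leaving the RCU section. */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return slot;
		}
	}
	rcu_read_unlock();
	return -1;
}
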
...@@ -1374,7 +1374,7 @@ static void handle_stripe(struct stripe_head *sh) ...@@ -1374,7 +1374,7 @@ static void handle_stripe(struct stripe_head *sh)
bi->bi_end_io = raid5_end_read_request; bi->bi_end_io = raid5_end_read_request;
rcu_read_lock(); rcu_read_lock();
rdev = conf->disks[i].rdev; rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && rdev->faulty) if (rdev && rdev->faulty)
rdev = NULL; rdev = NULL;
if (rdev) if (rdev)
...@@ -1448,7 +1448,7 @@ static void unplug_slaves(mddev_t *mddev) ...@@ -1448,7 +1448,7 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_lock(); rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) { for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = conf->disks[i].rdev; mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
request_queue_t *r_queue = bdev_get_queue(rdev->bdev); request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
...@@ -1493,7 +1493,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk, ...@@ -1493,7 +1493,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
rcu_read_lock(); rcu_read_lock();
for (i=0; i<mddev->raid_disks && ret == 0; i++) { for (i=0; i<mddev->raid_disks && ret == 0; i++) {
mdk_rdev_t *rdev = conf->disks[i].rdev; mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !rdev->faulty) { if (rdev && !rdev->faulty) {
struct block_device *bdev = rdev->bdev; struct block_device *bdev = rdev->bdev;
request_queue_t *r_queue = bdev_get_queue(bdev); request_queue_t *r_queue = bdev_get_queue(bdev);
...@@ -2165,7 +2165,7 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -2165,7 +2165,7 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
found = 1; found = 1;
if (rdev->saved_raid_disk != disk) if (rdev->saved_raid_disk != disk)
conf->fullsync = 1; conf->fullsync = 1;
p->rdev = rdev; rcu_assign_pointer(p->rdev, rdev);
break; break;
} }
print_raid5_conf(conf); print_raid5_conf(conf);
......
...@@ -1464,7 +1464,7 @@ static void handle_stripe(struct stripe_head *sh) ...@@ -1464,7 +1464,7 @@ static void handle_stripe(struct stripe_head *sh)
bi->bi_end_io = raid6_end_read_request; bi->bi_end_io = raid6_end_read_request;
rcu_read_lock(); rcu_read_lock();
rdev = conf->disks[i].rdev; rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && rdev->faulty) if (rdev && rdev->faulty)
rdev = NULL; rdev = NULL;
if (rdev) if (rdev)
...@@ -1538,7 +1538,7 @@ static void unplug_slaves(mddev_t *mddev) ...@@ -1538,7 +1538,7 @@ static void unplug_slaves(mddev_t *mddev)
rcu_read_lock(); rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) { for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = conf->disks[i].rdev; mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
request_queue_t *r_queue = bdev_get_queue(rdev->bdev); request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
...@@ -1583,7 +1583,7 @@ static int raid6_issue_flush(request_queue_t *q, struct gendisk *disk, ...@@ -1583,7 +1583,7 @@ static int raid6_issue_flush(request_queue_t *q, struct gendisk *disk,
rcu_read_lock(); rcu_read_lock();
for (i=0; i<mddev->raid_disks && ret == 0; i++) { for (i=0; i<mddev->raid_disks && ret == 0; i++) {
mdk_rdev_t *rdev = conf->disks[i].rdev; mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !rdev->faulty) { if (rdev && !rdev->faulty) {
struct block_device *bdev = rdev->bdev; struct block_device *bdev = rdev->bdev;
request_queue_t *r_queue = bdev_get_queue(bdev); request_queue_t *r_queue = bdev_get_queue(bdev);
...@@ -2154,7 +2154,7 @@ static int raid6_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -2154,7 +2154,7 @@ static int raid6_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
found = 1; found = 1;
if (rdev->saved_raid_disk != disk) if (rdev->saved_raid_disk != disk)
conf->fullsync = 1; conf->fullsync = 1;
p->rdev = rdev; rcu_assign_pointer(p->rdev, rdev);
break; break;
} }
print_raid6_conf(conf); print_raid6_conf(conf);
......