rcu_read_lock();
for (i = 0; i < disks; i++) {
- mdk_rdev_t *rdev = conf->multipaths[i].rdev;
+ mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && rdev->in_sync) {
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
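
Every reader-side hunk in this patch has the same shape: the rdev pointer is loaded exactly once with rcu_dereference() inside an rcu_read_lock() section, tested, and then pinned by raising nr_pending before the read lock is dropped. A minimal sketch of that pattern, condensed from the hunks rather than quoted verbatim:

        rcu_read_lock();
        rdev = rcu_dereference(conf->multipaths[i].rdev);
        if (rdev && rdev->in_sync) {
                /* pin: rdev now survives beyond the read-side section */
                atomic_inc(&rdev->nr_pending);
                rcu_read_unlock();
                /* ... issue the I/O, drop nr_pending on completion ... */
        } else
                rcu_read_unlock();

The nr_pending increment is what makes the early rcu_read_unlock() legal: once the count is raised, the rdev cannot be torn down even after the RCU grace period expires.
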
rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = conf->multipaths[i].rdev;
+ mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
rcu_read_lock();
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = conf->multipaths[i].rdev;
+ mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !rdev->faulty) {
struct block_device *bdev = rdev->bdev;
request_queue_t *r_queue = bdev_get_queue(bdev);
conf->working_disks++;
rdev->raid_disk = path;
rdev->in_sync = 1;
- p->rdev = rdev;
+ rcu_assign_pointer(p->rdev, rdev);
found = 1;
}
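
The update side is the mirror image: hot-add initialises the new rdev completely and only then publishes it with rcu_assign_pointer(), whose write barrier guarantees that any reader who observes the new pointer also observes the initialised fields. Condensed from the hunk above:

        rdev->raid_disk = path;
        rdev->in_sync = 1;
        /* barrier, then publish: readers see NULL or a complete rdev */
        rcu_assign_pointer(p->rdev, rdev);

A plain p->rdev = rdev store would allow a weakly ordered CPU to make the pointer visible before the raid_disk and in_sync stores; that is precisely the window rcu_assign_pointer() closes.
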
/* Choose the first operational device, for consistency */
new_disk = 0;
- for (rdev = conf->mirrors[new_disk].rdev;
+ for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
!rdev || !rdev->in_sync
|| test_bit(WriteMostly, &rdev->flags);
- rdev = conf->mirrors[++new_disk].rdev) {
+ rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {
if (rdev && rdev->in_sync)
wonly_disk = new_disk;
/* make sure the disk is operational */
- for (rdev = conf->mirrors[new_disk].rdev;
+ for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
!rdev || !rdev->in_sync ||
test_bit(WriteMostly, &rdev->flags);
- rdev = conf->mirrors[new_disk].rdev) {
+ rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {
if (rdev && rdev->in_sync)
wonly_disk = new_disk;
disk = conf->raid_disks;
disk--;
- rdev = conf->mirrors[disk].rdev;
+ rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (!rdev ||
!rdev->in_sync ||
if (new_disk >= 0) {
- rdev = conf->mirrors[new_disk].rdev;
+ rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
if (!rdev)
goto retry;
atomic_inc(&rdev->nr_pending);
rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+ mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
rcu_read_lock();
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+ mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !rdev->faulty) {
struct block_device *bdev = rdev->bdev;
request_queue_t *r_queue = bdev_get_queue(bdev);
#endif
rcu_read_lock();
for (i = 0; i < disks; i++) {
- if ((rdev=conf->mirrors[i].rdev) != NULL &&
+ if ((rdev = rcu_dereference(conf->mirrors[i].rdev)) != NULL &&
!rdev->faulty) {
atomic_inc(&rdev->nr_pending);
if (rdev->faulty) {
found = 1;
if (rdev->saved_raid_disk != mirror)
conf->fullsync = 1;
- p->rdev = rdev;
+ rcu_assign_pointer(p->rdev, rdev);
break;
}
int disk, slot, nslot;
const int sectors = r10_bio->sectors;
sector_t new_distance, current_distance;
+ mdk_rdev_t *rdev;
raid10_find_phys(conf, r10_bio);
rcu_read_lock();
slot = 0;
disk = r10_bio->devs[slot].devnum;
- while (!conf->mirrors[disk].rdev ||
- !conf->mirrors[disk].rdev->in_sync) {
+ while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
+ !rdev->in_sync) {
slot++;
if (slot == conf->copies) {
slot = 0;
/* make sure the disk is operational */
slot = 0;
disk = r10_bio->devs[slot].devnum;
- while (!conf->mirrors[disk].rdev ||
- !conf->mirrors[disk].rdev->in_sync) {
+ while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
+ !rdev->in_sync) {
slot ++;
if (slot == conf->copies) {
disk = -1;
int ndisk = r10_bio->devs[nslot].devnum;
- if (!conf->mirrors[ndisk].rdev ||
- !conf->mirrors[ndisk].rdev->in_sync)
+ if ((rdev = rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
+ !rdev->in_sync)
continue;
- if (!atomic_read(&conf->mirrors[ndisk].rdev->nr_pending)) {
+ if (!atomic_read(&rdev->nr_pending)) {
disk = ndisk;
slot = nslot;
break;
r10_bio->read_slot = slot;
/* conf->next_seq_sect = this_sector + sectors;*/
- if (disk >= 0 && conf->mirrors[disk].rdev)
- atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
+ if (disk >= 0 && (rdev = rcu_dereference(conf->mirrors[disk].rdev)) != NULL)
+ atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
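
One subtlety in the hunk above: the atomic_inc() must operate on the rdev local returned by rcu_dereference(), never on a fresh read of conf->mirrors[disk].rdev. The shared pointer can be replaced between the two accesses, so a second load could pin a different rdev than the one that was just tested:

        /* racy: the second load may observe a replacement rdev */
        atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
        /* safe: pins the rdev that was actually checked */
        atomic_inc(&rdev->nr_pending);
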
rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+ mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
rcu_read_lock();
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+ mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !rdev->faulty) {
struct block_device *bdev = rdev->bdev;
request_queue_t *r_queue = bdev_get_queue(bdev);
rcu_read_lock();
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
- if (conf->mirrors[d].rdev &&
- !conf->mirrors[d].rdev->faulty) {
- atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+ mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
+ if (rdev && !rdev->faulty) {
+ atomic_inc(&rdev->nr_pending);
r10_bio->devs[i].bio = bio;
} else
r10_bio->devs[i].bio = NULL;
p->head_position = 0;
rdev->raid_disk = mirror;
found = 1;
- p->rdev = rdev;
+ rcu_assign_pointer(p->rdev, rdev);
break;
}
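
Publishing with rcu_assign_pointer() only pays off if hot-remove, which is not shown in this excerpt, is RCU-aware as well. Presumably it clears the pointer, waits out a grace period, and only then inspects nr_pending; a sketch under that assumption, with err as a hypothetical local:

        p->rdev = NULL;
        synchronize_rcu();      /* all readers of the old pointer are done */
        if (atomic_read(&rdev->nr_pending)) {
                /* a reader pinned it in time; put it back, report busy */
                err = -EBUSY;
                p->rdev = rdev;
        }
        /* else no reader holds it and none can appear; safe to free */

After synchronize_rcu() returns, no CPU can still be between rcu_dereference() and the nr_pending increment, so a zero count really does mean the rdev is unreferenced.
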
bi->bi_end_io = raid5_end_read_request;
rcu_read_lock();
- rdev = conf->disks[i].rdev;
+ rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && rdev->faulty)
rdev = NULL;
if (rdev)
rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = conf->disks[i].rdev;
+ mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
rcu_read_lock();
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = conf->disks[i].rdev;
+ mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !rdev->faulty) {
struct block_device *bdev = rdev->bdev;
request_queue_t *r_queue = bdev_get_queue(bdev);
found = 1;
if (rdev->saved_raid_disk != disk)
conf->fullsync = 1;
- p->rdev = rdev;
+ rcu_assign_pointer(p->rdev, rdev);
break;
}
print_raid5_conf(conf);
bi->bi_end_io = raid6_end_read_request;
rcu_read_lock();
- rdev = conf->disks[i].rdev;
+ rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && rdev->faulty)
rdev = NULL;
if (rdev)
rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = conf->disks[i].rdev;
+ mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
rcu_read_lock();
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = conf->disks[i].rdev;
+ mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !rdev->faulty) {
struct block_device *bdev = rdev->bdev;
request_queue_t *r_queue = bdev_get_queue(bdev);
found = 1;
if (rdev->saved_raid_disk != disk)
conf->fullsync = 1;
- p->rdev = rdev;
+ rcu_assign_pointer(p->rdev, rdev);
break;
}
print_raid6_conf(conf);