/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/seq_file.h>
/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.
 * Each device is divided into far_copies sections.
 * In each section, chunks are laid out in a style similar to raid0, but
 * near_copies copies of each chunk are stored (each on a different drive).
 * The starting device for each section is offset near_copies from the starting
 * device of the previous section.
 * Thus there are (near_copies*far_copies) copies of each chunk, and each is
 * on a different drive.
 * near_copies and far_copies must be at least one, and their product is at most
 * raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are in adjacent stripes.
 */
/*
 * Number of guaranteed r10bios in case of extreme VM load:
 */
#define NR_RAID10_BIOS 256
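/*
 * Illustrative sketch only (not part of this driver): decoding a layout word
 * as described above.  For example, a hypothetical layout of 0x102 would mean
 * near_copies = 2, far_copies = 1, far_offset = 0.  This mirrors the decoding
 * done later in setup_conf().
 */
static inline void example_decode_layout(int layout, int *nc, int *fc, int *fo)
{
	*nc = layout & 255;		/* low byte: near_copies */
	*fc = (layout >> 8) & 255;	/* second byte: far_copies */
	*fo = layout & (1 << 16);	/* bit 16: far_offset flag */
}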
static void unplug_slaves(mddev_t *mddev);

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);

static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
	int size = offsetof(struct r10bio_s, devs[conf->copies]);

	/* allocate a r10bio with room for raid_disks entries in the bios array */
	r10_bio = kzalloc(size, gfp_flags);
	if (!r10_bio && conf->mddev)
		unplug_slaves(conf->mddev);

static void r10bio_pool_free(void *r10_bio, void *data)
/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)

/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf)
 */
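/*
 * Worked example for the constants above (assuming 4K pages): RESYNC_PAGES is
 * (64K + 4K - 1) / 4K == 16 pages per resync request, RESYNC_WINDOW reserves
 * 1MB of memory, and RESYNC_DEPTH allows 32MB / 64K == 512 concurrent resync
 * requests, memory permitting.
 */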
static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)

	r10_bio = r10bio_pool_alloc(gfp_flags, conf);

		unplug_slaves(conf->mddev);

	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
		nalloc = conf->copies; /* resync */
	else
		nalloc = 2; /* recovery */

	for (j = nalloc ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);

		r10_bio->devs[j].bio = bio;

	/*
	 * Allocate RESYNC_PAGES data pages and attach them
	 */
	for (j = 0 ; j < nalloc; j++) {
		bio = r10_bio->devs[j].bio;
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);

			bio->bi_io_vec[i].bv_page = page;

	safe_put_page(bio->bi_io_vec[i-1].bv_page);
	for (i = 0; i < RESYNC_PAGES; i++)
		safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
	while (++j < nalloc)
		bio_put(r10_bio->devs[j].bio);
	r10bio_pool_free(r10_bio, conf);
static void r10buf_pool_free(void *__r10_bio, void *data)

	r10bio_t *r10bio = __r10_bio;

	for (j = 0; j < conf->copies; j++) {
		struct bio *bio = r10bio->devs[j].bio;

		for (i = 0; i < RESYNC_PAGES; i++) {
			safe_put_page(bio->bi_io_vec[i].bv_page);
			bio->bi_io_vec[i].bv_page = NULL;

	r10bio_pool_free(r10bio, conf);
static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)

	for (i = 0; i < conf->copies; i++) {
		struct bio **bio = &r10_bio->devs[i].bio;
		if (*bio && *bio != IO_BLOCKED)
static void free_r10bio(r10bio_t *r10_bio)

	conf_t *conf = r10_bio->mddev->private;

	/*
	 * Wake up any possible resync thread that waits for the device
	 */

	put_all_bios(conf, r10_bio);
	mempool_free(r10_bio, conf->r10bio_pool);
static void put_buf(r10bio_t *r10_bio)

	conf_t *conf = r10_bio->mddev->private;

	mempool_free(r10_bio, conf->r10buf_pool);
static void reschedule_retry(r10bio_t *r10_bio)

	mddev_t *mddev = r10_bio->mddev;
	conf_t *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r10_bio->retry_list, &conf->retry_list);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* wake up frozen array... */
	wake_up(&conf->wait_barrier);

	md_wakeup_thread(mddev->thread);
/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 */
static void raid_end_bio_io(r10bio_t *r10_bio)

	struct bio *bio = r10_bio->master_bio;

		  test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
	free_r10bio(r10_bio);
/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, r10bio_t *r10_bio)

	conf_t *conf = r10_bio->mddev->private;

	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
		r10_bio->devs[slot].addr + (r10_bio->sectors);
static void raid10_end_read_request(struct bio *bio, int error)

	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r10bio_t *r10_bio = bio->bi_private;

	conf_t *conf = r10_bio->mddev->private;

	slot = r10_bio->read_slot;
	dev = r10_bio->devs[slot].devnum;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(slot, r10_bio);

		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);
		raid_end_bio_io(r10_bio);

		char b[BDEVNAME_SIZE];
		if (printk_ratelimit())
			printk(KERN_ERR "md/raid10:%s: %s: rescheduling sector %llu\n",
			       bdevname(conf->mirrors[dev].rdev->bdev, b),
			       (unsigned long long)r10_bio->sector);
		reschedule_retry(r10_bio);

	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
static void raid10_end_write_request(struct bio *bio, int error)

	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r10bio_t *r10_bio = bio->bi_private;

	conf_t *conf = r10_bio->mddev->private;

	for (slot = 0; slot < conf->copies; slot++)
		if (r10_bio->devs[slot].bio == bio)
	dev = r10_bio->devs[slot].devnum;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
		md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
		/* an I/O failed, we can't clear the bitmap */
		set_bit(R10BIO_Degraded, &r10_bio->state);
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);

	update_head_pos(slot, r10_bio);

	/*
	 * Let's see if all mirrored write operations have finished
	 */
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		/* clear the bitmap if all writes complete successfully */
		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
				!test_bit(R10BIO_Degraded, &r10_bio->state),
		md_write_end(r10_bio->mddev);
		raid_end_bio_io(r10_bio);

	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 *
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the one device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address
 */
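/*
 * Illustrative sketch only (not part of this driver): the near-copy part of
 * the mapping described above, for the simple case far_copies == 1 and
 * far_offset == 0.  Virtual chunk 'vchunk' is stored near_copies times on
 * consecutive devices, starting at device (vchunk * near_copies) % raid_disks
 * in stripe (vchunk * near_copies) / raid_disks; compare raid10_find_phys()
 * below.
 */
static void example_near_mapping(int vchunk, int raid_disks, int near_copies,
				 int *stripe, int *first_dev)
{
	int n = vchunk * near_copies;

	*stripe = n / raid_disks;	/* which row of chunks across the drives */
	*first_dev = n % raid_disks;	/* copies land on consecutive devices */
}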
static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)

	/* now calculate first sector/dev */
	chunk = r10bio->sector >> conf->chunk_shift;
	sector = r10bio->sector & conf->chunk_mask;

	chunk *= conf->near_copies;

	dev = sector_div(stripe, conf->raid_disks);
	if (conf->far_offset)
		stripe *= conf->far_copies;

	sector += stripe << conf->chunk_shift;

	/* and calculate all the others */
	for (n = 0; n < conf->near_copies; n++) {
		r10bio->devs[slot].addr = sector;
		r10bio->devs[slot].devnum = d;

		for (f = 1; f < conf->far_copies; f++) {
			d += conf->near_copies;
			if (d >= conf->raid_disks)
				d -= conf->raid_disks;
			r10bio->devs[slot].devnum = d;
			r10bio->devs[slot].addr = s;

		if (dev >= conf->raid_disks) {
			sector += (conf->chunk_mask + 1);

	BUG_ON(slot != conf->copies);
static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)

	sector_t offset, chunk, vchunk;

	offset = sector & conf->chunk_mask;
	if (conf->far_offset) {
		chunk = sector >> conf->chunk_shift;
		fc = sector_div(chunk, conf->far_copies);
		dev -= fc * conf->near_copies;
			dev += conf->raid_disks;
		while (sector >= conf->stride) {
			sector -= conf->stride;
			if (dev < conf->near_copies)
				dev += conf->raid_disks - conf->near_copies;
			else
				dev -= conf->near_copies;

	chunk = sector >> conf->chunk_shift;

	vchunk = chunk * conf->raid_disks + dev;
	sector_div(vchunk, conf->near_copies);
	return (vchunk << conf->chunk_shift) + offset;
/*
 * raid10_mergeable_bvec -- tell the bio layer if two requests can be merged
 * @bvm: properties of new bio
 * @biovec: the request that could be merged to it.
 *
 * Return amount of bytes we can accept at this offset
 * If near_copies == raid_disks, there are no striping issues,
 * but in that case, the function isn't called at all.
 */
static int raid10_mergeable_bvec(struct request_queue *q,
				 struct bvec_merge_data *bvm,
				 struct bio_vec *biovec)

	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);

	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink read balancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
static int read_balance(conf_t *conf, r10bio_t *r10_bio)
	const sector_t this_sector = r10_bio->sector;
	int disk, slot, nslot;
	const int sectors = r10_bio->sectors;
	sector_t new_distance, current_distance;

	raid10_find_phys(conf, r10_bio);

	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on (recovery is ok), or below
	 * the resync window. We take the first readable disk when
	 * above the resync window.
	 */
	if (conf->mddev->recovery_cp < MaxSector
	    && (this_sector + sectors >= conf->next_resync)) {
		/* make sure that disk is operational */
		disk = r10_bio->devs[slot].devnum;

		while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
		       r10_bio->devs[slot].bio == IO_BLOCKED ||
		       !test_bit(In_sync, &rdev->flags)) {
			if (slot == conf->copies) {
			disk = r10_bio->devs[slot].devnum;

	/* make sure the disk is operational */
	disk = r10_bio->devs[slot].devnum;
	while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
	       r10_bio->devs[slot].bio == IO_BLOCKED ||
	       !test_bit(In_sync, &rdev->flags)) {
		if (slot == conf->copies) {
		disk = r10_bio->devs[slot].devnum;

	current_distance = abs(r10_bio->devs[slot].addr -
			       conf->mirrors[disk].head_position);

	/* Find the disk whose head is closest,
	 * or - for far > 1 - find the closest to partition beginning */

	for (nslot = slot; nslot < conf->copies; nslot++) {
		int ndisk = r10_bio->devs[nslot].devnum;

		if ((rdev = rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
		    r10_bio->devs[nslot].bio == IO_BLOCKED ||
		    !test_bit(In_sync, &rdev->flags))

		/* This optimisation is debatable, and completely destroys
		 * sequential read speed for 'far copies' arrays. So only
		 * keep it for 'near' arrays, and review those later.
		 */
		if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) {

		/* for far > 1 always use the lowest address */
		if (conf->far_copies > 1)
			new_distance = r10_bio->devs[nslot].addr;
		else
			new_distance = abs(r10_bio->devs[nslot].addr -
					   conf->mirrors[ndisk].head_position);
		if (new_distance < current_distance) {
			current_distance = new_distance;

	r10_bio->read_slot = slot;
	/* conf->next_seq_sect = this_sector + sectors;*/

	if (disk >= 0 && (rdev = rcu_dereference(conf->mirrors[disk].rdev)) != NULL)
		atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
static void unplug_slaves(mddev_t *mddev)

	conf_t *conf = mddev->private;

	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);

			rdev_dec_pending(rdev, mddev);

static void raid10_unplug(struct request_queue *q)

	mddev_t *mddev = q->queuedata;

	unplug_slaves(q->queuedata);
	md_wakeup_thread(mddev->thread);
static int raid10_congested(void *data, int bits)

	mddev_t *mddev = data;
	conf_t *conf = mddev->private;

	if (mddev_congested(mddev, bits))

	for (i = 0; i < conf->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(&q->backing_dev_info, bits);
static int flush_pending_writes(conf_t *conf)

	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 * We return 1 if any requests were actually submitted.
	 */

	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {

		bio = bio_list_get(&conf->pending_bio_list);
		/* Spinlock only taken to quiet a warning */
		spin_lock(conf->mddev->queue->queue_lock);
		blk_remove_plug(conf->mddev->queue);
		spin_unlock(conf->mddev->queue->queue_lock);
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to disk
		 * before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;

			generic_make_request(bio);

		spin_unlock_irq(&conf->device_lock);
/*
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 *
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down. This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'. When that returns there
 * is no background IO happening. It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier. Once that returns
 * there is no normal IO happening. It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
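/*
 * Illustrative sketch only (not part of this driver): the calling convention
 * described above, shown for a hypothetical caller.  Regular IO brackets its
 * work with wait_barrier()/allow_barrier(); resync/recovery brackets its work
 * with raise_barrier()/lower_barrier().
 */
static void example_regular_io(conf_t *conf)
{
	wait_barrier(conf);	/* blocks while background IO holds the barrier */
	/* ... issue the normal read or write here ... */
	allow_barrier(conf);	/* lets a waiting raise_barrier() proceed */
}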
static void raise_barrier(conf_t *conf, int force)

	BUG_ON(force && !conf->barrier);
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting (unless 'force') */
	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
			    raid10_unplug(conf->mddev->queue));

	/* block any new IO from starting */

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    raid10_unplug(conf->mddev->queue));

	spin_unlock_irq(&conf->resync_lock);
static void lower_barrier(conf_t *conf)

	spin_lock_irqsave(&conf->resync_lock, flags);
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);

static void wait_barrier(conf_t *conf)

	spin_lock_irq(&conf->resync_lock);
	wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
			    raid10_unplug(conf->mddev->queue));
	spin_unlock_irq(&conf->resync_lock);

static void allow_barrier(conf_t *conf)

	spin_lock_irqsave(&conf->resync_lock, flags);
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
static void freeze_array(conf_t *conf)

	/* stop syncio and normal IO and wait for everything to
	 *
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+1
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (1)
	 * must match the number of pending IOs (nr_pending) before
	 */
	spin_lock_irq(&conf->resync_lock);
	wait_event_lock_irq(conf->wait_barrier,
			    conf->nr_pending == conf->nr_queued+1,
			    ({ flush_pending_writes(conf);
			       raid10_unplug(conf->mddev->queue); }));
	spin_unlock_irq(&conf->resync_lock);
static void unfreeze_array(conf_t *conf)

	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
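/*
 * Illustrative sketch only (not part of this driver): the freeze/unfreeze
 * pairing described above, as used by the read-error handling path in
 * raid10d() - freeze the array, repair, then unfreeze.
 */
static void example_repair_frozen(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
{
	freeze_array(conf);			/* wait for nr_pending == nr_queued + 1 */
	fix_read_error(conf, mddev, r10_bio);	/* repair while regular IO is held off */
	unfreeze_array(conf);
}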
static int make_request(mddev_t *mddev, struct bio * bio)

	conf_t *conf = mddev->private;
	mirror_info_t *mirror;

	struct bio *read_bio;

	int chunk_sects = conf->chunk_mask + 1;
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);

	mdk_rdev_t *blocked_rdev;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
	/* If this request crosses a chunk boundary, we need to
	 * split it. This will only happen for 1 PAGE (or less) requests.
	 */
	if (unlikely((bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
		     conf->near_copies < conf->raid_disks)) {

		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||

		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
			      chunk_sects - (bio->bi_sector & (chunk_sects - 1)));

		/* Each of these 'make_request' calls will call 'wait_barrier'.
		 * If the first succeeds but the second blocks due to the resync
		 * thread raising the barrier, we will deadlock because the
		 * IO to the underlying device will be queued in generic_make_request
		 * and will never complete, so will never reduce nr_pending.
		 * So increment nr_waiting here so no new raise_barriers will
		 * succeed, and so the second wait_barrier cannot block.
		 */
		spin_lock_irq(&conf->resync_lock);
		spin_unlock_irq(&conf->resync_lock);

		if (make_request(mddev, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (make_request(mddev, &bp->bio2))
			generic_make_request(&bp->bio2);

		spin_lock_irq(&conf->resync_lock);
		wake_up(&conf->wait_barrier);
		spin_unlock_irq(&conf->resync_lock);

		bio_pair_release(bp);

	printk("md/raid10:%s: make_request bug: can't convert block across chunks"
	       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
	md_write_start(mddev, bio);

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */

	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

	r10_bio->master_bio = bio;
	r10_bio->sectors = bio->bi_size >> 9;

	r10_bio->mddev = mddev;
	r10_bio->sector = bio->bi_sector;
		/*
		 * read balancing logic:
		 */
		int disk = read_balance(conf, r10_bio);
		int slot = r10_bio->read_slot;

			raid_end_bio_io(r10_bio);

		mirror = conf->mirrors + disk;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);

		r10_bio->devs[slot].bio = read_bio;

		read_bio->bi_sector = r10_bio->devs[slot].addr +
			mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid10_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r10_bio;

		generic_make_request(read_bio);
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev. Record them by setting
	 */
	raid10_find_phys(conf, r10_bio);

	for (i = 0; i < conf->copies; i++) {
		int d = r10_bio->devs[i].devnum;
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			r10_bio->devs[i].bio = bio;
		} else {
			r10_bio->devs[i].bio = NULL;
			set_bit(R10BIO_Degraded, &r10_bio->state);

	if (unlikely(blocked_rdev)) {
		/* Have to wait for this device to get unblocked, then retry */

		for (j = 0; j < i; j++)
			if (r10_bio->devs[j].bio) {
				d = r10_bio->devs[j].devnum;
				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
	atomic_set(&r10_bio->remaining, 1);
	bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);

	for (i = 0; i < conf->copies; i++) {

		int d = r10_bio->devs[i].devnum;
		if (!r10_bio->devs[i].bio)

		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		r10_bio->devs[i].bio = mbio;

		mbio->bi_sector = r10_bio->devs[i].addr +
			conf->mirrors[d].rdev->data_offset;
		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
		mbio->bi_end_io = raid10_end_write_request;
		mbio->bi_rw = WRITE | do_sync | do_fua;
		mbio->bi_private = r10_bio;

		atomic_inc(&r10_bio->remaining);
		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		blk_plug_device_unlocked(mddev->queue);
		spin_unlock_irqrestore(&conf->device_lock, flags);

	if (atomic_dec_and_test(&r10_bio->remaining)) {
		/* This matches the end of raid10_end_write_request() */
		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
				!test_bit(R10BIO_Degraded, &r10_bio->state),
		md_write_end(r10_bio->mddev);
		raid_end_bio_io(r10_bio);

	/* In case raid10d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);

		md_wakeup_thread(mddev->thread);
static void status(struct seq_file *seq, mddev_t *mddev)

	conf_t *conf = mddev->private;

	if (conf->near_copies < conf->raid_disks)
		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
	if (conf->near_copies > 1)
		seq_printf(seq, " %d near-copies", conf->near_copies);
	if (conf->far_copies > 1) {
		if (conf->far_offset)
			seq_printf(seq, " %d offset-copies", conf->far_copies);
		else
			seq_printf(seq, " %d far-copies", conf->far_copies);

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->mirrors[i].rdev &&
			   test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
static void error(mddev_t *mddev, mdk_rdev_t *rdev)

	char b[BDEVNAME_SIZE];
	conf_t *conf = mddev->private;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && conf->raid_disks-mddev->degraded == 1)
		/*
		 * Don't fail the drive, just return an IO error.
		 * The test should really be more sophisticated than
		 * "working_disks == 1", but it isn't critical, and
		 * can wait until we do more sophisticated "is the drive
		 * really dead" tests...
		 */
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
	       "md/raid10:%s: Operation continuing on %d devices.\n",
	       mdname(mddev), bdevname(rdev->bdev, b),
	       mdname(mddev), conf->raid_disks - mddev->degraded);
static void print_conf(conf_t *conf)

	printk(KERN_DEBUG "RAID10 conf printout:\n");
		printk(KERN_DEBUG "(!conf)\n");
	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->mirrors + i;
			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &tmp->rdev->flags),
			       !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));
static void close_sync(conf_t *conf)

	allow_barrier(conf);

	mempool_destroy(conf->r10buf_pool);
	conf->r10buf_pool = NULL;
/* check if there are enough drives for
 * every block to appear on at least one
 */
static int enough(conf_t *conf)

	int n = conf->copies;

		if (conf->mirrors[first].rdev)
		first = (first+1) % conf->raid_disks;
	} while (first != 0);
static int raid10_spare_active(mddev_t *mddev)

	conf_t *conf = mddev->private;

	unsigned long flags;

	/*
	 * Find all non-in_sync disks within the RAID10 configuration
	 * and mark them in_sync
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->mirrors + i;
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			sysfs_notify_dirent(tmp->rdev->sysfs_state);
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);
static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)

	conf_t *conf = mddev->private;

	int last = conf->raid_disks - 1;

	if (mddev->recovery_cp < MaxSector)
		/* only hot-add to in-sync arrays, as recovery is
		 * very different from resync
		 */

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
		mirror = rdev->saved_raid_disk;

	for ( ; mirror <= last ; mirror++)
		if (!(p = conf->mirrors+mirror)->rdev) {

			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			/* as we don't honour merge_bvec_fn, we must
			 * never risk violating it, so limit
			 * ->max_segments to one lying with a single
			 * page, as a one page request is never in
			 */
			if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
				blk_queue_max_segments(mddev->queue, 1);
				blk_queue_segment_boundary(mddev->queue,
							   PAGE_CACHE_SIZE - 1);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			if (rdev->saved_raid_disk != mirror)
			rcu_assign_pointer(p->rdev, rdev);

	md_integrity_add_rdev(rdev, mddev);
static int raid10_remove_disk(mddev_t *mddev, int number)

	conf_t *conf = mddev->private;

	mirror_info_t *p = conf->mirrors + number;

		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {

		/* Only remove faulty devices in recovery
		 */
		if (!test_bit(Faulty, &rdev->flags) &&

		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */

	md_integrity_register(mddev);
static void end_sync_read(struct bio *bio, int error)

	r10bio_t *r10_bio = bio->bi_private;
	conf_t *conf = r10_bio->mddev->private;

	for (i = 0; i < conf->copies; i++)
		if (r10_bio->devs[i].bio == bio)
	BUG_ON(i == conf->copies);
	update_head_pos(i, r10_bio);
	d = r10_bio->devs[i].devnum;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R10BIO_Uptodate, &r10_bio->state);
		atomic_add(r10_bio->sectors,
			   &conf->mirrors[d].rdev->corrected_errors);
		if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
			md_error(r10_bio->mddev,
				 conf->mirrors[d].rdev);

	/* for reconstruct, we always reschedule after a read.
	 * for resync, only after all reads
	 */
	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
	    atomic_dec_and_test(&r10_bio->remaining)) {
		/* we have read all the blocks,
		 * do the comparison in process context in raid10d
		 */
		reschedule_retry(r10_bio);
static void end_sync_write(struct bio *bio, int error)

	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r10bio_t *r10_bio = bio->bi_private;
	mddev_t *mddev = r10_bio->mddev;
	conf_t *conf = mddev->private;

	for (i = 0; i < conf->copies; i++)
		if (r10_bio->devs[i].bio == bio)
	d = r10_bio->devs[i].devnum;

		md_error(mddev, conf->mirrors[d].rdev);

	update_head_pos(i, r10_bio);

	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
	while (atomic_dec_and_test(&r10_bio->remaining)) {
		if (r10_bio->master_bio == NULL) {
			/* the primary of several recovery bios */
			sector_t s = r10_bio->sectors;

			md_done_sync(mddev, s, 1);

			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
/*
 * Note: sync and recovery are handled very differently for raid10
 * This code is for resync.
 * For resync, we read through virtual addresses and read all blocks.
 * If there is any error, we schedule a write. The lowest numbered
 * drive is authoritative.
 * However requests come for physical addresses, so we need to map.
 * For every physical address there are raid_disks/copies virtual addresses,
 * which is always at least one, but is not necessarily an integer.
 * This means that a physical address can span multiple chunks, so we may
 * have to submit multiple io requests for a single sync request.
 *
 * We check if all blocks are in-sync and only write to blocks that
 */
static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)

	conf_t *conf = mddev->private;

	struct bio *tbio, *fbio;

	atomic_set(&r10_bio->remaining, 1);

	/* find the first device with a block */
	for (i = 0; i < conf->copies; i++)
		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))

	if (i == conf->copies)

	fbio = r10_bio->devs[i].bio;

	/* now find blocks with errors */
	for (i = 0; i < conf->copies; i++) {
		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);

		tbio = r10_bio->devs[i].bio;

		if (tbio->bi_end_io != end_sync_read)

		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
			/* We know that the bi_io_vec layout is the same for
			 * both 'first' and 'i', so we just compare them.
			 * All vec entries are PAGE_SIZE;
			 */
			for (j = 0; j < vcnt; j++)
				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
					   page_address(tbio->bi_io_vec[j].bv_page),

			mddev->resync_mismatches += r10_bio->sectors;
			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				/* Don't fix anything. */
		/* Ok, we need to write this bio
		 * First we need to fixup bv_offset, bv_len and
		 * bi_vecs, as the read request might have corrupted these
		 */
		tbio->bi_vcnt = vcnt;
		tbio->bi_size = r10_bio->sectors << 9;
		tbio->bi_phys_segments = 0;
		tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
		tbio->bi_flags |= 1 << BIO_UPTODATE;
		tbio->bi_next = NULL;
		tbio->bi_rw = WRITE;
		tbio->bi_private = r10_bio;
		tbio->bi_sector = r10_bio->devs[i].addr;

		for (j = 0; j < vcnt; j++) {
			tbio->bi_io_vec[j].bv_offset = 0;
			tbio->bi_io_vec[j].bv_len = PAGE_SIZE;

			memcpy(page_address(tbio->bi_io_vec[j].bv_page),
			       page_address(fbio->bi_io_vec[j].bv_page),

		tbio->bi_end_io = end_sync_write;

		d = r10_bio->devs[i].devnum;
		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
		atomic_inc(&r10_bio->remaining);
		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);

		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
		generic_make_request(tbio);

	if (atomic_dec_and_test(&r10_bio->remaining)) {
		md_done_sync(mddev, r10_bio->sectors, 1);
/*
 * Now for the recovery code.
 * Recovery happens across physical sectors.
 * We recover all non-in_sync drives by finding the virtual address of
 * each, and then choose a working drive that also has that virt address.
 * There is a separate r10_bio for each non-in_sync drive.
 * Only the first two slots are in use. The first for reading,
 * The second for writing.
 */
static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)

	conf_t *conf = mddev->private;

	struct bio *bio, *wbio;

	/* move the pages across to the second bio
	 * and submit the write request
	 */
	bio = r10_bio->devs[0].bio;
	wbio = r10_bio->devs[1].bio;
	for (i = 0; i < wbio->bi_vcnt; i++) {
		struct page *p = bio->bi_io_vec[i].bv_page;
		bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page;
		wbio->bi_io_vec[i].bv_page = p;

	d = r10_bio->devs[1].devnum;

	atomic_inc(&conf->mirrors[d].rdev->nr_pending);
	md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
	if (test_bit(R10BIO_Uptodate, &r10_bio->state))
		generic_make_request(wbio);
	else
		bio_endio(wbio, -EIO);
/*
 * Used by fix_read_error() to decay the per rdev read_errors.
 * We halve the read error count for every hour that has elapsed
 * since the last recorded read error.
 */
static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)

	struct timespec cur_time_mon;
	unsigned long hours_since_last;
	unsigned int read_errors = atomic_read(&rdev->read_errors);

	ktime_get_ts(&cur_time_mon);

	if (rdev->last_read_error.tv_sec == 0 &&
	    rdev->last_read_error.tv_nsec == 0) {
		/* first time we've seen a read error */
		rdev->last_read_error = cur_time_mon;

	hours_since_last = (cur_time_mon.tv_sec -
			    rdev->last_read_error.tv_sec) / 3600;

	rdev->last_read_error = cur_time_mon;

	/*
	 * if hours_since_last is > the number of bits in read_errors
	 * just set read errors to 0. We do this to avoid
	 * overflowing the shift of read_errors by hours_since_last.
	 */
	if (hours_since_last >= 8 * sizeof(read_errors))
		atomic_set(&rdev->read_errors, 0);
	else
		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
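/*
 * Worked example for the decay above: an rdev that had accumulated 40 read
 * errors and then ran cleanly for 3 hours is treated as having 40 >> 3 == 5
 * outstanding read errors when the next error arrives.
 */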
/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */
static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
	int sect = 0; /* Offset from r10_bio->sector */
	int sectors = r10_bio->sectors;

	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
	int d = r10_bio->devs[r10_bio->read_slot].devnum;

	rdev = rcu_dereference(conf->mirrors[d].rdev);
	if (rdev) { /* If rdev is not NULL */
		char b[BDEVNAME_SIZE];
		int cur_read_error_count = 0;

		bdevname(rdev->bdev, b);

		if (test_bit(Faulty, &rdev->flags)) {
			/* drive has already been failed, just ignore any
			   more fix_read_error() attempts */

		check_decay_read_errors(mddev, rdev);
		atomic_inc(&rdev->read_errors);
		cur_read_error_count = atomic_read(&rdev->read_errors);
		if (cur_read_error_count > max_read_errors) {
			       "md/raid10:%s: %s: Raid device exceeded "
			       "read_error threshold "
			       "[cur %d:max %d]\n",
			       b, cur_read_error_count, max_read_errors);
			       "md/raid10:%s: %s: Failing raid "
			       "device\n", mdname(mddev), b);
			md_error(mddev, conf->mirrors[d].rdev);
		int sl = r10_bio->read_slot;

		if (s > (PAGE_SIZE>>9))

			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			    test_bit(In_sync, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);

				success = sync_page_io(rdev,
						       r10_bio->devs[sl].addr +
						       conf->tmppage, READ, false);
				rdev_dec_pending(rdev, mddev);

			if (sl == conf->copies)
		} while (!success && sl != r10_bio->read_slot);

			/* Cannot read from anywhere -- bye bye array */
			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
			md_error(mddev, conf->mirrors[dn].rdev);
		/* write it back and re-read */
		while (sl != r10_bio->read_slot) {
			char b[BDEVNAME_SIZE];

			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			    test_bit(In_sync, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);

				atomic_add(s, &rdev->corrected_errors);
				if (sync_page_io(rdev,
						 r10_bio->devs[sl].addr +
						 s<<9, conf->tmppage, WRITE, false)
					/* Well, this device is dead */
					       "md/raid10:%s: read correction "
					       " (%d sectors at %llu on %s)\n",
					       (unsigned long long)(sect +
					       bdevname(rdev->bdev, b));
					printk(KERN_NOTICE "md/raid10:%s: %s: failing "
					       bdevname(rdev->bdev, b));
					md_error(mddev, rdev);

				rdev_dec_pending(rdev, mddev);
		while (sl != r10_bio->read_slot) {

			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			    test_bit(In_sync, &rdev->flags)) {
				char b[BDEVNAME_SIZE];
				atomic_inc(&rdev->nr_pending);

				if (sync_page_io(rdev,
						 r10_bio->devs[sl].addr +
						 s<<9, conf->tmppage,
						 READ, false) == 0) {
					/* Well, this device is dead */
					       "md/raid10:%s: unable to read back "
					       " (%d sectors at %llu on %s)\n",
					       (unsigned long long)(sect +
					       bdevname(rdev->bdev, b));
					printk(KERN_NOTICE "md/raid10:%s: %s: failing drive\n",
					       bdevname(rdev->bdev, b));

					md_error(mddev, rdev);

					       "md/raid10:%s: read error corrected"
					       " (%d sectors at %llu on %s)\n",
					       (unsigned long long)(sect +
					       bdevname(rdev->bdev, b));

				rdev_dec_pending(rdev, mddev);
static void raid10d(mddev_t *mddev)

	unsigned long flags;
	conf_t *conf = mddev->private;
	struct list_head *head = &conf->retry_list;

	md_check_recovery(mddev);

		char b[BDEVNAME_SIZE];

		unplug += flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
		r10_bio = list_entry(head->prev, r10bio_t, retry_list);
		list_del(head->prev);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r10_bio->mddev;
		conf = mddev->private;
		if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
			sync_request_write(mddev, r10_bio);
		} else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
			recovery_request_write(mddev, r10_bio);

			/* we got a read error. Maybe the drive is bad. Maybe just
			 * the block and we can fix it.
			 * We freeze all other IO, and try reading the block from
			 * other devices. When we find one, we re-write
			 * and check if that fixes the read error.
			 * This is all done synchronously while the array is
			 */
			if (mddev->ro == 0) {
				fix_read_error(conf, mddev, r10_bio);
				unfreeze_array(conf);

			bio = r10_bio->devs[r10_bio->read_slot].bio;
			r10_bio->devs[r10_bio->read_slot].bio =
				mddev->ro ? IO_BLOCKED : NULL;
			mirror = read_balance(conf, r10_bio);
				printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
				       " read error for block %llu\n",
				       bdevname(bio->bi_bdev, b),
				       (unsigned long long)r10_bio->sector);
				raid_end_bio_io(r10_bio);

				const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);

				rdev = conf->mirrors[mirror].rdev;
				if (printk_ratelimit())
					printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to"
					       " another mirror\n",
					       bdevname(rdev->bdev, b),
					       (unsigned long long)r10_bio->sector);
				bio = bio_clone_mddev(r10_bio->master_bio,
				r10_bio->devs[r10_bio->read_slot].bio = bio;
				bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
					+ rdev->data_offset;
				bio->bi_bdev = rdev->bdev;
				bio->bi_rw = READ | do_sync;
				bio->bi_private = r10_bio;
				bio->bi_end_io = raid10_end_read_request;

				generic_make_request(bio);

	unplug_slaves(mddev);
static int init_resync(conf_t *conf)

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r10buf_pool);
	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
	if (!conf->r10buf_pool)
	conf->next_resync = 0;
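/*
 * Worked example: with the defaults above, RESYNC_WINDOW / RESYNC_BLOCK_SIZE
 * is (1024*1024) / (64*1024) == 16, so the resync buffer pool is created
 * with 16 pre-allocated r10buf entries.
 */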
/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 *
 * Resync and recovery are handled very differently.
 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
 *
 * For resync, we iterate over virtual addresses, read all copies,
 * and update if there are differences. If only one copy is live,
 * For recovery, we iterate over physical addresses, read a good
 * value for each non-in_sync drive, and over-write.
 *
 * So, for recovery we may have several outstanding complex requests for a
 * given address, one for each out-of-sync device. We model this by allocating
 * a number of r10_bio structures, one for each out-of-sync device.
 * As we set up these structures, we collect all bio's together into a list
 * which we then process collectively to add pages, and then process again
 * to pass to generic_make_request.
 *
 * The r10_bio structures are linked using a borrowed master_bio pointer.
 * This link is counted in ->remaining. When the r10_bio that points to NULL
 * has its remaining count decremented to 0, the whole complex operation
 */
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)

	conf_t *conf = mddev->private;

	struct bio *biolist = NULL, *bio;
	sector_t max_sector, nr_sectors;

	sector_t sync_blocks;

	sector_t sectors_skipped = 0;
	int chunks_skipped = 0;
	if (!conf->r10buf_pool)
		if (init_resync(conf))

	max_sector = mddev->dev_sectors;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_sector = mddev->resync_max_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunks (there can
		 * be several when recovering multiple devices).
		 * as we may have started syncing it but not finished.
		 * We can find the current address in
		 * mddev->curr_resync, but for recovery,
		 * we need to convert that to several
		 * virtual addresses.
		 */
		if (mddev->curr_resync < max_sector) { /* aborted */
			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
				bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
			else for (i = 0; i < conf->raid_disks; i++) {
					raid10_find_virt(conf, mddev->curr_resync, i);
				bitmap_end_sync(mddev->bitmap, sect,
		} else /* completed sync */

		bitmap_close_sync(mddev->bitmap);

		return sectors_skipped;
	if (chunks_skipped >= conf->raid_disks) {
		/* if there has been nothing to do on any drive,
		 * then there is nothing to do at all..
		 */
		return (max_sector - sector_nr) + sectors_skipped;

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */

	/* make sure whole request will fit in a chunk - if chunks
	 */
	if (conf->near_copies < conf->raid_disks &&
	    max_sector > (sector_nr | conf->chunk_mask))
		max_sector = (sector_nr | conf->chunk_mask) + 1;
	/*
	 * If there is non-resync activity waiting for us then
	 * put in a delay to throttle resync.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);
	/* Again, very different code for resync and recovery.
	 * Both must result in an r10bio with a list of bios that
	 * have bi_end_io, bi_sector, bi_bdev set,
	 * and bi_private set to the r10bio.
	 * For recovery, we may actually create several r10bios
	 * with 2 bios in each, that correspond to the bios in the main one.
	 * In this case, the subordinate r10bios link back through a
	 * borrowed master_bio pointer, and the counter in the master
	 * includes a ref from each subordinate.
	 */
	/* First, we decide what to do and set ->bi_end_io
	 * To end_sync_read if we want to read, and
	 * end_sync_write if we will want to write.
	 */

	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* recovery... the complicated one */
		for (i = 0; i < conf->raid_disks; i++)
			if (conf->mirrors[i].rdev &&
			    !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
				int still_degraded = 0;
				/* want to reconstruct this device */
				r10bio_t *rb2 = r10_bio;
				sector_t sect = raid10_find_virt(conf, sector_nr, i);

				/* Unless we are doing a full sync, we only need
				 * to recover the block if it is set in the bitmap
				 */
				must_sync = bitmap_start_sync(mddev->bitmap, sect,
				if (sync_blocks < max_sync)
					max_sync = sync_blocks;

					/* yep, skip the sync_blocks here, but don't assume
					 * that there will never be anything to do here
					 */
					chunks_skipped = -1;

				r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
				raise_barrier(conf, rb2 != NULL);
				atomic_set(&r10_bio->remaining, 0);

				r10_bio->master_bio = (struct bio *)rb2;
					atomic_inc(&rb2->remaining);
				r10_bio->mddev = mddev;
				set_bit(R10BIO_IsRecover, &r10_bio->state);
				r10_bio->sector = sect;

				raid10_find_phys(conf, r10_bio);

				/* Need to check if the array will still be
				 */
				for (j = 0; j < conf->raid_disks; j++)
					if (conf->mirrors[j].rdev == NULL ||
					    test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {

				must_sync = bitmap_start_sync(mddev->bitmap, sect,
							      &sync_blocks, still_degraded);

				for (j = 0; j < conf->copies; j++) {
					int d = r10_bio->devs[j].devnum;
					if (conf->mirrors[d].rdev &&
					    test_bit(In_sync, &conf->mirrors[d].rdev->flags)) {
						/* This is where we read from */
						bio = r10_bio->devs[0].bio;
						bio->bi_next = biolist;

						bio->bi_private = r10_bio;
						bio->bi_end_io = end_sync_read;

						bio->bi_sector = r10_bio->devs[j].addr +
							conf->mirrors[d].rdev->data_offset;
						bio->bi_bdev = conf->mirrors[d].rdev->bdev;
						atomic_inc(&conf->mirrors[d].rdev->nr_pending);
						atomic_inc(&r10_bio->remaining);
						/* and we write to 'i' */

						for (k = 0; k < conf->copies; k++)
							if (r10_bio->devs[k].devnum == i)
						BUG_ON(k == conf->copies);
						bio = r10_bio->devs[1].bio;
						bio->bi_next = biolist;

						bio->bi_private = r10_bio;
						bio->bi_end_io = end_sync_write;

						bio->bi_sector = r10_bio->devs[k].addr +
							conf->mirrors[i].rdev->data_offset;
						bio->bi_bdev = conf->mirrors[i].rdev->bdev;

						r10_bio->devs[0].devnum = d;
						r10_bio->devs[1].devnum = i;

				if (j == conf->copies) {
					/* Cannot recover, so abort the recovery */

					atomic_dec(&rb2->remaining);

					if (!test_and_set_bit(MD_RECOVERY_INTR,
						printk(KERN_INFO "md/raid10:%s: insufficient "
						       "working devices for recovery.\n",

		if (biolist == NULL) {
				r10bio_t *rb2 = r10_bio;
				r10_bio = (r10bio_t *) rb2->master_bio;
				rb2->master_bio = NULL;
		/* resync. Schedule a read for every block at this virt offset */

		bitmap_cond_end_sync(mddev->bitmap, sector_nr);

		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
				       &sync_blocks, mddev->degraded) &&
		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
			/* We can skip this block */
			return sync_blocks + sectors_skipped;
		if (sync_blocks < max_sync)
			max_sync = sync_blocks;
		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);

		r10_bio->mddev = mddev;
		atomic_set(&r10_bio->remaining, 0);
		raise_barrier(conf, 0);
		conf->next_resync = sector_nr;

		r10_bio->master_bio = NULL;
		r10_bio->sector = sector_nr;
		set_bit(R10BIO_IsSync, &r10_bio->state);
		raid10_find_phys(conf, r10_bio);
		r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr + 1;

		for (i = 0; i < conf->copies; i++) {
			int d = r10_bio->devs[i].devnum;
			bio = r10_bio->devs[i].bio;
			bio->bi_end_io = NULL;
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
			if (conf->mirrors[d].rdev == NULL ||
			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
			atomic_inc(&r10_bio->remaining);
			bio->bi_next = biolist;

			bio->bi_private = r10_bio;
			bio->bi_end_io = end_sync_read;

			bio->bi_sector = r10_bio->devs[i].addr +
				conf->mirrors[d].rdev->data_offset;
			bio->bi_bdev = conf->mirrors[d].rdev->bdev;

		for (i = 0; i < conf->copies; i++) {
			int d = r10_bio->devs[i].devnum;
			if (r10_bio->devs[i].bio->bi_end_io)
				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
	for (bio = biolist; bio; bio = bio->bi_next) {

		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
			bio->bi_flags |= 1 << BIO_UPTODATE;

		bio->bi_phys_segments = 0;
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;
	do {
		int len = PAGE_SIZE;

		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;

		for (bio = biolist; bio; bio = bio->bi_next) {
			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
			if (bio_add_page(bio, page, len, 0) == 0) {

				bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
				for (bio2 = biolist; bio2 && bio2 != bio; bio2 = bio2->bi_next) {
					/* remove last page from this bio */
					bio2->bi_size -= len;
					bio2->bi_flags &= ~(1<< BIO_SEG_VALID);

		nr_sectors += len>>9;
		sector_nr += len>>9;
	} while (biolist->bi_vcnt < RESYNC_PAGES);

	r10_bio->sectors = nr_sectors;
		biolist = biolist->bi_next;

		bio->bi_next = NULL;
		r10_bio = bio->bi_private;
		r10_bio->sectors = nr_sectors;

		if (bio->bi_end_io == end_sync_read) {
			md_sync_acct(bio->bi_bdev, nr_sectors);
			generic_make_request(bio);

	if (sectors_skipped)
		/* pretend they weren't skipped, it makes
		 * no important difference in this case
		 */
		md_done_sync(mddev, sectors_skipped, 1);

	return sectors_skipped + nr_sectors;

	/* There is nowhere to write, so all non-sync
	 * drives must be failed, so try the next chunk...
	 */
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;
	sectors_skipped += (max_sector - sector_nr);
	sector_nr = max_sector;
raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)

	conf_t *conf = mddev->private;

		raid_disks = conf->raid_disks;
		sectors = conf->dev_sectors;

	size = sectors >> conf->chunk_shift;
	sector_div(size, conf->far_copies);
	size = size * raid_disks;
	sector_div(size, conf->near_copies);

	return size << conf->chunk_shift;
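/*
 * Worked example for raid10_size(): a hypothetical 4-drive array with
 * 64-sector chunks, near_copies = 2 and far_copies = 1, and 1024 usable
 * sectors per device gives (1024 >> 6) == 16 chunks per device; 16 / 1 = 16,
 * 16 * 4 = 64, 64 / 2 = 32 usable chunks, so the array size is
 * 32 << 6 == 2048 sectors.
 */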
2183 static conf_t
*setup_conf(mddev_t
*mddev
)
2185 conf_t
*conf
= NULL
;
2187 sector_t stride
, size
;
2190 if (mddev
->new_chunk_sectors
< (PAGE_SIZE
>> 9) ||
2191 !is_power_of_2(mddev
->new_chunk_sectors
)) {
2192 printk(KERN_ERR
"md/raid10:%s: chunk size must be "
2193 "at least PAGE_SIZE(%ld) and be a power of 2.\n",
2194 mdname(mddev
), PAGE_SIZE
);
2198 nc
= mddev
->new_layout
& 255;
2199 fc
= (mddev
->new_layout
>> 8) & 255;
2200 fo
= mddev
->new_layout
& (1<<16);
2202 if ((nc
*fc
) <2 || (nc
*fc
) > mddev
->raid_disks
||
2203 (mddev
->new_layout
>> 17)) {
2204 printk(KERN_ERR
"md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
2205 mdname(mddev
), mddev
->new_layout
);

	err = -ENOMEM;
	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
	if (!conf)
		goto out;

	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->mirrors)
		goto out;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto out;

	conf->raid_disks = mddev->raid_disks;
	conf->near_copies = nc;
	conf->far_copies = fc;
	conf->copies = nc*fc;
	conf->far_offset = fo;
	conf->chunk_mask = mddev->new_chunk_sectors - 1;
	conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
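
	/* Illustrative values (assumption): for a 64KiB chunk,
	 * new_chunk_sectors = 128, so chunk_mask = 127 and
	 * chunk_shift = ffz(~128) = 7, i.e. log2 of the chunk size in sectors.
	 */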

	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
					   r10bio_pool_free, conf);
	if (!conf->r10bio_pool)
		goto out;

	size = mddev->dev_sectors >> conf->chunk_shift;
	sector_div(size, fc);
	size = size * conf->raid_disks;
	sector_div(size, nc);
	/* 'size' is now the number of chunks in the array */
	/* calculate "used chunks per device" in 'stride' */
	stride = size * conf->copies;

	/* We need to round up when dividing by raid_disks to
	 * get the stride size.
	 */
	stride += conf->raid_disks - 1;
	sector_div(stride, conf->raid_disks);

	conf->dev_sectors = stride << conf->chunk_shift;

	if (fo)
		stride = 1;
	else
		sector_div(stride, fc);
	conf->stride = stride << conf->chunk_shift;
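
	/* Worked example (illustrative numbers, not from the source): with
	 * dev_sectors = 131072, chunk_shift = 7, 4 disks, nc = 2, fc = 1:
	 * size = 1024 * 4 / 2 = 2048 chunks in the array, and stride =
	 * (2048 * 2 + 3) / 4 = 1024 used chunks per device, so
	 * conf->dev_sectors = 1024 << 7 = 131072; conf->stride then covers
	 * the whole device because there is only one far section.
	 */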

	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	conf->thread = md_register_thread(raid10d, mddev, NULL);
	if (!conf->thread)
		goto out;

	conf->mddev = mddev;
	return conf;

 out:
	printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
	       mdname(mddev));
	if (conf) {
		if (conf->r10bio_pool)
			mempool_destroy(conf->r10bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf);
	}
	return ERR_PTR(err);
}

static int run(mddev_t *mddev)
{
	conf_t *conf;
	int i, disk_idx, chunk_size;
	mirror_info_t *disk;
	mdk_rdev_t *rdev;
	sector_t size;

	/*
	 * copy the already verified devices into our private RAID10
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in stop()]
	 */

	if (mddev->private == NULL) {
		conf = setup_conf(mddev);
		if (IS_ERR(conf))
			return PTR_ERR(conf);
		mddev->private = conf;
	}
	conf = mddev->private;

	mddev->thread = conf->thread;
	conf->thread = NULL;

	chunk_size = mddev->chunk_sectors << 9;
	blk_queue_io_min(mddev->queue, chunk_size);
	if (conf->raid_disks % conf->near_copies)
		blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
	else
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->raid_disks / conf->near_copies));
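
	/* Illustrative values (assumption, not from the source): with 64KiB
	 * chunks (chunk_sectors = 128) io_min is 64KiB; for 4 disks with
	 * near_copies = 2 the optimal I/O size is 64KiB * (4 / 2) = 128KiB,
	 * i.e. one stripe of distinct data, while an odd 3-disk n2 layout
	 * advertises 64KiB * 3 = 192KiB instead.
	 */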

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		disk_idx = rdev->raid_disk;
		if (disk_idx >= conf->raid_disks
		    || disk_idx < 0)
			continue;
		disk = conf->mirrors + disk_idx;

		disk->rdev = rdev;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit max_segments to 1 lying
		 * within a single page.
		 */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
			blk_queue_max_segments(mddev->queue, 1);
			blk_queue_segment_boundary(mddev->queue,
						   PAGE_CACHE_SIZE - 1);
		}

		disk->head_position = 0;
	}

	/* need to check that every block has at least one working mirror */
	if (!enough(conf)) {
		printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++) {

		disk = conf->mirrors + i;

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			mddev->degraded++;
		}
	}

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "md/raid10:%s: not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
	printk(KERN_INFO
	       "md/raid10:%s: active with %d out of %d devices\n",
	       mdname(mddev), conf->raid_disks - mddev->degraded,
	       conf->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	mddev->dev_sectors = conf->dev_sectors;
	size = raid10_size(mddev, 0, 0);
	md_set_array_sectors(mddev, size);
	mddev->resync_max_sectors = size;

	mddev->queue->unplug_fn = raid10_unplug;
	mddev->queue->backing_dev_info.congested_fn = raid10_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	/* Calculate max read-ahead size.
	 * We need to readahead at least twice a whole stripe....
	 */
	{
		int stripe = conf->raid_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		stripe /= conf->near_copies;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}
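
	/* Worked example (illustrative numbers): 4 disks, 64KiB chunks and
	 * 4KiB pages give stripe = 4 * 16 = 64 pages; with near_copies = 2
	 * that is 32 pages of distinct data per stripe, so read-ahead is
	 * raised to at least 64 pages (256KiB) if it was smaller.
	 */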

	if (conf->near_copies < conf->raid_disks)
		blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
	md_integrity_register(mddev);
	return 0;

out_free_conf:
	md_unregister_thread(mddev->thread);
	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	safe_put_page(conf->tmppage);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
	return -EIO;
}

static int stop(mddev_t *mddev)
{
	conf_t *conf = mddev->private;

	raise_barrier(conf, 0);
	lower_barrier(conf);

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

static void raid10_quiesce(mddev_t *mddev, int state)
{
	conf_t *conf = mddev->private;

	switch (state) {
	case 1:
		raise_barrier(conf, 0);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
}

static void *raid10_takeover_raid0(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	conf_t *conf;

	if (mddev->degraded > 0) {
		printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 10;
	/* new layout: far_copies = 1, near_copies = 2 */
	mddev->new_layout = (1<<8) + 2;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = mddev->raid_disks;
	mddev->raid_disks *= 2;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	conf = setup_conf(mddev);
	if (!IS_ERR(conf)) {
		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk >= 0)
				rdev->new_raid_disk = rdev->raid_disk * 2;
	}

	return conf;
}
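
/* Illustrative takeover (assumption, not from the source): a clean two-drive
 * raid0 becomes a four-slot raid10 with new_layout = (1<<8) + 2 = 0x102
 * ("n2": near_copies = 2, far_copies = 1); the existing members move from
 * raid_disk 0 and 1 to slots 0 and 2, leaving the odd slots empty for the
 * mirror devices.
 */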

static void *raid10_takeover(mddev_t *mddev)
{
	struct raid0_private_data *raid0_priv;

	/* raid10 can take over:
	 *  raid0 - providing it has only two drives
	 */
	if (mddev->level == 0) {
		/* for raid0 takeover only one zone is supported */
		raid0_priv = mddev->private;
		if (raid0_priv->nr_strip_zones > 1) {
			printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
			       " with more than one zone.\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		return raid10_takeover_raid0(mddev);
	}
	return ERR_PTR(-EINVAL);
}

static struct mdk_personality raid10_personality =
{
	.name		= "raid10",
	.level		= 10,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid10_add_disk,
	.hot_remove_disk= raid10_remove_disk,
	.spare_active	= raid10_spare_active,
	.sync_request	= sync_request,
	.quiesce	= raid10_quiesce,
	.size		= raid10_size,
	.takeover	= raid10_takeover,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid10_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid10_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");