/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->bm_write is the number of the last batch successfully written.
 * conf->bm_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is bm_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *   batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 *    miss any bits.
 */
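/*
 * Illustrative timeline (not part of the original comment): with
 * bm_write == 7 and bm_flush == 7, a newly dirtied stripe records
 * sh->bm_seq = 8.  Its write is held back until an unplug bumps bm_flush
 * to 8 and the bitmap writer advances bm_write to 8; only then is the
 * stripe allowed to proceed.
 */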
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include "md.h"
#include "raid5.h"
#include "bitmap.h"
#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
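/*
 * Example (illustrative, assuming 4K pages and 8-byte hlist_heads):
 * NR_HASH = 4096/8 = 512 and STRIPE_SHIFT = 12 - 9 = 3, so a stripe
 * starting at sector 1024 hashes to bucket (1024 >> 3) & 511 = 128.
 */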
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
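/*
 * Illustrative example (not from the original source): with 4K pages
 * STRIPE_SECTORS is 8, so for a stripe+device chunk starting at 'sect' a
 * bio ending before sect + 8 lets the walk continue to bi_next, while a
 * bio that extends to or past sect + 8 terminates the walk with NULL.
 */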
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}
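/*
 * Worked example (illustrative): if bi_phys_segments is 0x00030002, the
 * active-stripe count is 2 and the processed ("hw") count is 3; packing a
 * new hw count of 5 yields (5 << 16) | 2 == 0x00050002.
 */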
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always start from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}
/* When walking through the disks in a raid5, starting at raid6_d0,
 * We need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.
 * This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}
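/*
 * Illustrative mapping for a 5-device md raid6 stripe (ddf_layout == 0)
 * with pd_idx == 3 and qd_idx == 4: walking from raid6_d0() == 0, data
 * disks 0, 1 and 2 land in slots 0, 1 and 2, the P disk in slot
 * syndrome_disks == 3, and the Q disk in slot syndrome_disks + 1 == 4.
 */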
static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;
	while (bi) {

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf (raid5_conf_t *conf);
static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state)) {
				list_add_tail(&sh->lru, &conf->delayed_list);
				blk_plug_device(conf->mddev->queue);
			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				   sh->bm_seq - conf->seq_write > 0) {
				list_add_tail(&sh->lru, &conf->bitmap_list);
				blk_plug_device(conf->mddev->queue);
			} else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(stripe_operations_active(sh));
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}
static void shrink_buffers(struct stripe_head *sh, int num)
{
	struct page *p;
	int i;

	for (i = 0; i < num; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}
static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh);
static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	CHECK_DEVLOCK();
	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	insert_hash(conf, sh);
}
static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}
static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(struct request_queue *q);
static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    raid5_unplug_device(conf->mddev->queue)
					);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru)
				       && !test_bit(STRIPE_EXPANDING, &sh->state));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}
static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);
static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = WRITE;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw == WRITE)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rw == WRITE &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS,
					&rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw == WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio, i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bio_iovec_idx(bio, i)->bv_offset;
			bio_page = bio_iovec_idx(bio, i)->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
						  b_offset, clen, &submit);
			else
				tx = async_memcpy(bio_page, page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}
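/*
 * Illustrative example: if the bio starts 2 sectors (1024 bytes) past the
 * stripe page's start sector, page_offset is +1024; if it starts 2 sectors
 * before it, page_offset is -1024 and the first 1024 bytes of that segment
 * are skipped via b_offset before any copy is issued.
 */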
static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	spin_lock_irq(&conf->device_lock);
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_phys_segments(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	spin_unlock_irq(&conf->device_lock);
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&conf->device_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
						     dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}
static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}
static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu)
{
	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = (void *)raid6_empty_zero_page;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);
	BUG_ON(count != syndrome_disks);

	return count;
}
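/*
 * Illustrative layout (md raid6, ddf_layout == 0, 6 devices, pd_idx == 4,
 * qd_idx == 5): the walk starting at raid6_d0() fills srcs[0..3] with the
 * four data pages in stripe order, and async_gen_syndrome() is then called
 * with count+2 == 6 so that srcs[4] and srcs[5] act as the P and Q
 * destination buffers.
 */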
static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **blocks = percpu->scribble;
	int target;
	int qd_idx = sh->qd_idx;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct r5dev *tgt;
	struct page *dest;
	int i;
	int count;

	if (sh->ops.target < 0)
		target = sh->ops.target2;
	else if (sh->ops.target2 < 0)
		target = sh->ops.target;
	else
		/* we should only have one valid target */
		BUG();
	BUG_ON(target < 0);
	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	dest = tgt->page;

	atomic_inc(&sh->count);

	if (target == qd_idx) {
		count = set_syndrome_sources(blocks, sh);
		blocks[count] = NULL; /* regenerating p is not necessary */
		BUG_ON(blocks[count+1] != dest); /* q should already be set */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	} else {
		/* Compute any data- or p-drive using XOR */
		count = 0;
		for (i = disks; i-- ; ) {
			if (i == target || i == qd_idx)
				continue;
			blocks[count++] = sh->dev[i].page;
		}

		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
	}

	return tx;
}
static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	int target = sh->ops.target;
	int target2 = sh->ops.target2;
	struct r5dev *tgt = &sh->dev[target];
	struct r5dev *tgt2 = &sh->dev[target2];
	struct dma_async_tx_descriptor *tx;
	struct page **blocks = percpu->scribble;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
		 __func__, (unsigned long long)sh->sector, target, target2);
	BUG_ON(target < 0 || target2 < 0);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

	/* we need to open-code set_syndrome_sources to handle the
	 * slot number conversion for 'faila' and 'failb'
	 */
	for (i = 0; i < disks; i++)
		blocks[i] = (void *)raid6_empty_zero_page;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		blocks[slot] = sh->dev[i].page;

		if (i == target)
			faila = slot;
		if (i == target2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);
	BUG_ON(count != syndrome_disks);

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);
	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
		 __func__, (unsigned long long)sh->sector, faila, failb);

	atomic_inc(&sh->count);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		} else {
			struct page *dest;
			int data_target;
			int qd_idx = sh->qd_idx;

			/* Missing D+Q: recompute D from P, then recompute Q */
			if (target == qd_idx)
				data_target = target2;
			else
				data_target = target;

			count = 0;
			for (i = disks; i-- ; ) {
				if (i == data_target || i == qd_idx)
					continue;
				blocks[count++] = sh->dev[i].page;
			}
			dest = sh->dev[data_target].page;
			init_async_submit(&submit,
					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
					  NULL, NULL, NULL,
					  to_addr_conv(sh, percpu));
			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
				       &submit);

			count = set_syndrome_sources(blocks, sh);
			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		}
	} else {
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL, ops_complete_compute,
				  sh, to_addr_conv(sh, percpu));
		if (failb == syndrome_disks) {
			/* We're missing D+P. */
			return async_raid6_datap_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila,
						       blocks, &submit);
		} else {
			/* We're missing D+D. */
			return async_raid6_2data_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila, failb,
						       blocks, &submit);
		}
	}
}
static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
	       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}
static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock(&sh->lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock(&sh->lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(1, wbi, dev->page,
						     dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}
static void ops_complete_reconstruct(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->written || i == pd_idx || i == qd_idx)
			set_bit(R5_UPTODATE, &dev->flags);
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
			  to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}
static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct page **blocks = percpu->scribble;
	int count;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	count = set_syndrome_sources(blocks, sh);

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
			  sh, to_addr_conv(sh, percpu));
	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}
static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct page *xor_dest;
	struct page **xor_srcs = percpu->scribble;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int count;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	count = 0;
	xor_dest = sh->dev[pd_idx].page;
	xor_srcs[count++] = xor_dest;
	for (i = disks; i--; ) {
		if (i == pd_idx || i == qd_idx)
			continue;
		xor_srcs[count++] = sh->dev[i].page;
	}

	init_async_submit(&submit, 0, NULL, NULL, NULL,
			  to_addr_conv(sh, percpu));
	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, &submit);

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
	tx = async_trigger_callback(&submit);
}
static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
	struct page **srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count;

	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
		(unsigned long long)sh->sector, checkp);

	count = set_syndrome_sources(srcs, sh);
	if (!checkp)
		srcs[count] = NULL;

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
			  sh, to_addr_conv(sh, percpu));
	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int level = conf->level;
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu();
	percpu = per_cpu_ptr(conf->percpu, cpu);
	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		if (level < 6)
			tx = ops_run_compute5(sh, percpu);
		else {
			if (sh->ops.target2 < 0 || sh->ops.target < 0)
				tx = ops_run_compute6_1(sh, percpu);
			else
				tx = ops_run_compute6_2(sh, percpu);
		}
		/* terminate the chain if reconstruct is not set to be run */
		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, percpu, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
		if (level < 6)
			ops_run_reconstruct5(sh, percpu, tx);
		else
			ops_run_reconstruct6(sh, percpu, tx);
	}

	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
		if (sh->check_state == check_state_run)
			ops_run_check_p(sh, percpu);
		else if (sh->check_state == check_state_run_q)
			ops_run_check_pq(sh, percpu, 0);
		else if (sh->check_state == check_state_run_pq)
			ops_run_check_pq(sh, percpu, 1);
		else
			BUG();
	}

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
	put_cpu();
}
static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->disks = conf->raid_disks;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}
static int grow_stripes(raid5_conf_t *conf, int num)
{
	struct kmem_cache *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0],
		"raid%d-%s", conf->level, mdname(conf->mddev));
	sprintf(conf->cache_name[1],
		"raid%d-%s-alt", conf->level, mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}
/*
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}
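/*
 * Illustrative sizing (assuming 64-bit page pointers and an 8-byte
 * addr_conv_t): for a 10-device array, scribble_len(10) = 8*12 + 8*12 =
 * 192 bytes, i.e. twelve page pointers followed by twelve
 * address-conversion slots.
 */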
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	unsigned long cpu;
	int err;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    unplug_slaves(conf->mddev)
			);
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks and the scribble region
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	get_online_cpus();
	conf->scribble_len = scribble_len(newsize);
	for_each_present_cpu(cpu) {
		struct raid5_percpu *percpu;
		void *scribble;

		percpu = per_cpu_ptr(conf->percpu, cpu);
		scribble = kmalloc(conf->scribble_len, GFP_NOIO);

		if (scribble) {
			kfree(percpu->scribble);
			percpu->scribble = scribble;
		} else {
			err = -ENOMEM;
			break;
		}
	}
	put_online_cpus();

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);

		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section pass, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}
static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh, conf->pool_size);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}
static void raid5_end_read_request(struct bio * bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			rdev = conf->disks[i].rdev;
			printk_rl(KERN_INFO "raid5:%s: read error corrected"
				  " (%lu sectors at %llu on %s)\n",
				  mdname(conf->mddev), STRIPE_SECTORS,
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdevname(rdev->bdev, b));
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
		int retry = 0;
		rdev = conf->disks[i].rdev;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (conf->mddev->degraded)
			printk_rl(KERN_WARNING
				  "raid5:%s: read error not correctable "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			/* Oh, no!!! */
			printk_rl(KERN_WARNING
				  "raid5:%s: read error NOT corrected!! "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "raid5:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static void raid5_end_write_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);

static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;

	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i, previous);
}
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	pr_debug("raid5: error called\n");

	if (!test_bit(Faulty, &rdev->flags)) {
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (test_and_clear_bit(In_sync, &rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			/*
			 * if recovery was running, make sure it aborts.
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		}
		set_bit(Faulty, &rdev->flags);
		printk(KERN_ALERT
		       "raid5: Disk failure on %s, disabling device.\n"
		       "raid5: Operation continuing on %d devices.\n",
		       bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
	}
}
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh)
{
	long stripe;
	unsigned long chunk_number;
	unsigned int chunk_offset;
	int pd_idx, qd_idx;
	int ddf_layout = 0;
	sector_t new_sector;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int raid_disks = previous ? conf->previous_raid_disks
				  : conf->raid_disks;
	int data_disks = raid_disks - conf->max_degraded;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;
	BUG_ON(r_sector != chunk_number);

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number / data_disks;

	/*
	 * Compute the data disk and parity disk indexes inside the stripe
	 */
	*dd_idx = chunk_number % data_disks;

	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	pd_idx = qd_idx = ~0;
	switch(conf->level) {
	case 4:
		pd_idx = data_disks;
		break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = data_disks - stripe % raid_disks;
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = stripe % raid_disks;
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = data_disks - stripe % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = stripe % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			(*dd_idx)++;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
				algorithm);
			BUG();
		}
		break;
	case 6:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = stripe % raid_disks;
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = stripe % raid_disks;
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;

		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the
			 * order of blocks for computing Q is different.
			 */
			pd_idx = stripe % raid_disks;
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_RESTART:
			/* Same as left_asymmetric, but the first stripe is
			 * D D D P Q rather than
			 * Q D D D P
			 */
			pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			ddf_layout = 1;
			break;

		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on last device */
			pd_idx = data_disks - stripe % (raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = stripe % (raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - stripe % (raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = stripe % (raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		default:
			printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
			       algorithm);
			BUG();
		}
		break;
	}

	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
		sh->ddf_layout = ddf_layout;
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
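/*
 * Worked example (illustrative): with a 4-device RAID5 using
 * ALGORITHM_LEFT_SYMMETRIC and 64KiB chunks (sectors_per_chunk = 128),
 * logical sector 1000 gives chunk_offset = 104, chunk_number = 7,
 * stripe = 2 and an initial dd_idx of 1; pd_idx = 3 - (2 % 4) = 1 and
 * dd_idx becomes (1 + 1 + 1) % 4 = 3, so the data lands on device 3 at
 * sector 2 * 128 + 104 = 360 with parity on device 1.
 */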
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	sector_t stripe;
	int chunk_offset;
	int chunk_number, dummy1, dd_idx = i;
	sector_t r_sector;
	struct stripe_head sh2;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;
	BUG_ON(new_sector != stripe);

	if (i == sh->pd_idx)
		return 0;
	switch(conf->level) {
	case 4: break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0:
			i -= 1;
			break;
		case ALGORITHM_PARITY_N:
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
			       algorithm);
			BUG();
		}
		break;
	case 6:
		if (i == sh->qd_idx)
			return 0; /* It is the Q disk */
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
		case ALGORITHM_ROTATING_ZERO_RESTART:
		case ALGORITHM_ROTATING_N_RESTART:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		case ALGORITHM_PARITY_0:
			i -= 2;
			break;
		case ALGORITHM_PARITY_N:
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			if (sh->pd_idx == 0)
				i--;	/* P D D D Q */
			else if (i > sh->pd_idx)
				i -= 2; /* D D Q P D */
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			if (i < sh->pd_idx)
				i += data_disks + 1;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0_6:
			i -= 1;
			break;
		default:
			printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
			       algorithm);
			BUG();
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector(conf, r_sector,
				     previous, &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
	    || sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "compute_blocknr: map not correct\n");
		return 0;
	}
	return r_sector;
}
static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
			 int rcw, int expand)
{
	int i, pd_idx = sh->pd_idx, disks = sh->disks;
	raid5_conf_t *conf = sh->raid_conf;
	int level = conf->level;

	if (rcw) {
		/* if we are not expanding this is a proper write request, and
		 * there will be bios with new data to be drained into the
		 * stripe cache
		 */
		if (!expand) {
			sh->reconstruct_state = reconstruct_state_drain_run;
			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		} else
			sh->reconstruct_state = reconstruct_state_run;

		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];

			if (dev->towrite) {
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantdrain, &dev->flags);
				if (!expand)
					clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
		if (s->locked + conf->max_degraded == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&conf->pending_full_writes);
	} else {
		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));

		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i == pd_idx)
				continue;

			if (dev->towrite &&
			    (test_bit(R5_UPTODATE, &dev->flags) ||
			     test_bit(R5_Wantcompute, &dev->flags))) {
				set_bit(R5_Wantdrain, &dev->flags);
				set_bit(R5_LOCKED, &dev->flags);
				clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
	}

	/* keep the parity disk(s) locked while asynchronous operations
	 * are in flight
	 */
	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
	s->locked++;

	if (level == 6) {
		int qd_idx = sh->qd_idx;
		struct r5dev *dev = &sh->dev[qd_idx];

		set_bit(R5_LOCKED, &dev->flags);
		clear_bit(R5_UPTODATE, &dev->flags);
		s->locked++;
	}

	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
		__func__, (unsigned long long)sh->sector,
		s->locked, s->ops_request);
}
/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	raid5_conf_t *conf = sh->raid_conf;
	int firstwrite = 0;

	pr_debug("adding bh b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);

	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments++;
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);

	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		sh->bm_seq = conf->seq_flush+1;
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	return 0;
}
static void end_reshape(raid5_conf_t *conf);

static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh)
{
	int sectors_per_chunk =
		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
	int dd_idx;
	int chunk_offset = sector_div(stripe, sectors_per_chunk);
	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

	raid5_compute_sector(conf,
			     stripe * (disks - conf->max_degraded)
			     *sectors_per_chunk + chunk_offset,
			     previous,
			     &dd_idx, sh);
}
2114 handle_failed_stripe(raid5_conf_t
*conf
, struct stripe_head
*sh
,
2115 struct stripe_head_state
*s
, int disks
,
2116 struct bio
**return_bi
)
2119 for (i
= disks
; i
--; ) {
2123 if (test_bit(R5_ReadError
, &sh
->dev
[i
].flags
)) {
2126 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
2127 if (rdev
&& test_bit(In_sync
, &rdev
->flags
))
2128 /* multiple read failures in one stripe */
2129 md_error(conf
->mddev
, rdev
);
2132 spin_lock_irq(&conf
->device_lock
);
2133 /* fail all writes first */
2134 bi
= sh
->dev
[i
].towrite
;
2135 sh
->dev
[i
].towrite
= NULL
;
2141 if (test_and_clear_bit(R5_Overlap
, &sh
->dev
[i
].flags
))
2142 wake_up(&conf
->wait_for_overlap
);
2144 while (bi
&& bi
->bi_sector
<
2145 sh
->dev
[i
].sector
+ STRIPE_SECTORS
) {
2146 struct bio
*nextbi
= r5_next_bio(bi
, sh
->dev
[i
].sector
);
2147 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
2148 if (!raid5_dec_bi_phys_segments(bi
)) {
2149 md_write_end(conf
->mddev
);
2150 bi
->bi_next
= *return_bi
;
2155 /* and fail all 'written' */
2156 bi
= sh
->dev
[i
].written
;
2157 sh
->dev
[i
].written
= NULL
;
2158 if (bi
) bitmap_end
= 1;
2159 while (bi
&& bi
->bi_sector
<
2160 sh
->dev
[i
].sector
+ STRIPE_SECTORS
) {
2161 struct bio
*bi2
= r5_next_bio(bi
, sh
->dev
[i
].sector
);
2162 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
2163 if (!raid5_dec_bi_phys_segments(bi
)) {
2164 md_write_end(conf
->mddev
);
2165 bi
->bi_next
= *return_bi
;
2171 /* fail any reads if this device is non-operational and
2172 * the data has not reached the cache yet.
2174 if (!test_bit(R5_Wantfill
, &sh
->dev
[i
].flags
) &&
2175 (!test_bit(R5_Insync
, &sh
->dev
[i
].flags
) ||
2176 test_bit(R5_ReadError
, &sh
->dev
[i
].flags
))) {
2177 bi
= sh
->dev
[i
].toread
;
2178 sh
->dev
[i
].toread
= NULL
;
2179 if (test_and_clear_bit(R5_Overlap
, &sh
->dev
[i
].flags
))
2180 wake_up(&conf
->wait_for_overlap
);
2181 if (bi
) s
->to_read
--;
2182 while (bi
&& bi
->bi_sector
<
2183 sh
->dev
[i
].sector
+ STRIPE_SECTORS
) {
2184 struct bio
*nextbi
=
2185 r5_next_bio(bi
, sh
->dev
[i
].sector
);
2186 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
2187 if (!raid5_dec_bi_phys_segments(bi
)) {
2188 bi
->bi_next
= *return_bi
;
2194 spin_unlock_irq(&conf
->device_lock
);
2196 bitmap_endwrite(conf
->mddev
->bitmap
, sh
->sector
,
2197 STRIPE_SECTORS
, 0, 0);
2200 if (test_and_clear_bit(STRIPE_FULL_WRITE
, &sh
->state
))
2201 if (atomic_dec_and_test(&conf
->pending_full_writes
))
2202 md_wakeup_thread(conf
->mddev
->thread
);
2205 /* fetch_block5 - checks the given member device to see if its data needs
2206 * to be read or computed to satisfy a request.
2208 * Returns 1 when no more member devices need to be checked, otherwise returns
2209 * 0 to tell the loop in handle_stripe_fill5 to continue
2211 static int fetch_block5(struct stripe_head
*sh
, struct stripe_head_state
*s
,
2212 int disk_idx
, int disks
)
2214 struct r5dev
*dev
= &sh
->dev
[disk_idx
];
2215 struct r5dev
*failed_dev
= &sh
->dev
[s
->failed_num
];
2217 /* is the data in this block needed, and can we get it? */
2218 if (!test_bit(R5_LOCKED
, &dev
->flags
) &&
2219 !test_bit(R5_UPTODATE
, &dev
->flags
) &&
2221 (dev
->towrite
&& !test_bit(R5_OVERWRITE
, &dev
->flags
)) ||
2222 s
->syncing
|| s
->expanding
||
2224 (failed_dev
->toread
||
2225 (failed_dev
->towrite
&&
2226 !test_bit(R5_OVERWRITE
, &failed_dev
->flags
)))))) {
2227 /* We would like to get this block, possibly by computing it,
2228 * otherwise read it if the backing disk is insync
2230 if ((s
->uptodate
== disks
- 1) &&
2231 (s
->failed
&& disk_idx
== s
->failed_num
)) {
2232 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2233 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2234 set_bit(R5_Wantcompute
, &dev
->flags
);
2235 sh
->ops
.target
= disk_idx
;
2236 sh
->ops
.target2
= -1;
2238 /* Careful: from this point on 'uptodate' is in the eye
2239 * of raid_run_ops which services 'compute' operations
2240 * before writes. R5_Wantcompute flags a block that will
2241 * be R5_UPTODATE by the time it is needed for a
2242 * subsequent operation.
2245 return 1; /* uptodate + compute == disks */
2246 } else if (test_bit(R5_Insync
, &dev
->flags
)) {
2247 set_bit(R5_LOCKED
, &dev
->flags
);
2248 set_bit(R5_Wantread
, &dev
->flags
);
2250 pr_debug("Reading block %d (sync=%d)\n", disk_idx
,
2259 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
2261 static void handle_stripe_fill5(struct stripe_head
*sh
,
2262 struct stripe_head_state
*s
, int disks
)
2266 /* look for blocks to read/compute, skip this if a compute
2267 * is already in flight, or if the stripe contents are in the
2268 * midst of changing due to a write
2270 if (!test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) && !sh
->check_state
&&
2271 !sh
->reconstruct_state
)
2272 for (i
= disks
; i
--; )
2273 if (fetch_block5(sh
, s
, i
, disks
))
2275 set_bit(STRIPE_HANDLE
, &sh
->state
);
2278 /* fetch_block6 - checks the given member device to see if its data needs
2279 * to be read or computed to satisfy a request.
2281 * Returns 1 when no more member devices need to be checked, otherwise returns
2282 * 0 to tell the loop in handle_stripe_fill6 to continue
2284 static int fetch_block6(struct stripe_head
*sh
, struct stripe_head_state
*s
,
2285 struct r6_state
*r6s
, int disk_idx
, int disks
)
2287 struct r5dev
*dev
= &sh
->dev
[disk_idx
];
2288 struct r5dev
*fdev
[2] = { &sh
->dev
[r6s
->failed_num
[0]],
2289 &sh
->dev
[r6s
->failed_num
[1]] };
2291 if (!test_bit(R5_LOCKED
, &dev
->flags
) &&
2292 !test_bit(R5_UPTODATE
, &dev
->flags
) &&
2294 (dev
->towrite
&& !test_bit(R5_OVERWRITE
, &dev
->flags
)) ||
2295 s
->syncing
|| s
->expanding
||
2297 (fdev
[0]->toread
|| s
->to_write
)) ||
2299 (fdev
[1]->toread
|| s
->to_write
)))) {
2300 /* we would like to get this block, possibly by computing it,
2301 * otherwise read it if the backing disk is insync
2303 BUG_ON(test_bit(R5_Wantcompute
, &dev
->flags
));
2304 BUG_ON(test_bit(R5_Wantread
, &dev
->flags
));
2305 if ((s
->uptodate
== disks
- 1) &&
2306 (s
->failed
&& (disk_idx
== r6s
->failed_num
[0] ||
2307 disk_idx
== r6s
->failed_num
[1]))) {
2308 /* have disk failed, and we're requested to fetch it;
2311 pr_debug("Computing stripe %llu block %d\n",
2312 (unsigned long long)sh
->sector
, disk_idx
);
2313 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2314 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2315 set_bit(R5_Wantcompute
, &dev
->flags
);
2316 sh
->ops
.target
= disk_idx
;
2317 sh
->ops
.target2
= -1; /* no 2nd target */
2321 } else if (s
->uptodate
== disks
-2 && s
->failed
>= 2) {
2322 /* Computing 2-failure is *very* expensive; only
2323 * do it if failed >= 2
2326 for (other
= disks
; other
--; ) {
2327 if (other
== disk_idx
)
2329 if (!test_bit(R5_UPTODATE
,
2330 &sh
->dev
[other
].flags
))
2334 pr_debug("Computing stripe %llu blocks %d,%d\n",
2335 (unsigned long long)sh
->sector
,
2337 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2338 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2339 set_bit(R5_Wantcompute
, &sh
->dev
[disk_idx
].flags
);
2340 set_bit(R5_Wantcompute
, &sh
->dev
[other
].flags
);
2341 sh
->ops
.target
= disk_idx
;
2342 sh
->ops
.target2
= other
;
2346 } else if (test_bit(R5_Insync
, &dev
->flags
)) {
2347 set_bit(R5_LOCKED
, &dev
->flags
);
2348 set_bit(R5_Wantread
, &dev
->flags
);
2350 pr_debug("Reading block %d (sync=%d)\n",
2351 disk_idx
, s
->syncing
);
2359 * handle_stripe_fill6 - read or compute data to satisfy pending requests.
2361 static void handle_stripe_fill6(struct stripe_head
*sh
,
2362 struct stripe_head_state
*s
, struct r6_state
*r6s
,
2367 /* look for blocks to read/compute, skip this if a compute
2368 * is already in flight, or if the stripe contents are in the
2369 * midst of changing due to a write
2371 if (!test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) && !sh
->check_state
&&
2372 !sh
->reconstruct_state
)
2373 for (i
= disks
; i
--; )
2374 if (fetch_block6(sh
, s
, r6s
, i
, disks
))
2376 set_bit(STRIPE_HANDLE
, &sh
->state
);
2380 /* handle_stripe_clean_event
2381 * any written block on an uptodate or failed drive can be returned.
2382 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2383 * never LOCKED, so we don't need to test 'failed' directly.
2385 static void handle_stripe_clean_event(raid5_conf_t
*conf
,
2386 struct stripe_head
*sh
, int disks
, struct bio
**return_bi
)
2391 for (i
= disks
; i
--; )
2392 if (sh
->dev
[i
].written
) {
2394 if (!test_bit(R5_LOCKED
, &dev
->flags
) &&
2395 test_bit(R5_UPTODATE
, &dev
->flags
)) {
2396 /* We can return any write requests */
2397 struct bio
*wbi
, *wbi2
;
2399 pr_debug("Return write for disc %d\n", i
);
2400 spin_lock_irq(&conf
->device_lock
);
2402 dev
->written
= NULL
;
2403 while (wbi
&& wbi
->bi_sector
<
2404 dev
->sector
+ STRIPE_SECTORS
) {
2405 wbi2
= r5_next_bio(wbi
, dev
->sector
);
2406 if (!raid5_dec_bi_phys_segments(wbi
)) {
2407 md_write_end(conf
->mddev
);
2408 wbi
->bi_next
= *return_bi
;
2413 if (dev
->towrite
== NULL
)
2415 spin_unlock_irq(&conf
->device_lock
);
2417 bitmap_endwrite(conf
->mddev
->bitmap
,
2420 !test_bit(STRIPE_DEGRADED
, &sh
->state
),
2425 if (test_and_clear_bit(STRIPE_FULL_WRITE
, &sh
->state
))
2426 if (atomic_dec_and_test(&conf
->pending_full_writes
))
2427 md_wakeup_thread(conf
->mddev
->thread
);
2430 static void handle_stripe_dirtying5(raid5_conf_t
*conf
,
2431 struct stripe_head
*sh
, struct stripe_head_state
*s
, int disks
)
2433 int rmw
= 0, rcw
= 0, i
;
2434 for (i
= disks
; i
--; ) {
2435 /* would I have to read this buffer for read_modify_write */
2436 struct r5dev
*dev
= &sh
->dev
[i
];
2437 if ((dev
->towrite
|| i
== sh
->pd_idx
) &&
2438 !test_bit(R5_LOCKED
, &dev
->flags
) &&
2439 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
2440 test_bit(R5_Wantcompute
, &dev
->flags
))) {
2441 if (test_bit(R5_Insync
, &dev
->flags
))
2444 rmw
+= 2*disks
; /* cannot read it */
2446 /* Would I have to read this buffer for reconstruct_write */
2447 if (!test_bit(R5_OVERWRITE
, &dev
->flags
) && i
!= sh
->pd_idx
&&
2448 !test_bit(R5_LOCKED
, &dev
->flags
) &&
2449 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
2450 test_bit(R5_Wantcompute
, &dev
->flags
))) {
2451 if (test_bit(R5_Insync
, &dev
->flags
)) rcw
++;
2456 pr_debug("for sector %llu, rmw=%d rcw=%d\n",
2457 (unsigned long long)sh
->sector
, rmw
, rcw
);
2458 set_bit(STRIPE_HANDLE
, &sh
->state
);
2459 if (rmw
< rcw
&& rmw
> 0)
2460 /* prefer read-modify-write, but need to get some data */
2461 for (i
= disks
; i
--; ) {
2462 struct r5dev
*dev
= &sh
->dev
[i
];
2463 if ((dev
->towrite
|| i
== sh
->pd_idx
) &&
2464 !test_bit(R5_LOCKED
, &dev
->flags
) &&
2465 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
2466 test_bit(R5_Wantcompute
, &dev
->flags
)) &&
2467 test_bit(R5_Insync
, &dev
->flags
)) {
2469 test_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) {
2470 pr_debug("Read_old block "
2471 "%d for r-m-w\n", i
);
2472 set_bit(R5_LOCKED
, &dev
->flags
);
2473 set_bit(R5_Wantread
, &dev
->flags
);
2476 set_bit(STRIPE_DELAYED
, &sh
->state
);
2477 set_bit(STRIPE_HANDLE
, &sh
->state
);
2481 if (rcw
<= rmw
&& rcw
> 0)
2482 /* want reconstruct write, but need to get some data */
2483 for (i
= disks
; i
--; ) {
2484 struct r5dev
*dev
= &sh
->dev
[i
];
2485 if (!test_bit(R5_OVERWRITE
, &dev
->flags
) &&
2487 !test_bit(R5_LOCKED
, &dev
->flags
) &&
2488 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
2489 test_bit(R5_Wantcompute
, &dev
->flags
)) &&
2490 test_bit(R5_Insync
, &dev
->flags
)) {
2492 test_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) {
2493 pr_debug("Read_old block "
2494 "%d for Reconstruct\n", i
);
2495 set_bit(R5_LOCKED
, &dev
->flags
);
2496 set_bit(R5_Wantread
, &dev
->flags
);
2499 set_bit(STRIPE_DELAYED
, &sh
->state
);
2500 set_bit(STRIPE_HANDLE
, &sh
->state
);
2504 /* now if nothing is locked, and if we have enough data,
2505 * we can start a write request
2507 /* since handle_stripe can be called at any time we need to handle the
2508 * case where a compute block operation has been submitted and then a
2509 * subsequent call wants to start a write request. raid_run_ops only
2510 * handles the case where compute block and reconstruct are requested
2511 * simultaneously. If this is not the case then new writes need to be
2512 * held off until the compute completes.
2514 if ((s
->req_compute
|| !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
)) &&
2515 (s
->locked
== 0 && (rcw
== 0 || rmw
== 0) &&
2516 !test_bit(STRIPE_BIT_DELAY
, &sh
->state
)))
2517 schedule_reconstruction(sh
, s
, rcw
== 0, 0);
2520 static void handle_stripe_dirtying6(raid5_conf_t
*conf
,
2521 struct stripe_head
*sh
, struct stripe_head_state
*s
,
2522 struct r6_state
*r6s
, int disks
)
2524 int rcw
= 0, pd_idx
= sh
->pd_idx
, i
;
2525 int qd_idx
= sh
->qd_idx
;
2527 set_bit(STRIPE_HANDLE
, &sh
->state
);
2528 for (i
= disks
; i
--; ) {
2529 struct r5dev
*dev
= &sh
->dev
[i
];
2530 /* check if we haven't enough data */
2531 if (!test_bit(R5_OVERWRITE
, &dev
->flags
) &&
2532 i
!= pd_idx
&& i
!= qd_idx
&&
2533 !test_bit(R5_LOCKED
, &dev
->flags
) &&
2534 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
2535 test_bit(R5_Wantcompute
, &dev
->flags
))) {
2537 if (!test_bit(R5_Insync
, &dev
->flags
))
2538 continue; /* it's a failed drive */
2541 test_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) {
2542 pr_debug("Read_old stripe %llu "
2543 "block %d for Reconstruct\n",
2544 (unsigned long long)sh
->sector
, i
);
2545 set_bit(R5_LOCKED
, &dev
->flags
);
2546 set_bit(R5_Wantread
, &dev
->flags
);
2549 pr_debug("Request delayed stripe %llu "
2550 "block %d for Reconstruct\n",
2551 (unsigned long long)sh
->sector
, i
);
2552 set_bit(STRIPE_DELAYED
, &sh
->state
);
2553 set_bit(STRIPE_HANDLE
, &sh
->state
);
2557 /* now if nothing is locked, and if we have enough data, we can start a
2560 if ((s
->req_compute
|| !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
)) &&
2561 s
->locked
== 0 && rcw
== 0 &&
2562 !test_bit(STRIPE_BIT_DELAY
, &sh
->state
)) {
2563 schedule_reconstruction(sh
, s
, 1, 0);
2567 static void handle_parity_checks5(raid5_conf_t
*conf
, struct stripe_head
*sh
,
2568 struct stripe_head_state
*s
, int disks
)
2570 struct r5dev
*dev
= NULL
;
2572 set_bit(STRIPE_HANDLE
, &sh
->state
);
2574 switch (sh
->check_state
) {
2575 case check_state_idle
:
2576 /* start a new check operation if there are no failures */
2577 if (s
->failed
== 0) {
2578 BUG_ON(s
->uptodate
!= disks
);
2579 sh
->check_state
= check_state_run
;
2580 set_bit(STRIPE_OP_CHECK
, &s
->ops_request
);
2581 clear_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
);
2585 dev
= &sh
->dev
[s
->failed_num
];
2587 case check_state_compute_result
:
2588 sh
->check_state
= check_state_idle
;
2590 dev
= &sh
->dev
[sh
->pd_idx
];
2592 /* check that a write has not made the stripe insync */
2593 if (test_bit(STRIPE_INSYNC
, &sh
->state
))
2596 /* either failed parity check, or recovery is happening */
2597 BUG_ON(!test_bit(R5_UPTODATE
, &dev
->flags
));
2598 BUG_ON(s
->uptodate
!= disks
);
2600 set_bit(R5_LOCKED
, &dev
->flags
);
2602 set_bit(R5_Wantwrite
, &dev
->flags
);
2604 clear_bit(STRIPE_DEGRADED
, &sh
->state
);
2605 set_bit(STRIPE_INSYNC
, &sh
->state
);
2607 case check_state_run
:
2608 break; /* we will be called again upon completion */
2609 case check_state_check_result
:
2610 sh
->check_state
= check_state_idle
;
2612 /* if a failure occurred during the check operation, leave
2613 * STRIPE_INSYNC not set and let the stripe be handled again
2618 /* handle a successful check operation, if parity is correct
2619 * we are done. Otherwise update the mismatch count and repair
2620 * parity if !MD_RECOVERY_CHECK
2622 if ((sh
->ops
.zero_sum_result
& SUM_CHECK_P_RESULT
) == 0)
2623 /* parity is correct (on disc,
2624 * not in buffer any more)
2626 set_bit(STRIPE_INSYNC
, &sh
->state
);
2628 conf
->mddev
->resync_mismatches
+= STRIPE_SECTORS
;
2629 if (test_bit(MD_RECOVERY_CHECK
, &conf
->mddev
->recovery
))
2630 /* don't try to repair!! */
2631 set_bit(STRIPE_INSYNC
, &sh
->state
);
2633 sh
->check_state
= check_state_compute_run
;
2634 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2635 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2636 set_bit(R5_Wantcompute
,
2637 &sh
->dev
[sh
->pd_idx
].flags
);
2638 sh
->ops
.target
= sh
->pd_idx
;
2639 sh
->ops
.target2
= -1;
2644 case check_state_compute_run
:
2647 printk(KERN_ERR
"%s: unknown check_state: %d sector: %llu\n",
2648 __func__
, sh
->check_state
,
2649 (unsigned long long) sh
->sector
);
2655 static void handle_parity_checks6(raid5_conf_t
*conf
, struct stripe_head
*sh
,
2656 struct stripe_head_state
*s
,
2657 struct r6_state
*r6s
, int disks
)
2659 int pd_idx
= sh
->pd_idx
;
2660 int qd_idx
= sh
->qd_idx
;
2663 set_bit(STRIPE_HANDLE
, &sh
->state
);
2665 BUG_ON(s
->failed
> 2);
2667 /* Want to check and possibly repair P and Q.
2668 * However there could be one 'failed' device, in which
2669 * case we can only check one of them, possibly using the
2670 * other to generate missing data
2673 switch (sh
->check_state
) {
2674 case check_state_idle
:
2675 /* start a new check operation if there are < 2 failures */
2676 if (s
->failed
== r6s
->q_failed
) {
2677 /* The only possible failed device holds Q, so it
2678 * makes sense to check P (If anything else were failed,
2679 * we would have used P to recreate it).
2681 sh
->check_state
= check_state_run
;
2683 if (!r6s
->q_failed
&& s
->failed
< 2) {
2684 /* Q is not failed, and we didn't use it to generate
2685 * anything, so it makes sense to check it
2687 if (sh
->check_state
== check_state_run
)
2688 sh
->check_state
= check_state_run_pq
;
2690 sh
->check_state
= check_state_run_q
;
2693 /* discard potentially stale zero_sum_result */
2694 sh
->ops
.zero_sum_result
= 0;
2696 if (sh
->check_state
== check_state_run
) {
2697 /* async_xor_zero_sum destroys the contents of P */
2698 clear_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
);
2701 if (sh
->check_state
>= check_state_run
&&
2702 sh
->check_state
<= check_state_run_pq
) {
2703 /* async_syndrome_zero_sum preserves P and Q, so
2704 * no need to mark them !uptodate here
2706 set_bit(STRIPE_OP_CHECK
, &s
->ops_request
);
2710 /* we have 2-disk failure */
2711 BUG_ON(s
->failed
!= 2);
2713 case check_state_compute_result
:
2714 sh
->check_state
= check_state_idle
;
2716 /* check that a write has not made the stripe insync */
2717 if (test_bit(STRIPE_INSYNC
, &sh
->state
))
2720 /* now write out any block on a failed drive,
2721 * or P or Q if they were recomputed
2723 BUG_ON(s
->uptodate
< disks
- 1); /* We don't need Q to recover */
2724 if (s
->failed
== 2) {
2725 dev
= &sh
->dev
[r6s
->failed_num
[1]];
2727 set_bit(R5_LOCKED
, &dev
->flags
);
2728 set_bit(R5_Wantwrite
, &dev
->flags
);
2730 if (s
->failed
>= 1) {
2731 dev
= &sh
->dev
[r6s
->failed_num
[0]];
2733 set_bit(R5_LOCKED
, &dev
->flags
);
2734 set_bit(R5_Wantwrite
, &dev
->flags
);
2736 if (sh
->ops
.zero_sum_result
& SUM_CHECK_P_RESULT
) {
2737 dev
= &sh
->dev
[pd_idx
];
2739 set_bit(R5_LOCKED
, &dev
->flags
);
2740 set_bit(R5_Wantwrite
, &dev
->flags
);
2742 if (sh
->ops
.zero_sum_result
& SUM_CHECK_Q_RESULT
) {
2743 dev
= &sh
->dev
[qd_idx
];
2745 set_bit(R5_LOCKED
, &dev
->flags
);
2746 set_bit(R5_Wantwrite
, &dev
->flags
);
2748 clear_bit(STRIPE_DEGRADED
, &sh
->state
);
2750 set_bit(STRIPE_INSYNC
, &sh
->state
);
2752 case check_state_run
:
2753 case check_state_run_q
:
2754 case check_state_run_pq
:
2755 break; /* we will be called again upon completion */
2756 case check_state_check_result
:
2757 sh
->check_state
= check_state_idle
;
2759 /* handle a successful check operation, if parity is correct
2760 * we are done. Otherwise update the mismatch count and repair
2761 * parity if !MD_RECOVERY_CHECK
2763 if (sh
->ops
.zero_sum_result
== 0) {
2764 /* both parities are correct */
2766 set_bit(STRIPE_INSYNC
, &sh
->state
);
2768 /* in contrast to the raid5 case we can validate
2769 * parity, but still have a failure to write
2772 sh
->check_state
= check_state_compute_result
;
2773 /* Returning at this point means that we may go
2774 * off and bring p and/or q uptodate again so
2775 * we make sure to check zero_sum_result again
2776 * to verify if p or q need writeback
2780 conf
->mddev
->resync_mismatches
+= STRIPE_SECTORS
;
2781 if (test_bit(MD_RECOVERY_CHECK
, &conf
->mddev
->recovery
))
2782 /* don't try to repair!! */
2783 set_bit(STRIPE_INSYNC
, &sh
->state
);
2785 int *target
= &sh
->ops
.target
;
2787 sh
->ops
.target
= -1;
2788 sh
->ops
.target2
= -1;
2789 sh
->check_state
= check_state_compute_run
;
2790 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2791 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2792 if (sh
->ops
.zero_sum_result
& SUM_CHECK_P_RESULT
) {
2793 set_bit(R5_Wantcompute
,
2794 &sh
->dev
[pd_idx
].flags
);
2796 target
= &sh
->ops
.target2
;
2799 if (sh
->ops
.zero_sum_result
& SUM_CHECK_Q_RESULT
) {
2800 set_bit(R5_Wantcompute
,
2801 &sh
->dev
[qd_idx
].flags
);
2808 case check_state_compute_run
:
2811 printk(KERN_ERR
"%s: unknown check_state: %d sector: %llu\n",
2812 __func__
, sh
->check_state
,
2813 (unsigned long long) sh
->sector
);
2818 static void handle_stripe_expansion(raid5_conf_t
*conf
, struct stripe_head
*sh
,
2819 struct r6_state
*r6s
)
2823 /* We have read all the blocks in this stripe and now we need to
2824 * copy some of them into a target stripe for expand.
2826 struct dma_async_tx_descriptor
*tx
= NULL
;
2827 clear_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
2828 for (i
= 0; i
< sh
->disks
; i
++)
2829 if (i
!= sh
->pd_idx
&& i
!= sh
->qd_idx
) {
2831 struct stripe_head
*sh2
;
2832 struct async_submit_ctl submit
;
2834 sector_t bn
= compute_blocknr(sh
, i
, 1);
2835 sector_t s
= raid5_compute_sector(conf
, bn
, 0,
2837 sh2
= get_active_stripe(conf
, s
, 0, 1, 1);
2839 /* so far only the early blocks of this stripe
2840 * have been requested. When later blocks
2841 * get requested, we will try again
2844 if (!test_bit(STRIPE_EXPANDING
, &sh2
->state
) ||
2845 test_bit(R5_Expanded
, &sh2
->dev
[dd_idx
].flags
)) {
2846 /* must have already done this block */
2847 release_stripe(sh2
);
2851 /* place all the copies on one channel */
2852 init_async_submit(&submit
, 0, tx
, NULL
, NULL
, NULL
);
2853 tx
= async_memcpy(sh2
->dev
[dd_idx
].page
,
2854 sh
->dev
[i
].page
, 0, 0, STRIPE_SIZE
,
2857 set_bit(R5_Expanded
, &sh2
->dev
[dd_idx
].flags
);
2858 set_bit(R5_UPTODATE
, &sh2
->dev
[dd_idx
].flags
);
2859 for (j
= 0; j
< conf
->raid_disks
; j
++)
2860 if (j
!= sh2
->pd_idx
&&
2861 (!r6s
|| j
!= sh2
->qd_idx
) &&
2862 !test_bit(R5_Expanded
, &sh2
->dev
[j
].flags
))
2864 if (j
== conf
->raid_disks
) {
2865 set_bit(STRIPE_EXPAND_READY
, &sh2
->state
);
2866 set_bit(STRIPE_HANDLE
, &sh2
->state
);
2868 release_stripe(sh2
);
2871 /* done submitting copies, wait for them to complete */
2874 dma_wait_for_async_tx(tx
);
2880 * handle_stripe - do things to a stripe.
2882 * We lock the stripe and then examine the state of various bits
2883 * to see what needs to be done.
2885 * return some read request which now have data
2886 * return some write requests which are safely on disc
2887 * schedule a read on some buffers
2888 * schedule a write of some buffers
2889 * return confirmation of parity correctness
2891 * buffers are taken off read_list or write_list, and bh_cache buffers
2892 * get BH_Lock set before the stripe lock is released.
2896 static bool handle_stripe5(struct stripe_head
*sh
)
2898 raid5_conf_t
*conf
= sh
->raid_conf
;
2899 int disks
= sh
->disks
, i
;
2900 struct bio
*return_bi
= NULL
;
2901 struct stripe_head_state s
;
2903 mdk_rdev_t
*blocked_rdev
= NULL
;
2906 memset(&s
, 0, sizeof(s
));
2907 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
2908 "reconstruct:%d\n", (unsigned long long)sh
->sector
, sh
->state
,
2909 atomic_read(&sh
->count
), sh
->pd_idx
, sh
->check_state
,
2910 sh
->reconstruct_state
);
2912 spin_lock(&sh
->lock
);
2913 clear_bit(STRIPE_HANDLE
, &sh
->state
);
2914 clear_bit(STRIPE_DELAYED
, &sh
->state
);
2916 s
.syncing
= test_bit(STRIPE_SYNCING
, &sh
->state
);
2917 s
.expanding
= test_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
2918 s
.expanded
= test_bit(STRIPE_EXPAND_READY
, &sh
->state
);
2920 /* Now to look around and see what can be done */
2922 for (i
=disks
; i
--; ) {
2924 struct r5dev
*dev
= &sh
->dev
[i
];
2925 clear_bit(R5_Insync
, &dev
->flags
);
2927 pr_debug("check %d: state 0x%lx toread %p read %p write %p "
2928 "written %p\n", i
, dev
->flags
, dev
->toread
, dev
->read
,
2929 dev
->towrite
, dev
->written
);
2931 /* maybe we can request a biofill operation
2933 * new wantfill requests are only permitted while
2934 * ops_complete_biofill is guaranteed to be inactive
2936 if (test_bit(R5_UPTODATE
, &dev
->flags
) && dev
->toread
&&
2937 !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
))
2938 set_bit(R5_Wantfill
, &dev
->flags
);
2940 /* now count some things */
2941 if (test_bit(R5_LOCKED
, &dev
->flags
)) s
.locked
++;
2942 if (test_bit(R5_UPTODATE
, &dev
->flags
)) s
.uptodate
++;
2943 if (test_bit(R5_Wantcompute
, &dev
->flags
)) s
.compute
++;
2945 if (test_bit(R5_Wantfill
, &dev
->flags
))
2947 else if (dev
->toread
)
2951 if (!test_bit(R5_OVERWRITE
, &dev
->flags
))
2956 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
2957 if (blocked_rdev
== NULL
&&
2958 rdev
&& unlikely(test_bit(Blocked
, &rdev
->flags
))) {
2959 blocked_rdev
= rdev
;
2960 atomic_inc(&rdev
->nr_pending
);
2962 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)) {
2963 /* The ReadError flag will just be confusing now */
2964 clear_bit(R5_ReadError
, &dev
->flags
);
2965 clear_bit(R5_ReWrite
, &dev
->flags
);
2967 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)
2968 || test_bit(R5_ReadError
, &dev
->flags
)) {
2972 set_bit(R5_Insync
, &dev
->flags
);
2976 if (unlikely(blocked_rdev
)) {
2977 if (s
.syncing
|| s
.expanding
|| s
.expanded
||
2978 s
.to_write
|| s
.written
) {
2979 set_bit(STRIPE_HANDLE
, &sh
->state
);
2982 /* There is nothing for the blocked_rdev to block */
2983 rdev_dec_pending(blocked_rdev
, conf
->mddev
);
2984 blocked_rdev
= NULL
;
2987 if (s
.to_fill
&& !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
)) {
2988 set_bit(STRIPE_OP_BIOFILL
, &s
.ops_request
);
2989 set_bit(STRIPE_BIOFILL_RUN
, &sh
->state
);
2992 pr_debug("locked=%d uptodate=%d to_read=%d"
2993 " to_write=%d failed=%d failed_num=%d\n",
2994 s
.locked
, s
.uptodate
, s
.to_read
, s
.to_write
,
2995 s
.failed
, s
.failed_num
);
2996 /* check if the array has lost two devices and, if so, some requests might
2999 if (s
.failed
> 1 && s
.to_read
+s
.to_write
+s
.written
)
3000 handle_failed_stripe(conf
, sh
, &s
, disks
, &return_bi
);
3001 if (s
.failed
> 1 && s
.syncing
) {
3002 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,0);
3003 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3007 /* might be able to return some write requests if the parity block
3008 * is safe, or on a failed drive
3010 dev
= &sh
->dev
[sh
->pd_idx
];
3012 ((test_bit(R5_Insync
, &dev
->flags
) &&
3013 !test_bit(R5_LOCKED
, &dev
->flags
) &&
3014 test_bit(R5_UPTODATE
, &dev
->flags
)) ||
3015 (s
.failed
== 1 && s
.failed_num
== sh
->pd_idx
)))
3016 handle_stripe_clean_event(conf
, sh
, disks
, &return_bi
);
3018 /* Now we might consider reading some blocks, either to check/generate
3019 * parity, or to satisfy requests
3020 * or to load a block that is being partially written.
3022 if (s
.to_read
|| s
.non_overwrite
||
3023 (s
.syncing
&& (s
.uptodate
+ s
.compute
< disks
)) || s
.expanding
)
3024 handle_stripe_fill5(sh
, &s
, disks
);
3026 /* Now we check to see if any write operations have recently
3030 if (sh
->reconstruct_state
== reconstruct_state_prexor_drain_result
)
3032 if (sh
->reconstruct_state
== reconstruct_state_drain_result
||
3033 sh
->reconstruct_state
== reconstruct_state_prexor_drain_result
) {
3034 sh
->reconstruct_state
= reconstruct_state_idle
;
3036 /* All the 'written' buffers and the parity block are ready to
3037 * be written back to disk
3039 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
));
3040 for (i
= disks
; i
--; ) {
3042 if (test_bit(R5_LOCKED
, &dev
->flags
) &&
3043 (i
== sh
->pd_idx
|| dev
->written
)) {
3044 pr_debug("Writing block %d\n", i
);
3045 set_bit(R5_Wantwrite
, &dev
->flags
);
3048 if (!test_bit(R5_Insync
, &dev
->flags
) ||
3049 (i
== sh
->pd_idx
&& s
.failed
== 0))
3050 set_bit(STRIPE_INSYNC
, &sh
->state
);
3053 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) {
3054 atomic_dec(&conf
->preread_active_stripes
);
3055 if (atomic_read(&conf
->preread_active_stripes
) <
3057 md_wakeup_thread(conf
->mddev
->thread
);
3061 /* Now to consider new write requests and what else, if anything
3062 * should be read. We do not handle new writes when:
3063 * 1/ A 'write' operation (copy+xor) is already in flight.
3064 * 2/ A 'check' operation is in flight, as it may clobber the parity
3067 if (s
.to_write
&& !sh
->reconstruct_state
&& !sh
->check_state
)
3068 handle_stripe_dirtying5(conf
, sh
, &s
, disks
);
3070 /* maybe we need to check and possibly fix the parity for this stripe
3071 * Any reads will already have been scheduled, so we just see if enough
3072 * data is available. The parity check is held off while parity
3073 * dependent operations are in flight.
3075 if (sh
->check_state
||
3076 (s
.syncing
&& s
.locked
== 0 &&
3077 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) &&
3078 !test_bit(STRIPE_INSYNC
, &sh
->state
)))
3079 handle_parity_checks5(conf
, sh
, &s
, disks
);
3081 if (s
.syncing
&& s
.locked
== 0 && test_bit(STRIPE_INSYNC
, &sh
->state
)) {
3082 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,1);
3083 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3086 /* If the failed drive is just a ReadError, then we might need to progress
3087 * the repair/check process
3089 if (s
.failed
== 1 && !conf
->mddev
->ro
&&
3090 test_bit(R5_ReadError
, &sh
->dev
[s
.failed_num
].flags
)
3091 && !test_bit(R5_LOCKED
, &sh
->dev
[s
.failed_num
].flags
)
3092 && test_bit(R5_UPTODATE
, &sh
->dev
[s
.failed_num
].flags
)
3094 dev
= &sh
->dev
[s
.failed_num
];
3095 if (!test_bit(R5_ReWrite
, &dev
->flags
)) {
3096 set_bit(R5_Wantwrite
, &dev
->flags
);
3097 set_bit(R5_ReWrite
, &dev
->flags
);
3098 set_bit(R5_LOCKED
, &dev
->flags
);
3101 /* let's read it back */
3102 set_bit(R5_Wantread
, &dev
->flags
);
3103 set_bit(R5_LOCKED
, &dev
->flags
);
3108 /* Finish reconstruct operations initiated by the expansion process */
3109 if (sh
->reconstruct_state
== reconstruct_state_result
) {
3110 struct stripe_head
*sh2
3111 = get_active_stripe(conf
, sh
->sector
, 1, 1, 1);
3112 if (sh2
&& test_bit(STRIPE_EXPAND_SOURCE
, &sh2
->state
)) {
3113 /* sh cannot be written until sh2 has been read.
3114 * so arrange for sh to be delayed a little
3116 set_bit(STRIPE_DELAYED
, &sh
->state
);
3117 set_bit(STRIPE_HANDLE
, &sh
->state
);
3118 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
,
3120 atomic_inc(&conf
->preread_active_stripes
);
3121 release_stripe(sh2
);
3125 release_stripe(sh2
);
3127 sh
->reconstruct_state
= reconstruct_state_idle
;
3128 clear_bit(STRIPE_EXPANDING
, &sh
->state
);
3129 for (i
= conf
->raid_disks
; i
--; ) {
3130 set_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
);
3131 set_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
3136 if (s
.expanded
&& test_bit(STRIPE_EXPANDING
, &sh
->state
) &&
3137 !sh
->reconstruct_state
) {
3138 /* Need to write out all blocks after computing parity */
3139 sh
->disks
= conf
->raid_disks
;
3140 stripe_set_idx(sh
->sector
, conf
, 0, sh
);
3141 schedule_reconstruction(sh
, &s
, 1, 1);
3142 } else if (s
.expanded
&& !sh
->reconstruct_state
&& s
.locked
== 0) {
3143 clear_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3144 atomic_dec(&conf
->reshape_stripes
);
3145 wake_up(&conf
->wait_for_overlap
);
3146 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, 1);
3149 if (s
.expanding
&& s
.locked
== 0 &&
3150 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
))
3151 handle_stripe_expansion(conf
, sh
, NULL
);
3154 spin_unlock(&sh
->lock
);
3156 /* wait for this device to become unblocked */
3157 if (unlikely(blocked_rdev
))
3158 md_wait_for_blocked_rdev(blocked_rdev
, conf
->mddev
);
3161 raid_run_ops(sh
, s
.ops_request
);
3165 return_io(return_bi
);
3167 return blocked_rdev
== NULL
;
3170 static bool handle_stripe6(struct stripe_head
*sh
)
3172 raid5_conf_t
*conf
= sh
->raid_conf
;
3173 int disks
= sh
->disks
;
3174 struct bio
*return_bi
= NULL
;
3175 int i
, pd_idx
= sh
->pd_idx
, qd_idx
= sh
->qd_idx
;
3176 struct stripe_head_state s
;
3177 struct r6_state r6s
;
3178 struct r5dev
*dev
, *pdev
, *qdev
;
3179 mdk_rdev_t
*blocked_rdev
= NULL
;
3181 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3182 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
3183 (unsigned long long)sh
->sector
, sh
->state
,
3184 atomic_read(&sh
->count
), pd_idx
, qd_idx
,
3185 sh
->check_state
, sh
->reconstruct_state
);
3186 memset(&s
, 0, sizeof(s
));
3188 spin_lock(&sh
->lock
);
3189 clear_bit(STRIPE_HANDLE
, &sh
->state
);
3190 clear_bit(STRIPE_DELAYED
, &sh
->state
);
3192 s
.syncing
= test_bit(STRIPE_SYNCING
, &sh
->state
);
3193 s
.expanding
= test_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
3194 s
.expanded
= test_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3195 /* Now to look around and see what can be done */
3198 for (i
=disks
; i
--; ) {
3201 clear_bit(R5_Insync
, &dev
->flags
);
3203 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
3204 i
, dev
->flags
, dev
->toread
, dev
->towrite
, dev
->written
);
3205 /* maybe we can reply to a read
3207 * new wantfill requests are only permitted while
3208 * ops_complete_biofill is guaranteed to be inactive
3210 if (test_bit(R5_UPTODATE
, &dev
->flags
) && dev
->toread
&&
3211 !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
))
3212 set_bit(R5_Wantfill
, &dev
->flags
);
3214 /* now count some things */
3215 if (test_bit(R5_LOCKED
, &dev
->flags
)) s
.locked
++;
3216 if (test_bit(R5_UPTODATE
, &dev
->flags
)) s
.uptodate
++;
3217 if (test_bit(R5_Wantcompute
, &dev
->flags
)) {
3219 BUG_ON(s
.compute
> 2);
3222 if (test_bit(R5_Wantfill
, &dev
->flags
)) {
3224 } else if (dev
->toread
)
3228 if (!test_bit(R5_OVERWRITE
, &dev
->flags
))
3233 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
3234 if (blocked_rdev
== NULL
&&
3235 rdev
&& unlikely(test_bit(Blocked
, &rdev
->flags
))) {
3236 blocked_rdev
= rdev
;
3237 atomic_inc(&rdev
->nr_pending
);
3239 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)) {
3240 /* The ReadError flag will just be confusing now */
3241 clear_bit(R5_ReadError
, &dev
->flags
);
3242 clear_bit(R5_ReWrite
, &dev
->flags
);
3244 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)
3245 || test_bit(R5_ReadError
, &dev
->flags
)) {
3247 r6s
.failed_num
[s
.failed
] = i
;
3250 set_bit(R5_Insync
, &dev
->flags
);
3254 if (unlikely(blocked_rdev
)) {
3255 if (s
.syncing
|| s
.expanding
|| s
.expanded
||
3256 s
.to_write
|| s
.written
) {
3257 set_bit(STRIPE_HANDLE
, &sh
->state
);
3260 /* There is nothing for the blocked_rdev to block */
3261 rdev_dec_pending(blocked_rdev
, conf
->mddev
);
3262 blocked_rdev
= NULL
;
3265 if (s
.to_fill
&& !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
)) {
3266 set_bit(STRIPE_OP_BIOFILL
, &s
.ops_request
);
3267 set_bit(STRIPE_BIOFILL_RUN
, &sh
->state
);
3270 pr_debug("locked=%d uptodate=%d to_read=%d"
3271 " to_write=%d failed=%d failed_num=%d,%d\n",
3272 s
.locked
, s
.uptodate
, s
.to_read
, s
.to_write
, s
.failed
,
3273 r6s
.failed_num
[0], r6s
.failed_num
[1]);
3274 /* check if the array has lost >2 devices and, if so, some requests
3275 * might need to be failed
3277 if (s
.failed
> 2 && s
.to_read
+s
.to_write
+s
.written
)
3278 handle_failed_stripe(conf
, sh
, &s
, disks
, &return_bi
);
3279 if (s
.failed
> 2 && s
.syncing
) {
3280 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,0);
3281 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3286 * might be able to return some write requests if the parity blocks
3287 * are safe, or on a failed drive
3289 pdev
= &sh
->dev
[pd_idx
];
3290 r6s
.p_failed
= (s
.failed
>= 1 && r6s
.failed_num
[0] == pd_idx
)
3291 || (s
.failed
>= 2 && r6s
.failed_num
[1] == pd_idx
);
3292 qdev
= &sh
->dev
[qd_idx
];
3293 r6s
.q_failed
= (s
.failed
>= 1 && r6s
.failed_num
[0] == qd_idx
)
3294 || (s
.failed
>= 2 && r6s
.failed_num
[1] == qd_idx
);
3297 ( r6s
.p_failed
|| ((test_bit(R5_Insync
, &pdev
->flags
)
3298 && !test_bit(R5_LOCKED
, &pdev
->flags
)
3299 && test_bit(R5_UPTODATE
, &pdev
->flags
)))) &&
3300 ( r6s
.q_failed
|| ((test_bit(R5_Insync
, &qdev
->flags
)
3301 && !test_bit(R5_LOCKED
, &qdev
->flags
)
3302 && test_bit(R5_UPTODATE
, &qdev
->flags
)))))
3303 handle_stripe_clean_event(conf
, sh
, disks
, &return_bi
);
3305 /* Now we might consider reading some blocks, either to check/generate
3306 * parity, or to satisfy requests
3307 * or to load a block that is being partially written.
3309 if (s
.to_read
|| s
.non_overwrite
|| (s
.to_write
&& s
.failed
) ||
3310 (s
.syncing
&& (s
.uptodate
+ s
.compute
< disks
)) || s
.expanding
)
3311 handle_stripe_fill6(sh
, &s
, &r6s
, disks
);
3313 /* Now we check to see if any write operations have recently
3316 if (sh
->reconstruct_state
== reconstruct_state_drain_result
) {
3317 int qd_idx
= sh
->qd_idx
;
3319 sh
->reconstruct_state
= reconstruct_state_idle
;
3320 /* All the 'written' buffers and the parity blocks are ready to
3321 * be written back to disk
3323 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
));
3324 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[qd_idx
].flags
));
3325 for (i
= disks
; i
--; ) {
3327 if (test_bit(R5_LOCKED
, &dev
->flags
) &&
3328 (i
== sh
->pd_idx
|| i
== qd_idx
||
3330 pr_debug("Writing block %d\n", i
);
3331 BUG_ON(!test_bit(R5_UPTODATE
, &dev
->flags
));
3332 set_bit(R5_Wantwrite
, &dev
->flags
);
3333 if (!test_bit(R5_Insync
, &dev
->flags
) ||
3334 ((i
== sh
->pd_idx
|| i
== qd_idx
) &&
3336 set_bit(STRIPE_INSYNC
, &sh
->state
);
3339 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) {
3340 atomic_dec(&conf
->preread_active_stripes
);
3341 if (atomic_read(&conf
->preread_active_stripes
) <
3343 md_wakeup_thread(conf
->mddev
->thread
);
3347 /* Now to consider new write requests and what else, if anything
3348 * should be read. We do not handle new writes when:
3349 * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
3350 * 2/ A 'check' operation is in flight, as it may clobber the parity
3353 if (s
.to_write
&& !sh
->reconstruct_state
&& !sh
->check_state
)
3354 handle_stripe_dirtying6(conf
, sh
, &s
, &r6s
, disks
);
3356 /* maybe we need to check and possibly fix the parity for this stripe
3357 * Any reads will already have been scheduled, so we just see if enough
3358 * data is available. The parity check is held off while parity
3359 * dependent operations are in flight.
3361 if (sh
->check_state
||
3362 (s
.syncing
&& s
.locked
== 0 &&
3363 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) &&
3364 !test_bit(STRIPE_INSYNC
, &sh
->state
)))
3365 handle_parity_checks6(conf
, sh
, &s
, &r6s
, disks
);
3367 if (s
.syncing
&& s
.locked
== 0 && test_bit(STRIPE_INSYNC
, &sh
->state
)) {
3368 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,1);
3369 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3372 /* If the failed drives are just a ReadError, then we might need
3373 * to progress the repair/check process
3375 if (s
.failed
<= 2 && !conf
->mddev
->ro
)
3376 for (i
= 0; i
< s
.failed
; i
++) {
3377 dev
= &sh
->dev
[r6s
.failed_num
[i
]];
3378 if (test_bit(R5_ReadError
, &dev
->flags
)
3379 && !test_bit(R5_LOCKED
, &dev
->flags
)
3380 && test_bit(R5_UPTODATE
, &dev
->flags
)
3382 if (!test_bit(R5_ReWrite
, &dev
->flags
)) {
3383 set_bit(R5_Wantwrite
, &dev
->flags
);
3384 set_bit(R5_ReWrite
, &dev
->flags
);
3385 set_bit(R5_LOCKED
, &dev
->flags
);
3388 /* let's read it back */
3389 set_bit(R5_Wantread
, &dev
->flags
);
3390 set_bit(R5_LOCKED
, &dev
->flags
);
3396 /* Finish reconstruct operations initiated by the expansion process */
3397 if (sh
->reconstruct_state
== reconstruct_state_result
) {
3398 sh
->reconstruct_state
= reconstruct_state_idle
;
3399 clear_bit(STRIPE_EXPANDING
, &sh
->state
);
3400 for (i
= conf
->raid_disks
; i
--; ) {
3401 set_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
);
3402 set_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
3407 if (s
.expanded
&& test_bit(STRIPE_EXPANDING
, &sh
->state
) &&
3408 !sh
->reconstruct_state
) {
3409 struct stripe_head
*sh2
3410 = get_active_stripe(conf
, sh
->sector
, 1, 1, 1);
3411 if (sh2
&& test_bit(STRIPE_EXPAND_SOURCE
, &sh2
->state
)) {
3412 /* sh cannot be written until sh2 has been read.
3413 * so arrange for sh to be delayed a little
3415 set_bit(STRIPE_DELAYED
, &sh
->state
);
3416 set_bit(STRIPE_HANDLE
, &sh
->state
);
3417 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
,
3419 atomic_inc(&conf
->preread_active_stripes
);
3420 release_stripe(sh2
);
3424 release_stripe(sh2
);
3426 /* Need to write out all blocks after computing P&Q */
3427 sh
->disks
= conf
->raid_disks
;
3428 stripe_set_idx(sh
->sector
, conf
, 0, sh
);
3429 schedule_reconstruction(sh
, &s
, 1, 1);
3430 } else if (s
.expanded
&& !sh
->reconstruct_state
&& s
.locked
== 0) {
3431 clear_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3432 atomic_dec(&conf
->reshape_stripes
);
3433 wake_up(&conf
->wait_for_overlap
);
3434 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, 1);
3437 if (s
.expanding
&& s
.locked
== 0 &&
3438 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
))
3439 handle_stripe_expansion(conf
, sh
, &r6s
);
3442 spin_unlock(&sh
->lock
);
3444 /* wait for this device to become unblocked */
3445 if (unlikely(blocked_rdev
))
3446 md_wait_for_blocked_rdev(blocked_rdev
, conf
->mddev
);
3449 raid_run_ops(sh
, s
.ops_request
);
3453 return_io(return_bi
);
3455 return blocked_rdev
== NULL
;
3458 /* returns true if the stripe was handled */
3459 static bool handle_stripe(struct stripe_head
*sh
)
3461 if (sh
->raid_conf
->level
== 6)
3462 return handle_stripe6(sh
);
3464 return handle_stripe5(sh
);
3467 static void raid5_activate_delayed(raid5_conf_t
*conf
)
3469 if (atomic_read(&conf
->preread_active_stripes
) < IO_THRESHOLD
) {
3470 while (!list_empty(&conf
->delayed_list
)) {
3471 struct list_head
*l
= conf
->delayed_list
.next
;
3472 struct stripe_head
*sh
;
3473 sh
= list_entry(l
, struct stripe_head
, lru
);
3475 clear_bit(STRIPE_DELAYED
, &sh
->state
);
3476 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
3477 atomic_inc(&conf
->preread_active_stripes
);
3478 list_add_tail(&sh
->lru
, &conf
->hold_list
);
3481 blk_plug_device(conf
->mddev
->queue
);
3484 static void activate_bit_delay(raid5_conf_t
*conf
)
3486 /* device_lock is held */
3487 struct list_head head
;
3488 list_add(&head
, &conf
->bitmap_list
);
3489 list_del_init(&conf
->bitmap_list
);
3490 while (!list_empty(&head
)) {
3491 struct stripe_head
*sh
= list_entry(head
.next
, struct stripe_head
, lru
);
3492 list_del_init(&sh
->lru
);
3493 atomic_inc(&sh
->count
);
3494 __release_stripe(conf
, sh
);
3498 static void unplug_slaves(mddev_t
*mddev
)
3500 raid5_conf_t
*conf
= mddev
->private;
3504 for (i
= 0; i
< conf
->raid_disks
; i
++) {
3505 mdk_rdev_t
*rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
3506 if (rdev
&& !test_bit(Faulty
, &rdev
->flags
) && atomic_read(&rdev
->nr_pending
)) {
3507 struct request_queue
*r_queue
= bdev_get_queue(rdev
->bdev
);
3509 atomic_inc(&rdev
->nr_pending
);
3512 blk_unplug(r_queue
);
3514 rdev_dec_pending(rdev
, mddev
);
3521 static void raid5_unplug_device(struct request_queue
*q
)
3523 mddev_t
*mddev
= q
->queuedata
;
3524 raid5_conf_t
*conf
= mddev
->private;
3525 unsigned long flags
;
3527 spin_lock_irqsave(&conf
->device_lock
, flags
);
3529 if (blk_remove_plug(q
)) {
3531 raid5_activate_delayed(conf
);
3533 md_wakeup_thread(mddev
->thread
);
3535 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
3537 unplug_slaves(mddev
);
3540 static int raid5_congested(void *data
, int bits
)
3542 mddev_t
*mddev
= data
;
3543 raid5_conf_t
*conf
= mddev
->private;
3545 /* No difference between reads and writes. Just check
3546 * how busy the stripe_cache is
3548 if (conf
->inactive_blocked
)
3552 if (list_empty_careful(&conf
->inactive_list
))
3558 /* We want read requests to align with chunks where possible,
3559 * but write requests don't need to.
3561 static int raid5_mergeable_bvec(struct request_queue
*q
,
3562 struct bvec_merge_data
*bvm
,
3563 struct bio_vec
*biovec
)
3565 mddev_t
*mddev
= q
->queuedata
;
3566 sector_t sector
= bvm
->bi_sector
+ get_start_sect(bvm
->bi_bdev
);
3568 unsigned int chunk_sectors
= mddev
->chunk_sectors
;
3569 unsigned int bio_sectors
= bvm
->bi_size
>> 9;
3571 if ((bvm
->bi_rw
& 1) == WRITE
)
3572 return biovec
->bv_len
; /* always allow writes to be mergeable */
3574 if (mddev
->new_chunk_sectors
< mddev
->chunk_sectors
)
3575 chunk_sectors
= mddev
->new_chunk_sectors
;
3576 max
= (chunk_sectors
- ((sector
& (chunk_sectors
- 1)) + bio_sectors
)) << 9;
3577 if (max
< 0) max
= 0;
3578 if (max
<= biovec
->bv_len
&& bio_sectors
== 0)
3579 return biovec
->bv_len
;
3585 static int in_chunk_boundary(mddev_t
*mddev
, struct bio
*bio
)
3587 sector_t sector
= bio
->bi_sector
+ get_start_sect(bio
->bi_bdev
);
3588 unsigned int chunk_sectors
= mddev
->chunk_sectors
;
3589 unsigned int bio_sectors
= bio
->bi_size
>> 9;
3591 if (mddev
->new_chunk_sectors
< mddev
->chunk_sectors
)
3592 chunk_sectors
= mddev
->new_chunk_sectors
;
3593 return chunk_sectors
>=
3594 ((sector
& (chunk_sectors
- 1)) + bio_sectors
);
3598 * add bio to the retry LIFO ( in O(1) ... we are in interrupt )
3599 * later sampled by raid5d.
3601 static void add_bio_to_retry(struct bio
*bi
,raid5_conf_t
*conf
)
3603 unsigned long flags
;
3605 spin_lock_irqsave(&conf
->device_lock
, flags
);
3607 bi
->bi_next
= conf
->retry_read_aligned_list
;
3608 conf
->retry_read_aligned_list
= bi
;
3610 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
3611 md_wakeup_thread(conf
->mddev
->thread
);
3615 static struct bio
*remove_bio_from_retry(raid5_conf_t
*conf
)
3619 bi
= conf
->retry_read_aligned
;
3621 conf
->retry_read_aligned
= NULL
;
3624 bi
= conf
->retry_read_aligned_list
;
3626 conf
->retry_read_aligned_list
= bi
->bi_next
;
3629 * this sets the active strip count to 1 and the processed
3630 * strip count to zero (upper 8 bits)
3632 bi
->bi_phys_segments
= 1; /* biased count of active stripes */
3640 * The "raid5_align_endio" should check if the read succeeded and if it
3641 * did, call bio_endio on the original bio (having bio_put the new bio
3643 * If the read failed..
3645 static void raid5_align_endio(struct bio
*bi
, int error
)
3647 struct bio
* raid_bi
= bi
->bi_private
;
3650 int uptodate
= test_bit(BIO_UPTODATE
, &bi
->bi_flags
);
3655 mddev
= raid_bi
->bi_bdev
->bd_disk
->queue
->queuedata
;
3656 conf
= mddev
->private;
3657 rdev
= (void*)raid_bi
->bi_next
;
3658 raid_bi
->bi_next
= NULL
;
3660 rdev_dec_pending(rdev
, conf
->mddev
);
3662 if (!error
&& uptodate
) {
3663 bio_endio(raid_bi
, 0);
3664 if (atomic_dec_and_test(&conf
->active_aligned_reads
))
3665 wake_up(&conf
->wait_for_stripe
);
3670 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3672 add_bio_to_retry(raid_bi
, conf
);
3675 static int bio_fits_rdev(struct bio
*bi
)
3677 struct request_queue
*q
= bdev_get_queue(bi
->bi_bdev
);
3679 if ((bi
->bi_size
>>9) > queue_max_sectors(q
))
3681 blk_recount_segments(q
, bi
);
3682 if (bi
->bi_phys_segments
> queue_max_phys_segments(q
))
3685 if (q
->merge_bvec_fn
)
3686 /* it's too hard to apply the merge_bvec_fn at this stage,
3695 static int chunk_aligned_read(struct request_queue
*q
, struct bio
* raid_bio
)
3697 mddev_t
*mddev
= q
->queuedata
;
3698 raid5_conf_t
*conf
= mddev
->private;
3699 unsigned int dd_idx
;
3700 struct bio
* align_bi
;
3703 if (!in_chunk_boundary(mddev
, raid_bio
)) {
3704 pr_debug("chunk_aligned_read : non aligned\n");
3708 * use bio_clone to make a copy of the bio
3710 align_bi
= bio_clone(raid_bio
, GFP_NOIO
);
3714 * set bi_end_io to a new function, and set bi_private to the
3717 align_bi
->bi_end_io
= raid5_align_endio
;
3718 align_bi
->bi_private
= raid_bio
;
3722 align_bi
->bi_sector
= raid5_compute_sector(conf
, raid_bio
->bi_sector
,
3727 rdev
= rcu_dereference(conf
->disks
[dd_idx
].rdev
);
3728 if (rdev
&& test_bit(In_sync
, &rdev
->flags
)) {
3729 atomic_inc(&rdev
->nr_pending
);
3731 raid_bio
->bi_next
= (void*)rdev
;
3732 align_bi
->bi_bdev
= rdev
->bdev
;
3733 align_bi
->bi_flags
&= ~(1 << BIO_SEG_VALID
);
3734 align_bi
->bi_sector
+= rdev
->data_offset
;
3736 if (!bio_fits_rdev(align_bi
)) {
3737 /* too big in some way */
3739 rdev_dec_pending(rdev
, mddev
);
3743 spin_lock_irq(&conf
->device_lock
);
3744 wait_event_lock_irq(conf
->wait_for_stripe
,
3746 conf
->device_lock
, /* nothing */);
3747 atomic_inc(&conf
->active_aligned_reads
);
3748 spin_unlock_irq(&conf
->device_lock
);
3750 generic_make_request(align_bi
);
3759 /* __get_priority_stripe - get the next stripe to process
3761 * Full stripe writes are allowed to pass preread active stripes up until
3762 * the bypass_threshold is exceeded. In general the bypass_count
3763 * increments when the handle_list is handled before the hold_list; however, it
3764 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
3765 * stripe with in flight i/o. The bypass_count will be reset when the
3766 * head of the hold_list has changed, i.e. the head was promoted to the
3769 static struct stripe_head
*__get_priority_stripe(raid5_conf_t
*conf
)
3771 struct stripe_head
*sh
;
3773 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3775 list_empty(&conf
->handle_list
) ? "empty" : "busy",
3776 list_empty(&conf
->hold_list
) ? "empty" : "busy",
3777 atomic_read(&conf
->pending_full_writes
), conf
->bypass_count
);
3779 if (!list_empty(&conf
->handle_list
)) {
3780 sh
= list_entry(conf
->handle_list
.next
, typeof(*sh
), lru
);
3782 if (list_empty(&conf
->hold_list
))
3783 conf
->bypass_count
= 0;
3784 else if (!test_bit(STRIPE_IO_STARTED
, &sh
->state
)) {
3785 if (conf
->hold_list
.next
== conf
->last_hold
)
3786 conf
->bypass_count
++;
3788 conf
->last_hold
= conf
->hold_list
.next
;
3789 conf
->bypass_count
-= conf
->bypass_threshold
;
3790 if (conf
->bypass_count
< 0)
3791 conf
->bypass_count
= 0;
3794 } else if (!list_empty(&conf
->hold_list
) &&
3795 ((conf
->bypass_threshold
&&
3796 conf
->bypass_count
> conf
->bypass_threshold
) ||
3797 atomic_read(&conf
->pending_full_writes
) == 0)) {
3798 sh
= list_entry(conf
->hold_list
.next
,
3800 conf
->bypass_count
-= conf
->bypass_threshold
;
3801 if (conf
->bypass_count
< 0)
3802 conf
->bypass_count
= 0;
3806 list_del_init(&sh
->lru
);
3807 atomic_inc(&sh
->count
);
3808 BUG_ON(atomic_read(&sh
->count
) != 1);
3812 static int make_request(struct request_queue
*q
, struct bio
* bi
)
3814 mddev_t
*mddev
= q
->queuedata
;
3815 raid5_conf_t
*conf
= mddev
->private;
3817 sector_t new_sector
;
3818 sector_t logical_sector
, last_sector
;
3819 struct stripe_head
*sh
;
3820 const int rw
= bio_data_dir(bi
);
3823 if (unlikely(bio_barrier(bi
))) {
3824 bio_endio(bi
, -EOPNOTSUPP
);
3828 md_write_start(mddev
, bi
);
3830 cpu
= part_stat_lock();
3831 part_stat_inc(cpu
, &mddev
->gendisk
->part0
, ios
[rw
]);
3832 part_stat_add(cpu
, &mddev
->gendisk
->part0
, sectors
[rw
],
3837 mddev
->reshape_position
== MaxSector
&&
3838 chunk_aligned_read(q
,bi
))
3841 logical_sector
= bi
->bi_sector
& ~((sector_t
)STRIPE_SECTORS
-1);
3842 last_sector
= bi
->bi_sector
+ (bi
->bi_size
>>9);
3844 bi
->bi_phys_segments
= 1; /* over-loaded to count active stripes */
3846 for (;logical_sector
< last_sector
; logical_sector
+= STRIPE_SECTORS
) {
3848 int disks
, data_disks
;
3853 disks
= conf
->raid_disks
;
3854 prepare_to_wait(&conf
->wait_for_overlap
, &w
, TASK_UNINTERRUPTIBLE
);
3855 if (unlikely(conf
->reshape_progress
!= MaxSector
)) {
3856 /* spinlock is needed as reshape_progress may be
3857 * 64bit on a 32bit platform, and so it might be
3858 * possible to see a half-updated value
3859 * Ofcourse reshape_progress could change after
3860 * the lock is dropped, so once we get a reference
3861 * to the stripe that we think it is, we will have
3864 spin_lock_irq(&conf
->device_lock
);
3865 if (mddev
->delta_disks
< 0
3866 ? logical_sector
< conf
->reshape_progress
3867 : logical_sector
>= conf
->reshape_progress
) {
3868 disks
= conf
->previous_raid_disks
;
3871 if (mddev
->delta_disks
< 0
3872 ? logical_sector
< conf
->reshape_safe
3873 : logical_sector
>= conf
->reshape_safe
) {
3874 spin_unlock_irq(&conf
->device_lock
);
3879 spin_unlock_irq(&conf
->device_lock
);
3881 data_disks
= disks
- conf
->max_degraded
;
3883 new_sector
= raid5_compute_sector(conf
, logical_sector
,
3886 pr_debug("raid5: make_request, sector %llu logical %llu\n",
3887 (unsigned long long)new_sector
,
3888 (unsigned long long)logical_sector
);
3890 sh
= get_active_stripe(conf
, new_sector
, previous
,
3891 (bi
->bi_rw
&RWA_MASK
), 0);
3893 if (unlikely(previous
)) {
3894 /* expansion might have moved on while waiting for a
3895 * stripe, so we must do the range check again.
3896 * Expansion could still move past after this
3897 * test, but as we are holding a reference to
3898 * 'sh', we know that if that happens,
3899 * STRIPE_EXPANDING will get set and the expansion
3900 * won't proceed until we finish with the stripe.
3903 spin_lock_irq(&conf
->device_lock
);
3904 if (mddev
->delta_disks
< 0
3905 ? logical_sector
>= conf
->reshape_progress
3906 : logical_sector
< conf
->reshape_progress
)
3907 /* mismatch, need to try again */
3909 spin_unlock_irq(&conf
->device_lock
);
3917 if (bio_data_dir(bi
) == WRITE
&&
3918 logical_sector
>= mddev
->suspend_lo
&&
3919 logical_sector
< mddev
->suspend_hi
) {
3921 /* As the suspend_* range is controlled by
3922 * userspace, we want an interruptible
3925 flush_signals(current
);
3926 prepare_to_wait(&conf
->wait_for_overlap
,
3927 &w
, TASK_INTERRUPTIBLE
);
3928 if (logical_sector
>= mddev
->suspend_lo
&&
3929 logical_sector
< mddev
->suspend_hi
)
3934 if (test_bit(STRIPE_EXPANDING
, &sh
->state
) ||
3935 !add_stripe_bio(sh
, bi
, dd_idx
, (bi
->bi_rw
&RW_MASK
))) {
3936 /* Stripe is busy expanding or
3937 * add failed due to overlap. Flush everything
3940 raid5_unplug_device(mddev
->queue
);
3945 finish_wait(&conf
->wait_for_overlap
, &w
);
3946 set_bit(STRIPE_HANDLE
, &sh
->state
);
3947 clear_bit(STRIPE_DELAYED
, &sh
->state
);
3950 /* cannot get stripe for read-ahead, just give-up */
3951 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
3952 finish_wait(&conf
->wait_for_overlap
, &w
);
3957 spin_lock_irq(&conf
->device_lock
);
3958 remaining
= raid5_dec_bi_phys_segments(bi
);
3959 spin_unlock_irq(&conf
->device_lock
);
3960 if (remaining
== 0) {
3963 md_write_end(mddev
);
3970 static sector_t
raid5_size(mddev_t
*mddev
, sector_t sectors
, int raid_disks
);
3972 static sector_t
reshape_request(mddev_t
*mddev
, sector_t sector_nr
, int *skipped
)
3974 /* reshaping is quite different to recovery/resync so it is
3975 * handled quite separately ... here.
3977 * On each call to sync_request, we gather one chunk worth of
3978 * destination stripes and flag them as expanding.
3979 * Then we find all the source stripes and request reads.
3980 * As the reads complete, handle_stripe will copy the data
3981 * into the destination stripe and release that stripe.
3983 raid5_conf_t
*conf
= (raid5_conf_t
*) mddev
->private;
3984 struct stripe_head
*sh
;
3985 sector_t first_sector
, last_sector
;
    int raid_disks = conf->previous_raid_disks;
    int data_disks = raid_disks - conf->max_degraded;
    int new_data_disks = conf->raid_disks - conf->max_degraded;
    int i;
    int dd_idx;
    sector_t writepos, readpos, safepos;
    sector_t stripe_addr;
    int reshape_sectors;
    struct list_head stripes;

    if (sector_nr == 0) {
        /* If restarting in the middle, skip the initial sectors */
        if (mddev->delta_disks < 0 &&
            conf->reshape_progress < raid5_size(mddev, 0, 0)) {
            sector_nr = raid5_size(mddev, 0, 0)
                - conf->reshape_progress;
        } else if (mddev->delta_disks >= 0 &&
                   conf->reshape_progress > 0)
            sector_nr = conf->reshape_progress;
        sector_div(sector_nr, new_data_disks);
        if (sector_nr) {
            mddev->curr_resync_completed = sector_nr;
            sysfs_notify(&mddev->kobj, NULL, "sync_completed");
            *skipped = 1;
            return sector_nr;
        }
    }

    /* We need to process a full chunk at a time.
     * If old and new chunk sizes differ, we need to process the
     * largest of these.
     */
    if (mddev->new_chunk_sectors > mddev->chunk_sectors)
        reshape_sectors = mddev->new_chunk_sectors;
    else
        reshape_sectors = mddev->chunk_sectors;

    /* we update the metadata when there is more than 3Meg
     * in the block range (that is rather arbitrary, should
     * probably be time based) or when the data about to be
     * copied would over-write the source of the data at
     * the front of the range.
     * i.e. one new_stripe along from reshape_progress new_maps
     * to after where reshape_safe old_maps to
     */
    writepos = conf->reshape_progress;
    sector_div(writepos, new_data_disks);
    readpos = conf->reshape_progress;
    sector_div(readpos, data_disks);
    safepos = conf->reshape_safe;
    sector_div(safepos, data_disks);
    if (mddev->delta_disks < 0) {
        writepos -= min_t(sector_t, reshape_sectors, writepos);
        readpos += reshape_sectors;
        safepos += reshape_sectors;
    } else {
        writepos += reshape_sectors;
        readpos -= min_t(sector_t, reshape_sectors, readpos);
        safepos -= min_t(sector_t, reshape_sectors, safepos);
    }

    /* 'writepos' is the most advanced device address we might write.
     * 'readpos' is the least advanced device address we might read.
     * 'safepos' is the least address recorded in the metadata as having
     * been reshaped.
     * If 'readpos' is behind 'writepos', then there is no way that we can
     * ensure safety in the face of a crash - that must be done by userspace
     * making a backup of the data.  So in that case there is no particular
     * rush to update metadata.
     * Otherwise if 'safepos' is behind 'writepos', then we really need to
     * update the metadata to advance 'safepos' to match 'readpos' so that
     * we can be safe in the event of a crash.
     * So we insist on updating metadata if safepos is behind writepos and
     * readpos is beyond writepos.
     * In any case, update the metadata every 10 seconds.
     * Maybe that number should be configurable, but I'm not sure it is
     * worth it.... maybe it could be a multiple of safemode_delay???
     */
    if ((mddev->delta_disks < 0
         ? (safepos > writepos && readpos < writepos)
         : (safepos < writepos && readpos > writepos)) ||
        time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
        /* Cannot proceed until we've updated the superblock... */
        wait_event(conf->wait_for_overlap,
                   atomic_read(&conf->reshape_stripes) == 0);
        mddev->reshape_position = conf->reshape_progress;
        mddev->curr_resync_completed = mddev->curr_resync;
        conf->reshape_checkpoint = jiffies;
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
        md_wakeup_thread(mddev->thread);
        wait_event(mddev->sb_wait, mddev->flags == 0 ||
                   kthread_should_stop());
        spin_lock_irq(&conf->device_lock);
        conf->reshape_safe = mddev->reshape_position;
        spin_unlock_irq(&conf->device_lock);
        wake_up(&conf->wait_for_overlap);
        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
    }

    if (mddev->delta_disks < 0) {
        BUG_ON(conf->reshape_progress == 0);
        stripe_addr = writepos;
        BUG_ON((mddev->dev_sectors &
                ~((sector_t)reshape_sectors - 1))
               - reshape_sectors - stripe_addr
               != sector_nr);
    } else {
        BUG_ON(writepos != sector_nr + reshape_sectors);
        stripe_addr = sector_nr;
    }
    INIT_LIST_HEAD(&stripes);
    for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
        int j;
        int skipped_disk = 0;
        sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
        set_bit(STRIPE_EXPANDING, &sh->state);
        atomic_inc(&conf->reshape_stripes);
        /* If any of this stripe is beyond the end of the old
         * array, then we need to zero those blocks
         */
        for (j = sh->disks; j--; ) {
            sector_t s;
            if (j == sh->pd_idx)
                continue;
            if (conf->level == 6 &&
                j == sh->qd_idx)
                continue;
            s = compute_blocknr(sh, j, 0);
            if (s < raid5_size(mddev, 0, 0)) {
                skipped_disk = 1;
                continue;
            }
            memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
            set_bit(R5_Expanded, &sh->dev[j].flags);
            set_bit(R5_UPTODATE, &sh->dev[j].flags);
        }
        if (!skipped_disk) {
            set_bit(STRIPE_EXPAND_READY, &sh->state);
            set_bit(STRIPE_HANDLE, &sh->state);
        }
        list_add(&sh->lru, &stripes);
    }
    spin_lock_irq(&conf->device_lock);
    if (mddev->delta_disks < 0)
        conf->reshape_progress -= reshape_sectors * new_data_disks;
    else
        conf->reshape_progress += reshape_sectors * new_data_disks;
    spin_unlock_irq(&conf->device_lock);
    /* Ok, those stripes are ready. We can start scheduling
     * reads on the source stripes.
     * The source stripes are determined by mapping the first and last
     * block on the destination stripes.
     */
    first_sector =
        raid5_compute_sector(conf, stripe_addr*(new_data_disks),
                             1, &dd_idx, NULL);
    last_sector =
        raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
                                    * new_data_disks - 1),
                             1, &dd_idx, NULL);
    if (last_sector >= mddev->dev_sectors)
        last_sector = mddev->dev_sectors - 1;
    while (first_sector <= last_sector) {
        sh = get_active_stripe(conf, first_sector, 1, 0, 1);
        set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
        first_sector += STRIPE_SECTORS;
    }
    /* Now that the sources are clearly marked, we can release
     * the destination stripes
     */
    while (!list_empty(&stripes)) {
        sh = list_entry(stripes.next, struct stripe_head, lru);
        list_del_init(&sh->lru);
        release_stripe(sh);
    }
    /* If this takes us to the resync_max point where we have to pause,
     * then we need to write out the superblock.
     */
    sector_nr += reshape_sectors;
    if ((sector_nr - mddev->curr_resync_completed) * 2
        >= mddev->resync_max - mddev->curr_resync_completed) {
        /* Cannot proceed until we've updated the superblock... */
        wait_event(conf->wait_for_overlap,
                   atomic_read(&conf->reshape_stripes) == 0);
        mddev->reshape_position = conf->reshape_progress;
        mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
        conf->reshape_checkpoint = jiffies;
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
        md_wakeup_thread(mddev->thread);
        wait_event(mddev->sb_wait,
                   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
                   || kthread_should_stop());
        spin_lock_irq(&conf->device_lock);
        conf->reshape_safe = mddev->reshape_position;
        spin_unlock_irq(&conf->device_lock);
        wake_up(&conf->wait_for_overlap);
        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
    }
    return reshape_sectors;
}
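/*
 * Illustrative sketch (not part of the driver): the checkpoint decision
 * above reduces to a pure predicate on the three progress marks.  The
 * helper below is a hypothetical userspace-style restatement, assuming
 * only that positions are expressed in device sectors and that 'shrinking'
 * mirrors mddev->delta_disks < 0.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool must_update_metadata(bool shrinking, uint64_t safepos,
 *					 uint64_t writepos, uint64_t readpos,
 *					 bool checkpoint_older_than_10s)
 *	{
 *		// A crash is only unrecoverable without a metadata update when
 *		// safepos lags writepos while readpos is still ahead of writepos
 *		// (the comparisons flip direction when the array is shrinking).
 *		bool unsafe = shrinking
 *			? (safepos > writepos && readpos < writepos)
 *			: (safepos < writepos && readpos > writepos);
 *		return unsafe || checkpoint_older_than_10s;
 *	}
 */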
/* FIXME go_faster isn't used */
static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
    raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
    struct stripe_head *sh;
    sector_t max_sector = mddev->dev_sectors;
    sector_t sync_blocks;
    int still_degraded = 0;
    int i;

    if (sector_nr >= max_sector) {
        /* just being told to finish up .. nothing much to do */
        unplug_slaves(mddev);

        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
            end_reshape(conf);
            return 0;
        }

        if (mddev->curr_resync < max_sector) /* aborted */
            bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
                            &sync_blocks, 1);
        else /* completed sync */
            conf->fullsync = 0;
        bitmap_close_sync(mddev->bitmap);

        return 0;
    }

    /* Allow raid5_quiesce to complete */
    wait_event(conf->wait_for_overlap, conf->quiesce != 2);

    if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
        return reshape_request(mddev, sector_nr, skipped);

    /* No need to check resync_max as we never do more than one
     * stripe, and as resync_max will always be on a chunk boundary,
     * if the check in md_do_sync didn't fire, there is no chance
     * of overstepping resync_max here
     */

    /* if there are too many failed drives and we are trying
     * to resync, then assert that we are finished, because there is
     * nothing we can do.
     */
    if (mddev->degraded >= conf->max_degraded &&
        test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
        sector_t rv = mddev->dev_sectors - sector_nr;
        *skipped = 1;
        return rv;
    }
    if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
        !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
        !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
        /* we can skip this block, and probably more */
        sync_blocks /= STRIPE_SECTORS;
        *skipped = 1;
        return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
    }

    bitmap_cond_end_sync(mddev->bitmap, sector_nr);

    sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
    if (sh == NULL) {
        sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
        /* make sure we don't swamp the stripe cache if someone else
         * is trying to get access
         */
        schedule_timeout_uninterruptible(1);
    }
    /* Need to check if array will still be degraded after recovery/resync
     * We don't need to check the 'failed' flag as when that gets set,
     * recovery aborts.
     */
    for (i = 0; i < conf->raid_disks; i++)
        if (conf->disks[i].rdev == NULL)
            still_degraded = 1;

    bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);

    spin_lock(&sh->lock);
    set_bit(STRIPE_SYNCING, &sh->state);
    clear_bit(STRIPE_INSYNC, &sh->state);
    spin_unlock(&sh->lock);

    /* wait for any blocked device to be handled */
    while (unlikely(!handle_stripe(sh)))
        ;
    release_stripe(sh);

    return STRIPE_SECTORS;
}
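/*
 * Illustrative sketch (not part of the driver): when the bitmap says a run
 * of 'sync_blocks' sectors is already in sync, the code above skips it in
 * whole-stripe units.  A hypothetical standalone restatement, assuming
 * STRIPE_SECTORS is 8 (4K stripes made of 512-byte sectors):
 *
 *	#include <stdint.h>
 *
 *	#define EXAMPLE_STRIPE_SECTORS 8
 *
 *	static uint64_t sectors_to_skip(uint64_t sync_blocks)
 *	{
 *		// Round down to a whole number of stripes so the resync
 *		// position always stays stripe aligned.
 *		return (sync_blocks / EXAMPLE_STRIPE_SECTORS) *
 *			EXAMPLE_STRIPE_SECTORS;
 *	}
 */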
static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
{
    /* We may not be able to submit a whole bio at once as there
     * may not be enough stripe_heads available.
     * We cannot pre-allocate enough stripe_heads as we may need
     * more than exist in the cache (if we allow ever larger chunks).
     * So we do one stripe head at a time and record in
     * ->bi_hw_segments how many have been done.
     *
     * We *know* that this entire raid_bio is in one chunk, so
     * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
     */
    struct stripe_head *sh;
    int dd_idx;
    sector_t sector, logical_sector, last_sector;
    int scnt = 0;
    int remaining;
    int handled = 0;

    logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
    sector = raid5_compute_sector(conf, logical_sector,
                                  0, &dd_idx, NULL);
    last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);

    for (; logical_sector < last_sector;
         logical_sector += STRIPE_SECTORS,
             sector += STRIPE_SECTORS,
             scnt++) {

        if (scnt < raid5_bi_hw_segments(raid_bio))
            /* already done this stripe */
            continue;

        sh = get_active_stripe(conf, sector, 0, 1, 0);

        if (!sh) {
            /* failed to get a stripe - must wait */
            raid5_set_bi_hw_segments(raid_bio, scnt);
            conf->retry_read_aligned = raid_bio;
            return handled;
        }

        set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
        if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
            release_stripe(sh);
            raid5_set_bi_hw_segments(raid_bio, scnt);
            conf->retry_read_aligned = raid_bio;
            return handled;
        }

        handle_stripe(sh);
        release_stripe(sh);
        handled++;
    }
    spin_lock_irq(&conf->device_lock);
    remaining = raid5_dec_bi_phys_segments(raid_bio);
    spin_unlock_irq(&conf->device_lock);
    if (remaining == 0)
        bio_endio(raid_bio, 0);
    if (atomic_dec_and_test(&conf->active_aligned_reads))
        wake_up(&conf->wait_for_stripe);
    return handled;
}
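/*
 * Illustrative sketch (not part of the driver): the retry logic above only
 * works because the number of stripes already completed for the bio is
 * remembered before giving up, so the next attempt can fast-forward past
 * them.  A hypothetical userspace-style model of that resume loop:
 *
 *	struct retry_state { unsigned done; };   // mirrors the hw_segments count
 *
 *	static int process_from(struct retry_state *st, unsigned nr_stripes,
 *				int (*try_one)(unsigned idx))
 *	{
 *		unsigned i;
 *		for (i = 0; i < nr_stripes; i++) {
 *			if (i < st->done)
 *				continue;        // already handled on a previous pass
 *			if (!try_one(i)) {
 *				st->done = i;    // remember progress, retry later
 *				return 0;
 *			}
 *		}
 *		return 1;                        // whole bio finished
 *	}
 */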
#ifdef CONFIG_MULTICORE_RAID456
static void __process_stripe(void *param, async_cookie_t cookie)
{
    struct stripe_head *sh = param;

    handle_stripe(sh);
    release_stripe(sh);
}

static void process_stripe(struct stripe_head *sh, struct list_head *domain)
{
    async_schedule_domain(__process_stripe, sh, domain);
}

static void synchronize_stripe_processing(struct list_head *domain)
{
    async_synchronize_full_domain(domain);
}
#else
static void process_stripe(struct stripe_head *sh, struct list_head *domain)
{
    handle_stripe(sh);
    release_stripe(sh);
}

static void synchronize_stripe_processing(struct list_head *domain)
{
}
#endif
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d(mddev_t *mddev)
{
    struct stripe_head *sh;
    raid5_conf_t *conf = mddev->private;
    int handled;
    LIST_HEAD(raid_domain);

    pr_debug("+++ raid5d active\n");

    md_check_recovery(mddev);

    handled = 0;
    spin_lock_irq(&conf->device_lock);
    while (1) {
        struct bio *bio;

        if (conf->seq_flush != conf->seq_write) {
            int seq = conf->seq_flush;
            spin_unlock_irq(&conf->device_lock);
            bitmap_unplug(mddev->bitmap);
            spin_lock_irq(&conf->device_lock);
            conf->seq_write = seq;
            activate_bit_delay(conf);
        }

        while ((bio = remove_bio_from_retry(conf))) {
            int ok;
            spin_unlock_irq(&conf->device_lock);
            ok = retry_aligned_read(conf, bio);
            spin_lock_irq(&conf->device_lock);
            if (!ok)
                break;
            handled++;
        }

        sh = __get_priority_stripe(conf);

        if (!sh)
            break;
        spin_unlock_irq(&conf->device_lock);

        handled++;
        process_stripe(sh, &raid_domain);

        spin_lock_irq(&conf->device_lock);
    }
    pr_debug("%d stripes handled\n", handled);

    spin_unlock_irq(&conf->device_lock);

    synchronize_stripe_processing(&raid_domain);
    async_tx_issue_pending_all();
    unplug_slaves(mddev);

    pr_debug("--- raid5d inactive\n");
}
static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
    raid5_conf_t *conf = mddev->private;
    if (conf)
        return sprintf(page, "%d\n", conf->max_nr_stripes);
    else
        return 0;
}

static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
    raid5_conf_t *conf = mddev->private;
    unsigned long new;
    int err;

    if (len >= PAGE_SIZE)
        return -EINVAL;
    if (!conf)
        return -ENODEV;

    if (strict_strtoul(page, 10, &new))
        return -EINVAL;
    if (new <= 16 || new > 32768)
        return -EINVAL;
    while (new < conf->max_nr_stripes) {
        if (drop_one_stripe(conf))
            conf->max_nr_stripes--;
        else
            break;
    }
    err = md_allow_write(mddev);
    if (err)
        return err;
    while (new > conf->max_nr_stripes) {
        if (grow_one_stripe(conf))
            conf->max_nr_stripes++;
        else
            break;
    }
    return len;
}

static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
                                raid5_show_stripe_cache_size,
                                raid5_store_stripe_cache_size);
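/*
 * Illustrative sketch (not part of the driver): the store handler above
 * converges max_nr_stripes one stripe_head at a time.  A hypothetical
 * standalone restatement of that convergence loop, assuming grow()/shrink()
 * return nonzero on success:
 *
 *	static int resize_cache(int *cur, int target,
 *				int (*grow)(void), int (*shrink)(void))
 *	{
 *		while (target < *cur)
 *			if (shrink()) (*cur)--; else break;
 *		while (target > *cur)
 *			if (grow()) (*cur)++; else break;
 *		return *cur == target;   // may stop early if memory runs out
 *	}
 */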
static ssize_t
raid5_show_preread_threshold(mddev_t *mddev, char *page)
{
    raid5_conf_t *conf = mddev->private;
    if (conf)
        return sprintf(page, "%d\n", conf->bypass_threshold);
    else
        return 0;
}

static ssize_t
raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
{
    raid5_conf_t *conf = mddev->private;
    unsigned long new;
    if (len >= PAGE_SIZE)
        return -EINVAL;
    if (!conf)
        return -ENODEV;

    if (strict_strtoul(page, 10, &new))
        return -EINVAL;
    if (new > conf->max_nr_stripes)
        return -EINVAL;
    conf->bypass_threshold = new;
    return len;
}

static struct md_sysfs_entry
raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
                                        S_IRUGO | S_IWUSR,
                                        raid5_show_preread_threshold,
                                        raid5_store_preread_threshold);
static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
    raid5_conf_t *conf = mddev->private;
    if (conf)
        return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
    else
        return 0;
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] = {
    &raid5_stripecache_size.attr,
    &raid5_stripecache_active.attr,
    &raid5_preread_bypass_threshold.attr,
    NULL,
};
static struct attribute_group raid5_attrs_group = {
    .name = NULL,
    .attrs = raid5_attrs,
};
static sector_t
raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
    raid5_conf_t *conf = mddev->private;

    if (!sectors)
        sectors = mddev->dev_sectors;
    if (!raid_disks) {
        /* size is defined by the smallest of previous and new size */
        if (conf->raid_disks < conf->previous_raid_disks)
            raid_disks = conf->raid_disks;
        else
            raid_disks = conf->previous_raid_disks;
    }

    sectors &= ~((sector_t)mddev->chunk_sectors - 1);
    sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
    return sectors * (raid_disks - conf->max_degraded);
}
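/*
 * Illustrative worked example (not part of the driver): the capacity
 * returned above is per-device sectors, rounded down to both chunk sizes,
 * times the number of data disks.  Assuming 6 devices of 1953525168
 * sectors, old and new chunk size both 1024 sectors, and RAID-6
 * (max_degraded == 2):
 *
 *	sectors  = 1953525168 & ~(1024 - 1) = 1953524736
 *	capacity = 1953524736 * (6 - 2)     = 7814098944 sectors (~3.6 TiB)
 */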
static void raid5_free_percpu(raid5_conf_t *conf)
{
    struct raid5_percpu *percpu;
    unsigned long cpu;

    if (!conf->percpu)
        return;

    get_online_cpus();
    for_each_possible_cpu(cpu) {
        percpu = per_cpu_ptr(conf->percpu, cpu);
        safe_put_page(percpu->spare_page);
        kfree(percpu->scribble);
    }
#ifdef CONFIG_HOTPLUG_CPU
    unregister_cpu_notifier(&conf->cpu_notify);
#endif
    put_online_cpus();

    free_percpu(conf->percpu);
}

static void free_conf(raid5_conf_t *conf)
{
    shrink_stripes(conf);
    raid5_free_percpu(conf);
    kfree(conf->disks);
    kfree(conf->stripe_hashtbl);
    kfree(conf);
}
#ifdef CONFIG_HOTPLUG_CPU
static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
                              void *hcpu)
{
    raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
    long cpu = (long)hcpu;
    struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);

    switch (action) {
    case CPU_UP_PREPARE:
    case CPU_UP_PREPARE_FROZEN:
        if (conf->level == 6 && !percpu->spare_page)
            percpu->spare_page = alloc_page(GFP_KERNEL);
        if (!percpu->scribble)
            percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);

        if (!percpu->scribble ||
            (conf->level == 6 && !percpu->spare_page)) {
            safe_put_page(percpu->spare_page);
            kfree(percpu->scribble);
            pr_err("%s: failed memory allocation for cpu%ld\n",
                   __func__, cpu);
            return NOTIFY_BAD;
        }
        break;
    case CPU_DEAD:
    case CPU_DEAD_FROZEN:
        safe_put_page(percpu->spare_page);
        kfree(percpu->scribble);
        percpu->spare_page = NULL;
        percpu->scribble = NULL;
        break;
    default:
        break;
    }
    return NOTIFY_OK;
}
#endif
static int raid5_alloc_percpu(raid5_conf_t *conf)
{
    unsigned long cpu;
    struct page *spare_page;
    struct raid5_percpu *allcpus;
    void *scribble;
    int err;

    allcpus = alloc_percpu(struct raid5_percpu);
    if (!allcpus)
        return -ENOMEM;
    conf->percpu = allcpus;

    get_online_cpus();
    err = 0;
    for_each_present_cpu(cpu) {
        if (conf->level == 6) {
            spare_page = alloc_page(GFP_KERNEL);
            if (!spare_page) {
                err = -ENOMEM;
                break;
            }
            per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
        }
        scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL);
        if (!scribble) {
            err = -ENOMEM;
            break;
        }
        per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
    }
#ifdef CONFIG_HOTPLUG_CPU
    conf->cpu_notify.notifier_call = raid456_cpu_notify;
    conf->cpu_notify.priority = 0;
    if (err == 0)
        err = register_cpu_notifier(&conf->cpu_notify);
#endif
    put_online_cpus();

    return err;
}
static raid5_conf_t *setup_conf(mddev_t *mddev)
{
    raid5_conf_t *conf;
    int raid_disk, memory;
    mdk_rdev_t *rdev;
    struct disk_info *disk;

    if (mddev->new_level != 5
        && mddev->new_level != 4
        && mddev->new_level != 6) {
        printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
               mdname(mddev), mddev->new_level);
        return ERR_PTR(-EIO);
    }
    if ((mddev->new_level == 5
         && !algorithm_valid_raid5(mddev->new_layout)) ||
        (mddev->new_level == 6
         && !algorithm_valid_raid6(mddev->new_layout))) {
        printk(KERN_ERR "raid5: %s: layout %d not supported\n",
               mdname(mddev), mddev->new_layout);
        return ERR_PTR(-EIO);
    }
    if (mddev->new_level == 6 && mddev->raid_disks < 4) {
        printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
               mdname(mddev), mddev->raid_disks);
        return ERR_PTR(-EINVAL);
    }

    if (!mddev->new_chunk_sectors ||
        (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
        !is_power_of_2(mddev->new_chunk_sectors)) {
        printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
               mddev->new_chunk_sectors << 9, mdname(mddev));
        return ERR_PTR(-EINVAL);
    }

    conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
    if (conf == NULL)
        goto abort;

    conf->raid_disks = mddev->raid_disks;
    conf->scribble_len = scribble_len(conf->raid_disks);
    if (mddev->reshape_position == MaxSector)
        conf->previous_raid_disks = mddev->raid_disks;
    else
        conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;

    conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
                          GFP_KERNEL);
    if (!conf->disks)
        goto abort;

    conf->mddev = mddev;

    if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
        goto abort;

    conf->level = mddev->new_level;
    if (raid5_alloc_percpu(conf) != 0)
        goto abort;

    spin_lock_init(&conf->device_lock);
    init_waitqueue_head(&conf->wait_for_stripe);
    init_waitqueue_head(&conf->wait_for_overlap);
    INIT_LIST_HEAD(&conf->handle_list);
    INIT_LIST_HEAD(&conf->hold_list);
    INIT_LIST_HEAD(&conf->delayed_list);
    INIT_LIST_HEAD(&conf->bitmap_list);
    INIT_LIST_HEAD(&conf->inactive_list);
    atomic_set(&conf->active_stripes, 0);
    atomic_set(&conf->preread_active_stripes, 0);
    atomic_set(&conf->active_aligned_reads, 0);
    conf->bypass_threshold = BYPASS_THRESHOLD;

    pr_debug("raid5: run(%s) called.\n", mdname(mddev));

    list_for_each_entry(rdev, &mddev->disks, same_set) {
        raid_disk = rdev->raid_disk;
        if (raid_disk >= conf->raid_disks
            || raid_disk < 0)
            continue;
        disk = conf->disks + raid_disk;

        disk->rdev = rdev;

        if (test_bit(In_sync, &rdev->flags)) {
            char b[BDEVNAME_SIZE];
            printk(KERN_INFO "raid5: device %s operational as raid"
                   " disk %d\n", bdevname(rdev->bdev, b),
                   raid_disk);
        } else
            /* Cannot rely on bitmap to complete recovery */
            conf->fullsync = 1;
    }

    conf->chunk_sectors = mddev->new_chunk_sectors;
    conf->level = mddev->new_level;
    if (conf->level == 6)
        conf->max_degraded = 2;
    else
        conf->max_degraded = 1;
    conf->algorithm = mddev->new_layout;
    conf->max_nr_stripes = NR_STRIPES;
    conf->reshape_progress = mddev->reshape_position;
    if (conf->reshape_progress != MaxSector) {
        conf->prev_chunk_sectors = mddev->chunk_sectors;
        conf->prev_algo = mddev->layout;
    }

    memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
             conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
    if (grow_stripes(conf, conf->max_nr_stripes)) {
        printk(KERN_ERR
               "raid5: couldn't allocate %dkB for buffers\n", memory);
        goto abort;
    } else
        printk(KERN_INFO "raid5: allocated %dkB for %s\n",
               memory, mdname(mddev));

    conf->thread = md_register_thread(raid5d, mddev, "%s_raid5");
    if (!conf->thread) {
        printk(KERN_ERR
               "raid5: couldn't allocate thread for %s\n",
               mdname(mddev));
        goto abort;
    }

    return conf;

 abort:
    if (conf) {
        free_conf(conf);
        return ERR_PTR(-EIO);
    } else
        return ERR_PTR(-ENOMEM);
}
static int run(mddev_t *mddev)
{
    raid5_conf_t *conf;
    int working_disks = 0, chunk_size;
    mdk_rdev_t *rdev;

    if (mddev->recovery_cp != MaxSector)
        printk(KERN_NOTICE "raid5: %s is not clean"
               " -- starting background reconstruction\n",
               mdname(mddev));
    if (mddev->reshape_position != MaxSector) {
        /* Check that we can continue the reshape.
         * Currently only disks can change, it must
         * increase, and we must be past the point where
         * a stripe over-writes itself
         */
        sector_t here_new, here_old;
        int old_disks;
        int max_degraded = (mddev->level == 6 ? 2 : 1);

        if (mddev->new_level != mddev->level) {
            printk(KERN_ERR "raid5: %s: unsupported reshape "
                   "required - aborting.\n",
                   mdname(mddev));
            return -EINVAL;
        }
        old_disks = mddev->raid_disks - mddev->delta_disks;
        /* reshape_position must be on a new-stripe boundary, and one
         * further up in new geometry must map after here in old
         * geometry.
         */
        here_new = mddev->reshape_position;
        if (sector_div(here_new, mddev->new_chunk_sectors *
                       (mddev->raid_disks - max_degraded))) {
            printk(KERN_ERR "raid5: reshape_position not "
                   "on a stripe boundary\n");
            return -EINVAL;
        }
        /* here_new is the stripe we will write to */
        here_old = mddev->reshape_position;
        sector_div(here_old, mddev->chunk_sectors *
                   (old_disks-max_degraded));
        /* here_old is the first stripe that we might need to read
         * from */
        if (mddev->delta_disks == 0) {
            /* We cannot be sure it is safe to start an in-place
             * reshape.  It is only safe if user-space is monitoring
             * and taking constant backups.
             * mdadm always starts a situation like this in
             * readonly mode so it can take control before
             * allowing any writes.  So just check for that.
             */
            if ((here_new * mddev->new_chunk_sectors !=
                 here_old * mddev->chunk_sectors) ||
                mddev->ro == 0) {
                printk(KERN_ERR "raid5: in-place reshape must be started"
                       " in read-only mode - aborting\n");
                return -EINVAL;
            }
        } else if (mddev->delta_disks < 0
                   ? (here_new * mddev->new_chunk_sectors <=
                      here_old * mddev->chunk_sectors)
                   : (here_new * mddev->new_chunk_sectors >=
                      here_old * mddev->chunk_sectors)) {
            /* Reading from the same stripe as writing to - bad */
            printk(KERN_ERR "raid5: reshape_position too early for "
                   "auto-recovery - aborting.\n");
            return -EINVAL;
        }
        printk(KERN_INFO "raid5: reshape will continue\n");
        /* OK, we should be able to continue; */
    } else {
        BUG_ON(mddev->level != mddev->new_level);
        BUG_ON(mddev->layout != mddev->new_layout);
        BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
        BUG_ON(mddev->delta_disks != 0);
    }

    if (mddev->private == NULL)
        conf = setup_conf(mddev);
    else
        conf = mddev->private;

    if (IS_ERR(conf))
        return PTR_ERR(conf);

    mddev->thread = conf->thread;
    conf->thread = NULL;
    mddev->private = conf;

    /*
     * 0 for a fully functional array, 1 or 2 for a degraded array.
     */
    list_for_each_entry(rdev, &mddev->disks, same_set)
        if (rdev->raid_disk >= 0 &&
            test_bit(In_sync, &rdev->flags))
            working_disks++;

    mddev->degraded = conf->raid_disks - working_disks;

    if (mddev->degraded > conf->max_degraded) {
        printk(KERN_ERR "raid5: not enough operational devices for %s"
               " (%d/%d failed)\n",
               mdname(mddev), mddev->degraded, conf->raid_disks);
        goto abort;
    }

    /* device size must be a multiple of chunk size */
    mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
    mddev->resync_max_sectors = mddev->dev_sectors;

    if (mddev->degraded > 0 &&
        mddev->recovery_cp != MaxSector) {
        if (mddev->ok_start_degraded)
            printk(KERN_WARNING
                   "raid5: starting dirty degraded array: %s"
                   "- data corruption possible.\n",
                   mdname(mddev));
        else {
            printk(KERN_ERR
                   "raid5: cannot start dirty degraded array for %s\n",
                   mdname(mddev));
            goto abort;
        }
    }

    if (mddev->degraded == 0)
        printk("raid5: raid level %d set %s active with %d out of %d"
               " devices, algorithm %d\n", conf->level, mdname(mddev),
               mddev->raid_disks-mddev->degraded, mddev->raid_disks,
               mddev->new_layout);
    else
        printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
               " out of %d devices, algorithm %d\n", conf->level,
               mdname(mddev), mddev->raid_disks - mddev->degraded,
               mddev->raid_disks, mddev->new_layout);

    print_raid5_conf(conf);

    if (conf->reshape_progress != MaxSector) {
        printk("...ok start reshape thread\n");
        conf->reshape_safe = conf->reshape_progress;
        atomic_set(&conf->reshape_stripes, 0);
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
        mddev->sync_thread = md_register_thread(md_do_sync, mddev,
                                                "%s_reshape");
    }

    /* read-ahead size must cover two whole stripes, which is
     * 2 * (datadisks) * chunksize
     */
    {
        int data_disks = conf->previous_raid_disks - conf->max_degraded;
        int stripe = data_disks *
            ((mddev->chunk_sectors << 9) / PAGE_SIZE);
        if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
            mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
    }

    /* Ok, everything is just fine now */
    if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
        printk(KERN_WARNING
               "raid5: failed to create sysfs attributes for %s\n",
               mdname(mddev));

    mddev->queue->queue_lock = &conf->device_lock;

    mddev->queue->unplug_fn = raid5_unplug_device;
    mddev->queue->backing_dev_info.congested_data = mddev;
    mddev->queue->backing_dev_info.congested_fn = raid5_congested;

    md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));

    blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
    chunk_size = mddev->chunk_sectors << 9;
    blk_queue_io_min(mddev->queue, chunk_size);
    blk_queue_io_opt(mddev->queue, chunk_size *
                     (conf->raid_disks - conf->max_degraded));

    list_for_each_entry(rdev, &mddev->disks, same_set)
        disk_stack_limits(mddev->gendisk, rdev->bdev,
                          rdev->data_offset << 9);

    return 0;
abort:
    md_unregister_thread(mddev->thread);
    mddev->thread = NULL;
    if (conf) {
        print_raid5_conf(conf);
        free_conf(conf);
    }
    mddev->private = NULL;
    printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
    return -EIO;
}
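/*
 * Illustrative worked example (not part of the driver): the read-ahead
 * target set above is "two full stripes" expressed in pages.  Assuming
 * 4K pages, a 512K chunk and a 6-device RAID-5 (5 data disks):
 *
 *	stripe_pages = data_disks * chunk_bytes / PAGE_SIZE
 *	             = 5 * 524288 / 4096 = 640
 *	ra_pages     = max(ra_pages, 2 * 640) = 1280   (i.e. 5 MiB)
 */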
static int stop(mddev_t *mddev)
{
    raid5_conf_t *conf = (raid5_conf_t *) mddev->private;

    md_unregister_thread(mddev->thread);
    mddev->thread = NULL;
    mddev->queue->backing_dev_info.congested_fn = NULL;
    blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
    sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
    free_conf(conf);
    mddev->private = NULL;
    return 0;
}
static void print_sh(struct seq_file *seq, struct stripe_head *sh)
{
    int i;

    seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
               (unsigned long long)sh->sector, sh->pd_idx, sh->state);
    seq_printf(seq, "sh %llu, count %d.\n",
               (unsigned long long)sh->sector, atomic_read(&sh->count));
    seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
    for (i = 0; i < sh->disks; i++) {
        seq_printf(seq, "(cache%d: %p %ld) ",
                   i, sh->dev[i].page, sh->dev[i].flags);
    }
    seq_printf(seq, "\n");
}
static void printall(struct seq_file *seq, raid5_conf_t *conf)
{
    struct stripe_head *sh;
    struct hlist_node *hn;
    int i;

    spin_lock_irq(&conf->device_lock);
    for (i = 0; i < NR_HASH; i++) {
        hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
            if (sh->raid_conf != conf)
                continue;
            print_sh(seq, sh);
        }
    }
    spin_unlock_irq(&conf->device_lock);
}
static void status(struct seq_file *seq, mddev_t *mddev)
{
    raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
    int i;

    seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
               mddev->chunk_sectors / 2, mddev->layout);
    seq_printf(seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
    for (i = 0; i < conf->raid_disks; i++)
        seq_printf(seq, "%s",
                   conf->disks[i].rdev &&
                   test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
    seq_printf(seq, "]");
    seq_printf(seq, "\n");
    printall(seq, conf);
}
static void print_raid5_conf (raid5_conf_t *conf)
{
    int i;
    struct disk_info *tmp;

    printk("RAID5 conf printout:\n");
    if (!conf) {
        printk("(conf==NULL)\n");
        return;
    }
    printk(" --- rd:%d wd:%d\n", conf->raid_disks,
           conf->raid_disks - conf->mddev->degraded);

    for (i = 0; i < conf->raid_disks; i++) {
        char b[BDEVNAME_SIZE];
        tmp = conf->disks + i;
        if (tmp->rdev)
            printk(" disk %d, o:%d, dev:%s\n",
                   i, !test_bit(Faulty, &tmp->rdev->flags),
                   bdevname(tmp->rdev->bdev, b));
    }
}
static int raid5_spare_active(mddev_t *mddev)
{
    int i;
    raid5_conf_t *conf = mddev->private;
    struct disk_info *tmp;

    for (i = 0; i < conf->raid_disks; i++) {
        tmp = conf->disks + i;
        if (tmp->rdev
            && !test_bit(Faulty, &tmp->rdev->flags)
            && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
            unsigned long flags;
            spin_lock_irqsave(&conf->device_lock, flags);
            mddev->degraded--;
            spin_unlock_irqrestore(&conf->device_lock, flags);
        }
    }
    print_raid5_conf(conf);
    return 0;
}
static int raid5_remove_disk(mddev_t *mddev, int number)
{
    raid5_conf_t *conf = mddev->private;
    int err = 0;
    mdk_rdev_t *rdev;
    struct disk_info *p = conf->disks + number;

    print_raid5_conf(conf);
    rdev = p->rdev;
    if (rdev) {
        if (number >= conf->raid_disks &&
            conf->reshape_progress == MaxSector)
            clear_bit(In_sync, &rdev->flags);

        if (test_bit(In_sync, &rdev->flags) ||
            atomic_read(&rdev->nr_pending)) {
            err = -EBUSY;
            goto abort;
        }
        /* Only remove non-faulty devices if recovery
         * is not possible.
         */
        if (!test_bit(Faulty, &rdev->flags) &&
            mddev->degraded <= conf->max_degraded &&
            number < conf->raid_disks) {
            err = -EBUSY;
            goto abort;
        }
        p->rdev = NULL;
        synchronize_rcu();
        if (atomic_read(&rdev->nr_pending)) {
            /* lost the race, try later */
            err = -EBUSY;
            p->rdev = rdev;
        }
    }
abort:

    print_raid5_conf(conf);
    return err;
}
static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
    raid5_conf_t *conf = mddev->private;
    int err = -EEXIST;
    int disk;
    struct disk_info *p;
    int first = 0;
    int last = conf->raid_disks - 1;

    if (mddev->degraded > conf->max_degraded)
        /* no point adding a device */
        return -EINVAL;

    if (rdev->raid_disk >= 0)
        first = last = rdev->raid_disk;

    /*
     * find the disk ... but prefer rdev->saved_raid_disk
     * if possible.
     */
    if (rdev->saved_raid_disk >= 0 &&
        rdev->saved_raid_disk >= first &&
        conf->disks[rdev->saved_raid_disk].rdev == NULL)
        disk = rdev->saved_raid_disk;
    else
        disk = first;
    for ( ; disk <= last; disk++)
        if ((p = conf->disks + disk)->rdev == NULL) {
            clear_bit(In_sync, &rdev->flags);
            rdev->raid_disk = disk;
            err = 0;
            if (rdev->saved_raid_disk != disk)
                conf->fullsync = 1;
            rcu_assign_pointer(p->rdev, rdev);
            break;
        }
    print_raid5_conf(conf);
    return err;
}
static int raid5_resize(mddev_t *mddev, sector_t sectors)
{
    /* no resync is happening, and there is enough space
     * on all devices, so we can resize.
     * We need to make sure resync covers any new space.
     * If the array is shrinking we should possibly wait until
     * any io in the removed space completes, but it hardly seems
     * worth it.
     */
    sectors &= ~((sector_t)mddev->chunk_sectors - 1);
    md_set_array_sectors(mddev, raid5_size(mddev, sectors,
                                           mddev->raid_disks));
    if (mddev->array_sectors >
        raid5_size(mddev, sectors, mddev->raid_disks))
        return -EINVAL;
    set_capacity(mddev->gendisk, mddev->array_sectors);
    mddev->changed = 1;
    revalidate_disk(mddev->gendisk);
    if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
        mddev->recovery_cp = mddev->dev_sectors;
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
    }
    mddev->dev_sectors = sectors;
    mddev->resync_max_sectors = sectors;
    return 0;
}
static int check_stripe_cache(mddev_t *mddev)
{
    /* Can only proceed if there are plenty of stripe_heads.
     * We need a minimum of one full stripe, and for sensible progress
     * it is best to have about 4 times that.
     * If we require 4 times, then the default 256 4K stripe_heads will
     * allow for chunk sizes up to 256K, which is probably OK.
     * If the chunk size is greater, user-space should request more
     * stripe_heads first.
     */
    raid5_conf_t *conf = mddev->private;
    if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
        > conf->max_nr_stripes ||
        ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
        > conf->max_nr_stripes) {
        printk(KERN_WARNING "raid5: reshape: not enough stripes.  Needed %lu\n",
               ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
                / STRIPE_SIZE) * 4);
        return 0;
    }
    return 1;
}
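/*
 * Illustrative sketch (not part of the driver): the "4 times one full
 * stripe" requirement above translates directly into a minimum
 * stripe_cache_size.  A hypothetical helper, assuming 4K STRIPE_SIZE:
 *
 *	static unsigned long stripes_needed(unsigned int chunk_sectors)
 *	{
 *		// chunk_sectors is in 512-byte sectors; each stripe_head
 *		// covers one 4K page per device.
 *		return ((unsigned long)chunk_sectors << 9) / 4096 * 4;
 *	}
 *
 * For a 512K chunk this gives 512 stripe_heads, so the default cache of
 * 256 would have to be raised before a reshape is allowed.
 */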
static int check_reshape(mddev_t *mddev)
{
    raid5_conf_t *conf = mddev->private;

    if (mddev->delta_disks == 0 &&
        mddev->new_layout == mddev->layout &&
        mddev->new_chunk_sectors == mddev->chunk_sectors)
        return 0; /* nothing to do */
    if (mddev->bitmap)
        /* Cannot grow a bitmap yet */
        return -EBUSY;
    if (mddev->degraded > conf->max_degraded)
        return -EINVAL;
    if (mddev->delta_disks < 0) {
        /* We might be able to shrink, but the devices must
         * be made bigger first.
         * For raid6, 4 is the minimum size.
         * Otherwise 2 is the minimum
         */
        int min = 2;
        if (mddev->level == 6)
            min = 4;
        if (mddev->raid_disks + mddev->delta_disks < min)
            return -EINVAL;
    }

    if (!check_stripe_cache(mddev))
        return -ENOSPC;

    return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
}
static int raid5_start_reshape(mddev_t *mddev)
{
    raid5_conf_t *conf = mddev->private;
    mdk_rdev_t *rdev;
    int spares = 0;
    int added_devices = 0;
    unsigned long flags;

    if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
        return -EBUSY;

    if (!check_stripe_cache(mddev))
        return -ENOSPC;

    list_for_each_entry(rdev, &mddev->disks, same_set)
        if (rdev->raid_disk < 0 &&
            !test_bit(Faulty, &rdev->flags))
            spares++;

    if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
        /* Not enough devices even to make a degraded array
         * of that size
         */
        return -EINVAL;

    /* Refuse to reduce size of the array.  Any reductions in
     * array size must be through explicit setting of array_size
     * attribute.
     */
    if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
        < mddev->array_sectors) {
        printk(KERN_ERR "md: %s: array size must be reduced "
               "before number of disks\n", mdname(mddev));
        return -EINVAL;
    }

    atomic_set(&conf->reshape_stripes, 0);
    spin_lock_irq(&conf->device_lock);
    conf->previous_raid_disks = conf->raid_disks;
    conf->raid_disks += mddev->delta_disks;
    conf->prev_chunk_sectors = conf->chunk_sectors;
    conf->chunk_sectors = mddev->new_chunk_sectors;
    conf->prev_algo = conf->algorithm;
    conf->algorithm = mddev->new_layout;
    if (mddev->delta_disks < 0)
        conf->reshape_progress = raid5_size(mddev, 0, 0);
    else
        conf->reshape_progress = 0;
    conf->reshape_safe = conf->reshape_progress;
    conf->generation++;
    spin_unlock_irq(&conf->device_lock);

    /* Add some new drives, as many as will fit.
     * We know there are enough to make the newly sized array work.
     */
    list_for_each_entry(rdev, &mddev->disks, same_set)
        if (rdev->raid_disk < 0 &&
            !test_bit(Faulty, &rdev->flags)) {
            if (raid5_add_disk(mddev, rdev) == 0) {
                char nm[20];
                set_bit(In_sync, &rdev->flags);
                added_devices++;
                rdev->recovery_offset = 0;
                sprintf(nm, "rd%d", rdev->raid_disk);
                if (sysfs_create_link(&mddev->kobj,
                                      &rdev->kobj, nm))
                    printk(KERN_WARNING
                           "raid5: failed to create "
                           " link %s for %s\n",
                           nm, mdname(mddev));
            } else
                break;
        }

    if (mddev->delta_disks > 0) {
        spin_lock_irqsave(&conf->device_lock, flags);
        mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
            - added_devices;
        spin_unlock_irqrestore(&conf->device_lock, flags);
    }
    mddev->raid_disks = conf->raid_disks;
    mddev->reshape_position = conf->reshape_progress;
    set_bit(MD_CHANGE_DEVS, &mddev->flags);

    clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
    clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
    set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
    set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
    mddev->sync_thread = md_register_thread(md_do_sync, mddev,
                                            "%s_reshape");
    if (!mddev->sync_thread) {
        mddev->recovery = 0;
        spin_lock_irq(&conf->device_lock);
        mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
        conf->reshape_progress = MaxSector;
        spin_unlock_irq(&conf->device_lock);
        return -EAGAIN;
    }
    conf->reshape_checkpoint = jiffies;
    md_wakeup_thread(mddev->sync_thread);
    md_new_event(mddev);
    return 0;
}
/* This is called from the reshape thread and should make any
 * changes needed in 'conf'
 */
static void end_reshape(raid5_conf_t *conf)
{

    if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {

        spin_lock_irq(&conf->device_lock);
        conf->previous_raid_disks = conf->raid_disks;
        conf->reshape_progress = MaxSector;
        spin_unlock_irq(&conf->device_lock);
        wake_up(&conf->wait_for_overlap);

        /* read-ahead size must cover two whole stripes, which is
         * 2 * (datadisks) * chunksize
         */
        {
            int data_disks = conf->raid_disks - conf->max_degraded;
            int stripe = data_disks * ((conf->chunk_sectors << 9)
                                       / PAGE_SIZE);
            if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
        }
    }
}
/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(mddev_t *mddev)
{
    raid5_conf_t *conf = mddev->private;

    if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

        if (mddev->delta_disks > 0) {
            md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
            set_capacity(mddev->gendisk, mddev->array_sectors);
            mddev->changed = 1;
            revalidate_disk(mddev->gendisk);
        } else {
            int d;
            mddev->degraded = conf->raid_disks;
            for (d = 0; d < conf->raid_disks; d++)
                if (conf->disks[d].rdev &&
                    test_bit(In_sync,
                             &conf->disks[d].rdev->flags))
                    mddev->degraded--;
            for (d = conf->raid_disks;
                 d < conf->raid_disks - mddev->delta_disks;
                 d++) {
                mdk_rdev_t *rdev = conf->disks[d].rdev;
                if (rdev && raid5_remove_disk(mddev, d) == 0) {
                    char nm[20];
                    sprintf(nm, "rd%d", rdev->raid_disk);
                    sysfs_remove_link(&mddev->kobj, nm);
                    rdev->raid_disk = -1;
                }
            }
        }
        mddev->layout = conf->algorithm;
        mddev->chunk_sectors = conf->chunk_sectors;
        mddev->reshape_position = MaxSector;
        mddev->delta_disks = 0;
    }
}
static void raid5_quiesce(mddev_t *mddev, int state)
{
    raid5_conf_t *conf = mddev->private;

    switch (state) {
    case 2: /* resume for a suspend */
        wake_up(&conf->wait_for_overlap);
        break;

    case 1: /* stop all writes */
        spin_lock_irq(&conf->device_lock);
        /* '2' tells resync/reshape to pause so that all
         * active stripes can drain
         */
        conf->quiesce = 2;
        wait_event_lock_irq(conf->wait_for_stripe,
                            atomic_read(&conf->active_stripes) == 0 &&
                            atomic_read(&conf->active_aligned_reads) == 0,
                            conf->device_lock, /* nothing */);
        conf->quiesce = 1;
        spin_unlock_irq(&conf->device_lock);
        /* allow reshape to continue */
        wake_up(&conf->wait_for_overlap);
        break;

    case 0: /* re-enable writes */
        spin_lock_irq(&conf->device_lock);
        conf->quiesce = 0;
        wake_up(&conf->wait_for_stripe);
        wake_up(&conf->wait_for_overlap);
        spin_unlock_irq(&conf->device_lock);
        break;
    }
}
static void *raid5_takeover_raid1(mddev_t *mddev)
{
    int chunksect;

    if (mddev->raid_disks != 2 ||
        mddev->degraded > 1)
        return ERR_PTR(-EINVAL);

    /* Should check if there are write-behind devices? */

    chunksect = 64*2; /* 64K by default */

    /* The array must be an exact multiple of chunksize */
    while (chunksect && (mddev->array_sectors & (chunksect-1)))
        chunksect >>= 1;

    if ((chunksect<<9) < STRIPE_SIZE)
        /* array size does not allow a suitable chunk size */
        return ERR_PTR(-EINVAL);

    mddev->new_level = 5;
    mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
    mddev->new_chunk_sectors = chunksect;

    return setup_conf(mddev);
}
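/*
 * Illustrative sketch (not part of the driver): the chunk-size search above
 * starts at 64K and halves until the chunk divides the array size exactly.
 * A hypothetical standalone restatement (sizes in 512-byte sectors):
 *
 *	static int pick_chunk_sectors(unsigned long long array_sectors)
 *	{
 *		int chunksect = 64 * 2;               // 64K expressed in sectors
 *		while (chunksect && (array_sectors & (chunksect - 1)))
 *			chunksect >>= 1;
 *		return chunksect;                     // 0 if nothing fits
 *	}
 *
 * An array of 1000 sectors would end up with chunksect == 8 (4K), the
 * smallest value still large enough to cover one stripe page.
 */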
static void *raid5_takeover_raid6(mddev_t *mddev)
{
    int new_layout;

    switch (mddev->layout) {
    case ALGORITHM_LEFT_ASYMMETRIC_6:
        new_layout = ALGORITHM_LEFT_ASYMMETRIC;
        break;
    case ALGORITHM_RIGHT_ASYMMETRIC_6:
        new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
        break;
    case ALGORITHM_LEFT_SYMMETRIC_6:
        new_layout = ALGORITHM_LEFT_SYMMETRIC;
        break;
    case ALGORITHM_RIGHT_SYMMETRIC_6:
        new_layout = ALGORITHM_RIGHT_SYMMETRIC;
        break;
    case ALGORITHM_PARITY_0_6:
        new_layout = ALGORITHM_PARITY_0;
        break;
    case ALGORITHM_PARITY_N:
        new_layout = ALGORITHM_PARITY_N;
        break;
    default:
        return ERR_PTR(-EINVAL);
    }
    mddev->new_level = 5;
    mddev->new_layout = new_layout;
    mddev->delta_disks = -1;
    mddev->raid_disks -= 1;
    return setup_conf(mddev);
}
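/*
 * Illustrative note (not part of the driver): every *_6 layout is the
 * corresponding RAID-5 layout with a dedicated Q drive appended, which is
 * why the takeover above only has to strip the "_6" suffix and drop one
 * device (delta_disks = -1).  The same mapping, sketched as data:
 *
 *	static const struct { int from_6; int to_raid5; } layout_map[] = {
 *		{ ALGORITHM_LEFT_ASYMMETRIC_6,  ALGORITHM_LEFT_ASYMMETRIC  },
 *		{ ALGORITHM_RIGHT_ASYMMETRIC_6, ALGORITHM_RIGHT_ASYMMETRIC },
 *		{ ALGORITHM_LEFT_SYMMETRIC_6,   ALGORITHM_LEFT_SYMMETRIC   },
 *		{ ALGORITHM_RIGHT_SYMMETRIC_6,  ALGORITHM_RIGHT_SYMMETRIC  },
 *		{ ALGORITHM_PARITY_0_6,         ALGORITHM_PARITY_0         },
 *		{ ALGORITHM_PARITY_N,           ALGORITHM_PARITY_N         },
 *	};
 */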
static int raid5_check_reshape(mddev_t *mddev)
{
    /* For a 2-drive array, the layout and chunk size can be changed
     * immediately as no restriping is needed.
     * For larger arrays we record the new value - after validation
     * to be used by a reshape pass.
     */
    raid5_conf_t *conf = mddev->private;
    int new_chunk = mddev->new_chunk_sectors;

    if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
        return -EINVAL;
    if (new_chunk > 0) {
        if (!is_power_of_2(new_chunk))
            return -EINVAL;
        if (new_chunk < (PAGE_SIZE>>9))
            return -EINVAL;
        if (mddev->array_sectors & (new_chunk-1))
            /* not factor of array size */
            return -EINVAL;
    }

    /* They look valid */

    if (mddev->raid_disks == 2) {
        /* can make the change immediately */
        if (mddev->new_layout >= 0) {
            conf->algorithm = mddev->new_layout;
            mddev->layout = mddev->new_layout;
        }
        if (new_chunk > 0) {
            conf->chunk_sectors = new_chunk;
            mddev->chunk_sectors = new_chunk;
        }
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
        md_wakeup_thread(mddev->thread);
    }
    return check_reshape(mddev);
}
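/*
 * Illustrative sketch (not part of the driver): the chunk validation above
 * accepts a new chunk only if it is a power of two, at least one page, and
 * a factor of the array size.  A hypothetical standalone predicate,
 * assuming 4K pages:
 *
 *	static int chunk_is_valid(int new_chunk, unsigned long long array_sectors)
 *	{
 *		if (new_chunk <= 0)
 *			return 1;                         // "no change" is always fine
 *		if (new_chunk & (new_chunk - 1))
 *			return 0;                         // not a power of two
 *		if (new_chunk < (4096 >> 9))
 *			return 0;                         // smaller than a page
 *		return (array_sectors & (new_chunk - 1)) == 0;
 *	}
 */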
static int raid6_check_reshape(mddev_t *mddev)
{
    int new_chunk = mddev->new_chunk_sectors;

    if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
        return -EINVAL;
    if (new_chunk > 0) {
        if (!is_power_of_2(new_chunk))
            return -EINVAL;
        if (new_chunk < (PAGE_SIZE >> 9))
            return -EINVAL;
        if (mddev->array_sectors & (new_chunk-1))
            /* not factor of array size */
            return -EINVAL;
    }

    /* They look valid */
    return check_reshape(mddev);
}
static void *raid5_takeover(mddev_t *mddev)
{
    /* raid5 can take over:
     *  raid0 - if all devices are the same - make it a raid4 layout
     *  raid1 - if there are two drives.  We need to know the chunk size
     *  raid4 - trivial - just use a raid4 layout.
     *  raid6 - Providing it is a *_6 layout
     */

    if (mddev->level == 1)
        return raid5_takeover_raid1(mddev);
    if (mddev->level == 4) {
        mddev->new_layout = ALGORITHM_PARITY_N;
        mddev->new_level = 5;
        return setup_conf(mddev);
    }
    if (mddev->level == 6)
        return raid5_takeover_raid6(mddev);

    return ERR_PTR(-EINVAL);
}
static struct mdk_personality raid5_personality;

static void *raid6_takeover(mddev_t *mddev)
{
    /* Currently can only take over a raid5.  We map the
     * personality to an equivalent raid6 personality
     * with the Q block at the end.
     */
    int new_layout;

    if (mddev->pers != &raid5_personality)
        return ERR_PTR(-EINVAL);
    if (mddev->degraded > 1)
        return ERR_PTR(-EINVAL);
    if (mddev->raid_disks > 253)
        return ERR_PTR(-EINVAL);
    if (mddev->raid_disks < 3)
        return ERR_PTR(-EINVAL);

    switch (mddev->layout) {
    case ALGORITHM_LEFT_ASYMMETRIC:
        new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
        break;
    case ALGORITHM_RIGHT_ASYMMETRIC:
        new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
        break;
    case ALGORITHM_LEFT_SYMMETRIC:
        new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
        break;
    case ALGORITHM_RIGHT_SYMMETRIC:
        new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
        break;
    case ALGORITHM_PARITY_0:
        new_layout = ALGORITHM_PARITY_0_6;
        break;
    case ALGORITHM_PARITY_N:
        new_layout = ALGORITHM_PARITY_N;
        break;
    default:
        return ERR_PTR(-EINVAL);
    }
    mddev->new_level = 6;
    mddev->new_layout = new_layout;
    mddev->delta_disks = 1;
    mddev->raid_disks += 1;
    return setup_conf(mddev);
}
static struct mdk_personality raid6_personality =
{
    .name            = "raid6",
    .level           = 6,
    .owner           = THIS_MODULE,
    .make_request    = make_request,
    .run             = run,
    .stop            = stop,
    .status          = status,
    .error_handler   = error,
    .hot_add_disk    = raid5_add_disk,
    .hot_remove_disk = raid5_remove_disk,
    .spare_active    = raid5_spare_active,
    .sync_request    = sync_request,
    .resize          = raid5_resize,
    .size            = raid5_size,
    .check_reshape   = raid6_check_reshape,
    .start_reshape   = raid5_start_reshape,
    .finish_reshape  = raid5_finish_reshape,
    .quiesce         = raid5_quiesce,
    .takeover        = raid6_takeover,
};
static struct mdk_personality raid5_personality =
{
    .name            = "raid5",
    .level           = 5,
    .owner           = THIS_MODULE,
    .make_request    = make_request,
    .run             = run,
    .stop            = stop,
    .status          = status,
    .error_handler   = error,
    .hot_add_disk    = raid5_add_disk,
    .hot_remove_disk = raid5_remove_disk,
    .spare_active    = raid5_spare_active,
    .sync_request    = sync_request,
    .resize          = raid5_resize,
    .size            = raid5_size,
    .check_reshape   = raid5_check_reshape,
    .start_reshape   = raid5_start_reshape,
    .finish_reshape  = raid5_finish_reshape,
    .quiesce         = raid5_quiesce,
    .takeover        = raid5_takeover,
};

static struct mdk_personality raid4_personality =
{
    .name            = "raid4",
    .level           = 4,
    .owner           = THIS_MODULE,
    .make_request    = make_request,
    .run             = run,
    .stop            = stop,
    .status          = status,
    .error_handler   = error,
    .hot_add_disk    = raid5_add_disk,
    .hot_remove_disk = raid5_remove_disk,
    .spare_active    = raid5_spare_active,
    .sync_request    = sync_request,
    .resize          = raid5_resize,
    .size            = raid5_size,
    .check_reshape   = raid5_check_reshape,
    .start_reshape   = raid5_start_reshape,
    .finish_reshape  = raid5_finish_reshape,
    .quiesce         = raid5_quiesce,
};
static int __init raid5_init(void)
{
    register_md_personality(&raid6_personality);
    register_md_personality(&raid5_personality);
    register_md_personality(&raid4_personality);
    return 0;
}

static void raid5_exit(void)
{
    unregister_md_personality(&raid6_personality);
    unregister_md_personality(&raid5_personality);
    unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");