1 /*
2 * raid5.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar
5 * Copyright (C) 2002, 2003 H. Peter Anvin
6 *
7 * RAID-4/5/6 management functions.
8 * Thanks to Penguin Computing for making the RAID-6 development possible
9 * by donating a test server!
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21 /*
22 * BITMAP UNPLUGGING:
23 *
24 * The sequencing for updating the bitmap reliably is a little
25 * subtle (and I got it wrong the first time) so it deserves some
26 * explanation.
27 *
28 * We group bitmap updates into batches. Each batch has a number.
29 * We may write out several batches at once, but that isn't very important.
30 * conf->seq_write is the number of the last batch successfully written.
31 * conf->seq_flush is the number of the last batch that was closed to
32 * new additions.
33 * When we discover that we will need to write to any block in a stripe
34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
35 * the number of the batch it will be in. This is seq_flush+1.
36 * When we are ready to do a write, if that batch hasn't been written yet,
37 * we plug the array and queue the stripe for later.
 38  * When an unplug happens, we increment seq_flush, thus closing the current
 39  * batch.
 40  * When we notice that seq_flush > seq_write, we write out all pending updates
 41  * to the bitmap, and advance seq_write to where seq_flush was.
42 * This may occasionally write a bit out twice, but is sure never to
43 * miss any bits.
44 */
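
/*
 * The sequencing above can be condensed into a small standalone sketch.
 * This is illustrative userspace C, not driver code; the struct and
 * helper names below are hypothetical:
 */
#if 0
struct batch_state {
	unsigned int seq_flush;	/* last batch closed to new additions */
	unsigned int seq_write;	/* last batch successfully written    */
};

/* a bitmap update made now joins the still-open batch */
static unsigned int record_update(struct batch_state *b)
{
	return b->seq_flush + 1;	/* stored as sh->bm_seq */
}

/* an unplug closes the current batch */
static void on_unplug(struct batch_state *b)
{
	b->seq_flush++;
}

/* write out everything that is closed but not yet on disk */
static void flush_pending(struct batch_state *b)
{
	if (b->seq_flush > b->seq_write) {
		/* ... write all pending bitmap updates here ... */
		b->seq_write = b->seq_flush;
	}
}

/* a queued stripe may proceed once its batch has been written */
static int batch_written(const struct batch_state *b, unsigned int bm_seq)
{
	return (int)(bm_seq - b->seq_write) <= 0;
}
#endif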
45
46 #include <linux/blkdev.h>
47 #include <linux/kthread.h>
48 #include <linux/raid/pq.h>
49 #include <linux/async_tx.h>
50 #include <linux/module.h>
51 #include <linux/async.h>
52 #include <linux/seq_file.h>
53 #include <linux/cpu.h>
54 #include <linux/slab.h>
55 #include <linux/ratelimit.h>
56 #include "md.h"
57 #include "raid5.h"
58 #include "raid0.h"
59 #include "bitmap.h"
60
61 /*
62 * Stripe cache
63 */
64
65 #define NR_STRIPES 256
66 #define STRIPE_SIZE PAGE_SIZE
67 #define STRIPE_SHIFT (PAGE_SHIFT - 9)
68 #define STRIPE_SECTORS (STRIPE_SIZE>>9)
69 #define IO_THRESHOLD 1
70 #define BYPASS_THRESHOLD 1
71 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
72 #define HASH_MASK (NR_HASH - 1)
73
74 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
75 {
76 int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
77 return &conf->stripe_hashtbl[hash];
78 }
79
 80 /* bios attached to a stripe+device for I/O are linked together in bi_sector
 81  * order without overlap.  There may be several bios per stripe+device, and
82 * a bio could span several devices.
83 * When walking this list for a particular stripe+device, we must never proceed
84 * beyond a bio that extends past this device, as the next bio might no longer
85 * be valid.
86 * This function is used to determine the 'next' bio in the list, given the sector
 87  * of the current stripe+device.
88 */
89 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
90 {
91 int sectors = bio->bi_size >> 9;
92 if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
93 return bio->bi_next;
94 else
95 return NULL;
96 }
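
/*
 * The canonical walk over such a list (the same pattern ops_run_biofill()
 * and ops_complete_biofill() use below) is:
 *
 *	while (bio && bio->bi_sector < dev->sector + STRIPE_SECTORS) {
 *		... handle this bio ...
 *		bio = r5_next_bio(bio, dev->sector);
 *	}
 */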
97
98 /*
99 * We maintain a biased count of active stripes in the bottom 16 bits of
100 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
101 */
102 static inline int raid5_bi_processed_stripes(struct bio *bio)
103 {
104 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
105 return (atomic_read(segments) >> 16) & 0xffff;
106 }
107
108 static inline int raid5_dec_bi_active_stripes(struct bio *bio)
109 {
110 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
111 return atomic_sub_return(1, segments) & 0xffff;
112 }
113
114 static inline void raid5_inc_bi_active_stripes(struct bio *bio)
115 {
116 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
117 atomic_inc(segments);
118 }
119
120 static inline void raid5_set_bi_processed_stripes(struct bio *bio,
121 unsigned int cnt)
122 {
123 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
124 int old, new;
125
126 do {
127 old = atomic_read(segments);
128 new = (old & 0xffff) | (cnt << 16);
129 } while (atomic_cmpxchg(segments, old, new) != old);
130 }
131
132 static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
133 {
134 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
135 atomic_set(segments, cnt);
136 }
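
/*
 * A minimal sketch of the 16/16-bit split the helpers above maintain
 * inside bi_phys_segments (illustrative userspace C, with a plain
 * unsigned int standing in for the atomic):
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned int v = 0;

	v += 3;				/* three raid5_inc_bi_active_stripes() */
	v = (v & 0xffff) | (5u << 16);	/* raid5_set_bi_processed_stripes(5)   */

	assert((v & 0xffff) == 3);		/* active stripes (low 16 bits)  */
	assert(((v >> 16) & 0xffff) == 5);	/* processed stripes (high bits) */
	return 0;
}
#endif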
137
138 /* Find first data disk in a raid6 stripe */
139 static inline int raid6_d0(struct stripe_head *sh)
140 {
141 if (sh->ddf_layout)
142 		/* ddf always starts from the first device */
143 return 0;
144 /* md starts just after Q block */
145 if (sh->qd_idx == sh->disks - 1)
146 return 0;
147 else
148 return sh->qd_idx + 1;
149 }
150 static inline int raid6_next_disk(int disk, int raid_disks)
151 {
152 disk++;
153 return (disk < raid_disks) ? disk : 0;
154 }
155
156 /* When walking through the disks in a raid6 stripe, starting at raid6_d0,
157  * we need to map each disk to a 'slot', where the data disks are slot
158  * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
159  * is raid_disks-1.  This helper does that mapping.
160 */
161 static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
162 int *count, int syndrome_disks)
163 {
164 int slot = *count;
165
166 if (sh->ddf_layout)
167 (*count)++;
168 if (idx == sh->pd_idx)
169 return syndrome_disks;
170 if (idx == sh->qd_idx)
171 return syndrome_disks + 1;
172 if (!sh->ddf_layout)
173 (*count)++;
174 return slot;
175 }
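
/*
 * Worked example (md layout, so ddf_layout == 0): with disks = 5,
 * pd_idx = 3 and qd_idx = 4, raid6_d0() returns 0 and syndrome_disks = 3.
 * Walking idx = 0, 1, 2, 3, 4 then yields slots 0, 1, 2 for the data
 * disks, slot 3 (= syndrome_disks) for P and slot 4 (= syndrome_disks + 1)
 * for Q.
 */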
176
177 static void return_io(struct bio *return_bi)
178 {
179 struct bio *bi = return_bi;
180 while (bi) {
181
182 return_bi = bi->bi_next;
183 bi->bi_next = NULL;
184 bi->bi_size = 0;
185 bio_endio(bi, 0);
186 bi = return_bi;
187 }
188 }
189
190 static void print_raid5_conf (struct r5conf *conf);
191
192 static int stripe_operations_active(struct stripe_head *sh)
193 {
194 return sh->check_state || sh->reconstruct_state ||
195 test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
196 test_bit(STRIPE_COMPUTE_RUN, &sh->state);
197 }
198
199 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
200 {
201 BUG_ON(!list_empty(&sh->lru));
202 BUG_ON(atomic_read(&conf->active_stripes)==0);
203 if (test_bit(STRIPE_HANDLE, &sh->state)) {
204 if (test_bit(STRIPE_DELAYED, &sh->state) &&
205 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
206 list_add_tail(&sh->lru, &conf->delayed_list);
207 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
208 sh->bm_seq - conf->seq_write > 0)
209 list_add_tail(&sh->lru, &conf->bitmap_list);
210 else {
211 clear_bit(STRIPE_DELAYED, &sh->state);
212 clear_bit(STRIPE_BIT_DELAY, &sh->state);
213 list_add_tail(&sh->lru, &conf->handle_list);
214 }
215 md_wakeup_thread(conf->mddev->thread);
216 } else {
217 BUG_ON(stripe_operations_active(sh));
218 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
219 if (atomic_dec_return(&conf->preread_active_stripes)
220 < IO_THRESHOLD)
221 md_wakeup_thread(conf->mddev->thread);
222 atomic_dec(&conf->active_stripes);
223 if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
224 list_add_tail(&sh->lru, &conf->inactive_list);
225 wake_up(&conf->wait_for_stripe);
226 if (conf->retry_read_aligned)
227 md_wakeup_thread(conf->mddev->thread);
228 }
229 }
230 }
231
232 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
233 {
234 if (atomic_dec_and_test(&sh->count))
235 do_release_stripe(conf, sh);
236 }
237
238 static void release_stripe(struct stripe_head *sh)
239 {
240 struct r5conf *conf = sh->raid_conf;
241 unsigned long flags;
242
243 local_irq_save(flags);
244 if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
245 do_release_stripe(conf, sh);
246 spin_unlock(&conf->device_lock);
247 }
248 local_irq_restore(flags);
249 }
250
251 static inline void remove_hash(struct stripe_head *sh)
252 {
253 pr_debug("remove_hash(), stripe %llu\n",
254 (unsigned long long)sh->sector);
255
256 hlist_del_init(&sh->hash);
257 }
258
259 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
260 {
261 struct hlist_head *hp = stripe_hash(conf, sh->sector);
262
263 pr_debug("insert_hash(), stripe %llu\n",
264 (unsigned long long)sh->sector);
265
266 hlist_add_head(&sh->hash, hp);
267 }
268
269
270 /* find an idle stripe, make sure it is unhashed, and return it. */
271 static struct stripe_head *get_free_stripe(struct r5conf *conf)
272 {
273 struct stripe_head *sh = NULL;
274 struct list_head *first;
275
276 if (list_empty(&conf->inactive_list))
277 goto out;
278 first = conf->inactive_list.next;
279 sh = list_entry(first, struct stripe_head, lru);
280 list_del_init(first);
281 remove_hash(sh);
282 atomic_inc(&conf->active_stripes);
283 out:
284 return sh;
285 }
286
287 static void shrink_buffers(struct stripe_head *sh)
288 {
289 struct page *p;
290 int i;
291 int num = sh->raid_conf->pool_size;
292
293 for (i = 0; i < num ; i++) {
294 p = sh->dev[i].page;
295 if (!p)
296 continue;
297 sh->dev[i].page = NULL;
298 put_page(p);
299 }
300 }
301
302 static int grow_buffers(struct stripe_head *sh)
303 {
304 int i;
305 int num = sh->raid_conf->pool_size;
306
307 for (i = 0; i < num; i++) {
308 struct page *page;
309
310 if (!(page = alloc_page(GFP_KERNEL))) {
311 return 1;
312 }
313 sh->dev[i].page = page;
314 }
315 return 0;
316 }
317
318 static void raid5_build_block(struct stripe_head *sh, int i, int previous);
319 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
320 struct stripe_head *sh);
321
322 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
323 {
324 struct r5conf *conf = sh->raid_conf;
325 int i;
326
327 BUG_ON(atomic_read(&sh->count) != 0);
328 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
329 BUG_ON(stripe_operations_active(sh));
330
331 pr_debug("init_stripe called, stripe %llu\n",
332 (unsigned long long)sh->sector);
333
334 remove_hash(sh);
335
336 sh->generation = conf->generation - previous;
337 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
338 sh->sector = sector;
339 stripe_set_idx(sector, conf, previous, sh);
340 sh->state = 0;
341
342
343 for (i = sh->disks; i--; ) {
344 struct r5dev *dev = &sh->dev[i];
345
346 if (dev->toread || dev->read || dev->towrite || dev->written ||
347 test_bit(R5_LOCKED, &dev->flags)) {
348 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
349 (unsigned long long)sh->sector, i, dev->toread,
350 dev->read, dev->towrite, dev->written,
351 test_bit(R5_LOCKED, &dev->flags));
352 WARN_ON(1);
353 }
354 dev->flags = 0;
355 raid5_build_block(sh, i, previous);
356 }
357 insert_hash(conf, sh);
358 }
359
360 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
361 short generation)
362 {
363 struct stripe_head *sh;
364 struct hlist_node *hn;
365
366 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
367 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
368 if (sh->sector == sector && sh->generation == generation)
369 return sh;
370 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
371 return NULL;
372 }
373
374 /*
375 * Need to check if array has failed when deciding whether to:
376 * - start an array
377 * - remove non-faulty devices
378 * - add a spare
379 * - allow a reshape
380 * This determination is simple when no reshape is happening.
381 * However if there is a reshape, we need to carefully check
382 * both the before and after sections.
383 * This is because some failed devices may only affect one
384 * of the two sections, and some non-in_sync devices may
385  * be in_sync in the section most affected by failed devices.
386 */
387 static int calc_degraded(struct r5conf *conf)
388 {
389 int degraded, degraded2;
390 int i;
391
392 rcu_read_lock();
393 degraded = 0;
394 for (i = 0; i < conf->previous_raid_disks; i++) {
395 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
396 if (!rdev || test_bit(Faulty, &rdev->flags))
397 degraded++;
398 else if (test_bit(In_sync, &rdev->flags))
399 ;
400 else
401 /* not in-sync or faulty.
402 * If the reshape increases the number of devices,
403 * this is being recovered by the reshape, so
404 * this 'previous' section is not in_sync.
405 * If the number of devices is being reduced however,
406 * the device can only be part of the array if
407 * we are reverting a reshape, so this section will
408 * be in-sync.
409 */
410 if (conf->raid_disks >= conf->previous_raid_disks)
411 degraded++;
412 }
413 rcu_read_unlock();
414 if (conf->raid_disks == conf->previous_raid_disks)
415 return degraded;
416 rcu_read_lock();
417 degraded2 = 0;
418 for (i = 0; i < conf->raid_disks; i++) {
419 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
420 if (!rdev || test_bit(Faulty, &rdev->flags))
421 degraded2++;
422 else if (test_bit(In_sync, &rdev->flags))
423 ;
424 else
425 /* not in-sync or faulty.
426 * If reshape increases the number of devices, this
427 * section has already been recovered, else it
428 * almost certainly hasn't.
429 */
430 if (conf->raid_disks <= conf->previous_raid_disks)
431 degraded2++;
432 }
433 rcu_read_unlock();
434 if (degraded2 > degraded)
435 return degraded2;
436 return degraded;
437 }
438
439 static int has_failed(struct r5conf *conf)
440 {
441 int degraded;
442
443 if (conf->mddev->reshape_position == MaxSector)
444 return conf->mddev->degraded > conf->max_degraded;
445
446 degraded = calc_degraded(conf);
447 if (degraded > conf->max_degraded)
448 return 1;
449 return 0;
450 }
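
/*
 * Worked example: growing 4 -> 6 disks with max_degraded = 1.  A device
 * that is present but not yet in_sync counts against the 'previous'
 * section's tally (raid_disks >= previous_raid_disks, so that section is
 * being recovered by the reshape itself) but not against the 'new'
 * section's; has_failed() reports failure only if the larger of the two
 * tallies exceeds max_degraded.
 */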
451
452 static struct stripe_head *
453 get_active_stripe(struct r5conf *conf, sector_t sector,
454 int previous, int noblock, int noquiesce)
455 {
456 struct stripe_head *sh;
457
458 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
459
460 spin_lock_irq(&conf->device_lock);
461
462 do {
463 wait_event_lock_irq(conf->wait_for_stripe,
464 conf->quiesce == 0 || noquiesce,
465 conf->device_lock, /* nothing */);
466 sh = __find_stripe(conf, sector, conf->generation - previous);
467 if (!sh) {
468 if (!conf->inactive_blocked)
469 sh = get_free_stripe(conf);
470 if (noblock && sh == NULL)
471 break;
472 if (!sh) {
473 conf->inactive_blocked = 1;
474 wait_event_lock_irq(conf->wait_for_stripe,
475 !list_empty(&conf->inactive_list) &&
476 (atomic_read(&conf->active_stripes)
477 < (conf->max_nr_stripes *3/4)
478 || !conf->inactive_blocked),
479 conf->device_lock,
480 );
481 conf->inactive_blocked = 0;
482 } else
483 init_stripe(sh, sector, previous);
484 } else {
485 if (atomic_read(&sh->count)) {
486 BUG_ON(!list_empty(&sh->lru)
487 && !test_bit(STRIPE_EXPANDING, &sh->state)
488 && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
489 } else {
490 if (!test_bit(STRIPE_HANDLE, &sh->state))
491 atomic_inc(&conf->active_stripes);
492 if (list_empty(&sh->lru) &&
493 !test_bit(STRIPE_EXPANDING, &sh->state))
494 BUG();
495 list_del_init(&sh->lru);
496 }
497 }
498 } while (sh == NULL);
499
500 if (sh)
501 atomic_inc(&sh->count);
502
503 spin_unlock_irq(&conf->device_lock);
504 return sh;
505 }
506
507 /* Determine if 'data_offset' or 'new_data_offset' should be used
508 * in this stripe_head.
509 */
510 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
511 {
512 sector_t progress = conf->reshape_progress;
513 /* Need a memory barrier to make sure we see the value
514 * of conf->generation, or ->data_offset that was set before
515 * reshape_progress was updated.
516 */
517 smp_rmb();
518 if (progress == MaxSector)
519 return 0;
520 if (sh->generation == conf->generation - 1)
521 return 0;
522 /* We are in a reshape, and this is a new-generation stripe,
523 * so use new_data_offset.
524 */
525 return 1;
526 }
527
528 static void
529 raid5_end_read_request(struct bio *bi, int error);
530 static void
531 raid5_end_write_request(struct bio *bi, int error);
532
533 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
534 {
535 struct r5conf *conf = sh->raid_conf;
536 int i, disks = sh->disks;
537
538 might_sleep();
539
540 for (i = disks; i--; ) {
541 int rw;
542 int replace_only = 0;
543 struct bio *bi, *rbi;
544 struct md_rdev *rdev, *rrdev = NULL;
545 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
546 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
547 rw = WRITE_FUA;
548 else
549 rw = WRITE;
550 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
551 rw = READ;
552 else if (test_and_clear_bit(R5_WantReplace,
553 &sh->dev[i].flags)) {
554 rw = WRITE;
555 replace_only = 1;
556 } else
557 continue;
558 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
559 rw |= REQ_SYNC;
560
561 bi = &sh->dev[i].req;
562 rbi = &sh->dev[i].rreq; /* For writing to replacement */
563
564 bi->bi_rw = rw;
565 rbi->bi_rw = rw;
566 if (rw & WRITE) {
567 bi->bi_end_io = raid5_end_write_request;
568 rbi->bi_end_io = raid5_end_write_request;
569 } else
570 bi->bi_end_io = raid5_end_read_request;
571
572 rcu_read_lock();
573 rrdev = rcu_dereference(conf->disks[i].replacement);
574 smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
575 rdev = rcu_dereference(conf->disks[i].rdev);
576 if (!rdev) {
577 rdev = rrdev;
578 rrdev = NULL;
579 }
580 if (rw & WRITE) {
581 if (replace_only)
582 rdev = NULL;
583 if (rdev == rrdev)
584 /* We raced and saw duplicates */
585 rrdev = NULL;
586 } else {
587 if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
588 rdev = rrdev;
589 rrdev = NULL;
590 }
591
592 if (rdev && test_bit(Faulty, &rdev->flags))
593 rdev = NULL;
594 if (rdev)
595 atomic_inc(&rdev->nr_pending);
596 if (rrdev && test_bit(Faulty, &rrdev->flags))
597 rrdev = NULL;
598 if (rrdev)
599 atomic_inc(&rrdev->nr_pending);
600 rcu_read_unlock();
601
602 		/* We have already checked bad blocks for reads.  Now we
603 		 * need to check for writes.  We never accept write errors
604 		 * on the replacement, so we don't need to check rrdev.
605 */
606 while ((rw & WRITE) && rdev &&
607 test_bit(WriteErrorSeen, &rdev->flags)) {
608 sector_t first_bad;
609 int bad_sectors;
610 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
611 &first_bad, &bad_sectors);
612 if (!bad)
613 break;
614
615 if (bad < 0) {
616 set_bit(BlockedBadBlocks, &rdev->flags);
617 if (!conf->mddev->external &&
618 conf->mddev->flags) {
619 /* It is very unlikely, but we might
620 * still need to write out the
621 * bad block log - better give it
622 					 * a chance */
623 md_check_recovery(conf->mddev);
624 }
625 /*
626 * Because md_wait_for_blocked_rdev
627 * will dec nr_pending, we must
628 * increment it first.
629 */
630 atomic_inc(&rdev->nr_pending);
631 md_wait_for_blocked_rdev(rdev, conf->mddev);
632 } else {
633 /* Acknowledged bad block - skip the write */
634 rdev_dec_pending(rdev, conf->mddev);
635 rdev = NULL;
636 }
637 }
638
639 if (rdev) {
640 if (s->syncing || s->expanding || s->expanded
641 || s->replacing)
642 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
643
644 set_bit(STRIPE_IO_STARTED, &sh->state);
645
646 bi->bi_bdev = rdev->bdev;
647 pr_debug("%s: for %llu schedule op %ld on disc %d\n",
648 __func__, (unsigned long long)sh->sector,
649 bi->bi_rw, i);
650 atomic_inc(&sh->count);
651 if (use_new_offset(conf, sh))
652 bi->bi_sector = (sh->sector
653 + rdev->new_data_offset);
654 else
655 bi->bi_sector = (sh->sector
656 + rdev->data_offset);
657 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
658 bi->bi_rw |= REQ_FLUSH;
659
660 bi->bi_flags = 1 << BIO_UPTODATE;
661 bi->bi_idx = 0;
662 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
663 bi->bi_io_vec[0].bv_offset = 0;
664 bi->bi_size = STRIPE_SIZE;
665 bi->bi_next = NULL;
666 if (rrdev)
667 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
668 generic_make_request(bi);
669 }
670 if (rrdev) {
671 if (s->syncing || s->expanding || s->expanded
672 || s->replacing)
673 md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
674
675 set_bit(STRIPE_IO_STARTED, &sh->state);
676
677 rbi->bi_bdev = rrdev->bdev;
678 pr_debug("%s: for %llu schedule op %ld on "
679 "replacement disc %d\n",
680 __func__, (unsigned long long)sh->sector,
681 rbi->bi_rw, i);
682 atomic_inc(&sh->count);
683 if (use_new_offset(conf, sh))
684 rbi->bi_sector = (sh->sector
685 + rrdev->new_data_offset);
686 else
687 rbi->bi_sector = (sh->sector
688 + rrdev->data_offset);
689 rbi->bi_flags = 1 << BIO_UPTODATE;
690 rbi->bi_idx = 0;
691 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
692 rbi->bi_io_vec[0].bv_offset = 0;
693 rbi->bi_size = STRIPE_SIZE;
694 rbi->bi_next = NULL;
695 generic_make_request(rbi);
696 }
697 if (!rdev && !rrdev) {
698 if (rw & WRITE)
699 set_bit(STRIPE_DEGRADED, &sh->state);
700 pr_debug("skip op %ld on disc %d for sector %llu\n",
701 bi->bi_rw, i, (unsigned long long)sh->sector);
702 clear_bit(R5_LOCKED, &sh->dev[i].flags);
703 set_bit(STRIPE_HANDLE, &sh->state);
704 }
705 }
706 }
707
708 static struct dma_async_tx_descriptor *
709 async_copy_data(int frombio, struct bio *bio, struct page *page,
710 sector_t sector, struct dma_async_tx_descriptor *tx)
711 {
712 struct bio_vec *bvl;
713 struct page *bio_page;
714 int i;
715 int page_offset;
716 struct async_submit_ctl submit;
717 enum async_tx_flags flags = 0;
718
719 if (bio->bi_sector >= sector)
720 page_offset = (signed)(bio->bi_sector - sector) * 512;
721 else
722 page_offset = (signed)(sector - bio->bi_sector) * -512;
723
724 if (frombio)
725 flags |= ASYNC_TX_FENCE;
726 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
727
728 bio_for_each_segment(bvl, bio, i) {
729 int len = bvl->bv_len;
730 int clen;
731 int b_offset = 0;
732
733 if (page_offset < 0) {
734 b_offset = -page_offset;
735 page_offset += b_offset;
736 len -= b_offset;
737 }
738
739 if (len > 0 && page_offset + len > STRIPE_SIZE)
740 clen = STRIPE_SIZE - page_offset;
741 else
742 clen = len;
743
744 if (clen > 0) {
745 b_offset += bvl->bv_offset;
746 bio_page = bvl->bv_page;
747 if (frombio)
748 tx = async_memcpy(page, bio_page, page_offset,
749 b_offset, clen, &submit);
750 else
751 tx = async_memcpy(bio_page, page, b_offset,
752 page_offset, clen, &submit);
753 }
754 /* chain the operations */
755 submit.depend_tx = tx;
756
757 if (clen < len) /* hit end of page */
758 break;
759 page_offset += len;
760 }
761
762 return tx;
763 }
764
765 static void ops_complete_biofill(void *stripe_head_ref)
766 {
767 struct stripe_head *sh = stripe_head_ref;
768 struct bio *return_bi = NULL;
769 int i;
770
771 pr_debug("%s: stripe %llu\n", __func__,
772 (unsigned long long)sh->sector);
773
774 /* clear completed biofills */
775 for (i = sh->disks; i--; ) {
776 struct r5dev *dev = &sh->dev[i];
777
778 /* acknowledge completion of a biofill operation */
779 		/* and check if we need to reply to a read request;
780 * new R5_Wantfill requests are held off until
781 * !STRIPE_BIOFILL_RUN
782 */
783 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
784 struct bio *rbi, *rbi2;
785
786 BUG_ON(!dev->read);
787 rbi = dev->read;
788 dev->read = NULL;
789 while (rbi && rbi->bi_sector <
790 dev->sector + STRIPE_SECTORS) {
791 rbi2 = r5_next_bio(rbi, dev->sector);
792 if (!raid5_dec_bi_active_stripes(rbi)) {
793 rbi->bi_next = return_bi;
794 return_bi = rbi;
795 }
796 rbi = rbi2;
797 }
798 }
799 }
800 clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
801
802 return_io(return_bi);
803
804 set_bit(STRIPE_HANDLE, &sh->state);
805 release_stripe(sh);
806 }
807
808 static void ops_run_biofill(struct stripe_head *sh)
809 {
810 struct dma_async_tx_descriptor *tx = NULL;
811 struct async_submit_ctl submit;
812 int i;
813
814 pr_debug("%s: stripe %llu\n", __func__,
815 (unsigned long long)sh->sector);
816
817 for (i = sh->disks; i--; ) {
818 struct r5dev *dev = &sh->dev[i];
819 if (test_bit(R5_Wantfill, &dev->flags)) {
820 struct bio *rbi;
821 spin_lock_irq(&sh->stripe_lock);
822 dev->read = rbi = dev->toread;
823 dev->toread = NULL;
824 spin_unlock_irq(&sh->stripe_lock);
825 while (rbi && rbi->bi_sector <
826 dev->sector + STRIPE_SECTORS) {
827 tx = async_copy_data(0, rbi, dev->page,
828 dev->sector, tx);
829 rbi = r5_next_bio(rbi, dev->sector);
830 }
831 }
832 }
833
834 atomic_inc(&sh->count);
835 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
836 async_trigger_callback(&submit);
837 }
838
839 static void mark_target_uptodate(struct stripe_head *sh, int target)
840 {
841 struct r5dev *tgt;
842
843 if (target < 0)
844 return;
845
846 tgt = &sh->dev[target];
847 set_bit(R5_UPTODATE, &tgt->flags);
848 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
849 clear_bit(R5_Wantcompute, &tgt->flags);
850 }
851
852 static void ops_complete_compute(void *stripe_head_ref)
853 {
854 struct stripe_head *sh = stripe_head_ref;
855
856 pr_debug("%s: stripe %llu\n", __func__,
857 (unsigned long long)sh->sector);
858
859 /* mark the computed target(s) as uptodate */
860 mark_target_uptodate(sh, sh->ops.target);
861 mark_target_uptodate(sh, sh->ops.target2);
862
863 clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
864 if (sh->check_state == check_state_compute_run)
865 sh->check_state = check_state_compute_result;
866 set_bit(STRIPE_HANDLE, &sh->state);
867 release_stripe(sh);
868 }
869
870 /* return a pointer to the address conversion region of the scribble buffer */
871 static addr_conv_t *to_addr_conv(struct stripe_head *sh,
872 struct raid5_percpu *percpu)
873 {
874 return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
875 }
876
877 static struct dma_async_tx_descriptor *
878 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
879 {
880 int disks = sh->disks;
881 struct page **xor_srcs = percpu->scribble;
882 int target = sh->ops.target;
883 struct r5dev *tgt = &sh->dev[target];
884 struct page *xor_dest = tgt->page;
885 int count = 0;
886 struct dma_async_tx_descriptor *tx;
887 struct async_submit_ctl submit;
888 int i;
889
890 pr_debug("%s: stripe %llu block: %d\n",
891 __func__, (unsigned long long)sh->sector, target);
892 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
893
894 for (i = disks; i--; )
895 if (i != target)
896 xor_srcs[count++] = sh->dev[i].page;
897
898 atomic_inc(&sh->count);
899
900 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
901 ops_complete_compute, sh, to_addr_conv(sh, percpu));
902 if (unlikely(count == 1))
903 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
904 else
905 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
906
907 return tx;
908 }
909
910 /* set_syndrome_sources - populate source buffers for gen_syndrome
911 * @srcs - (struct page *) array of size sh->disks
912 * @sh - stripe_head to parse
913 *
914 * Populates srcs in proper layout order for the stripe and returns the
915 * 'count' of sources to be used in a call to async_gen_syndrome. The P
916 * destination buffer is recorded in srcs[count] and the Q destination
917  * is recorded in srcs[count+1].
918 */
919 static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
920 {
921 int disks = sh->disks;
922 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
923 int d0_idx = raid6_d0(sh);
924 int count;
925 int i;
926
927 for (i = 0; i < disks; i++)
928 srcs[i] = NULL;
929
930 count = 0;
931 i = d0_idx;
932 do {
933 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
934
935 srcs[slot] = sh->dev[i].page;
936 i = raid6_next_disk(i, disks);
937 } while (i != d0_idx);
938
939 return syndrome_disks;
940 }
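
/*
 * Worked example (5 disks, md layout, so syndrome_disks = 3): the walk
 * above fills srcs[0..2] with the data pages in syndrome order, srcs[3]
 * with the P page and srcs[4] with the Q page, then returns 3, which is
 * the layout async_gen_syndrome() expects when callers pass count+2 == 5.
 */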
941
942 static struct dma_async_tx_descriptor *
943 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
944 {
945 int disks = sh->disks;
946 struct page **blocks = percpu->scribble;
947 int target;
948 int qd_idx = sh->qd_idx;
949 struct dma_async_tx_descriptor *tx;
950 struct async_submit_ctl submit;
951 struct r5dev *tgt;
952 struct page *dest;
953 int i;
954 int count;
955
956 if (sh->ops.target < 0)
957 target = sh->ops.target2;
958 else if (sh->ops.target2 < 0)
959 target = sh->ops.target;
960 else
961 /* we should only have one valid target */
962 BUG();
963 BUG_ON(target < 0);
964 pr_debug("%s: stripe %llu block: %d\n",
965 __func__, (unsigned long long)sh->sector, target);
966
967 tgt = &sh->dev[target];
968 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
969 dest = tgt->page;
970
971 atomic_inc(&sh->count);
972
973 if (target == qd_idx) {
974 count = set_syndrome_sources(blocks, sh);
975 blocks[count] = NULL; /* regenerating p is not necessary */
976 BUG_ON(blocks[count+1] != dest); /* q should already be set */
977 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
978 ops_complete_compute, sh,
979 to_addr_conv(sh, percpu));
980 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
981 } else {
982 /* Compute any data- or p-drive using XOR */
983 count = 0;
984 for (i = disks; i-- ; ) {
985 if (i == target || i == qd_idx)
986 continue;
987 blocks[count++] = sh->dev[i].page;
988 }
989
990 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
991 NULL, ops_complete_compute, sh,
992 to_addr_conv(sh, percpu));
993 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
994 }
995
996 return tx;
997 }
998
999 static struct dma_async_tx_descriptor *
1000 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
1001 {
1002 int i, count, disks = sh->disks;
1003 int syndrome_disks = sh->ddf_layout ? disks : disks-2;
1004 int d0_idx = raid6_d0(sh);
1005 int faila = -1, failb = -1;
1006 int target = sh->ops.target;
1007 int target2 = sh->ops.target2;
1008 struct r5dev *tgt = &sh->dev[target];
1009 struct r5dev *tgt2 = &sh->dev[target2];
1010 struct dma_async_tx_descriptor *tx;
1011 struct page **blocks = percpu->scribble;
1012 struct async_submit_ctl submit;
1013
1014 pr_debug("%s: stripe %llu block1: %d block2: %d\n",
1015 __func__, (unsigned long long)sh->sector, target, target2);
1016 BUG_ON(target < 0 || target2 < 0);
1017 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1018 BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
1019
1020 /* we need to open-code set_syndrome_sources to handle the
1021 * slot number conversion for 'faila' and 'failb'
1022 */
1023 for (i = 0; i < disks ; i++)
1024 blocks[i] = NULL;
1025 count = 0;
1026 i = d0_idx;
1027 do {
1028 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1029
1030 blocks[slot] = sh->dev[i].page;
1031
1032 if (i == target)
1033 faila = slot;
1034 if (i == target2)
1035 failb = slot;
1036 i = raid6_next_disk(i, disks);
1037 } while (i != d0_idx);
1038
1039 BUG_ON(faila == failb);
1040 if (failb < faila)
1041 swap(faila, failb);
1042 pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
1043 __func__, (unsigned long long)sh->sector, faila, failb);
1044
1045 atomic_inc(&sh->count);
1046
1047 if (failb == syndrome_disks+1) {
1048 /* Q disk is one of the missing disks */
1049 if (faila == syndrome_disks) {
1050 /* Missing P+Q, just recompute */
1051 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1052 ops_complete_compute, sh,
1053 to_addr_conv(sh, percpu));
1054 return async_gen_syndrome(blocks, 0, syndrome_disks+2,
1055 STRIPE_SIZE, &submit);
1056 } else {
1057 struct page *dest;
1058 int data_target;
1059 int qd_idx = sh->qd_idx;
1060
1061 /* Missing D+Q: recompute D from P, then recompute Q */
1062 if (target == qd_idx)
1063 data_target = target2;
1064 else
1065 data_target = target;
1066
1067 count = 0;
1068 for (i = disks; i-- ; ) {
1069 if (i == data_target || i == qd_idx)
1070 continue;
1071 blocks[count++] = sh->dev[i].page;
1072 }
1073 dest = sh->dev[data_target].page;
1074 init_async_submit(&submit,
1075 ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
1076 NULL, NULL, NULL,
1077 to_addr_conv(sh, percpu));
1078 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
1079 &submit);
1080
1081 count = set_syndrome_sources(blocks, sh);
1082 init_async_submit(&submit, ASYNC_TX_FENCE, tx,
1083 ops_complete_compute, sh,
1084 to_addr_conv(sh, percpu));
1085 return async_gen_syndrome(blocks, 0, count+2,
1086 STRIPE_SIZE, &submit);
1087 }
1088 } else {
1089 init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1090 ops_complete_compute, sh,
1091 to_addr_conv(sh, percpu));
1092 if (failb == syndrome_disks) {
1093 /* We're missing D+P. */
1094 return async_raid6_datap_recov(syndrome_disks+2,
1095 STRIPE_SIZE, faila,
1096 blocks, &submit);
1097 } else {
1098 /* We're missing D+D. */
1099 return async_raid6_2data_recov(syndrome_disks+2,
1100 STRIPE_SIZE, faila, failb,
1101 blocks, &submit);
1102 }
1103 }
1104 }
1105
1106
1107 static void ops_complete_prexor(void *stripe_head_ref)
1108 {
1109 struct stripe_head *sh = stripe_head_ref;
1110
1111 pr_debug("%s: stripe %llu\n", __func__,
1112 (unsigned long long)sh->sector);
1113 }
1114
1115 static struct dma_async_tx_descriptor *
1116 ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
1117 struct dma_async_tx_descriptor *tx)
1118 {
1119 int disks = sh->disks;
1120 struct page **xor_srcs = percpu->scribble;
1121 int count = 0, pd_idx = sh->pd_idx, i;
1122 struct async_submit_ctl submit;
1123
1124 /* existing parity data subtracted */
1125 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1126
1127 pr_debug("%s: stripe %llu\n", __func__,
1128 (unsigned long long)sh->sector);
1129
1130 for (i = disks; i--; ) {
1131 struct r5dev *dev = &sh->dev[i];
1132 /* Only process blocks that are known to be uptodate */
1133 if (test_bit(R5_Wantdrain, &dev->flags))
1134 xor_srcs[count++] = dev->page;
1135 }
1136
1137 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
1138 ops_complete_prexor, sh, to_addr_conv(sh, percpu));
1139 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1140
1141 return tx;
1142 }
1143
1144 static struct dma_async_tx_descriptor *
1145 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1146 {
1147 int disks = sh->disks;
1148 int i;
1149
1150 pr_debug("%s: stripe %llu\n", __func__,
1151 (unsigned long long)sh->sector);
1152
1153 for (i = disks; i--; ) {
1154 struct r5dev *dev = &sh->dev[i];
1155 struct bio *chosen;
1156
1157 if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
1158 struct bio *wbi;
1159
1160 spin_lock_irq(&sh->stripe_lock);
1161 chosen = dev->towrite;
1162 dev->towrite = NULL;
1163 BUG_ON(dev->written);
1164 wbi = dev->written = chosen;
1165 spin_unlock_irq(&sh->stripe_lock);
1166
1167 while (wbi && wbi->bi_sector <
1168 dev->sector + STRIPE_SECTORS) {
1169 if (wbi->bi_rw & REQ_FUA)
1170 set_bit(R5_WantFUA, &dev->flags);
1171 if (wbi->bi_rw & REQ_SYNC)
1172 set_bit(R5_SyncIO, &dev->flags);
1173 tx = async_copy_data(1, wbi, dev->page,
1174 dev->sector, tx);
1175 wbi = r5_next_bio(wbi, dev->sector);
1176 }
1177 }
1178 }
1179
1180 return tx;
1181 }
1182
1183 static void ops_complete_reconstruct(void *stripe_head_ref)
1184 {
1185 struct stripe_head *sh = stripe_head_ref;
1186 int disks = sh->disks;
1187 int pd_idx = sh->pd_idx;
1188 int qd_idx = sh->qd_idx;
1189 int i;
1190 bool fua = false, sync = false;
1191
1192 pr_debug("%s: stripe %llu\n", __func__,
1193 (unsigned long long)sh->sector);
1194
1195 for (i = disks; i--; ) {
1196 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
1197 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
1198 }
1199
1200 for (i = disks; i--; ) {
1201 struct r5dev *dev = &sh->dev[i];
1202
1203 if (dev->written || i == pd_idx || i == qd_idx) {
1204 set_bit(R5_UPTODATE, &dev->flags);
1205 if (fua)
1206 set_bit(R5_WantFUA, &dev->flags);
1207 if (sync)
1208 set_bit(R5_SyncIO, &dev->flags);
1209 }
1210 }
1211
1212 if (sh->reconstruct_state == reconstruct_state_drain_run)
1213 sh->reconstruct_state = reconstruct_state_drain_result;
1214 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
1215 sh->reconstruct_state = reconstruct_state_prexor_drain_result;
1216 else {
1217 BUG_ON(sh->reconstruct_state != reconstruct_state_run);
1218 sh->reconstruct_state = reconstruct_state_result;
1219 }
1220
1221 set_bit(STRIPE_HANDLE, &sh->state);
1222 release_stripe(sh);
1223 }
1224
1225 static void
1226 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
1227 struct dma_async_tx_descriptor *tx)
1228 {
1229 int disks = sh->disks;
1230 struct page **xor_srcs = percpu->scribble;
1231 struct async_submit_ctl submit;
1232 int count = 0, pd_idx = sh->pd_idx, i;
1233 struct page *xor_dest;
1234 int prexor = 0;
1235 unsigned long flags;
1236
1237 pr_debug("%s: stripe %llu\n", __func__,
1238 (unsigned long long)sh->sector);
1239
1240 	/* check if prexor is active, which means we only process blocks
1241 	 * that are part of a read-modify-write (i.e. those with 'written' set)
1242 */
1243 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
1244 prexor = 1;
1245 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1246 for (i = disks; i--; ) {
1247 struct r5dev *dev = &sh->dev[i];
1248 if (dev->written)
1249 xor_srcs[count++] = dev->page;
1250 }
1251 } else {
1252 xor_dest = sh->dev[pd_idx].page;
1253 for (i = disks; i--; ) {
1254 struct r5dev *dev = &sh->dev[i];
1255 if (i != pd_idx)
1256 xor_srcs[count++] = dev->page;
1257 }
1258 }
1259
1260 /* 1/ if we prexor'd then the dest is reused as a source
1261 * 2/ if we did not prexor then we are redoing the parity
1262 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
1263 * for the synchronous xor case
1264 */
1265 flags = ASYNC_TX_ACK |
1266 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
1267
1268 atomic_inc(&sh->count);
1269
1270 init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
1271 to_addr_conv(sh, percpu));
1272 if (unlikely(count == 1))
1273 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
1274 else
1275 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1276 }
1277
1278 static void
1279 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
1280 struct dma_async_tx_descriptor *tx)
1281 {
1282 struct async_submit_ctl submit;
1283 struct page **blocks = percpu->scribble;
1284 int count;
1285
1286 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
1287
1288 count = set_syndrome_sources(blocks, sh);
1289
1290 atomic_inc(&sh->count);
1291
1292 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
1293 sh, to_addr_conv(sh, percpu));
1294 async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
1295 }
1296
1297 static void ops_complete_check(void *stripe_head_ref)
1298 {
1299 struct stripe_head *sh = stripe_head_ref;
1300
1301 pr_debug("%s: stripe %llu\n", __func__,
1302 (unsigned long long)sh->sector);
1303
1304 sh->check_state = check_state_check_result;
1305 set_bit(STRIPE_HANDLE, &sh->state);
1306 release_stripe(sh);
1307 }
1308
1309 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
1310 {
1311 int disks = sh->disks;
1312 int pd_idx = sh->pd_idx;
1313 int qd_idx = sh->qd_idx;
1314 struct page *xor_dest;
1315 struct page **xor_srcs = percpu->scribble;
1316 struct dma_async_tx_descriptor *tx;
1317 struct async_submit_ctl submit;
1318 int count;
1319 int i;
1320
1321 pr_debug("%s: stripe %llu\n", __func__,
1322 (unsigned long long)sh->sector);
1323
1324 count = 0;
1325 xor_dest = sh->dev[pd_idx].page;
1326 xor_srcs[count++] = xor_dest;
1327 for (i = disks; i--; ) {
1328 if (i == pd_idx || i == qd_idx)
1329 continue;
1330 xor_srcs[count++] = sh->dev[i].page;
1331 }
1332
1333 init_async_submit(&submit, 0, NULL, NULL, NULL,
1334 to_addr_conv(sh, percpu));
1335 tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
1336 &sh->ops.zero_sum_result, &submit);
1337
1338 atomic_inc(&sh->count);
1339 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
1340 tx = async_trigger_callback(&submit);
1341 }
1342
1343 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
1344 {
1345 struct page **srcs = percpu->scribble;
1346 struct async_submit_ctl submit;
1347 int count;
1348
1349 pr_debug("%s: stripe %llu checkp: %d\n", __func__,
1350 (unsigned long long)sh->sector, checkp);
1351
1352 count = set_syndrome_sources(srcs, sh);
1353 if (!checkp)
1354 srcs[count] = NULL;
1355
1356 atomic_inc(&sh->count);
1357 init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
1358 sh, to_addr_conv(sh, percpu));
1359 async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
1360 &sh->ops.zero_sum_result, percpu->spare_page, &submit);
1361 }
1362
1363 static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1364 {
1365 int overlap_clear = 0, i, disks = sh->disks;
1366 struct dma_async_tx_descriptor *tx = NULL;
1367 struct r5conf *conf = sh->raid_conf;
1368 int level = conf->level;
1369 struct raid5_percpu *percpu;
1370 unsigned long cpu;
1371
1372 cpu = get_cpu();
1373 percpu = per_cpu_ptr(conf->percpu, cpu);
1374 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
1375 ops_run_biofill(sh);
1376 overlap_clear++;
1377 }
1378
1379 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
1380 if (level < 6)
1381 tx = ops_run_compute5(sh, percpu);
1382 else {
1383 if (sh->ops.target2 < 0 || sh->ops.target < 0)
1384 tx = ops_run_compute6_1(sh, percpu);
1385 else
1386 tx = ops_run_compute6_2(sh, percpu);
1387 }
1388 /* terminate the chain if reconstruct is not set to be run */
1389 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
1390 async_tx_ack(tx);
1391 }
1392
1393 if (test_bit(STRIPE_OP_PREXOR, &ops_request))
1394 tx = ops_run_prexor(sh, percpu, tx);
1395
1396 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
1397 tx = ops_run_biodrain(sh, tx);
1398 overlap_clear++;
1399 }
1400
1401 if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
1402 if (level < 6)
1403 ops_run_reconstruct5(sh, percpu, tx);
1404 else
1405 ops_run_reconstruct6(sh, percpu, tx);
1406 }
1407
1408 if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
1409 if (sh->check_state == check_state_run)
1410 ops_run_check_p(sh, percpu);
1411 else if (sh->check_state == check_state_run_q)
1412 ops_run_check_pq(sh, percpu, 0);
1413 else if (sh->check_state == check_state_run_pq)
1414 ops_run_check_pq(sh, percpu, 1);
1415 else
1416 BUG();
1417 }
1418
1419 if (overlap_clear)
1420 for (i = disks; i--; ) {
1421 struct r5dev *dev = &sh->dev[i];
1422 if (test_and_clear_bit(R5_Overlap, &dev->flags))
1423 wake_up(&sh->raid_conf->wait_for_overlap);
1424 }
1425 put_cpu();
1426 }
1427
1428 #ifdef CONFIG_MULTICORE_RAID456
1429 static void async_run_ops(void *param, async_cookie_t cookie)
1430 {
1431 struct stripe_head *sh = param;
1432 unsigned long ops_request = sh->ops.request;
1433
1434 clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
1435 wake_up(&sh->ops.wait_for_ops);
1436
1437 __raid_run_ops(sh, ops_request);
1438 release_stripe(sh);
1439 }
1440
1441 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1442 {
1443 /* since handle_stripe can be called outside of raid5d context
1444 * we need to ensure sh->ops.request is de-staged before another
1445 * request arrives
1446 */
1447 wait_event(sh->ops.wait_for_ops,
1448 !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
1449 sh->ops.request = ops_request;
1450
1451 atomic_inc(&sh->count);
1452 async_schedule(async_run_ops, sh);
1453 }
1454 #else
1455 #define raid_run_ops __raid_run_ops
1456 #endif
1457
1458 static int grow_one_stripe(struct r5conf *conf)
1459 {
1460 struct stripe_head *sh;
1461 sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
1462 if (!sh)
1463 return 0;
1464
1465 sh->raid_conf = conf;
1466 #ifdef CONFIG_MULTICORE_RAID456
1467 init_waitqueue_head(&sh->ops.wait_for_ops);
1468 #endif
1469
1470 spin_lock_init(&sh->stripe_lock);
1471
1472 if (grow_buffers(sh)) {
1473 shrink_buffers(sh);
1474 kmem_cache_free(conf->slab_cache, sh);
1475 return 0;
1476 }
1477 /* we just created an active stripe so... */
1478 atomic_set(&sh->count, 1);
1479 atomic_inc(&conf->active_stripes);
1480 INIT_LIST_HEAD(&sh->lru);
1481 release_stripe(sh);
1482 return 1;
1483 }
1484
1485 static int grow_stripes(struct r5conf *conf, int num)
1486 {
1487 struct kmem_cache *sc;
1488 int devs = max(conf->raid_disks, conf->previous_raid_disks);
1489
1490 if (conf->mddev->gendisk)
1491 sprintf(conf->cache_name[0],
1492 "raid%d-%s", conf->level, mdname(conf->mddev));
1493 else
1494 sprintf(conf->cache_name[0],
1495 "raid%d-%p", conf->level, conf->mddev);
1496 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
1497
1498 conf->active_name = 0;
1499 sc = kmem_cache_create(conf->cache_name[conf->active_name],
1500 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
1501 0, 0, NULL);
1502 if (!sc)
1503 return 1;
1504 conf->slab_cache = sc;
1505 conf->pool_size = devs;
1506 while (num--)
1507 if (!grow_one_stripe(conf))
1508 return 1;
1509 return 0;
1510 }
1511
1512 /**
1513 * scribble_len - return the required size of the scribble region
1514 * @num - total number of disks in the array
1515 *
1516 * The size must be enough to contain:
1517 * 1/ a struct page pointer for each device in the array +2
1518 * 2/ room to convert each entry in (1) to its corresponding dma
1519 * (dma_map_page()) or page (page_address()) address.
1520 *
1521 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
1522 * calculate over all devices (not just the data blocks), using zeros in place
1523 * of the P and Q blocks.
1524 */
1525 static size_t scribble_len(int num)
1526 {
1527 size_t len;
1528
1529 len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
1530
1531 return len;
1532 }
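
/*
 * Worked example: scribble_len(6) reserves 8 page pointers (6 devices
 * plus the P and Q destinations) followed by 8 addr_conv_t entries.
 * to_addr_conv() above relies on this layout when it skips
 * sizeof(struct page *) * (sh->disks + 2) bytes into the buffer.
 */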
1533
1534 static int resize_stripes(struct r5conf *conf, int newsize)
1535 {
1536 /* Make all the stripes able to hold 'newsize' devices.
1537 * New slots in each stripe get 'page' set to a new page.
1538 *
1539 * This happens in stages:
1540 * 1/ create a new kmem_cache and allocate the required number of
1541 * stripe_heads.
1542 	 * 2/ gather all the old stripe_heads and transfer the pages across
1543 * to the new stripe_heads. This will have the side effect of
1544 * freezing the array as once all stripe_heads have been collected,
1545 * no IO will be possible. Old stripe heads are freed once their
1546 * pages have been transferred over, and the old kmem_cache is
1547 * freed when all stripes are done.
1548 	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
1549 	 *    we simply return a failure status - no need to clean anything up.
1550 	 * 4/ allocate new pages for the new slots in the new stripe_heads.
1551 	 *    If this fails, we don't bother trying to shrink the
1552 * stripe_heads down again, we just leave them as they are.
1553 * As each stripe_head is processed the new one is released into
1554 * active service.
1555 *
1556 * Once step2 is started, we cannot afford to wait for a write,
1557 * so we use GFP_NOIO allocations.
1558 */
1559 struct stripe_head *osh, *nsh;
1560 LIST_HEAD(newstripes);
1561 struct disk_info *ndisks;
1562 unsigned long cpu;
1563 int err;
1564 struct kmem_cache *sc;
1565 int i;
1566
1567 if (newsize <= conf->pool_size)
1568 return 0; /* never bother to shrink */
1569
1570 err = md_allow_write(conf->mddev);
1571 if (err)
1572 return err;
1573
1574 /* Step 1 */
1575 sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
1576 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
1577 0, 0, NULL);
1578 if (!sc)
1579 return -ENOMEM;
1580
1581 for (i = conf->max_nr_stripes; i; i--) {
1582 nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
1583 if (!nsh)
1584 break;
1585
1586 nsh->raid_conf = conf;
1587 #ifdef CONFIG_MULTICORE_RAID456
1588 init_waitqueue_head(&nsh->ops.wait_for_ops);
1589 #endif
1590
1591 list_add(&nsh->lru, &newstripes);
1592 }
1593 if (i) {
1594 /* didn't get enough, give up */
1595 while (!list_empty(&newstripes)) {
1596 nsh = list_entry(newstripes.next, struct stripe_head, lru);
1597 list_del(&nsh->lru);
1598 kmem_cache_free(sc, nsh);
1599 }
1600 kmem_cache_destroy(sc);
1601 return -ENOMEM;
1602 }
1603 /* Step 2 - Must use GFP_NOIO now.
1604 * OK, we have enough stripes, start collecting inactive
1605 * stripes and copying them over
1606 */
1607 list_for_each_entry(nsh, &newstripes, lru) {
1608 spin_lock_irq(&conf->device_lock);
1609 wait_event_lock_irq(conf->wait_for_stripe,
1610 !list_empty(&conf->inactive_list),
1611 conf->device_lock,
1612 );
1613 osh = get_free_stripe(conf);
1614 spin_unlock_irq(&conf->device_lock);
1615 atomic_set(&nsh->count, 1);
1616 for(i=0; i<conf->pool_size; i++)
1617 nsh->dev[i].page = osh->dev[i].page;
1618 for( ; i<newsize; i++)
1619 nsh->dev[i].page = NULL;
1620 kmem_cache_free(conf->slab_cache, osh);
1621 }
1622 kmem_cache_destroy(conf->slab_cache);
1623
1624 /* Step 3.
1625 * At this point, we are holding all the stripes so the array
1626 * is completely stalled, so now is a good time to resize
1627 * conf->disks and the scribble region
1628 */
1629 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
1630 if (ndisks) {
1631 for (i=0; i<conf->raid_disks; i++)
1632 ndisks[i] = conf->disks[i];
1633 kfree(conf->disks);
1634 conf->disks = ndisks;
1635 } else
1636 err = -ENOMEM;
1637
1638 get_online_cpus();
1639 conf->scribble_len = scribble_len(newsize);
1640 for_each_present_cpu(cpu) {
1641 struct raid5_percpu *percpu;
1642 void *scribble;
1643
1644 percpu = per_cpu_ptr(conf->percpu, cpu);
1645 scribble = kmalloc(conf->scribble_len, GFP_NOIO);
1646
1647 if (scribble) {
1648 kfree(percpu->scribble);
1649 percpu->scribble = scribble;
1650 } else {
1651 err = -ENOMEM;
1652 break;
1653 }
1654 }
1655 put_online_cpus();
1656
1657 /* Step 4, return new stripes to service */
1658 while(!list_empty(&newstripes)) {
1659 nsh = list_entry(newstripes.next, struct stripe_head, lru);
1660 list_del_init(&nsh->lru);
1661
1662 for (i=conf->raid_disks; i < newsize; i++)
1663 if (nsh->dev[i].page == NULL) {
1664 struct page *p = alloc_page(GFP_NOIO);
1665 nsh->dev[i].page = p;
1666 if (!p)
1667 err = -ENOMEM;
1668 }
1669 release_stripe(nsh);
1670 }
1671 	/* critical section passed, GFP_NOIO no longer needed */
1672
1673 conf->slab_cache = sc;
1674 conf->active_name = 1-conf->active_name;
1675 conf->pool_size = newsize;
1676 return err;
1677 }
1678
1679 static int drop_one_stripe(struct r5conf *conf)
1680 {
1681 struct stripe_head *sh;
1682
1683 spin_lock_irq(&conf->device_lock);
1684 sh = get_free_stripe(conf);
1685 spin_unlock_irq(&conf->device_lock);
1686 if (!sh)
1687 return 0;
1688 BUG_ON(atomic_read(&sh->count));
1689 shrink_buffers(sh);
1690 kmem_cache_free(conf->slab_cache, sh);
1691 atomic_dec(&conf->active_stripes);
1692 return 1;
1693 }
1694
1695 static void shrink_stripes(struct r5conf *conf)
1696 {
1697 while (drop_one_stripe(conf))
1698 ;
1699
1700 if (conf->slab_cache)
1701 kmem_cache_destroy(conf->slab_cache);
1702 conf->slab_cache = NULL;
1703 }
1704
1705 static void raid5_end_read_request(struct bio * bi, int error)
1706 {
1707 struct stripe_head *sh = bi->bi_private;
1708 struct r5conf *conf = sh->raid_conf;
1709 int disks = sh->disks, i;
1710 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1711 char b[BDEVNAME_SIZE];
1712 struct md_rdev *rdev = NULL;
1713 sector_t s;
1714
1715 for (i=0 ; i<disks; i++)
1716 if (bi == &sh->dev[i].req)
1717 break;
1718
1719 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1720 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1721 uptodate);
1722 if (i == disks) {
1723 BUG();
1724 return;
1725 }
1726 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
1727 /* If replacement finished while this request was outstanding,
1728 * 'replacement' might be NULL already.
1729 * In that case it moved down to 'rdev'.
1730 * rdev is not removed until all requests are finished.
1731 */
1732 rdev = conf->disks[i].replacement;
1733 if (!rdev)
1734 rdev = conf->disks[i].rdev;
1735
1736 if (use_new_offset(conf, sh))
1737 s = sh->sector + rdev->new_data_offset;
1738 else
1739 s = sh->sector + rdev->data_offset;
1740 if (uptodate) {
1741 set_bit(R5_UPTODATE, &sh->dev[i].flags);
1742 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1743 /* Note that this cannot happen on a
1744 * replacement device. We just fail those on
1745 * any error
1746 */
1747 printk_ratelimited(
1748 KERN_INFO
1749 "md/raid:%s: read error corrected"
1750 " (%lu sectors at %llu on %s)\n",
1751 mdname(conf->mddev), STRIPE_SECTORS,
1752 (unsigned long long)s,
1753 bdevname(rdev->bdev, b));
1754 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1755 clear_bit(R5_ReadError, &sh->dev[i].flags);
1756 clear_bit(R5_ReWrite, &sh->dev[i].flags);
1757 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
1758 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
1759
1760 if (atomic_read(&rdev->read_errors))
1761 atomic_set(&rdev->read_errors, 0);
1762 } else {
1763 const char *bdn = bdevname(rdev->bdev, b);
1764 int retry = 0;
1765 int set_bad = 0;
1766
1767 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1768 atomic_inc(&rdev->read_errors);
1769 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
1770 printk_ratelimited(
1771 KERN_WARNING
1772 "md/raid:%s: read error on replacement device "
1773 "(sector %llu on %s).\n",
1774 mdname(conf->mddev),
1775 (unsigned long long)s,
1776 bdn);
1777 else if (conf->mddev->degraded >= conf->max_degraded) {
1778 set_bad = 1;
1779 printk_ratelimited(
1780 KERN_WARNING
1781 "md/raid:%s: read error not correctable "
1782 "(sector %llu on %s).\n",
1783 mdname(conf->mddev),
1784 (unsigned long long)s,
1785 bdn);
1786 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
1787 /* Oh, no!!! */
1788 set_bad = 1;
1789 printk_ratelimited(
1790 KERN_WARNING
1791 "md/raid:%s: read error NOT corrected!! "
1792 "(sector %llu on %s).\n",
1793 mdname(conf->mddev),
1794 (unsigned long long)s,
1795 bdn);
1796 } else if (atomic_read(&rdev->read_errors)
1797 > conf->max_nr_stripes)
1798 printk(KERN_WARNING
1799 "md/raid:%s: Too many read errors, failing device %s.\n",
1800 mdname(conf->mddev), bdn);
1801 else
1802 retry = 1;
1803 if (retry)
1804 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
1805 set_bit(R5_ReadError, &sh->dev[i].flags);
1806 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
1807 } else
1808 set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
1809 else {
1810 clear_bit(R5_ReadError, &sh->dev[i].flags);
1811 clear_bit(R5_ReWrite, &sh->dev[i].flags);
1812 if (!(set_bad
1813 && test_bit(In_sync, &rdev->flags)
1814 && rdev_set_badblocks(
1815 rdev, sh->sector, STRIPE_SECTORS, 0)))
1816 md_error(conf->mddev, rdev);
1817 }
1818 }
1819 rdev_dec_pending(rdev, conf->mddev);
1820 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1821 set_bit(STRIPE_HANDLE, &sh->state);
1822 release_stripe(sh);
1823 }
1824
1825 static void raid5_end_write_request(struct bio *bi, int error)
1826 {
1827 struct stripe_head *sh = bi->bi_private;
1828 struct r5conf *conf = sh->raid_conf;
1829 int disks = sh->disks, i;
1830 struct md_rdev *uninitialized_var(rdev);
1831 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1832 sector_t first_bad;
1833 int bad_sectors;
1834 int replacement = 0;
1835
1836 for (i = 0 ; i < disks; i++) {
1837 if (bi == &sh->dev[i].req) {
1838 rdev = conf->disks[i].rdev;
1839 break;
1840 }
1841 if (bi == &sh->dev[i].rreq) {
1842 rdev = conf->disks[i].replacement;
1843 if (rdev)
1844 replacement = 1;
1845 else
1846 /* rdev was removed and 'replacement'
1847 * replaced it. rdev is not removed
1848 * until all requests are finished.
1849 */
1850 rdev = conf->disks[i].rdev;
1851 break;
1852 }
1853 }
1854 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
1855 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1856 uptodate);
1857 if (i == disks) {
1858 BUG();
1859 return;
1860 }
1861
1862 if (replacement) {
1863 if (!uptodate)
1864 md_error(conf->mddev, rdev);
1865 else if (is_badblock(rdev, sh->sector,
1866 STRIPE_SECTORS,
1867 &first_bad, &bad_sectors))
1868 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
1869 } else {
1870 if (!uptodate) {
1871 set_bit(WriteErrorSeen, &rdev->flags);
1872 set_bit(R5_WriteError, &sh->dev[i].flags);
1873 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1874 set_bit(MD_RECOVERY_NEEDED,
1875 &rdev->mddev->recovery);
1876 } else if (is_badblock(rdev, sh->sector,
1877 STRIPE_SECTORS,
1878 &first_bad, &bad_sectors))
1879 set_bit(R5_MadeGood, &sh->dev[i].flags);
1880 }
1881 rdev_dec_pending(rdev, conf->mddev);
1882
1883 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
1884 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1885 set_bit(STRIPE_HANDLE, &sh->state);
1886 release_stripe(sh);
1887 }
1888
1889 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
1890
1891 static void raid5_build_block(struct stripe_head *sh, int i, int previous)
1892 {
1893 struct r5dev *dev = &sh->dev[i];
1894
1895 bio_init(&dev->req);
1896 dev->req.bi_io_vec = &dev->vec;
1897 dev->req.bi_vcnt++;
1898 dev->req.bi_max_vecs++;
1899 dev->req.bi_private = sh;
1900 dev->vec.bv_page = dev->page;
1901
1902 bio_init(&dev->rreq);
1903 dev->rreq.bi_io_vec = &dev->rvec;
1904 dev->rreq.bi_vcnt++;
1905 dev->rreq.bi_max_vecs++;
1906 dev->rreq.bi_private = sh;
1907 dev->rvec.bv_page = dev->page;
1908
1909 dev->flags = 0;
1910 dev->sector = compute_blocknr(sh, i, previous);
1911 }
1912
1913 static void error(struct mddev *mddev, struct md_rdev *rdev)
1914 {
1915 char b[BDEVNAME_SIZE];
1916 struct r5conf *conf = mddev->private;
1917 unsigned long flags;
1918 pr_debug("raid456: error called\n");
1919
1920 spin_lock_irqsave(&conf->device_lock, flags);
1921 clear_bit(In_sync, &rdev->flags);
1922 mddev->degraded = calc_degraded(conf);
1923 spin_unlock_irqrestore(&conf->device_lock, flags);
1924 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1925
1926 set_bit(Blocked, &rdev->flags);
1927 set_bit(Faulty, &rdev->flags);
1928 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1929 printk(KERN_ALERT
1930 "md/raid:%s: Disk failure on %s, disabling device.\n"
1931 "md/raid:%s: Operation continuing on %d devices.\n",
1932 mdname(mddev),
1933 bdevname(rdev->bdev, b),
1934 mdname(mddev),
1935 conf->raid_disks - mddev->degraded);
1936 }
1937
1938 /*
1939 * Input: a 'big' sector number,
1940 * Output: index of the data and parity disk, and the sector # in them.
1941 */
1942 static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
1943 int previous, int *dd_idx,
1944 struct stripe_head *sh)
1945 {
1946 sector_t stripe, stripe2;
1947 sector_t chunk_number;
1948 unsigned int chunk_offset;
1949 int pd_idx, qd_idx;
1950 int ddf_layout = 0;
1951 sector_t new_sector;
1952 int algorithm = previous ? conf->prev_algo
1953 : conf->algorithm;
1954 int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1955 : conf->chunk_sectors;
1956 int raid_disks = previous ? conf->previous_raid_disks
1957 : conf->raid_disks;
1958 int data_disks = raid_disks - conf->max_degraded;
1959
1960 /* First compute the information on this sector */
1961
1962 /*
1963 * Compute the chunk number and the sector offset inside the chunk
1964 */
1965 chunk_offset = sector_div(r_sector, sectors_per_chunk);
1966 chunk_number = r_sector;
1967
1968 /*
1969 * Compute the stripe number
1970 */
1971 stripe = chunk_number;
1972 *dd_idx = sector_div(stripe, data_disks);
1973 stripe2 = stripe;
1974 /*
1975 * Select the parity disk based on the user selected algorithm.
1976 */
1977 pd_idx = qd_idx = -1;
1978 switch(conf->level) {
1979 case 4:
1980 pd_idx = data_disks;
1981 break;
1982 case 5:
1983 switch (algorithm) {
1984 case ALGORITHM_LEFT_ASYMMETRIC:
1985 pd_idx = data_disks - sector_div(stripe2, raid_disks);
1986 if (*dd_idx >= pd_idx)
1987 (*dd_idx)++;
1988 break;
1989 case ALGORITHM_RIGHT_ASYMMETRIC:
1990 pd_idx = sector_div(stripe2, raid_disks);
1991 if (*dd_idx >= pd_idx)
1992 (*dd_idx)++;
1993 break;
1994 case ALGORITHM_LEFT_SYMMETRIC:
1995 pd_idx = data_disks - sector_div(stripe2, raid_disks);
1996 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1997 break;
1998 case ALGORITHM_RIGHT_SYMMETRIC:
1999 pd_idx = sector_div(stripe2, raid_disks);
2000 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2001 break;
2002 case ALGORITHM_PARITY_0:
2003 pd_idx = 0;
2004 (*dd_idx)++;
2005 break;
2006 case ALGORITHM_PARITY_N:
2007 pd_idx = data_disks;
2008 break;
2009 default:
2010 BUG();
2011 }
2012 break;
2013 case 6:
2014
2015 switch (algorithm) {
2016 case ALGORITHM_LEFT_ASYMMETRIC:
2017 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2018 qd_idx = pd_idx + 1;
2019 if (pd_idx == raid_disks-1) {
2020 (*dd_idx)++; /* Q D D D P */
2021 qd_idx = 0;
2022 } else if (*dd_idx >= pd_idx)
2023 (*dd_idx) += 2; /* D D P Q D */
2024 break;
2025 case ALGORITHM_RIGHT_ASYMMETRIC:
2026 pd_idx = sector_div(stripe2, raid_disks);
2027 qd_idx = pd_idx + 1;
2028 if (pd_idx == raid_disks-1) {
2029 (*dd_idx)++; /* Q D D D P */
2030 qd_idx = 0;
2031 } else if (*dd_idx >= pd_idx)
2032 (*dd_idx) += 2; /* D D P Q D */
2033 break;
2034 case ALGORITHM_LEFT_SYMMETRIC:
2035 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2036 qd_idx = (pd_idx + 1) % raid_disks;
2037 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
2038 break;
2039 case ALGORITHM_RIGHT_SYMMETRIC:
2040 pd_idx = sector_div(stripe2, raid_disks);
2041 qd_idx = (pd_idx + 1) % raid_disks;
2042 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
2043 break;
2044
2045 case ALGORITHM_PARITY_0:
2046 pd_idx = 0;
2047 qd_idx = 1;
2048 (*dd_idx) += 2;
2049 break;
2050 case ALGORITHM_PARITY_N:
2051 pd_idx = data_disks;
2052 qd_idx = data_disks + 1;
2053 break;
2054
2055 case ALGORITHM_ROTATING_ZERO_RESTART:
2056 /* Exactly the same as RIGHT_ASYMMETRIC, but the
2057 * order of blocks for computing Q is different.
2058 */
2059 pd_idx = sector_div(stripe2, raid_disks);
2060 qd_idx = pd_idx + 1;
2061 if (pd_idx == raid_disks-1) {
2062 (*dd_idx)++; /* Q D D D P */
2063 qd_idx = 0;
2064 } else if (*dd_idx >= pd_idx)
2065 (*dd_idx) += 2; /* D D P Q D */
2066 ddf_layout = 1;
2067 break;
2068
2069 case ALGORITHM_ROTATING_N_RESTART:
2070 /* Same as left_asymmetric, but the first stripe is
2071 * D D D P Q rather than
2072 * Q D D D P
2073 */
2074 stripe2 += 1;
2075 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2076 qd_idx = pd_idx + 1;
2077 if (pd_idx == raid_disks-1) {
2078 (*dd_idx)++; /* Q D D D P */
2079 qd_idx = 0;
2080 } else if (*dd_idx >= pd_idx)
2081 (*dd_idx) += 2; /* D D P Q D */
2082 ddf_layout = 1;
2083 break;
2084
2085 case ALGORITHM_ROTATING_N_CONTINUE:
2086 /* Same as left_symmetric but Q is before P */
2087 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2088 qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
2089 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2090 ddf_layout = 1;
2091 break;
2092
2093 case ALGORITHM_LEFT_ASYMMETRIC_6:
2094 /* RAID5 left_asymmetric, with Q on last device */
2095 pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
2096 if (*dd_idx >= pd_idx)
2097 (*dd_idx)++;
2098 qd_idx = raid_disks - 1;
2099 break;
2100
2101 case ALGORITHM_RIGHT_ASYMMETRIC_6:
2102 pd_idx = sector_div(stripe2, raid_disks-1);
2103 if (*dd_idx >= pd_idx)
2104 (*dd_idx)++;
2105 qd_idx = raid_disks - 1;
2106 break;
2107
2108 case ALGORITHM_LEFT_SYMMETRIC_6:
2109 pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
2110 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
2111 qd_idx = raid_disks - 1;
2112 break;
2113
2114 case ALGORITHM_RIGHT_SYMMETRIC_6:
2115 pd_idx = sector_div(stripe2, raid_disks-1);
2116 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
2117 qd_idx = raid_disks - 1;
2118 break;
2119
2120 case ALGORITHM_PARITY_0_6:
2121 pd_idx = 0;
2122 (*dd_idx)++;
2123 qd_idx = raid_disks - 1;
2124 break;
2125
2126 default:
2127 BUG();
2128 }
2129 break;
2130 }
2131
2132 if (sh) {
2133 sh->pd_idx = pd_idx;
2134 sh->qd_idx = qd_idx;
2135 sh->ddf_layout = ddf_layout;
2136 }
2137 /*
2138 * Finally, compute the new sector number
2139 */
2140 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
2141 return new_sector;
2142 }
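
/*
 * A minimal, self-contained sketch of the mapping above for the common
 * RAID5 ALGORITHM_LEFT_SYMMETRIC case.  Plain '/' and '%' stand in for
 * sector_div(), and the struct and helper names are illustrative only;
 * they are not part of this driver.
 */
struct r5_map_sketch {
	unsigned long long new_sector;	/* sector offset within each device */
	int pd_idx;			/* parity disk for this stripe */
	int dd_idx;			/* data disk holding r_sector */
};

static struct r5_map_sketch map_left_symmetric(unsigned long long r_sector,
					       int raid_disks,
					       int sectors_per_chunk)
{
	int data_disks = raid_disks - 1;	/* RAID5: one parity block */
	unsigned long long chunk_number, stripe;
	unsigned int chunk_offset;
	struct r5_map_sketch m;

	/* offset inside the chunk, then the chunk number */
	chunk_offset = r_sector % sectors_per_chunk;
	chunk_number = r_sector / sectors_per_chunk;

	/* which data slot within the stripe, and which stripe */
	m.dd_idx = (int)(chunk_number % data_disks);
	stripe = chunk_number / data_disks;

	/* parity moves one disk lower on each successive stripe */
	m.pd_idx = data_disks - (int)(stripe % raid_disks);
	/* symmetric layouts number the data disks starting after parity */
	m.dd_idx = (m.pd_idx + 1 + m.dd_idx) % raid_disks;

	m.new_sector = stripe * sectors_per_chunk + chunk_offset;
	return m;
}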
2143
2144
2145 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
2146 {
2147 struct r5conf *conf = sh->raid_conf;
2148 int raid_disks = sh->disks;
2149 int data_disks = raid_disks - conf->max_degraded;
2150 sector_t new_sector = sh->sector, check;
2151 int sectors_per_chunk = previous ? conf->prev_chunk_sectors
2152 : conf->chunk_sectors;
2153 int algorithm = previous ? conf->prev_algo
2154 : conf->algorithm;
2155 sector_t stripe;
2156 int chunk_offset;
2157 sector_t chunk_number;
2158 int dummy1, dd_idx = i;
2159 sector_t r_sector;
2160 struct stripe_head sh2;
2161
2162
2163 chunk_offset = sector_div(new_sector, sectors_per_chunk);
2164 stripe = new_sector;
2165
2166 if (i == sh->pd_idx)
2167 return 0;
2168 switch(conf->level) {
2169 case 4: break;
2170 case 5:
2171 switch (algorithm) {
2172 case ALGORITHM_LEFT_ASYMMETRIC:
2173 case ALGORITHM_RIGHT_ASYMMETRIC:
2174 if (i > sh->pd_idx)
2175 i--;
2176 break;
2177 case ALGORITHM_LEFT_SYMMETRIC:
2178 case ALGORITHM_RIGHT_SYMMETRIC:
2179 if (i < sh->pd_idx)
2180 i += raid_disks;
2181 i -= (sh->pd_idx + 1);
2182 break;
2183 case ALGORITHM_PARITY_0:
2184 i -= 1;
2185 break;
2186 case ALGORITHM_PARITY_N:
2187 break;
2188 default:
2189 BUG();
2190 }
2191 break;
2192 case 6:
2193 if (i == sh->qd_idx)
2194 return 0; /* It is the Q disk */
2195 switch (algorithm) {
2196 case ALGORITHM_LEFT_ASYMMETRIC:
2197 case ALGORITHM_RIGHT_ASYMMETRIC:
2198 case ALGORITHM_ROTATING_ZERO_RESTART:
2199 case ALGORITHM_ROTATING_N_RESTART:
2200 if (sh->pd_idx == raid_disks-1)
2201 i--; /* Q D D D P */
2202 else if (i > sh->pd_idx)
2203 i -= 2; /* D D P Q D */
2204 break;
2205 case ALGORITHM_LEFT_SYMMETRIC:
2206 case ALGORITHM_RIGHT_SYMMETRIC:
2207 if (sh->pd_idx == raid_disks-1)
2208 i--; /* Q D D D P */
2209 else {
2210 /* D D P Q D */
2211 if (i < sh->pd_idx)
2212 i += raid_disks;
2213 i -= (sh->pd_idx + 2);
2214 }
2215 break;
2216 case ALGORITHM_PARITY_0:
2217 i -= 2;
2218 break;
2219 case ALGORITHM_PARITY_N:
2220 break;
2221 case ALGORITHM_ROTATING_N_CONTINUE:
2222 /* Like left_symmetric, but P is before Q */
2223 if (sh->pd_idx == 0)
2224 i--; /* P D D D Q */
2225 else {
2226 /* D D Q P D */
2227 if (i < sh->pd_idx)
2228 i += raid_disks;
2229 i -= (sh->pd_idx + 1);
2230 }
2231 break;
2232 case ALGORITHM_LEFT_ASYMMETRIC_6:
2233 case ALGORITHM_RIGHT_ASYMMETRIC_6:
2234 if (i > sh->pd_idx)
2235 i--;
2236 break;
2237 case ALGORITHM_LEFT_SYMMETRIC_6:
2238 case ALGORITHM_RIGHT_SYMMETRIC_6:
2239 if (i < sh->pd_idx)
2240 i += data_disks + 1;
2241 i -= (sh->pd_idx + 1);
2242 break;
2243 case ALGORITHM_PARITY_0_6:
2244 i -= 1;
2245 break;
2246 default:
2247 BUG();
2248 }
2249 break;
2250 }
2251
2252 chunk_number = stripe * data_disks + i;
2253 r_sector = chunk_number * sectors_per_chunk + chunk_offset;
2254
2255 check = raid5_compute_sector(conf, r_sector,
2256 previous, &dummy1, &sh2);
2257 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
2258 || sh2.qd_idx != sh->qd_idx) {
2259 printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
2260 mdname(conf->mddev));
2261 return 0;
2262 }
2263 return r_sector;
2264 }
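
/*
 * The inverse, again for ALGORITHM_LEFT_SYMMETRIC only: given a disk
 * index 'i', the stripe's parity disk and the per-device sector,
 * recover the array-wide sector.  A hypothetical companion to the
 * sketch after raid5_compute_sector(), mirroring the renumbering that
 * compute_blocknr() undoes; not part of this driver.
 */
static unsigned long long unmap_left_symmetric(unsigned long long new_sector,
					       int i, int pd_idx,
					       int raid_disks,
					       int sectors_per_chunk)
{
	int data_disks = raid_disks - 1;
	unsigned int chunk_offset = (unsigned int)(new_sector % sectors_per_chunk);
	unsigned long long stripe = new_sector / sectors_per_chunk;
	unsigned long long chunk_number;

	/* undo the "data disks start just after parity" renumbering */
	if (i < pd_idx)
		i += raid_disks;
	i -= (pd_idx + 1);

	chunk_number = stripe * data_disks + i;
	return chunk_number * sectors_per_chunk + chunk_offset;
}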
2265
2266
2267 static void
2268 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2269 int rcw, int expand)
2270 {
2271 int i, pd_idx = sh->pd_idx, disks = sh->disks;
2272 struct r5conf *conf = sh->raid_conf;
2273 int level = conf->level;
2274
2275 if (rcw) {
2276 /* if we are not expanding, this is a proper write request, and
2277 * there will be bios with new data to be drained into the
2278 * stripe cache
2279 */
2280 if (!expand) {
2281 sh->reconstruct_state = reconstruct_state_drain_run;
2282 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2283 } else
2284 sh->reconstruct_state = reconstruct_state_run;
2285
2286 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2287
2288 for (i = disks; i--; ) {
2289 struct r5dev *dev = &sh->dev[i];
2290
2291 if (dev->towrite) {
2292 set_bit(R5_LOCKED, &dev->flags);
2293 set_bit(R5_Wantdrain, &dev->flags);
2294 if (!expand)
2295 clear_bit(R5_UPTODATE, &dev->flags);
2296 s->locked++;
2297 }
2298 }
2299 if (s->locked + conf->max_degraded == disks)
2300 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
2301 atomic_inc(&conf->pending_full_writes);
2302 } else {
2303 BUG_ON(level == 6);
2304 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
2305 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
2306
2307 sh->reconstruct_state = reconstruct_state_prexor_drain_run;
2308 set_bit(STRIPE_OP_PREXOR, &s->ops_request);
2309 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2310 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2311
2312 for (i = disks; i--; ) {
2313 struct r5dev *dev = &sh->dev[i];
2314 if (i == pd_idx)
2315 continue;
2316
2317 if (dev->towrite &&
2318 (test_bit(R5_UPTODATE, &dev->flags) ||
2319 test_bit(R5_Wantcompute, &dev->flags))) {
2320 set_bit(R5_Wantdrain, &dev->flags);
2321 set_bit(R5_LOCKED, &dev->flags);
2322 clear_bit(R5_UPTODATE, &dev->flags);
2323 s->locked++;
2324 }
2325 }
2326 }
2327
2328 /* keep the parity disk(s) locked while asynchronous operations
2329 * are in flight
2330 */
2331 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
2332 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2333 s->locked++;
2334
2335 if (level == 6) {
2336 int qd_idx = sh->qd_idx;
2337 struct r5dev *dev = &sh->dev[qd_idx];
2338
2339 set_bit(R5_LOCKED, &dev->flags);
2340 clear_bit(R5_UPTODATE, &dev->flags);
2341 s->locked++;
2342 }
2343
2344 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2345 __func__, (unsigned long long)sh->sector,
2346 s->locked, s->ops_request);
2347 }
2348
2349 /*
2350 * Each stripe/dev can have one or more bios attached.
2351 * toread/towrite point to the first in a chain.
2352 * The bi_next chain must be in order.
2353 */
2354 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
2355 {
2356 struct bio **bip;
2357 struct r5conf *conf = sh->raid_conf;
2358 int firstwrite=0;
2359
2360 pr_debug("adding bi b#%llu to stripe s#%llu\n",
2361 (unsigned long long)bi->bi_sector,
2362 (unsigned long long)sh->sector);
2363
2364 /*
2365 * If several bios share a stripe, the bio bi_phys_segments acts as a
2366 * reference count to avoid races. The reference count should already be
2367 * increased before this function is called (for example, in
2368 * make_request()), so other bios sharing this stripe will not free the
2369 * stripe. If a stripe is owned by just one bio, the stripe lock will
2370 * protect it.
2371 */
2372 spin_lock_irq(&sh->stripe_lock);
2373 if (forwrite) {
2374 bip = &sh->dev[dd_idx].towrite;
2375 if (*bip == NULL)
2376 firstwrite = 1;
2377 } else
2378 bip = &sh->dev[dd_idx].toread;
2379 while (*bip && (*bip)->bi_sector < bi->bi_sector) {
2380 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
2381 goto overlap;
2382 bip = & (*bip)->bi_next;
2383 }
2384 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
2385 goto overlap;
2386
2387 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
2388 if (*bip)
2389 bi->bi_next = *bip;
2390 *bip = bi;
2391 raid5_inc_bi_active_stripes(bi);
2392
2393 if (forwrite) {
2394 /* check if page is covered */
2395 sector_t sector = sh->dev[dd_idx].sector;
2396 for (bi=sh->dev[dd_idx].towrite;
2397 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2398 bi && bi->bi_sector <= sector;
2399 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2400 if (bi->bi_sector + (bi->bi_size>>9) >= sector)
2401 sector = bi->bi_sector + (bi->bi_size>>9);
2402 }
2403 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
2404 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
2405 }
2406 spin_unlock_irq(&sh->stripe_lock);
2407
2408 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2409 (unsigned long long)(*bip)->bi_sector,
2410 (unsigned long long)sh->sector, dd_idx);
2411
2412 if (conf->mddev->bitmap && firstwrite) {
2413 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2414 STRIPE_SECTORS, 0);
2415 sh->bm_seq = conf->seq_flush+1;
2416 set_bit(STRIPE_BIT_DELAY, &sh->state);
2417 }
2418 return 1;
2419
2420 overlap:
2421 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
2422 spin_unlock_irq(&sh->stripe_lock);
2423 return 0;
2424 }
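
/*
 * The list walk in add_stripe_bio() in miniature: insert an element
 * into a chain kept sorted by start sector, refusing any overlap with
 * its neighbours.  'struct seg' and the helper are illustrative
 * stand-ins for bio/bi_next/bi_sector, without the locking and
 * reference counting the real code needs.
 */
struct seg {
	unsigned long long start, len;		/* both in sectors */
	struct seg *next;
};

static int seg_insert_sorted(struct seg **head, struct seg *s)
{
	struct seg **p = head;

	while (*p && (*p)->start < s->start) {
		if ((*p)->start + (*p)->len > s->start)
			return 0;		/* overlaps a predecessor */
		p = &(*p)->next;
	}
	if (*p && (*p)->start < s->start + s->len)
		return 0;			/* overlaps the successor */
	s->next = *p;
	*p = s;
	return 1;				/* inserted, like add_stripe_bio() */
}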
2425
2426 static void end_reshape(struct r5conf *conf);
2427
2428 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
2429 struct stripe_head *sh)
2430 {
2431 int sectors_per_chunk =
2432 previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
2433 int dd_idx;
2434 int chunk_offset = sector_div(stripe, sectors_per_chunk);
2435 int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
2436
2437 raid5_compute_sector(conf,
2438 stripe * (disks - conf->max_degraded)
2439 *sectors_per_chunk + chunk_offset,
2440 previous,
2441 &dd_idx, sh);
2442 }
2443
2444 static void
2445 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
2446 struct stripe_head_state *s, int disks,
2447 struct bio **return_bi)
2448 {
2449 int i;
2450 for (i = disks; i--; ) {
2451 struct bio *bi;
2452 int bitmap_end = 0;
2453
2454 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2455 struct md_rdev *rdev;
2456 rcu_read_lock();
2457 rdev = rcu_dereference(conf->disks[i].rdev);
2458 if (rdev && test_bit(In_sync, &rdev->flags))
2459 atomic_inc(&rdev->nr_pending);
2460 else
2461 rdev = NULL;
2462 rcu_read_unlock();
2463 if (rdev) {
2464 if (!rdev_set_badblocks(
2465 rdev,
2466 sh->sector,
2467 STRIPE_SECTORS, 0))
2468 md_error(conf->mddev, rdev);
2469 rdev_dec_pending(rdev, conf->mddev);
2470 }
2471 }
2472 spin_lock_irq(&sh->stripe_lock);
2473 /* fail all writes first */
2474 bi = sh->dev[i].towrite;
2475 sh->dev[i].towrite = NULL;
2476 spin_unlock_irq(&sh->stripe_lock);
2477 if (bi) {
2478 s->to_write--;
2479 bitmap_end = 1;
2480 }
2481
2482 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2483 wake_up(&conf->wait_for_overlap);
2484
2485 while (bi && bi->bi_sector <
2486 sh->dev[i].sector + STRIPE_SECTORS) {
2487 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2488 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2489 if (!raid5_dec_bi_active_stripes(bi)) {
2490 md_write_end(conf->mddev);
2491 bi->bi_next = *return_bi;
2492 *return_bi = bi;
2493 }
2494 bi = nextbi;
2495 }
2496 if (bitmap_end)
2497 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2498 STRIPE_SECTORS, 0, 0);
2499 bitmap_end = 0;
2500 /* and fail all 'written' */
2501 bi = sh->dev[i].written;
2502 sh->dev[i].written = NULL;
2503 if (bi) bitmap_end = 1;
2504 while (bi && bi->bi_sector <
2505 sh->dev[i].sector + STRIPE_SECTORS) {
2506 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2507 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2508 if (!raid5_dec_bi_active_stripes(bi)) {
2509 md_write_end(conf->mddev);
2510 bi->bi_next = *return_bi;
2511 *return_bi = bi;
2512 }
2513 bi = bi2;
2514 }
2515
2516 /* fail any reads if this device is non-operational and
2517 * the data has not reached the cache yet.
2518 */
2519 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
2520 (!test_bit(R5_Insync, &sh->dev[i].flags) ||
2521 test_bit(R5_ReadError, &sh->dev[i].flags))) {
2522 bi = sh->dev[i].toread;
2523 sh->dev[i].toread = NULL;
2524 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2525 wake_up(&conf->wait_for_overlap);
2526 if (bi) s->to_read--;
2527 while (bi && bi->bi_sector <
2528 sh->dev[i].sector + STRIPE_SECTORS) {
2529 struct bio *nextbi =
2530 r5_next_bio(bi, sh->dev[i].sector);
2531 clear_bit(BIO_UPTODATE, &bi->bi_flags);
2532 if (!raid5_dec_bi_active_stripes(bi)) {
2533 bi->bi_next = *return_bi;
2534 *return_bi = bi;
2535 }
2536 bi = nextbi;
2537 }
2538 }
2539 if (bitmap_end)
2540 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2541 STRIPE_SECTORS, 0, 0);
2542 /* If we were in the middle of a write the parity block might
2543 * still be locked - so just clear all R5_LOCKED flags
2544 */
2545 clear_bit(R5_LOCKED, &sh->dev[i].flags);
2546 }
2547
2548 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2549 if (atomic_dec_and_test(&conf->pending_full_writes))
2550 md_wakeup_thread(conf->mddev->thread);
2551 }
2552
2553 static void
2554 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
2555 struct stripe_head_state *s)
2556 {
2557 int abort = 0;
2558 int i;
2559
2560 clear_bit(STRIPE_SYNCING, &sh->state);
2561 s->syncing = 0;
2562 s->replacing = 0;
2563 /* There is nothing more to do for sync/check/repair.
2564 * Don't even need to abort as that is handled elsewhere
2565 * if needed, and not always wanted e.g. if there is a known
2566 * bad block here.
2567 * For recover/replace we need to record a bad block on all
2568 * non-sync devices, or abort the recovery
2569 */
2570 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
2571 /* During recovery devices cannot be removed, so
2572 * locking and refcounting of rdevs is not needed
2573 */
2574 for (i = 0; i < conf->raid_disks; i++) {
2575 struct md_rdev *rdev = conf->disks[i].rdev;
2576 if (rdev
2577 && !test_bit(Faulty, &rdev->flags)
2578 && !test_bit(In_sync, &rdev->flags)
2579 && !rdev_set_badblocks(rdev, sh->sector,
2580 STRIPE_SECTORS, 0))
2581 abort = 1;
2582 rdev = conf->disks[i].replacement;
2583 if (rdev
2584 && !test_bit(Faulty, &rdev->flags)
2585 && !test_bit(In_sync, &rdev->flags)
2586 && !rdev_set_badblocks(rdev, sh->sector,
2587 STRIPE_SECTORS, 0))
2588 abort = 1;
2589 }
2590 if (abort)
2591 conf->recovery_disabled =
2592 conf->mddev->recovery_disabled;
2593 }
2594 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
2595 }
2596
2597 static int want_replace(struct stripe_head *sh, int disk_idx)
2598 {
2599 struct md_rdev *rdev;
2600 int rv = 0;
2601 /* Doing recovery so rcu locking not required */
2602 rdev = sh->raid_conf->disks[disk_idx].replacement;
2603 if (rdev
2604 && !test_bit(Faulty, &rdev->flags)
2605 && !test_bit(In_sync, &rdev->flags)
2606 && (rdev->recovery_offset <= sh->sector
2607 || rdev->mddev->recovery_cp <= sh->sector))
2608 rv = 1;
2609
2610 return rv;
2611 }
2612
2613 /* fetch_block - checks the given member device to see if its data needs
2614 * to be read or computed to satisfy a request.
2615 *
2616 * Returns 1 when no more member devices need to be checked, otherwise returns
2617 * 0 to tell the loop in handle_stripe_fill to continue
2618 */
2619 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
2620 int disk_idx, int disks)
2621 {
2622 struct r5dev *dev = &sh->dev[disk_idx];
2623 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
2624 &sh->dev[s->failed_num[1]] };
2625
2626 /* is the data in this block needed, and can we get it? */
2627 if (!test_bit(R5_LOCKED, &dev->flags) &&
2628 !test_bit(R5_UPTODATE, &dev->flags) &&
2629 (dev->toread ||
2630 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2631 s->syncing || s->expanding ||
2632 (s->replacing && want_replace(sh, disk_idx)) ||
2633 (s->failed >= 1 && fdev[0]->toread) ||
2634 (s->failed >= 2 && fdev[1]->toread) ||
2635 (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
2636 !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
2637 (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
2638 /* we would like to get this block, possibly by computing it,
2639 * otherwise read it if the backing disk is insync
2640 */
2641 BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
2642 BUG_ON(test_bit(R5_Wantread, &dev->flags));
2643 if ((s->uptodate == disks - 1) &&
2644 (s->failed && (disk_idx == s->failed_num[0] ||
2645 disk_idx == s->failed_num[1]))) {
2646 /* a disk has failed and we're requested to fetch its
2647 * block, so compute it
2648 */
2649 pr_debug("Computing stripe %llu block %d\n",
2650 (unsigned long long)sh->sector, disk_idx);
2651 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2652 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2653 set_bit(R5_Wantcompute, &dev->flags);
2654 sh->ops.target = disk_idx;
2655 sh->ops.target2 = -1; /* no 2nd target */
2656 s->req_compute = 1;
2657 /* Careful: from this point on 'uptodate' is in the eye
2658 * of raid_run_ops which services 'compute' operations
2659 * before writes. R5_Wantcompute flags a block that will
2660 * be R5_UPTODATE by the time it is needed for a
2661 * subsequent operation.
2662 */
2663 s->uptodate++;
2664 return 1;
2665 } else if (s->uptodate == disks-2 && s->failed >= 2) {
2666 /* Computing 2-failure is *very* expensive; only
2667 * do it if failed >= 2
2668 */
2669 int other;
2670 for (other = disks; other--; ) {
2671 if (other == disk_idx)
2672 continue;
2673 if (!test_bit(R5_UPTODATE,
2674 &sh->dev[other].flags))
2675 break;
2676 }
2677 BUG_ON(other < 0);
2678 pr_debug("Computing stripe %llu blocks %d,%d\n",
2679 (unsigned long long)sh->sector,
2680 disk_idx, other);
2681 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2682 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2683 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
2684 set_bit(R5_Wantcompute, &sh->dev[other].flags);
2685 sh->ops.target = disk_idx;
2686 sh->ops.target2 = other;
2687 s->uptodate += 2;
2688 s->req_compute = 1;
2689 return 1;
2690 } else if (test_bit(R5_Insync, &dev->flags)) {
2691 set_bit(R5_LOCKED, &dev->flags);
2692 set_bit(R5_Wantread, &dev->flags);
2693 s->locked++;
2694 pr_debug("Reading block %d (sync=%d)\n",
2695 disk_idx, s->syncing);
2696 }
2697 }
2698
2699 return 0;
2700 }
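
/*
 * The branch structure above as a simplified decision table: a wanted
 * block on a failed drive can be computed once every other block is
 * uptodate (single failure, xor) or all but two are (double failure,
 * RAID6 syndrome); otherwise it must be read from an in-sync disk.
 * The enum and helper are illustrative only and gloss over the
 * R5_Wantcompute bookkeeping the real code performs.
 */
enum fetch_action { FETCH_NONE, FETCH_COMPUTE_1, FETCH_COMPUTE_2, FETCH_READ };

static enum fetch_action fetch_decision(int uptodate, int disks, int failed,
					int disk_is_failed, int disk_is_insync)
{
	if (uptodate == disks - 1 && failed && disk_is_failed)
		return FETCH_COMPUTE_1;	/* cheap single-target compute */
	if (uptodate == disks - 2 && failed >= 2)
		return FETCH_COMPUTE_2;	/* expensive two-target compute */
	if (disk_is_insync)
		return FETCH_READ;
	return FETCH_NONE;		/* wait for more blocks to arrive */
}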
2701
2702 /**
2703 * handle_stripe_fill - read or compute data to satisfy pending requests.
2704 */
2705 static void handle_stripe_fill(struct stripe_head *sh,
2706 struct stripe_head_state *s,
2707 int disks)
2708 {
2709 int i;
2710
2711 /* look for blocks to read/compute, skip this if a compute
2712 * is already in flight, or if the stripe contents are in the
2713 * midst of changing due to a write
2714 */
2715 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2716 !sh->reconstruct_state)
2717 for (i = disks; i--; )
2718 if (fetch_block(sh, s, i, disks))
2719 break;
2720 set_bit(STRIPE_HANDLE, &sh->state);
2721 }
2722
2723
2724 /* handle_stripe_clean_event
2725 * any written block on an uptodate or failed drive can be returned.
2726 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2727 * never LOCKED, so we don't need to test 'failed' directly.
2728 */
2729 static void handle_stripe_clean_event(struct r5conf *conf,
2730 struct stripe_head *sh, int disks, struct bio **return_bi)
2731 {
2732 int i;
2733 struct r5dev *dev;
2734
2735 for (i = disks; i--; )
2736 if (sh->dev[i].written) {
2737 dev = &sh->dev[i];
2738 if (!test_bit(R5_LOCKED, &dev->flags) &&
2739 test_bit(R5_UPTODATE, &dev->flags)) {
2740 /* We can return any write requests */
2741 struct bio *wbi, *wbi2;
2742 pr_debug("Return write for disc %d\n", i);
2743 wbi = dev->written;
2744 dev->written = NULL;
2745 while (wbi && wbi->bi_sector <
2746 dev->sector + STRIPE_SECTORS) {
2747 wbi2 = r5_next_bio(wbi, dev->sector);
2748 if (!raid5_dec_bi_active_stripes(wbi)) {
2749 md_write_end(conf->mddev);
2750 wbi->bi_next = *return_bi;
2751 *return_bi = wbi;
2752 }
2753 wbi = wbi2;
2754 }
2755 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2756 STRIPE_SECTORS,
2757 !test_bit(STRIPE_DEGRADED, &sh->state),
2758 0);
2759 }
2760 }
2761
2762 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2763 if (atomic_dec_and_test(&conf->pending_full_writes))
2764 md_wakeup_thread(conf->mddev->thread);
2765 }
2766
2767 static void handle_stripe_dirtying(struct r5conf *conf,
2768 struct stripe_head *sh,
2769 struct stripe_head_state *s,
2770 int disks)
2771 {
2772 int rmw = 0, rcw = 0, i;
2773 if (conf->max_degraded == 2) {
2774 /* RAID6 requires 'rcw' in the current implementation.
2775 * Calculate the real rcw later - for now fake it to
2776 * look like rcw is cheaper
2777 */
2778 rcw = 1; rmw = 2;
2779 } else for (i = disks; i--; ) {
2780 /* would I have to read this buffer for read_modify_write */
2781 struct r5dev *dev = &sh->dev[i];
2782 if ((dev->towrite || i == sh->pd_idx) &&
2783 !test_bit(R5_LOCKED, &dev->flags) &&
2784 !(test_bit(R5_UPTODATE, &dev->flags) ||
2785 test_bit(R5_Wantcompute, &dev->flags))) {
2786 if (test_bit(R5_Insync, &dev->flags))
2787 rmw++;
2788 else
2789 rmw += 2*disks; /* cannot read it */
2790 }
2791 /* Would I have to read this buffer for reconstruct_write */
2792 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
2793 !test_bit(R5_LOCKED, &dev->flags) &&
2794 !(test_bit(R5_UPTODATE, &dev->flags) ||
2795 test_bit(R5_Wantcompute, &dev->flags))) {
2796 if (test_bit(R5_Insync, &dev->flags)) rcw++;
2797 else
2798 rcw += 2*disks;
2799 }
2800 }
2801 pr_debug("for sector %llu, rmw=%d rcw=%d\n",
2802 (unsigned long long)sh->sector, rmw, rcw);
2803 set_bit(STRIPE_HANDLE, &sh->state);
2804 if (rmw < rcw && rmw > 0)
2805 /* prefer read-modify-write, but need to get some data */
2806 for (i = disks; i--; ) {
2807 struct r5dev *dev = &sh->dev[i];
2808 if ((dev->towrite || i == sh->pd_idx) &&
2809 !test_bit(R5_LOCKED, &dev->flags) &&
2810 !(test_bit(R5_UPTODATE, &dev->flags) ||
2811 test_bit(R5_Wantcompute, &dev->flags)) &&
2812 test_bit(R5_Insync, &dev->flags)) {
2813 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2815 pr_debug("Read_old block "
2816 "%d for r-m-w\n", i);
2817 set_bit(R5_LOCKED, &dev->flags);
2818 set_bit(R5_Wantread, &dev->flags);
2819 s->locked++;
2820 } else {
2821 set_bit(STRIPE_DELAYED, &sh->state);
2822 set_bit(STRIPE_HANDLE, &sh->state);
2823 }
2824 }
2825 }
2826 if (rcw <= rmw && rcw > 0) {
2827 /* want reconstruct write, but need to get some data */
2828 rcw = 0;
2829 for (i = disks; i--; ) {
2830 struct r5dev *dev = &sh->dev[i];
2831 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2832 i != sh->pd_idx && i != sh->qd_idx &&
2833 !test_bit(R5_LOCKED, &dev->flags) &&
2834 !(test_bit(R5_UPTODATE, &dev->flags) ||
2835 test_bit(R5_Wantcompute, &dev->flags))) {
2836 rcw++;
2837 if (!test_bit(R5_Insync, &dev->flags))
2838 continue; /* it's a failed drive */
2839 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2841 pr_debug("Read_old block "
2842 "%d for Reconstruct\n", i);
2843 set_bit(R5_LOCKED, &dev->flags);
2844 set_bit(R5_Wantread, &dev->flags);
2845 s->locked++;
2846 } else {
2847 set_bit(STRIPE_DELAYED, &sh->state);
2848 set_bit(STRIPE_HANDLE, &sh->state);
2849 }
2850 }
2851 }
2852 }
2853 /* now if nothing is locked, and if we have enough data,
2854 * we can start a write request
2855 */
2856 /* since handle_stripe can be called at any time we need to handle the
2857 * case where a compute block operation has been submitted and then a
2858 * subsequent call wants to start a write request. raid_run_ops only
2859 * handles the case where compute block and reconstruct are requested
2860 * simultaneously. If this is not the case then new writes need to be
2861 * held off until the compute completes.
2862 */
2863 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2864 (s->locked == 0 && (rcw == 0 || rmw == 0) &&
2865 !test_bit(STRIPE_BIT_DELAY, &sh->state)))
2866 schedule_reconstruction(sh, s, rcw == 0, 0);
2867 }
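
/*
 * The rmw/rcw choice above, reduced to its arithmetic for RAID5:
 * read-modify-write must read the old contents of every block it will
 * overwrite plus the old parity, while reconstruct-write must read
 * every data block it will NOT overwrite; blocks already uptodate in
 * the stripe cache are free.  A hypothetical helper (the real loop
 * also weights blocks on failed drives as nearly infinite, since they
 * cannot be read at all):
 */
static int prefer_rmw(int disks, int written_blocks,
		      int cached_for_rmw, int cached_for_rcw)
{
	/* old data of each written block, plus the old parity block */
	int rmw = written_blocks + 1 - cached_for_rmw;
	/* every data block that is not being written */
	int rcw = (disks - 1) - written_blocks - cached_for_rcw;

	return rmw > 0 && rmw < rcw;	/* mirrors "rmw < rcw && rmw > 0" */
}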
2868
2869 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
2870 struct stripe_head_state *s, int disks)
2871 {
2872 struct r5dev *dev = NULL;
2873
2874 set_bit(STRIPE_HANDLE, &sh->state);
2875
2876 switch (sh->check_state) {
2877 case check_state_idle:
2878 /* start a new check operation if there are no failures */
2879 if (s->failed == 0) {
2880 BUG_ON(s->uptodate != disks);
2881 sh->check_state = check_state_run;
2882 set_bit(STRIPE_OP_CHECK, &s->ops_request);
2883 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
2884 s->uptodate--;
2885 break;
2886 }
2887 dev = &sh->dev[s->failed_num[0]];
2888 /* fall through */
2889 case check_state_compute_result:
2890 sh->check_state = check_state_idle;
2891 if (!dev)
2892 dev = &sh->dev[sh->pd_idx];
2893
2894 /* check that a write has not made the stripe insync */
2895 if (test_bit(STRIPE_INSYNC, &sh->state))
2896 break;
2897
2898 /* either the parity check failed, or recovery is happening */
2899 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
2900 BUG_ON(s->uptodate != disks);
2901
2902 set_bit(R5_LOCKED, &dev->flags);
2903 s->locked++;
2904 set_bit(R5_Wantwrite, &dev->flags);
2905
2906 clear_bit(STRIPE_DEGRADED, &sh->state);
2907 set_bit(STRIPE_INSYNC, &sh->state);
2908 break;
2909 case check_state_run:
2910 break; /* we will be called again upon completion */
2911 case check_state_check_result:
2912 sh->check_state = check_state_idle;
2913
2914 /* if a failure occurred during the check operation, leave
2915 * STRIPE_INSYNC not set and let the stripe be handled again
2916 */
2917 if (s->failed)
2918 break;
2919
2920 /* handle a successful check operation, if parity is correct
2921 * we are done. Otherwise update the mismatch count and repair
2922 * parity if !MD_RECOVERY_CHECK
2923 */
2924 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
2925 /* parity is correct (on disc,
2926 * not in buffer any more)
2927 */
2928 set_bit(STRIPE_INSYNC, &sh->state);
2929 else {
2930 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2931 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2932 /* don't try to repair!! */
2933 set_bit(STRIPE_INSYNC, &sh->state);
2934 else {
2935 sh->check_state = check_state_compute_run;
2936 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2937 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2938 set_bit(R5_Wantcompute,
2939 &sh->dev[sh->pd_idx].flags);
2940 sh->ops.target = sh->pd_idx;
2941 sh->ops.target2 = -1;
2942 s->uptodate++;
2943 }
2944 }
2945 break;
2946 case check_state_compute_run:
2947 break;
2948 default:
2949 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2950 __func__, sh->check_state,
2951 (unsigned long long) sh->sector);
2952 BUG();
2953 }
2954 }
2955
2956
2957 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
2958 struct stripe_head_state *s,
2959 int disks)
2960 {
2961 int pd_idx = sh->pd_idx;
2962 int qd_idx = sh->qd_idx;
2963 struct r5dev *dev;
2964
2965 set_bit(STRIPE_HANDLE, &sh->state);
2966
2967 BUG_ON(s->failed > 2);
2968
2969 /* Want to check and possibly repair P and Q.
2970 * However there could be one 'failed' device, in which
2971 * case we can only check one of them, possibly using the
2972 * other to generate missing data
2973 */
2974
2975 switch (sh->check_state) {
2976 case check_state_idle:
2977 /* start a new check operation if there are < 2 failures */
2978 if (s->failed == s->q_failed) {
2979 /* The only possible failed device holds Q, so it
2980 * makes sense to check P (If anything else were failed,
2981 * we would have used P to recreate it).
2982 */
2983 sh->check_state = check_state_run;
2984 }
2985 if (!s->q_failed && s->failed < 2) {
2986 /* Q is not failed, and we didn't use it to generate
2987 * anything, so it makes sense to check it
2988 */
2989 if (sh->check_state == check_state_run)
2990 sh->check_state = check_state_run_pq;
2991 else
2992 sh->check_state = check_state_run_q;
2993 }
2994
2995 /* discard potentially stale zero_sum_result */
2996 sh->ops.zero_sum_result = 0;
2997
2998 if (sh->check_state == check_state_run) {
2999 /* async_xor_zero_sum destroys the contents of P */
3000 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
3001 s->uptodate--;
3002 }
3003 if (sh->check_state >= check_state_run &&
3004 sh->check_state <= check_state_run_pq) {
3005 /* async_syndrome_zero_sum preserves P and Q, so
3006 * no need to mark them !uptodate here
3007 */
3008 set_bit(STRIPE_OP_CHECK, &s->ops_request);
3009 break;
3010 }
3011
3012 /* we have 2-disk failure */
3013 BUG_ON(s->failed != 2);
3014 /* fall through */
3015 case check_state_compute_result:
3016 sh->check_state = check_state_idle;
3017
3018 /* check that a write has not made the stripe insync */
3019 if (test_bit(STRIPE_INSYNC, &sh->state))
3020 break;
3021
3022 /* now write out any block on a failed drive,
3023 * or P or Q if they were recomputed
3024 */
3025 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
3026 if (s->failed == 2) {
3027 dev = &sh->dev[s->failed_num[1]];
3028 s->locked++;
3029 set_bit(R5_LOCKED, &dev->flags);
3030 set_bit(R5_Wantwrite, &dev->flags);
3031 }
3032 if (s->failed >= 1) {
3033 dev = &sh->dev[s->failed_num[0]];
3034 s->locked++;
3035 set_bit(R5_LOCKED, &dev->flags);
3036 set_bit(R5_Wantwrite, &dev->flags);
3037 }
3038 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
3039 dev = &sh->dev[pd_idx];
3040 s->locked++;
3041 set_bit(R5_LOCKED, &dev->flags);
3042 set_bit(R5_Wantwrite, &dev->flags);
3043 }
3044 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
3045 dev = &sh->dev[qd_idx];
3046 s->locked++;
3047 set_bit(R5_LOCKED, &dev->flags);
3048 set_bit(R5_Wantwrite, &dev->flags);
3049 }
3050 clear_bit(STRIPE_DEGRADED, &sh->state);
3051
3052 set_bit(STRIPE_INSYNC, &sh->state);
3053 break;
3054 case check_state_run:
3055 case check_state_run_q:
3056 case check_state_run_pq:
3057 break; /* we will be called again upon completion */
3058 case check_state_check_result:
3059 sh->check_state = check_state_idle;
3060
3061 /* handle a successful check operation, if parity is correct
3062 * we are done. Otherwise update the mismatch count and repair
3063 * parity if !MD_RECOVERY_CHECK
3064 */
3065 if (sh->ops.zero_sum_result == 0) {
3066 /* both parities are correct */
3067 if (!s->failed)
3068 set_bit(STRIPE_INSYNC, &sh->state);
3069 else {
3070 /* in contrast to the raid5 case we can validate
3071 * parity, but still have a failure to write
3072 * back
3073 */
3074 sh->check_state = check_state_compute_result;
3075 /* Returning at this point means that we may go
3076 * off and bring p and/or q uptodate again so
3077 * we make sure to check zero_sum_result again
3078 * to verify if p or q need writeback
3079 */
3080 }
3081 } else {
3082 conf->mddev->resync_mismatches += STRIPE_SECTORS;
3083 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
3084 /* don't try to repair!! */
3085 set_bit(STRIPE_INSYNC, &sh->state);
3086 else {
3087 int *target = &sh->ops.target;
3088
3089 sh->ops.target = -1;
3090 sh->ops.target2 = -1;
3091 sh->check_state = check_state_compute_run;
3092 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3093 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3094 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
3095 set_bit(R5_Wantcompute,
3096 &sh->dev[pd_idx].flags);
3097 *target = pd_idx;
3098 target = &sh->ops.target2;
3099 s->uptodate++;
3100 }
3101 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
3102 set_bit(R5_Wantcompute,
3103 &sh->dev[qd_idx].flags);
3104 *target = qd_idx;
3105 s->uptodate++;
3106 }
3107 }
3108 }
3109 break;
3110 case check_state_compute_run:
3111 break;
3112 default:
3113 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
3114 __func__, sh->check_state,
3115 (unsigned long long) sh->sector);
3116 BUG();
3117 }
3118 }
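
/*
 * zero_sum_result handling in the two check state machines above, in
 * miniature: the result is a bit mask reporting which parity blocks
 * failed verification, and each set bit becomes one compute target
 * (ops.target / ops.target2).  The local flags below merely mirror
 * SUM_CHECK_P_RESULT and SUM_CHECK_Q_RESULT; a sketch, not driver code.
 */
enum { P_MISMATCH = 1 << 0, Q_MISMATCH = 1 << 1 };

static int parity_compute_targets(unsigned int zero_sum_result,
				  int pd_idx, int qd_idx, int targets[2])
{
	int n = 0;

	if (zero_sum_result & P_MISMATCH)
		targets[n++] = pd_idx;
	if (zero_sum_result & Q_MISMATCH)
		targets[n++] = qd_idx;
	return n;	/* 0: parity in sync; 1 or 2: blocks to recompute */
}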
3119
3120 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
3121 {
3122 int i;
3123
3124 /* We have read all the blocks in this stripe and now we need to
3125 * copy some of them into a target stripe for expand.
3126 */
3127 struct dma_async_tx_descriptor *tx = NULL;
3128 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3129 for (i = 0; i < sh->disks; i++)
3130 if (i != sh->pd_idx && i != sh->qd_idx) {
3131 int dd_idx, j;
3132 struct stripe_head *sh2;
3133 struct async_submit_ctl submit;
3134
3135 sector_t bn = compute_blocknr(sh, i, 1);
3136 sector_t s = raid5_compute_sector(conf, bn, 0,
3137 &dd_idx, NULL);
3138 sh2 = get_active_stripe(conf, s, 0, 1, 1);
3139 if (sh2 == NULL)
3140 /* so far only the early blocks of this stripe
3141 * have been requested. When later blocks
3142 * get requested, we will try again
3143 */
3144 continue;
3145 if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
3146 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
3147 /* must have already done this block */
3148 release_stripe(sh2);
3149 continue;
3150 }
3151
3152 /* place all the copies on one channel */
3153 init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
3154 tx = async_memcpy(sh2->dev[dd_idx].page,
3155 sh->dev[i].page, 0, 0, STRIPE_SIZE,
3156 &submit);
3157
3158 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
3159 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
3160 for (j = 0; j < conf->raid_disks; j++)
3161 if (j != sh2->pd_idx &&
3162 j != sh2->qd_idx &&
3163 !test_bit(R5_Expanded, &sh2->dev[j].flags))
3164 break;
3165 if (j == conf->raid_disks) {
3166 set_bit(STRIPE_EXPAND_READY, &sh2->state);
3167 set_bit(STRIPE_HANDLE, &sh2->state);
3168 }
3169 release_stripe(sh2);
3170
3171 }
3172 /* done submitting copies, wait for them to complete */
3173 if (tx) {
3174 async_tx_ack(tx);
3175 dma_wait_for_async_tx(tx);
3176 }
3177 }
3178
3179 /*
3180 * handle_stripe - do things to a stripe.
3181 *
3182 * We lock the stripe by setting STRIPE_ACTIVE and then examine the
3183 * state of various bits to see what needs to be done.
3184 * Possible results:
3185 * return some read requests which now have data
3186 * return some write requests which are safely on storage
3187 * schedule a read on some buffers
3188 * schedule a write of some buffers
3189 * return confirmation of parity correctness
3190 *
3191 */
3192
3193 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3194 {
3195 struct r5conf *conf = sh->raid_conf;
3196 int disks = sh->disks;
3197 struct r5dev *dev;
3198 int i;
3199 int do_recovery = 0;
3200
3201 memset(s, 0, sizeof(*s));
3202
3203 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3204 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
3205 s->failed_num[0] = -1;
3206 s->failed_num[1] = -1;
3207
3208 /* Now to look around and see what can be done */
3209 rcu_read_lock();
3210 for (i=disks; i--; ) {
3211 struct md_rdev *rdev;
3212 sector_t first_bad;
3213 int bad_sectors;
3214 int is_bad = 0;
3215
3216 dev = &sh->dev[i];
3217
3218 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
3219 i, dev->flags,
3220 dev->toread, dev->towrite, dev->written);
3221 /* maybe we can reply to a read
3222 *
3223 * new wantfill requests are only permitted while
3224 * ops_complete_biofill is guaranteed to be inactive
3225 */
3226 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
3227 !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
3228 set_bit(R5_Wantfill, &dev->flags);
3229
3230 /* now count some things */
3231 if (test_bit(R5_LOCKED, &dev->flags))
3232 s->locked++;
3233 if (test_bit(R5_UPTODATE, &dev->flags))
3234 s->uptodate++;
3235 if (test_bit(R5_Wantcompute, &dev->flags)) {
3236 s->compute++;
3237 BUG_ON(s->compute > 2);
3238 }
3239
3240 if (test_bit(R5_Wantfill, &dev->flags))
3241 s->to_fill++;
3242 else if (dev->toread)
3243 s->to_read++;
3244 if (dev->towrite) {
3245 s->to_write++;
3246 if (!test_bit(R5_OVERWRITE, &dev->flags))
3247 s->non_overwrite++;
3248 }
3249 if (dev->written)
3250 s->written++;
3251 /* Prefer to use the replacement for reads, but only
3252 * if it is recovered enough and has no bad blocks.
3253 */
3254 rdev = rcu_dereference(conf->disks[i].replacement);
3255 if (rdev && !test_bit(Faulty, &rdev->flags) &&
3256 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
3257 !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
3258 &first_bad, &bad_sectors))
3259 set_bit(R5_ReadRepl, &dev->flags);
3260 else {
3261 if (rdev)
3262 set_bit(R5_NeedReplace, &dev->flags);
3263 rdev = rcu_dereference(conf->disks[i].rdev);
3264 clear_bit(R5_ReadRepl, &dev->flags);
3265 }
3266 if (rdev && test_bit(Faulty, &rdev->flags))
3267 rdev = NULL;
3268 if (rdev) {
3269 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
3270 &first_bad, &bad_sectors);
3271 if (s->blocked_rdev == NULL
3272 && (test_bit(Blocked, &rdev->flags)
3273 || is_bad < 0)) {
3274 if (is_bad < 0)
3275 set_bit(BlockedBadBlocks,
3276 &rdev->flags);
3277 s->blocked_rdev = rdev;
3278 atomic_inc(&rdev->nr_pending);
3279 }
3280 }
3281 clear_bit(R5_Insync, &dev->flags);
3282 if (!rdev)
3283 /* Not in-sync */;
3284 else if (is_bad) {
3285 /* also not in-sync */
3286 if (!test_bit(WriteErrorSeen, &rdev->flags) &&
3287 test_bit(R5_UPTODATE, &dev->flags)) {
3288 /* treat as in-sync, but with a read error
3289 * which we can now try to correct
3290 */
3291 set_bit(R5_Insync, &dev->flags);
3292 set_bit(R5_ReadError, &dev->flags);
3293 }
3294 } else if (test_bit(In_sync, &rdev->flags))
3295 set_bit(R5_Insync, &dev->flags);
3296 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
3297 /* in sync if before recovery_offset */
3298 set_bit(R5_Insync, &dev->flags);
3299 else if (test_bit(R5_UPTODATE, &dev->flags) &&
3300 test_bit(R5_Expanded, &dev->flags))
3301 /* If we've reshaped into here, we assume it is Insync.
3302 * We will shortly update recovery_offset to make
3303 * it official.
3304 */
3305 set_bit(R5_Insync, &dev->flags);
3306
3307 if (rdev && test_bit(R5_WriteError, &dev->flags)) {
3308 /* This flag does not apply to '.replacement'
3309 * only to .rdev, so make sure to check that */
3310 struct md_rdev *rdev2 = rcu_dereference(
3311 conf->disks[i].rdev);
3312 if (rdev2 == rdev)
3313 clear_bit(R5_Insync, &dev->flags);
3314 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
3315 s->handle_bad_blocks = 1;
3316 atomic_inc(&rdev2->nr_pending);
3317 } else
3318 clear_bit(R5_WriteError, &dev->flags);
3319 }
3320 if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
3321 /* This flag does not apply to '.replacement'
3322 * only to .rdev, so make sure to check that */
3323 struct md_rdev *rdev2 = rcu_dereference(
3324 conf->disks[i].rdev);
3325 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
3326 s->handle_bad_blocks = 1;
3327 atomic_inc(&rdev2->nr_pending);
3328 } else
3329 clear_bit(R5_MadeGood, &dev->flags);
3330 }
3331 if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
3332 struct md_rdev *rdev2 = rcu_dereference(
3333 conf->disks[i].replacement);
3334 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
3335 s->handle_bad_blocks = 1;
3336 atomic_inc(&rdev2->nr_pending);
3337 } else
3338 clear_bit(R5_MadeGoodRepl, &dev->flags);
3339 }
3340 if (!test_bit(R5_Insync, &dev->flags)) {
3341 /* The ReadError flag will just be confusing now */
3342 clear_bit(R5_ReadError, &dev->flags);
3343 clear_bit(R5_ReWrite, &dev->flags);
3344 }
3345 if (test_bit(R5_ReadError, &dev->flags))
3346 clear_bit(R5_Insync, &dev->flags);
3347 if (!test_bit(R5_Insync, &dev->flags)) {
3348 if (s->failed < 2)
3349 s->failed_num[s->failed] = i;
3350 s->failed++;
3351 if (rdev && !test_bit(Faulty, &rdev->flags))
3352 do_recovery = 1;
3353 }
3354 }
3355 if (test_bit(STRIPE_SYNCING, &sh->state)) {
3356 /* If there is a failed device being replaced,
3357 * we must be recovering.
3358 * else if we are after recovery_cp, we must be syncing
3359 * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
3360 * else we can only be replacing.
3361 * Sync and recovery both need to read all devices, and so
3362 * use the same flag.
3363 */
3364 if (do_recovery ||
3365 sh->sector >= conf->mddev->recovery_cp ||
3366 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
3367 s->syncing = 1;
3368 else
3369 s->replacing = 1;
3370 }
3371 rcu_read_unlock();
3372 }
3373
3374 static void handle_stripe(struct stripe_head *sh)
3375 {
3376 struct stripe_head_state s;
3377 struct r5conf *conf = sh->raid_conf;
3378 int i;
3379 int prexor;
3380 int disks = sh->disks;
3381 struct r5dev *pdev, *qdev;
3382
3383 clear_bit(STRIPE_HANDLE, &sh->state);
3384 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
3385 /* already being handled, ensure it gets handled
3386 * again when current action finishes */
3387 set_bit(STRIPE_HANDLE, &sh->state);
3388 return;
3389 }
3390
3391 if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
3392 set_bit(STRIPE_SYNCING, &sh->state);
3393 clear_bit(STRIPE_INSYNC, &sh->state);
3394 }
3395 clear_bit(STRIPE_DELAYED, &sh->state);
3396
3397 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3398 "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
3399 (unsigned long long)sh->sector, sh->state,
3400 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
3401 sh->check_state, sh->reconstruct_state);
3402
3403 analyse_stripe(sh, &s);
3404
3405 if (s.handle_bad_blocks) {
3406 set_bit(STRIPE_HANDLE, &sh->state);
3407 goto finish;
3408 }
3409
3410 if (unlikely(s.blocked_rdev)) {
3411 if (s.syncing || s.expanding || s.expanded ||
3412 s.replacing || s.to_write || s.written) {
3413 set_bit(STRIPE_HANDLE, &sh->state);
3414 goto finish;
3415 }
3416 /* There is nothing for the blocked_rdev to block */
3417 rdev_dec_pending(s.blocked_rdev, conf->mddev);
3418 s.blocked_rdev = NULL;
3419 }
3420
3421 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3422 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3423 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3424 }
3425
3426 pr_debug("locked=%d uptodate=%d to_read=%d"
3427 " to_write=%d failed=%d failed_num=%d,%d\n",
3428 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3429 s.failed_num[0], s.failed_num[1]);
3430 /* check if the array has lost more than max_degraded devices and,
3431 * if so, some requests might need to be failed.
3432 */
3433 if (s.failed > conf->max_degraded) {
3434 sh->check_state = 0;
3435 sh->reconstruct_state = 0;
3436 if (s.to_read+s.to_write+s.written)
3437 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3438 if (s.syncing + s.replacing)
3439 handle_failed_sync(conf, sh, &s);
3440 }
3441
3442 /*
3443 * might be able to return some write requests if the parity blocks
3444 * are safe, or on a failed drive
3445 */
3446 pdev = &sh->dev[sh->pd_idx];
3447 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
3448 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
3449 qdev = &sh->dev[sh->qd_idx];
3450 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
3451 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
3452 || conf->level < 6;
3453
3454 if (s.written &&
3455 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
3456 && !test_bit(R5_LOCKED, &pdev->flags)
3457 && test_bit(R5_UPTODATE, &pdev->flags)))) &&
3458 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
3459 && !test_bit(R5_LOCKED, &qdev->flags)
3460 && test_bit(R5_UPTODATE, &qdev->flags)))))
3461 handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
3462
3463 /* Now we might consider reading some blocks, either to check/generate
3464 * parity, or to satisfy requests
3465 * or to load a block that is being partially written.
3466 */
3467 if (s.to_read || s.non_overwrite
3468 || (conf->level == 6 && s.to_write && s.failed)
3469 || (s.syncing && (s.uptodate + s.compute < disks))
3470 || s.replacing
3471 || s.expanding)
3472 handle_stripe_fill(sh, &s, disks);
3473
3474 /* Now we check to see if any write operations have recently
3475 * completed
3476 */
3477 prexor = 0;
3478 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
3479 prexor = 1;
3480 if (sh->reconstruct_state == reconstruct_state_drain_result ||
3481 sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
3482 sh->reconstruct_state = reconstruct_state_idle;
3483
3484 /* All the 'written' buffers and the parity block are ready to
3485 * be written back to disk
3486 */
3487 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3488 BUG_ON(sh->qd_idx >= 0 &&
3489 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
3490 for (i = disks; i--; ) {
3491 struct r5dev *dev = &sh->dev[i];
3492 if (test_bit(R5_LOCKED, &dev->flags) &&
3493 (i == sh->pd_idx || i == sh->qd_idx ||
3494 dev->written)) {
3495 pr_debug("Writing block %d\n", i);
3496 set_bit(R5_Wantwrite, &dev->flags);
3497 if (prexor)
3498 continue;
3499 if (!test_bit(R5_Insync, &dev->flags) ||
3500 ((i == sh->pd_idx || i == sh->qd_idx) &&
3501 s.failed == 0))
3502 set_bit(STRIPE_INSYNC, &sh->state);
3503 }
3504 }
3505 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3506 s.dec_preread_active = 1;
3507 }
3508
3509 /* Now to consider new write requests and what else, if anything
3510 * should be read. We do not handle new writes when:
3511 * 1/ A 'write' operation (copy+xor) is already in flight.
3512 * 2/ A 'check' operation is in flight, as it may clobber the parity
3513 * block.
3514 */
3515 if (s.to_write && !sh->reconstruct_state && !sh->check_state)
3516 handle_stripe_dirtying(conf, sh, &s, disks);
3517
3518 /* maybe we need to check and possibly fix the parity for this stripe
3519 * Any reads will already have been scheduled, so we just see if enough
3520 * data is available. The parity check is held off while parity
3521 * dependent operations are in flight.
3522 */
3523 if (sh->check_state ||
3524 (s.syncing && s.locked == 0 &&
3525 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3526 !test_bit(STRIPE_INSYNC, &sh->state))) {
3527 if (conf->level == 6)
3528 handle_parity_checks6(conf, sh, &s, disks);
3529 else
3530 handle_parity_checks5(conf, sh, &s, disks);
3531 }
3532
3533 if (s.replacing && s.locked == 0
3534 && !test_bit(STRIPE_INSYNC, &sh->state)) {
3535 /* Write out to replacement devices where possible */
3536 for (i = 0; i < conf->raid_disks; i++)
3537 if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
3538 test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
3539 set_bit(R5_WantReplace, &sh->dev[i].flags);
3540 set_bit(R5_LOCKED, &sh->dev[i].flags);
3541 s.locked++;
3542 }
3543 set_bit(STRIPE_INSYNC, &sh->state);
3544 }
3545 if ((s.syncing || s.replacing) && s.locked == 0 &&
3546 test_bit(STRIPE_INSYNC, &sh->state)) {
3547 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3548 clear_bit(STRIPE_SYNCING, &sh->state);
3549 }
3550
3551 /* If the failed drives just have a ReadError, then we might need
3552 * to progress the repair/check process
3553 */
3554 if (s.failed <= conf->max_degraded && !conf->mddev->ro)
3555 for (i = 0; i < s.failed; i++) {
3556 struct r5dev *dev = &sh->dev[s.failed_num[i]];
3557 if (test_bit(R5_ReadError, &dev->flags)
3558 && !test_bit(R5_LOCKED, &dev->flags)
3559 && test_bit(R5_UPTODATE, &dev->flags)
3560 ) {
3561 if (!test_bit(R5_ReWrite, &dev->flags)) {
3562 set_bit(R5_Wantwrite, &dev->flags);
3563 set_bit(R5_ReWrite, &dev->flags);
3564 set_bit(R5_LOCKED, &dev->flags);
3565 s.locked++;
3566 } else {
3567 /* let's read it back */
3568 set_bit(R5_Wantread, &dev->flags);
3569 set_bit(R5_LOCKED, &dev->flags);
3570 s.locked++;
3571 }
3572 }
3573 }
3574
3575
3576 /* Finish reconstruct operations initiated by the expansion process */
3577 if (sh->reconstruct_state == reconstruct_state_result) {
3578 struct stripe_head *sh_src
3579 = get_active_stripe(conf, sh->sector, 1, 1, 1);
3580 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
3581 /* sh cannot be written until sh_src has been read,
3582 * so arrange for sh to be delayed a little
3583 */
3584 set_bit(STRIPE_DELAYED, &sh->state);
3585 set_bit(STRIPE_HANDLE, &sh->state);
3586 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3587 &sh_src->state))
3588 atomic_inc(&conf->preread_active_stripes);
3589 release_stripe(sh_src);
3590 goto finish;
3591 }
3592 if (sh_src)
3593 release_stripe(sh_src);
3594
3595 sh->reconstruct_state = reconstruct_state_idle;
3596 clear_bit(STRIPE_EXPANDING, &sh->state);
3597 for (i = conf->raid_disks; i--; ) {
3598 set_bit(R5_Wantwrite, &sh->dev[i].flags);
3599 set_bit(R5_LOCKED, &sh->dev[i].flags);
3600 s.locked++;
3601 }
3602 }
3603
3604 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3605 !sh->reconstruct_state) {
3606 /* Need to write out all blocks after computing parity */
3607 sh->disks = conf->raid_disks;
3608 stripe_set_idx(sh->sector, conf, 0, sh);
3609 schedule_reconstruction(sh, &s, 1, 1);
3610 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
3611 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3612 atomic_dec(&conf->reshape_stripes);
3613 wake_up(&conf->wait_for_overlap);
3614 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3615 }
3616
3617 if (s.expanding && s.locked == 0 &&
3618 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3619 handle_stripe_expansion(conf, sh);
3620
3621 finish:
3622 /* wait for this device to become unblocked */
3623 if (unlikely(s.blocked_rdev)) {
3624 if (conf->mddev->external)
3625 md_wait_for_blocked_rdev(s.blocked_rdev,
3626 conf->mddev);
3627 else
3628 /* Internal metadata will immediately
3629 * be written by raid5d, so we don't
3630 * need to wait here.
3631 */
3632 rdev_dec_pending(s.blocked_rdev,
3633 conf->mddev);
3634 }
3635
3636 if (s.handle_bad_blocks)
3637 for (i = disks; i--; ) {
3638 struct md_rdev *rdev;
3639 struct r5dev *dev = &sh->dev[i];
3640 if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
3641 /* We own a safe reference to the rdev */
3642 rdev = conf->disks[i].rdev;
3643 if (!rdev_set_badblocks(rdev, sh->sector,
3644 STRIPE_SECTORS, 0))
3645 md_error(conf->mddev, rdev);
3646 rdev_dec_pending(rdev, conf->mddev);
3647 }
3648 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
3649 rdev = conf->disks[i].rdev;
3650 rdev_clear_badblocks(rdev, sh->sector,
3651 STRIPE_SECTORS, 0);
3652 rdev_dec_pending(rdev, conf->mddev);
3653 }
3654 if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
3655 rdev = conf->disks[i].replacement;
3656 if (!rdev)
3657 /* rdev has been moved down */
3658 rdev = conf->disks[i].rdev;
3659 rdev_clear_badblocks(rdev, sh->sector,
3660 STRIPE_SECTORS, 0);
3661 rdev_dec_pending(rdev, conf->mddev);
3662 }
3663 }
3664
3665 if (s.ops_request)
3666 raid_run_ops(sh, s.ops_request);
3667
3668 ops_run_io(sh, &s);
3669
3670 if (s.dec_preread_active) {
3671 /* We delay this until after ops_run_io so that if make_request
3672 * is waiting on a flush, it won't continue until the writes
3673 * have actually been submitted.
3674 */
3675 atomic_dec(&conf->preread_active_stripes);
3676 if (atomic_read(&conf->preread_active_stripes) <
3677 IO_THRESHOLD)
3678 md_wakeup_thread(conf->mddev->thread);
3679 }
3680
3681 return_io(s.return_bi);
3682
3683 clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
3684 }
3685
3686 static void raid5_activate_delayed(struct r5conf *conf)
3687 {
3688 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
3689 while (!list_empty(&conf->delayed_list)) {
3690 struct list_head *l = conf->delayed_list.next;
3691 struct stripe_head *sh;
3692 sh = list_entry(l, struct stripe_head, lru);
3693 list_del_init(l);
3694 clear_bit(STRIPE_DELAYED, &sh->state);
3695 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3696 atomic_inc(&conf->preread_active_stripes);
3697 list_add_tail(&sh->lru, &conf->hold_list);
3698 }
3699 }
3700 }
3701
3702 static void activate_bit_delay(struct r5conf *conf)
3703 {
3704 /* device_lock is held */
3705 struct list_head head;
3706 list_add(&head, &conf->bitmap_list);
3707 list_del_init(&conf->bitmap_list);
3708 while (!list_empty(&head)) {
3709 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
3710 list_del_init(&sh->lru);
3711 atomic_inc(&sh->count);
3712 __release_stripe(conf, sh);
3713 }
3714 }
3715
3716 int md_raid5_congested(struct mddev *mddev, int bits)
3717 {
3718 struct r5conf *conf = mddev->private;
3719
3720 /* No difference between reads and writes. Just check
3721 * how busy the stripe_cache is
3722 */
3723
3724 if (conf->inactive_blocked)
3725 return 1;
3726 if (conf->quiesce)
3727 return 1;
3728 if (list_empty_careful(&conf->inactive_list))
3729 return 1;
3730
3731 return 0;
3732 }
3733 EXPORT_SYMBOL_GPL(md_raid5_congested);
3734
3735 static int raid5_congested(void *data, int bits)
3736 {
3737 struct mddev *mddev = data;
3738
3739 return mddev_congested(mddev, bits) ||
3740 md_raid5_congested(mddev, bits);
3741 }
3742
3743 /* We want read requests to align with chunks where possible,
3744 * but write requests don't need to.
3745 */
3746 static int raid5_mergeable_bvec(struct request_queue *q,
3747 struct bvec_merge_data *bvm,
3748 struct bio_vec *biovec)
3749 {
3750 struct mddev *mddev = q->queuedata;
3751 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
3752 int max;
3753 unsigned int chunk_sectors = mddev->chunk_sectors;
3754 unsigned int bio_sectors = bvm->bi_size >> 9;
3755
3756 if ((bvm->bi_rw & 1) == WRITE)
3757 return biovec->bv_len; /* always allow writes to be mergeable */
3758
3759 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3760 chunk_sectors = mddev->new_chunk_sectors;
3761 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3762 if (max < 0) max = 0;
3763 if (max <= biovec->bv_len && bio_sectors == 0)
3764 return biovec->bv_len;
3765 else
3766 return max;
3767 }
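/*
 * Worked example (illustrative, not part of the driver): with 64KiB
 * chunks (chunk_sectors == 128), a READ positioned 100 sectors into a
 * chunk with nothing accumulated yet (bio_sectors == 0) may merge at
 * most (128 - 100) << 9 == 14336 bytes before crossing the chunk
 * boundary.
 */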
3768
3769
3770 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
3771 {
3772 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3773 unsigned int chunk_sectors = mddev->chunk_sectors;
3774 unsigned int bio_sectors = bio->bi_size >> 9;
3775
3776 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3777 chunk_sectors = mddev->new_chunk_sectors;
3778 return chunk_sectors >=
3779 ((sector & (chunk_sectors - 1)) + bio_sectors);
3780 }
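/*
 * Illustrative check (not part of the driver): with chunk_sectors ==
 * 128, a bio starting 104 sectors into a chunk passes this test if it
 * spans at most 24 sectors (104 + 24 <= 128); a 25-sector bio would
 * straddle two chunks and fail.
 */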
3781
3782 /*
3783 * add bio to the retry LIFO (in O(1) ... we are in interrupt context),
3784 * later sampled by raid5d.
3785 */
3786 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf)
3787 {
3788 unsigned long flags;
3789
3790 spin_lock_irqsave(&conf->device_lock, flags);
3791
3792 bi->bi_next = conf->retry_read_aligned_list;
3793 conf->retry_read_aligned_list = bi;
3794
3795 spin_unlock_irqrestore(&conf->device_lock, flags);
3796 md_wakeup_thread(conf->mddev->thread);
3797 }
3798
3799
3800 static struct bio *remove_bio_from_retry(struct r5conf *conf)
3801 {
3802 struct bio *bi;
3803
3804 bi = conf->retry_read_aligned;
3805 if (bi) {
3806 conf->retry_read_aligned = NULL;
3807 return bi;
3808 }
3809 bi = conf->retry_read_aligned_list;
3810 if(bi) {
3811 conf->retry_read_aligned_list = bi->bi_next;
3812 bi->bi_next = NULL;
3813 /*
3814 * this sets the active stripe count to 1 and the processed
3815 * stripe count (upper 16 bits) to zero
3816 */
3817 raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
3818 }
3819
3820 return bi;
3821 }
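/*
 * Sketch of the counter packing (illustrative, assuming the accessors
 * defined earlier in this file): bi_phys_segments is treated as two
 * packed 16-bit counters, roughly
 *
 *	active    = counter & 0xffff;		stripes still referencing the bio
 *	processed = (counter >> 16) & 0xffff;	stripes already handled on retry
 *
 * manipulated via raid5_set_bi_stripes(), raid5_bi_processed_stripes()
 * and raid5_dec_bi_active_stripes().
 */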
3822
3823
3824 /*
3825 * The "raid5_align_endio" should check if the read succeeded and if it
3826 * did, call bio_endio on the original bio (having bio_put the new bio
3827 * first).
3828 * If the read failed..
3829 */
3830 static void raid5_align_endio(struct bio *bi, int error)
3831 {
3832 struct bio* raid_bi = bi->bi_private;
3833 struct mddev *mddev;
3834 struct r5conf *conf;
3835 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3836 struct md_rdev *rdev;
3837
3838 bio_put(bi);
3839
3840 rdev = (void*)raid_bi->bi_next;
3841 raid_bi->bi_next = NULL;
3842 mddev = rdev->mddev;
3843 conf = mddev->private;
3844
3845 rdev_dec_pending(rdev, conf->mddev);
3846
3847 if (!error && uptodate) {
3848 bio_endio(raid_bi, 0);
3849 if (atomic_dec_and_test(&conf->active_aligned_reads))
3850 wake_up(&conf->wait_for_stripe);
3851 return;
3852 }
3853
3854
3855 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3856
3857 add_bio_to_retry(raid_bi, conf);
3858 }
3859
3860 static int bio_fits_rdev(struct bio *bi)
3861 {
3862 struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3863
3864 if ((bi->bi_size>>9) > queue_max_sectors(q))
3865 return 0;
3866 blk_recount_segments(q, bi);
3867 if (bi->bi_phys_segments > queue_max_segments(q))
3868 return 0;
3869
3870 if (q->merge_bvec_fn)
3871 /* it's too hard to apply the merge_bvec_fn at this stage,
3872 * so just give up
3873 */
3874 return 0;
3875
3876 return 1;
3877 }
3878
3879
3880 static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
3881 {
3882 struct r5conf *conf = mddev->private;
3883 int dd_idx;
3884 struct bio* align_bi;
3885 struct md_rdev *rdev;
3886 sector_t end_sector;
3887
3888 if (!in_chunk_boundary(mddev, raid_bio)) {
3889 pr_debug("chunk_aligned_read : non aligned\n");
3890 return 0;
3891 }
3892 /*
3893 * use bio_clone_mddev to make a copy of the bio
3894 */
3895 align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
3896 if (!align_bi)
3897 return 0;
3898 /*
3899 * set bi_end_io to a new function, and set bi_private to the
3900 * original bio.
3901 */
3902 align_bi->bi_end_io = raid5_align_endio;
3903 align_bi->bi_private = raid_bio;
3904 /*
3905 * compute position
3906 */
3907 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
3908 0,
3909 &dd_idx, NULL);
3910
3911 end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
3912 rcu_read_lock();
3913 rdev = rcu_dereference(conf->disks[dd_idx].replacement);
3914 if (!rdev || test_bit(Faulty, &rdev->flags) ||
3915 rdev->recovery_offset < end_sector) {
3916 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
3917 if (rdev &&
3918 (test_bit(Faulty, &rdev->flags) ||
3919 !(test_bit(In_sync, &rdev->flags) ||
3920 rdev->recovery_offset >= end_sector)))
3921 rdev = NULL;
3922 }
3923 if (rdev) {
3924 sector_t first_bad;
3925 int bad_sectors;
3926
3927 atomic_inc(&rdev->nr_pending);
3928 rcu_read_unlock();
3929 raid_bio->bi_next = (void*)rdev;
3930 align_bi->bi_bdev = rdev->bdev;
3931 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3932
3933 if (!bio_fits_rdev(align_bi) ||
3934 is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
3935 &first_bad, &bad_sectors)) {
3936 /* too big in some way, or has a known bad block */
3937 bio_put(align_bi);
3938 rdev_dec_pending(rdev, mddev);
3939 return 0;
3940 }
3941
3942 /* No reshape active, so we can trust rdev->data_offset */
3943 align_bi->bi_sector += rdev->data_offset;
3944
3945 spin_lock_irq(&conf->device_lock);
3946 wait_event_lock_irq(conf->wait_for_stripe,
3947 conf->quiesce == 0,
3948 conf->device_lock, /* nothing */);
3949 atomic_inc(&conf->active_aligned_reads);
3950 spin_unlock_irq(&conf->device_lock);
3951
3952 generic_make_request(align_bi);
3953 return 1;
3954 } else {
3955 rcu_read_unlock();
3956 bio_put(align_bi);
3957 return 0;
3958 }
3959 }
3960
3961 /* __get_priority_stripe - get the next stripe to process
3962 *
3963 * Full stripe writes are allowed to pass preread active stripes up until
3964 * the bypass_threshold is exceeded. In general the bypass_count is
3965 * incremented when the handle_list is serviced before the hold_list;
3966 * however, it is not incremented when STRIPE_IO_STARTED is sampled as set,
3967 * signifying a stripe with in-flight i/o. The bypass_count is reset when
3968 * the head of the hold_list has changed, i.e. the head was promoted to the
3969 * handle_list.
3970 */
3971 static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
3972 {
3973 struct stripe_head *sh;
3974
3975 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3976 __func__,
3977 list_empty(&conf->handle_list) ? "empty" : "busy",
3978 list_empty(&conf->hold_list) ? "empty" : "busy",
3979 atomic_read(&conf->pending_full_writes), conf->bypass_count);
3980
3981 if (!list_empty(&conf->handle_list)) {
3982 sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
3983
3984 if (list_empty(&conf->hold_list))
3985 conf->bypass_count = 0;
3986 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
3987 if (conf->hold_list.next == conf->last_hold)
3988 conf->bypass_count++;
3989 else {
3990 conf->last_hold = conf->hold_list.next;
3991 conf->bypass_count -= conf->bypass_threshold;
3992 if (conf->bypass_count < 0)
3993 conf->bypass_count = 0;
3994 }
3995 }
3996 } else if (!list_empty(&conf->hold_list) &&
3997 ((conf->bypass_threshold &&
3998 conf->bypass_count > conf->bypass_threshold) ||
3999 atomic_read(&conf->pending_full_writes) == 0)) {
4000 sh = list_entry(conf->hold_list.next,
4001 typeof(*sh), lru);
4002 conf->bypass_count -= conf->bypass_threshold;
4003 if (conf->bypass_count < 0)
4004 conf->bypass_count = 0;
4005 } else
4006 return NULL;
4007
4008 list_del_init(&sh->lru);
4009 atomic_inc(&sh->count);
4010 BUG_ON(atomic_read(&sh->count) != 1);
4011 return sh;
4012 }
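/*
 * Illustrative example (not part of the driver): with the default
 * bypass_threshold of 1, servicing the handle_list twice while the
 * same stripe stays at the head of the hold_list drives bypass_count
 * to 2; the next time the handle_list is empty, 2 > 1 lets the
 * hold_list head (a full stripe write) be promoted immediately.
 */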
4013
4014 struct raid5_plug_cb {
4015 struct blk_plug_cb cb;
4016 struct list_head list;
4017 };
4018
4019 static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
4020 {
4021 struct raid5_plug_cb *cb = container_of(
4022 blk_cb, struct raid5_plug_cb, cb);
4023 struct stripe_head *sh;
4024 struct mddev *mddev = cb->cb.data;
4025 struct r5conf *conf = mddev->private;
4026
4027 if (cb->list.next && !list_empty(&cb->list)) {
4028 spin_lock_irq(&conf->device_lock);
4029 while (!list_empty(&cb->list)) {
4030 sh = list_first_entry(&cb->list, struct stripe_head, lru);
4031 list_del_init(&sh->lru);
4032 /*
4033 * avoid the race where release_stripe_plug() sees
4034 * STRIPE_ON_UNPLUG_LIST clear but the stripe
4035 * is still on our list
4036 */
4037 smp_mb__before_clear_bit();
4038 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
4039 __release_stripe(conf, sh);
4040 }
4041 spin_unlock_irq(&conf->device_lock);
4042 }
4043 kfree(cb);
4044 }
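/*
 * Illustrative flow (a sketch, not lifted from the source): a caller
 * that batches stripe releases under a block plug looks roughly like
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	...
 *	release_stripe_plug(mddev, sh);		queued on cb->list
 *	...
 *	blk_finish_plug(&plug);			runs raid5_unplug() on the batch
 */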
4045
4046 static void release_stripe_plug(struct mddev *mddev,
4047 struct stripe_head *sh)
4048 {
4049 struct blk_plug_cb *blk_cb = blk_check_plugged(
4050 raid5_unplug, mddev,
4051 sizeof(struct raid5_plug_cb));
4052 struct raid5_plug_cb *cb;
4053
4054 if (!blk_cb) {
4055 release_stripe(sh);
4056 return;
4057 }
4058
4059 cb = container_of(blk_cb, struct raid5_plug_cb, cb);
4060
4061 if (cb->list.next == NULL)
4062 INIT_LIST_HEAD(&cb->list);
4063
4064 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
4065 list_add_tail(&sh->lru, &cb->list);
4066 else
4067 release_stripe(sh);
4068 }
4069
4070 static void make_request(struct mddev *mddev, struct bio * bi)
4071 {
4072 struct r5conf *conf = mddev->private;
4073 int dd_idx;
4074 sector_t new_sector;
4075 sector_t logical_sector, last_sector;
4076 struct stripe_head *sh;
4077 const int rw = bio_data_dir(bi);
4078 int remaining;
4079
4080 if (unlikely(bi->bi_rw & REQ_FLUSH)) {
4081 md_flush_request(mddev, bi);
4082 return;
4083 }
4084
4085 md_write_start(mddev, bi);
4086
4087 if (rw == READ &&
4088 mddev->reshape_position == MaxSector &&
4089 chunk_aligned_read(mddev,bi))
4090 return;
4091
4092 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4093 last_sector = bi->bi_sector + (bi->bi_size>>9);
4094 bi->bi_next = NULL;
4095 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
4096
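/*
 * Illustrative walk (not part of the driver): with 4KiB pages,
 * STRIPE_SECTORS == 8, so a bio covering sectors 1003..1018 is
 * processed below as the stripe-aligned steps 1000, 1008 and 1016.
 */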
4097 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
4098 DEFINE_WAIT(w);
4099 int previous;
4100
4101 retry:
4102 previous = 0;
4103 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
4104 if (unlikely(conf->reshape_progress != MaxSector)) {
4105 /* spinlock is needed as reshape_progress may be
4106 * 64bit on a 32bit platform, and so it might be
4107 * possible to see a half-updated value.
4108 * Of course reshape_progress could change after
4109 * the lock is dropped, so once we get a reference
4110 * to the stripe that we think it is, we will have
4111 * to check again.
4112 */
4113 spin_lock_irq(&conf->device_lock);
4114 if (mddev->reshape_backwards
4115 ? logical_sector < conf->reshape_progress
4116 : logical_sector >= conf->reshape_progress) {
4117 previous = 1;
4118 } else {
4119 if (mddev->reshape_backwards
4120 ? logical_sector < conf->reshape_safe
4121 : logical_sector >= conf->reshape_safe) {
4122 spin_unlock_irq(&conf->device_lock);
4123 schedule();
4124 goto retry;
4125 }
4126 }
4127 spin_unlock_irq(&conf->device_lock);
4128 }
4129
4130 new_sector = raid5_compute_sector(conf, logical_sector,
4131 previous,
4132 &dd_idx, NULL);
4133 pr_debug("raid456: make_request, sector %llu logical %llu\n",
4134 (unsigned long long)new_sector,
4135 (unsigned long long)logical_sector);
4136
4137 sh = get_active_stripe(conf, new_sector, previous,
4138 (bi->bi_rw&RWA_MASK), 0);
4139 if (sh) {
4140 if (unlikely(previous)) {
4141 /* expansion might have moved on while waiting for a
4142 * stripe, so we must do the range check again.
4143 * Expansion could still move past after this
4144 * test, but as we are holding a reference to
4145 * 'sh', we know that if that happens,
4146 * STRIPE_EXPANDING will get set and the expansion
4147 * won't proceed until we finish with the stripe.
4148 */
4149 int must_retry = 0;
4150 spin_lock_irq(&conf->device_lock);
4151 if (mddev->reshape_backwards
4152 ? logical_sector >= conf->reshape_progress
4153 : logical_sector < conf->reshape_progress)
4154 /* mismatch, need to try again */
4155 must_retry = 1;
4156 spin_unlock_irq(&conf->device_lock);
4157 if (must_retry) {
4158 release_stripe(sh);
4159 schedule();
4160 goto retry;
4161 }
4162 }
4163
4164 if (rw == WRITE &&
4165 logical_sector >= mddev->suspend_lo &&
4166 logical_sector < mddev->suspend_hi) {
4167 release_stripe(sh);
4168 /* As the suspend_* range is controlled by
4169 * userspace, we want an interruptible
4170 * wait.
4171 */
4172 flush_signals(current);
4173 prepare_to_wait(&conf->wait_for_overlap,
4174 &w, TASK_INTERRUPTIBLE);
4175 if (logical_sector >= mddev->suspend_lo &&
4176 logical_sector < mddev->suspend_hi)
4177 schedule();
4178 goto retry;
4179 }
4180
4181 if (test_bit(STRIPE_EXPANDING, &sh->state) ||
4182 !add_stripe_bio(sh, bi, dd_idx, rw)) {
4183 /* Stripe is busy expanding or
4184 * add failed due to overlap. Flush everything
4185 * and wait a while
4186 */
4187 md_wakeup_thread(mddev->thread);
4188 release_stripe(sh);
4189 schedule();
4190 goto retry;
4191 }
4192 finish_wait(&conf->wait_for_overlap, &w);
4193 set_bit(STRIPE_HANDLE, &sh->state);
4194 clear_bit(STRIPE_DELAYED, &sh->state);
4195 if ((bi->bi_rw & REQ_NOIDLE) &&
4196 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4197 atomic_inc(&conf->preread_active_stripes);
4198 release_stripe_plug(mddev, sh);
4199 } else {
4200 /* cannot get stripe for read-ahead, just give up */
4201 clear_bit(BIO_UPTODATE, &bi->bi_flags);
4202 finish_wait(&conf->wait_for_overlap, &w);
4203 break;
4204 }
4205 }
4206
4207 remaining = raid5_dec_bi_active_stripes(bi);
4208 if (remaining == 0) {
4209
4210 if ( rw == WRITE )
4211 md_write_end(mddev);
4212
4213 bio_endio(bi, 0);
4214 }
4215 }
4216
4217 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
4218
4219 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
4220 {
4221 /* reshaping is quite different from recovery/resync so it is
4222 * handled quite separately ... here.
4223 *
4224 * On each call to sync_request, we gather one chunk worth of
4225 * destination stripes and flag them as expanding.
4226 * Then we find all the source stripes and request reads.
4227 * As the reads complete, handle_stripe will copy the data
4228 * into the destination stripe and release that stripe.
4229 */
4230 struct r5conf *conf = mddev->private;
4231 struct stripe_head *sh;
4232 sector_t first_sector, last_sector;
4233 int raid_disks = conf->previous_raid_disks;
4234 int data_disks = raid_disks - conf->max_degraded;
4235 int new_data_disks = conf->raid_disks - conf->max_degraded;
4236 int i;
4237 int dd_idx;
4238 sector_t writepos, readpos, safepos;
4239 sector_t stripe_addr;
4240 int reshape_sectors;
4241 struct list_head stripes;
4242
4243 if (sector_nr == 0) {
4244 /* If restarting in the middle, skip the initial sectors */
4245 if (mddev->reshape_backwards &&
4246 conf->reshape_progress < raid5_size(mddev, 0, 0)) {
4247 sector_nr = raid5_size(mddev, 0, 0)
4248 - conf->reshape_progress;
4249 } else if (!mddev->reshape_backwards &&
4250 conf->reshape_progress > 0)
4251 sector_nr = conf->reshape_progress;
4252 sector_div(sector_nr, new_data_disks);
4253 if (sector_nr) {
4254 mddev->curr_resync_completed = sector_nr;
4255 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4256 *skipped = 1;
4257 return sector_nr;
4258 }
4259 }
4260
4261 /* We need to process a full chunk at a time.
4262 * If old and new chunk sizes differ, we need to process the
4263 * largest of these
4264 */
4265 if (mddev->new_chunk_sectors > mddev->chunk_sectors)
4266 reshape_sectors = mddev->new_chunk_sectors;
4267 else
4268 reshape_sectors = mddev->chunk_sectors;
4269
4270 /* We update the metadata at least every 10 seconds, or when
4271 * the data about to be copied would over-write the source of
4272 * the data at the front of the range, i.e. when the point one
4273 * new-stripe along from reshape_progress maps (in the new layout)
4274 * to after the point that reshape_safe maps to (in the old layout).
4275 */
4276 writepos = conf->reshape_progress;
4277 sector_div(writepos, new_data_disks);
4278 readpos = conf->reshape_progress;
4279 sector_div(readpos, data_disks);
4280 safepos = conf->reshape_safe;
4281 sector_div(safepos, data_disks);
4282 if (mddev->reshape_backwards) {
4283 writepos -= min_t(sector_t, reshape_sectors, writepos);
4284 readpos += reshape_sectors;
4285 safepos += reshape_sectors;
4286 } else {
4287 writepos += reshape_sectors;
4288 readpos -= min_t(sector_t, reshape_sectors, readpos);
4289 safepos -= min_t(sector_t, reshape_sectors, safepos);
4290 }
4291
4292 /* Having calculated the 'writepos' possibly use it
4293 * to set 'stripe_addr' which is where we will write to.
4294 */
4295 if (mddev->reshape_backwards) {
4296 BUG_ON(conf->reshape_progress == 0);
4297 stripe_addr = writepos;
4298 BUG_ON((mddev->dev_sectors &
4299 ~((sector_t)reshape_sectors - 1))
4300 - reshape_sectors - stripe_addr
4301 != sector_nr);
4302 } else {
4303 BUG_ON(writepos != sector_nr + reshape_sectors);
4304 stripe_addr = sector_nr;
4305 }
4306
4307 /* 'writepos' is the most advanced device address we might write.
4308 * 'readpos' is the least advanced device address we might read.
4309 * 'safepos' is the least address recorded in the metadata as having
4310 * been reshaped.
4311 * If there is a min_offset_diff, these are adjusted either by
4312 * increasing the safepos/readpos if diff is negative, or
4313 * increasing writepos if diff is positive.
4314 * If 'readpos' is then behind 'writepos', there is no way that we can
4315 * ensure safety in the face of a crash - that must be done by userspace
4316 * making a backup of the data. So in that case there is no particular
4317 * rush to update metadata.
4318 * Otherwise if 'safepos' is behind 'writepos', then we really need to
4319 * update the metadata to advance 'safepos' to match 'readpos' so that
4320 * we can be safe in the event of a crash.
4321 * So we insist on updating metadata if safepos is behind writepos and
4322 * readpos is beyond writepos.
4323 * In any case, update the metadata every 10 seconds.
4324 * Maybe that number should be configurable, but I'm not sure it is
4325 * worth it.... maybe it could be a multiple of safemode_delay???
4326 */
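/*
 * Illustrative numbers (not from the source): growing forwards with
 * writepos == 1000, safepos == 800 and readpos == 1100, safepos is
 * behind writepos while readpos is beyond it, so the checkpoint
 * below is forced before any further data is copied.
 */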
4327 if (conf->min_offset_diff < 0) {
4328 safepos += -conf->min_offset_diff;
4329 readpos += -conf->min_offset_diff;
4330 } else
4331 writepos += conf->min_offset_diff;
4332
4333 if ((mddev->reshape_backwards
4334 ? (safepos > writepos && readpos < writepos)
4335 : (safepos < writepos && readpos > writepos)) ||
4336 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4337 /* Cannot proceed until we've updated the superblock... */
4338 wait_event(conf->wait_for_overlap,
4339 atomic_read(&conf->reshape_stripes)==0);
4340 mddev->reshape_position = conf->reshape_progress;
4341 mddev->curr_resync_completed = sector_nr;
4342 conf->reshape_checkpoint = jiffies;
4343 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4344 md_wakeup_thread(mddev->thread);
4345 wait_event(mddev->sb_wait, mddev->flags == 0 ||
4346 kthread_should_stop());
4347 spin_lock_irq(&conf->device_lock);
4348 conf->reshape_safe = mddev->reshape_position;
4349 spin_unlock_irq(&conf->device_lock);
4350 wake_up(&conf->wait_for_overlap);
4351 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4352 }
4353
4354 INIT_LIST_HEAD(&stripes);
4355 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
4356 int j;
4357 int skipped_disk = 0;
4358 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
4359 set_bit(STRIPE_EXPANDING, &sh->state);
4360 atomic_inc(&conf->reshape_stripes);
4361 /* If any of this stripe is beyond the end of the old
4362 * array, then we need to zero those blocks
4363 */
4364 for (j=sh->disks; j--;) {
4365 sector_t s;
4366 if (j == sh->pd_idx)
4367 continue;
4368 if (conf->level == 6 &&
4369 j == sh->qd_idx)
4370 continue;
4371 s = compute_blocknr(sh, j, 0);
4372 if (s < raid5_size(mddev, 0, 0)) {
4373 skipped_disk = 1;
4374 continue;
4375 }
4376 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
4377 set_bit(R5_Expanded, &sh->dev[j].flags);
4378 set_bit(R5_UPTODATE, &sh->dev[j].flags);
4379 }
4380 if (!skipped_disk) {
4381 set_bit(STRIPE_EXPAND_READY, &sh->state);
4382 set_bit(STRIPE_HANDLE, &sh->state);
4383 }
4384 list_add(&sh->lru, &stripes);
4385 }
4386 spin_lock_irq(&conf->device_lock);
4387 if (mddev->reshape_backwards)
4388 conf->reshape_progress -= reshape_sectors * new_data_disks;
4389 else
4390 conf->reshape_progress += reshape_sectors * new_data_disks;
4391 spin_unlock_irq(&conf->device_lock);
4392 /* Ok, those stripes are ready. We can start scheduling
4393 * reads on the source stripes.
4394 * The source stripes are determined by mapping the first and last
4395 * block on the destination stripes.
4396 */
4397 first_sector =
4398 raid5_compute_sector(conf, stripe_addr*(new_data_disks),
4399 1, &dd_idx, NULL);
4400 last_sector =
4401 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
4402 * new_data_disks - 1),
4403 1, &dd_idx, NULL);
4404 if (last_sector >= mddev->dev_sectors)
4405 last_sector = mddev->dev_sectors - 1;
4406 while (first_sector <= last_sector) {
4407 sh = get_active_stripe(conf, first_sector, 1, 0, 1);
4408 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
4409 set_bit(STRIPE_HANDLE, &sh->state);
4410 release_stripe(sh);
4411 first_sector += STRIPE_SECTORS;
4412 }
4413 /* Now that the sources are clearly marked, we can release
4414 * the destination stripes
4415 */
4416 while (!list_empty(&stripes)) {
4417 sh = list_entry(stripes.next, struct stripe_head, lru);
4418 list_del_init(&sh->lru);
4419 release_stripe(sh);
4420 }
4421 /* If this takes us to the resync_max point where we have to pause,
4422 * then we need to write out the superblock.
4423 */
4424 sector_nr += reshape_sectors;
4425 if ((sector_nr - mddev->curr_resync_completed) * 2
4426 >= mddev->resync_max - mddev->curr_resync_completed) {
4427 /* Cannot proceed until we've updated the superblock... */
4428 wait_event(conf->wait_for_overlap,
4429 atomic_read(&conf->reshape_stripes) == 0);
4430 mddev->reshape_position = conf->reshape_progress;
4431 mddev->curr_resync_completed = sector_nr;
4432 conf->reshape_checkpoint = jiffies;
4433 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4434 md_wakeup_thread(mddev->thread);
4435 wait_event(mddev->sb_wait,
4436 !test_bit(MD_CHANGE_DEVS, &mddev->flags)
4437 || kthread_should_stop());
4438 spin_lock_irq(&conf->device_lock);
4439 conf->reshape_safe = mddev->reshape_position;
4440 spin_unlock_irq(&conf->device_lock);
4441 wake_up(&conf->wait_for_overlap);
4442 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4443 }
4444 return reshape_sectors;
4445 }
4446
4447 /* FIXME go_faster isn't used */
4448 static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
4449 {
4450 struct r5conf *conf = mddev->private;
4451 struct stripe_head *sh;
4452 sector_t max_sector = mddev->dev_sectors;
4453 sector_t sync_blocks;
4454 int still_degraded = 0;
4455 int i;
4456
4457 if (sector_nr >= max_sector) {
4458 /* just being told to finish up .. nothing much to do */
4459
4460 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
4461 end_reshape(conf);
4462 return 0;
4463 }
4464
4465 if (mddev->curr_resync < max_sector) /* aborted */
4466 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
4467 &sync_blocks, 1);
4468 else /* completed sync */
4469 conf->fullsync = 0;
4470 bitmap_close_sync(mddev->bitmap);
4471
4472 return 0;
4473 }
4474
4475 /* Allow raid5_quiesce to complete */
4476 wait_event(conf->wait_for_overlap, conf->quiesce != 2);
4477
4478 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4479 return reshape_request(mddev, sector_nr, skipped);
4480
4481 /* No need to check resync_max as we never do more than one
4482 * stripe, and as resync_max will always be on a chunk boundary,
4483 * if the check in md_do_sync didn't fire, there is no chance
4484 * of overstepping resync_max here
4485 */
4486
4487 /* if there are too many failed drives and we are trying
4488 * to resync, then assert that we are finished, because there is
4489 * nothing we can do.
4490 */
4491 if (mddev->degraded >= conf->max_degraded &&
4492 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4493 sector_t rv = mddev->dev_sectors - sector_nr;
4494 *skipped = 1;
4495 return rv;
4496 }
4497 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
4498 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
4499 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
4500 /* we can skip this block, and probably more */
4501 sync_blocks /= STRIPE_SECTORS;
4502 *skipped = 1;
4503 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
4504 }
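/*
 * Worked example (illustrative): if the bitmap reports 1021 clean
 * sectors, 1021 / 8 * 8 == 1016 sectors are skipped, keeping the
 * resync position on a whole-stripe (STRIPE_SECTORS) boundary.
 */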
4505
4506 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
4507
4508 sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
4509 if (sh == NULL) {
4510 sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
4511 /* make sure we don't swamp the stripe cache if someone else
4512 * is trying to get access
4513 */
4514 schedule_timeout_uninterruptible(1);
4515 }
4516 /* Need to check if array will still be degraded after recovery/resync
4517 * We don't need to check the 'failed' flag as when that gets set,
4518 * recovery aborts.
4519 */
4520 for (i = 0; i < conf->raid_disks; i++)
4521 if (conf->disks[i].rdev == NULL)
4522 still_degraded = 1;
4523
4524 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
4525
4526 set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
4527
4528 handle_stripe(sh);
4529 release_stripe(sh);
4530
4531 return STRIPE_SECTORS;
4532 }
4533
4534 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
4535 {
4536 /* We may not be able to submit a whole bio at once as there
4537 * may not be enough stripe_heads available.
4538 * We cannot pre-allocate enough stripe_heads as we may need
4539 * more than exist in the cache (if we allow ever larger chunks).
4540 * So we do one stripe head at a time and record how many have
4541 * been done via raid5_set_bi_processed_stripes().
4542 *
4543 * We *know* that this entire raid_bio is in one chunk, so
4544 * there will be only one 'dd_idx' and we only need one call to raid5_compute_sector.
4545 */
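/*
 * Illustrative resume (not part of the driver): a raid_bio spanning
 * four stripes that stalls after two records processed_stripes == 2,
 * so on the next retry the loop below skips scnt 0 and 1 and
 * continues with the third stripe.
 */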
4546 struct stripe_head *sh;
4547 int dd_idx;
4548 sector_t sector, logical_sector, last_sector;
4549 int scnt = 0;
4550 int remaining;
4551 int handled = 0;
4552
4553 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4554 sector = raid5_compute_sector(conf, logical_sector,
4555 0, &dd_idx, NULL);
4556 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
4557
4558 for (; logical_sector < last_sector;
4559 logical_sector += STRIPE_SECTORS,
4560 sector += STRIPE_SECTORS,
4561 scnt++) {
4562
4563 if (scnt < raid5_bi_processed_stripes(raid_bio))
4564 /* already done this stripe */
4565 continue;
4566
4567 sh = get_active_stripe(conf, sector, 0, 1, 0);
4568
4569 if (!sh) {
4570 /* failed to get a stripe - must wait */
4571 raid5_set_bi_processed_stripes(raid_bio, scnt);
4572 conf->retry_read_aligned = raid_bio;
4573 return handled;
4574 }
4575
4576 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
4577 release_stripe(sh);
4578 raid5_set_bi_processed_stripes(raid_bio, scnt);
4579 conf->retry_read_aligned = raid_bio;
4580 return handled;
4581 }
4582
4583 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
4584 handle_stripe(sh);
4585 release_stripe(sh);
4586 handled++;
4587 }
4588 remaining = raid5_dec_bi_active_stripes(raid_bio);
4589 if (remaining == 0)
4590 bio_endio(raid_bio, 0);
4591 if (atomic_dec_and_test(&conf->active_aligned_reads))
4592 wake_up(&conf->wait_for_stripe);
4593 return handled;
4594 }
4595
4596 #define MAX_STRIPE_BATCH 8
4597 static int handle_active_stripes(struct r5conf *conf)
4598 {
4599 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
4600 int i, batch_size = 0;
4601
4602 while (batch_size < MAX_STRIPE_BATCH &&
4603 (sh = __get_priority_stripe(conf)) != NULL)
4604 batch[batch_size++] = sh;
4605
4606 if (batch_size == 0)
4607 return batch_size;
4608 spin_unlock_irq(&conf->device_lock);
4609
4610 for (i = 0; i < batch_size; i++)
4611 handle_stripe(batch[i]);
4612
4613 cond_resched();
4614
4615 spin_lock_irq(&conf->device_lock);
4616 for (i = 0; i < batch_size; i++)
4617 __release_stripe(conf, batch[i]);
4618 return batch_size;
4619 }
4620
4621 /*
4622 * This is our raid5 kernel thread.
4623 *
4624 * We scan the hash table for stripes which can be handled now.
4625 * During the scan, completed stripes are saved for us by the interrupt
4626 * handler, so that they will not have to wait for our next wakeup.
4627 */
4628 static void raid5d(struct mddev *mddev)
4629 {
4630 struct r5conf *conf = mddev->private;
4631 int handled;
4632 struct blk_plug plug;
4633
4634 pr_debug("+++ raid5d active\n");
4635
4636 md_check_recovery(mddev);
4637
4638 blk_start_plug(&plug);
4639 handled = 0;
4640 spin_lock_irq(&conf->device_lock);
4641 while (1) {
4642 struct bio *bio;
4643 int batch_size;
4644
4645 if (!list_empty(&conf->bitmap_list)) {
4647 /* Now is a good time to flush some bitmap updates */
4648 conf->seq_flush++;
4649 spin_unlock_irq(&conf->device_lock);
4650 bitmap_unplug(mddev->bitmap);
4651 spin_lock_irq(&conf->device_lock);
4652 conf->seq_write = conf->seq_flush;
4653 activate_bit_delay(conf);
4654 }
4655 raid5_activate_delayed(conf);
4656
4657 while ((bio = remove_bio_from_retry(conf))) {
4658 int ok;
4659 spin_unlock_irq(&conf->device_lock);
4660 ok = retry_aligned_read(conf, bio);
4661 spin_lock_irq(&conf->device_lock);
4662 if (!ok)
4663 break;
4664 handled++;
4665 }
4666
4667 batch_size = handle_active_stripes(conf);
4668 if (!batch_size)
4669 break;
4670 handled += batch_size;
4671
4672 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
4673 spin_unlock_irq(&conf->device_lock);
4674 md_check_recovery(mddev);
4675 spin_lock_irq(&conf->device_lock);
4676 }
4677 }
4678 pr_debug("%d stripes handled\n", handled);
4679
4680 spin_unlock_irq(&conf->device_lock);
4681
4682 async_tx_issue_pending_all();
4683 blk_finish_plug(&plug);
4684
4685 pr_debug("--- raid5d inactive\n");
4686 }
4687
4688 static ssize_t
4689 raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
4690 {
4691 struct r5conf *conf = mddev->private;
4692 if (conf)
4693 return sprintf(page, "%d\n", conf->max_nr_stripes);
4694 else
4695 return 0;
4696 }
4697
4698 int
4699 raid5_set_cache_size(struct mddev *mddev, int size)
4700 {
4701 struct r5conf *conf = mddev->private;
4702 int err;
4703
4704 if (size <= 16 || size > 32768)
4705 return -EINVAL;
4706 while (size < conf->max_nr_stripes) {
4707 if (drop_one_stripe(conf))
4708 conf->max_nr_stripes--;
4709 else
4710 break;
4711 }
4712 err = md_allow_write(mddev);
4713 if (err)
4714 return err;
4715 while (size > conf->max_nr_stripes) {
4716 if (grow_one_stripe(conf))
4717 conf->max_nr_stripes++;
4718 else break;
4719 }
4720 return 0;
4721 }
4722 EXPORT_SYMBOL(raid5_set_cache_size);
4723
4724 static ssize_t
4725 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
4726 {
4727 struct r5conf *conf = mddev->private;
4728 unsigned long new;
4729 int err;
4730
4731 if (len >= PAGE_SIZE)
4732 return -EINVAL;
4733 if (!conf)
4734 return -ENODEV;
4735
4736 if (strict_strtoul(page, 10, &new))
4737 return -EINVAL;
4738 err = raid5_set_cache_size(mddev, new);
4739 if (err)
4740 return err;
4741 return len;
4742 }
4743
4744 static struct md_sysfs_entry
4745 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
4746 raid5_show_stripe_cache_size,
4747 raid5_store_stripe_cache_size);
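/*
 * Example (illustrative): from user space the cache is resized
 * through this sysfs attribute, e.g.
 *
 *	echo 4096 > /sys/block/md0/md/stripe_cache_size
 *
 * subject to the 17..32768 bounds enforced by raid5_set_cache_size().
 */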
4748
4749 static ssize_t
4750 raid5_show_preread_threshold(struct mddev *mddev, char *page)
4751 {
4752 struct r5conf *conf = mddev->private;
4753 if (conf)
4754 return sprintf(page, "%d\n", conf->bypass_threshold);
4755 else
4756 return 0;
4757 }
4758
4759 static ssize_t
4760 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
4761 {
4762 struct r5conf *conf = mddev->private;
4763 unsigned long new;
4764 if (len >= PAGE_SIZE)
4765 return -EINVAL;
4766 if (!conf)
4767 return -ENODEV;
4768
4769 if (strict_strtoul(page, 10, &new))
4770 return -EINVAL;
4771 if (new > conf->max_nr_stripes)
4772 return -EINVAL;
4773 conf->bypass_threshold = new;
4774 return len;
4775 }
4776
4777 static struct md_sysfs_entry
4778 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
4779 S_IRUGO | S_IWUSR,
4780 raid5_show_preread_threshold,
4781 raid5_store_preread_threshold);
4782
4783 static ssize_t
4784 stripe_cache_active_show(struct mddev *mddev, char *page)
4785 {
4786 struct r5conf *conf = mddev->private;
4787 if (conf)
4788 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
4789 else
4790 return 0;
4791 }
4792
4793 static struct md_sysfs_entry
4794 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
4795
4796 static struct attribute *raid5_attrs[] = {
4797 &raid5_stripecache_size.attr,
4798 &raid5_stripecache_active.attr,
4799 &raid5_preread_bypass_threshold.attr,
4800 NULL,
4801 };
4802 static struct attribute_group raid5_attrs_group = {
4803 .name = NULL,
4804 .attrs = raid5_attrs,
4805 };
4806
4807 static sector_t
4808 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
4809 {
4810 struct r5conf *conf = mddev->private;
4811
4812 if (!sectors)
4813 sectors = mddev->dev_sectors;
4814 if (!raid_disks)
4815 /* size is defined by the smallest of previous and new size */
4816 raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
4817
4818 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4819 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
4820 return sectors * (raid_disks - conf->max_degraded);
4821 }
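/*
 * Worked example (illustrative): a 6-device RAID6 (max_degraded == 2)
 * with 1024 usable sectors per device and 128-sector chunks exports
 * 1024 * (6 - 2) == 4096 sectors; 1024 is already a multiple of the
 * chunk size, so the rounding above leaves it unchanged.
 */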
4822
4823 static void raid5_free_percpu(struct r5conf *conf)
4824 {
4825 struct raid5_percpu *percpu;
4826 unsigned long cpu;
4827
4828 if (!conf->percpu)
4829 return;
4830
4831 get_online_cpus();
4832 for_each_possible_cpu(cpu) {
4833 percpu = per_cpu_ptr(conf->percpu, cpu);
4834 safe_put_page(percpu->spare_page);
4835 kfree(percpu->scribble);
4836 }
4837 #ifdef CONFIG_HOTPLUG_CPU
4838 unregister_cpu_notifier(&conf->cpu_notify);
4839 #endif
4840 put_online_cpus();
4841
4842 free_percpu(conf->percpu);
4843 }
4844
4845 static void free_conf(struct r5conf *conf)
4846 {
4847 shrink_stripes(conf);
4848 raid5_free_percpu(conf);
4849 kfree(conf->disks);
4850 kfree(conf->stripe_hashtbl);
4851 kfree(conf);
4852 }
4853
4854 #ifdef CONFIG_HOTPLUG_CPU
4855 static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4856 void *hcpu)
4857 {
4858 struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
4859 long cpu = (long)hcpu;
4860 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
4861
4862 switch (action) {
4863 case CPU_UP_PREPARE:
4864 case CPU_UP_PREPARE_FROZEN:
4865 if (conf->level == 6 && !percpu->spare_page)
4866 percpu->spare_page = alloc_page(GFP_KERNEL);
4867 if (!percpu->scribble)
4868 percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4869
4870 if (!percpu->scribble ||
4871 (conf->level == 6 && !percpu->spare_page)) {
4872 safe_put_page(percpu->spare_page);
4873 kfree(percpu->scribble);
4874 pr_err("%s: failed memory allocation for cpu%ld\n",
4875 __func__, cpu);
4876 return notifier_from_errno(-ENOMEM);
4877 }
4878 break;
4879 case CPU_DEAD:
4880 case CPU_DEAD_FROZEN:
4881 safe_put_page(percpu->spare_page);
4882 kfree(percpu->scribble);
4883 percpu->spare_page = NULL;
4884 percpu->scribble = NULL;
4885 break;
4886 default:
4887 break;
4888 }
4889 return NOTIFY_OK;
4890 }
4891 #endif
4892
4893 static int raid5_alloc_percpu(struct r5conf *conf)
4894 {
4895 unsigned long cpu;
4896 struct page *spare_page;
4897 struct raid5_percpu __percpu *allcpus;
4898 void *scribble;
4899 int err;
4900
4901 allcpus = alloc_percpu(struct raid5_percpu);
4902 if (!allcpus)
4903 return -ENOMEM;
4904 conf->percpu = allcpus;
4905
4906 get_online_cpus();
4907 err = 0;
4908 for_each_present_cpu(cpu) {
4909 if (conf->level == 6) {
4910 spare_page = alloc_page(GFP_KERNEL);
4911 if (!spare_page) {
4912 err = -ENOMEM;
4913 break;
4914 }
4915 per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
4916 }
4917 scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4918 if (!scribble) {
4919 err = -ENOMEM;
4920 break;
4921 }
4922 per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
4923 }
4924 #ifdef CONFIG_HOTPLUG_CPU
4925 conf->cpu_notify.notifier_call = raid456_cpu_notify;
4926 conf->cpu_notify.priority = 0;
4927 if (err == 0)
4928 err = register_cpu_notifier(&conf->cpu_notify);
4929 #endif
4930 put_online_cpus();
4931
4932 return err;
4933 }
4934
4935 static struct r5conf *setup_conf(struct mddev *mddev)
4936 {
4937 struct r5conf *conf;
4938 int raid_disk, memory, max_disks;
4939 struct md_rdev *rdev;
4940 struct disk_info *disk;
4941 char pers_name[6];
4942
4943 if (mddev->new_level != 5
4944 && mddev->new_level != 4
4945 && mddev->new_level != 6) {
4946 printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
4947 mdname(mddev), mddev->new_level);
4948 return ERR_PTR(-EIO);
4949 }
4950 if ((mddev->new_level == 5
4951 && !algorithm_valid_raid5(mddev->new_layout)) ||
4952 (mddev->new_level == 6
4953 && !algorithm_valid_raid6(mddev->new_layout))) {
4954 printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
4955 mdname(mddev), mddev->new_layout);
4956 return ERR_PTR(-EIO);
4957 }
4958 if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4959 printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
4960 mdname(mddev), mddev->raid_disks);
4961 return ERR_PTR(-EINVAL);
4962 }
4963
4964 if (!mddev->new_chunk_sectors ||
4965 (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
4966 !is_power_of_2(mddev->new_chunk_sectors)) {
4967 printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
4968 mdname(mddev), mddev->new_chunk_sectors << 9);
4969 return ERR_PTR(-EINVAL);
4970 }
4971
4972 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
4973 if (conf == NULL)
4974 goto abort;
4975 spin_lock_init(&conf->device_lock);
4976 init_waitqueue_head(&conf->wait_for_stripe);
4977 init_waitqueue_head(&conf->wait_for_overlap);
4978 INIT_LIST_HEAD(&conf->handle_list);
4979 INIT_LIST_HEAD(&conf->hold_list);
4980 INIT_LIST_HEAD(&conf->delayed_list);
4981 INIT_LIST_HEAD(&conf->bitmap_list);
4982 INIT_LIST_HEAD(&conf->inactive_list);
4983 atomic_set(&conf->active_stripes, 0);
4984 atomic_set(&conf->preread_active_stripes, 0);
4985 atomic_set(&conf->active_aligned_reads, 0);
4986 conf->bypass_threshold = BYPASS_THRESHOLD;
4987 conf->recovery_disabled = mddev->recovery_disabled - 1;
4988
4989 conf->raid_disks = mddev->raid_disks;
4990 if (mddev->reshape_position == MaxSector)
4991 conf->previous_raid_disks = mddev->raid_disks;
4992 else
4993 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4994 max_disks = max(conf->raid_disks, conf->previous_raid_disks);
4995 conf->scribble_len = scribble_len(max_disks);
4996
4997 conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
4998 GFP_KERNEL);
4999 if (!conf->disks)
5000 goto abort;
5001
5002 conf->mddev = mddev;
5003
5004 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
5005 goto abort;
5006
5007 conf->level = mddev->new_level;
5008 if (raid5_alloc_percpu(conf) != 0)
5009 goto abort;
5010
5011 pr_debug("raid456: run(%s) called.\n", mdname(mddev));
5012
5013 rdev_for_each(rdev, mddev) {
5014 raid_disk = rdev->raid_disk;
5015 if (raid_disk >= max_disks
5016 || raid_disk < 0)
5017 continue;
5018 disk = conf->disks + raid_disk;
5019
5020 if (test_bit(Replacement, &rdev->flags)) {
5021 if (disk->replacement)
5022 goto abort;
5023 disk->replacement = rdev;
5024 } else {
5025 if (disk->rdev)
5026 goto abort;
5027 disk->rdev = rdev;
5028 }
5029
5030 if (test_bit(In_sync, &rdev->flags)) {
5031 char b[BDEVNAME_SIZE];
5032 printk(KERN_INFO "md/raid:%s: device %s operational as raid"
5033 " disk %d\n",
5034 mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
5035 } else if (rdev->saved_raid_disk != raid_disk)
5036 /* Cannot rely on bitmap to complete recovery */
5037 conf->fullsync = 1;
5038 }
5039
5040 conf->chunk_sectors = mddev->new_chunk_sectors;
5041 conf->level = mddev->new_level;
5042 if (conf->level == 6)
5043 conf->max_degraded = 2;
5044 else
5045 conf->max_degraded = 1;
5046 conf->algorithm = mddev->new_layout;
5047 conf->max_nr_stripes = NR_STRIPES;
5048 conf->reshape_progress = mddev->reshape_position;
5049 if (conf->reshape_progress != MaxSector) {
5050 conf->prev_chunk_sectors = mddev->chunk_sectors;
5051 conf->prev_algo = mddev->layout;
5052 }
5053
5054 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
5055 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
5056 if (grow_stripes(conf, conf->max_nr_stripes)) {
5057 printk(KERN_ERR
5058 "md/raid:%s: couldn't allocate %dkB for buffers\n",
5059 mdname(mddev), memory);
5060 goto abort;
5061 } else
5062 printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
5063 mdname(mddev), memory);
5064
5065 sprintf(pers_name, "raid%d", mddev->new_level);
5066 conf->thread = md_register_thread(raid5d, mddev, pers_name);
5067 if (!conf->thread) {
5068 printk(KERN_ERR
5069 "md/raid:%s: couldn't allocate thread.\n",
5070 mdname(mddev));
5071 goto abort;
5072 }
5073
5074 return conf;
5075
5076 abort:
5077 if (conf) {
5078 free_conf(conf);
5079 return ERR_PTR(-EIO);
5080 } else
5081 return ERR_PTR(-ENOMEM);
5082 }
5083
5084
5085 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
5086 {
5087 switch (algo) {
5088 case ALGORITHM_PARITY_0:
5089 if (raid_disk < max_degraded)
5090 return 1;
5091 break;
5092 case ALGORITHM_PARITY_N:
5093 if (raid_disk >= raid_disks - max_degraded)
5094 return 1;
5095 break;
5096 case ALGORITHM_PARITY_0_6:
5097 if (raid_disk == 0 ||
5098 raid_disk == raid_disks - 1)
5099 return 1;
5100 break;
5101 case ALGORITHM_LEFT_ASYMMETRIC_6:
5102 case ALGORITHM_RIGHT_ASYMMETRIC_6:
5103 case ALGORITHM_LEFT_SYMMETRIC_6:
5104 case ALGORITHM_RIGHT_SYMMETRIC_6:
5105 if (raid_disk == raid_disks - 1)
5106 return 1;
5107 }
5108 return 0;
5109 }
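/*
 * Example (illustrative): with ALGORITHM_PARITY_0 on RAID6
 * (max_degraded == 2), raid disks 0 and 1 hold only parity, so a
 * dirty array may still start when just those disks are stale.
 */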
5110
5111 static int run(struct mddev *mddev)
5112 {
5113 struct r5conf *conf;
5114 int working_disks = 0;
5115 int dirty_parity_disks = 0;
5116 struct md_rdev *rdev;
5117 sector_t reshape_offset = 0;
5118 int i;
5119 long long min_offset_diff = 0;
5120 int first = 1;
5121
5122 if (mddev->recovery_cp != MaxSector)
5123 printk(KERN_NOTICE "md/raid:%s: not clean"
5124 " -- starting background reconstruction\n",
5125 mdname(mddev));
5126
5127 rdev_for_each(rdev, mddev) {
5128 long long diff;
5129 if (rdev->raid_disk < 0)
5130 continue;
5131 diff = (rdev->new_data_offset - rdev->data_offset);
5132 if (first) {
5133 min_offset_diff = diff;
5134 first = 0;
5135 } else if (mddev->reshape_backwards &&
5136 diff < min_offset_diff)
5137 min_offset_diff = diff;
5138 else if (!mddev->reshape_backwards &&
5139 diff > min_offset_diff)
5140 min_offset_diff = diff;
5141 }
5142
5143 if (mddev->reshape_position != MaxSector) {
5144 /* Check that we can continue the reshape.
5145 * Difficulties arise if the stripe we would write to
5146 * next is at or after the stripe we would read from next.
5147 * For a reshape that changes the number of devices, this
5148 * is only possible for a very short time, and mdadm makes
5149 * sure that time appears to have passed before assembling
5150 * the array. So we fail if that time hasn't passed.
5151 * For a reshape that keeps the number of devices the same
5152 * mdadm must be monitoring the reshape and keeping the
5153 * critical areas read-only and backed up. It will start
5154 * the array in read-only mode, so we check for that.
5155 */
5156 sector_t here_new, here_old;
5157 int old_disks;
5158 int max_degraded = (mddev->level == 6 ? 2 : 1);
5159
5160 if (mddev->new_level != mddev->level) {
5161 printk(KERN_ERR "md/raid:%s: unsupported reshape "
5162 "required - aborting.\n",
5163 mdname(mddev));
5164 return -EINVAL;
5165 }
5166 old_disks = mddev->raid_disks - mddev->delta_disks;
5167 /* reshape_position must be on a new-stripe boundary, and one
5168 * further up in new geometry must map after here in old
5169 * geometry.
5170 */
5171 here_new = mddev->reshape_position;
5172 if (sector_div(here_new, mddev->new_chunk_sectors *
5173 (mddev->raid_disks - max_degraded))) {
5174 printk(KERN_ERR "md/raid:%s: reshape_position not "
5175 "on a stripe boundary\n", mdname(mddev));
5176 return -EINVAL;
5177 }
5178 reshape_offset = here_new * mddev->new_chunk_sectors;
5179 /* here_new is the stripe we will write to */
5180 here_old = mddev->reshape_position;
5181 sector_div(here_old, mddev->chunk_sectors *
5182 (old_disks-max_degraded));
5183 /* here_old is the first stripe that we might need to read
5184 * from */
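/*
 * Illustrative arithmetic (not from the source): growing a 4-disk
 * RAID5 to 5 disks with 128-sector chunks, one stripe of the new
 * layout spans 128 * (5 - 1) == 512 sectors of array data, so
 * reshape_position must be a multiple of 512 to pass the
 * sector_div() check above.
 */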
5185 if (mddev->delta_disks == 0) {
5186 if ((here_new * mddev->new_chunk_sectors !=
5187 here_old * mddev->chunk_sectors)) {
5188 printk(KERN_ERR "md/raid:%s: reshape position is"
5189 " confused - aborting\n", mdname(mddev));
5190 return -EINVAL;
5191 }
5192 /* We cannot be sure it is safe to start an in-place
5193 * reshape. It is only safe if user-space is monitoring
5194 * and taking constant backups.
5195 * mdadm always starts a situation like this in
5196 * readonly mode so it can take control before
5197 * allowing any writes. So just check for that.
5198 */
5199 if (abs(min_offset_diff) >= mddev->chunk_sectors &&
5200 abs(min_offset_diff) >= mddev->new_chunk_sectors)
5201 /* not really in-place - so OK */;
5202 else if (mddev->ro == 0) {
5203 printk(KERN_ERR "md/raid:%s: in-place reshape "
5204 "must be started in read-only mode "
5205 "- aborting\n",
5206 mdname(mddev));
5207 return -EINVAL;
5208 }
5209 } else if (mddev->reshape_backwards
5210 ? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
5211 here_old * mddev->chunk_sectors)
5212 : (here_new * mddev->new_chunk_sectors >=
5213 here_old * mddev->chunk_sectors + (-min_offset_diff))) {
5214 /* Reading from the same stripe as writing to - bad */
5215 printk(KERN_ERR "md/raid:%s: reshape_position too early for "
5216 "auto-recovery - aborting.\n",
5217 mdname(mddev));
5218 return -EINVAL;
5219 }
5220 printk(KERN_INFO "md/raid:%s: reshape will continue\n",
5221 mdname(mddev));
5222 /* OK, we should be able to continue. */
5223 } else {
5224 BUG_ON(mddev->level != mddev->new_level);
5225 BUG_ON(mddev->layout != mddev->new_layout);
5226 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
5227 BUG_ON(mddev->delta_disks != 0);
5228 }
5229
5230 if (mddev->private == NULL)
5231 conf = setup_conf(mddev);
5232 else
5233 conf = mddev->private;
5234
5235 if (IS_ERR(conf))
5236 return PTR_ERR(conf);
5237
5238 conf->min_offset_diff = min_offset_diff;
5239 mddev->thread = conf->thread;
5240 conf->thread = NULL;
5241 mddev->private = conf;
5242
5243 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
5244 i++) {
5245 rdev = conf->disks[i].rdev;
5246 if (!rdev && conf->disks[i].replacement) {
5247 /* The replacement is all we have yet */
5248 rdev = conf->disks[i].replacement;
5249 conf->disks[i].replacement = NULL;
5250 clear_bit(Replacement, &rdev->flags);
5251 conf->disks[i].rdev = rdev;
5252 }
5253 if (!rdev)
5254 continue;
5255 if (conf->disks[i].replacement &&
5256 conf->reshape_progress != MaxSector) {
5257 /* replacements and reshape simply do not mix. */
5258 printk(KERN_ERR "md: cannot handle concurrent "
5259 "replacement and reshape.\n");
5260 goto abort;
5261 }
5262 if (test_bit(In_sync, &rdev->flags)) {
5263 working_disks++;
5264 continue;
5265 }
5266 /* This disk is not fully in-sync. However if it
5267 * just stored parity (beyond the recovery_offset),
5268 * then we don't need to be concerned about the
5269 * array being dirty.
5270 * When reshape goes 'backwards', we never have
5271 * partially completed devices, so we only need
5272 * to worry about reshape going forwards.
5273 */
5274 /* Hack because v0.91 doesn't store recovery_offset properly. */
5275 if (mddev->major_version == 0 &&
5276 mddev->minor_version > 90)
5277 rdev->recovery_offset = reshape_offset;
5278
5279 if (rdev->recovery_offset < reshape_offset) {
5280 /* We need to check old and new layout */
5281 if (!only_parity(rdev->raid_disk,
5282 conf->algorithm,
5283 conf->raid_disks,
5284 conf->max_degraded))
5285 continue;
5286 }
5287 if (!only_parity(rdev->raid_disk,
5288 conf->prev_algo,
5289 conf->previous_raid_disks,
5290 conf->max_degraded))
5291 continue;
5292 dirty_parity_disks++;
5293 }
5294
5295 /*
5296 * 0 for a fully functional array, 1 or 2 for a degraded array.
5297 */
5298 mddev->degraded = calc_degraded(conf);
5299
5300 if (has_failed(conf)) {
5301 printk(KERN_ERR "md/raid:%s: not enough operational devices"
5302 " (%d/%d failed)\n",
5303 mdname(mddev), mddev->degraded, conf->raid_disks);
5304 goto abort;
5305 }
5306
5307 /* device size must be a multiple of chunk size */
5308 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
5309 mddev->resync_max_sectors = mddev->dev_sectors;
5310
5311 if (mddev->degraded > dirty_parity_disks &&
5312 mddev->recovery_cp != MaxSector) {
5313 if (mddev->ok_start_degraded)
5314 printk(KERN_WARNING
5315 "md/raid:%s: starting dirty degraded array"
5316 " - data corruption possible.\n",
5317 mdname(mddev));
5318 else {
5319 printk(KERN_ERR
5320 "md/raid:%s: cannot start dirty degraded array.\n",
5321 mdname(mddev));
5322 goto abort;
5323 }
5324 }
5325
5326 if (mddev->degraded == 0)
5327 printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
5328 " devices, algorithm %d\n", mdname(mddev), conf->level,
5329 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
5330 mddev->new_layout);
5331 else
5332 printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
5333 " out of %d devices, algorithm %d\n",
5334 mdname(mddev), conf->level,
5335 mddev->raid_disks - mddev->degraded,
5336 mddev->raid_disks, mddev->new_layout);
5337
5338 print_raid5_conf(conf);
5339
5340 if (conf->reshape_progress != MaxSector) {
5341 conf->reshape_safe = conf->reshape_progress;
5342 atomic_set(&conf->reshape_stripes, 0);
5343 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5344 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5345 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5346 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5347 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5348 "reshape");
5349 }
5350
5351
5352 /* Ok, everything is just fine now */
5353 if (mddev->to_remove == &raid5_attrs_group)
5354 mddev->to_remove = NULL;
5355 else if (mddev->kobj.sd &&
5356 sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
5357 printk(KERN_WARNING
5358 "raid5: failed to create sysfs attributes for %s\n",
5359 mdname(mddev));
5360 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5361
5362 if (mddev->queue) {
5363 int chunk_size;
5364 /* read-ahead size must cover two whole stripes, which
5365 * is 2 * (data disks) * chunksize, where 'data disks' is the
5366 * number of raid devices minus the parity devices
5367 */
5368 int data_disks = conf->previous_raid_disks - conf->max_degraded;
5369 int stripe = data_disks *
5370 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
5371 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5372 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
5373
5374 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
5375
5376 mddev->queue->backing_dev_info.congested_data = mddev;
5377 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
5378
5379 chunk_size = mddev->chunk_sectors << 9;
5380 blk_queue_io_min(mddev->queue, chunk_size);
5381 blk_queue_io_opt(mddev->queue, chunk_size *
5382 (conf->raid_disks - conf->max_degraded));
5383
5384 rdev_for_each(rdev, mddev) {
5385 disk_stack_limits(mddev->gendisk, rdev->bdev,
5386 rdev->data_offset << 9);
5387 disk_stack_limits(mddev->gendisk, rdev->bdev,
5388 rdev->new_data_offset << 9);
5389 }
5390 }
5391
5392 return 0;
5393 abort:
5394 md_unregister_thread(&mddev->thread);
5395 print_raid5_conf(conf);
5396 free_conf(conf);
5397 mddev->private = NULL;
5398 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
5399 return -EIO;
5400 }
5401
5402 static int stop(struct mddev *mddev)
5403 {
5404 struct r5conf *conf = mddev->private;
5405
5406 md_unregister_thread(&mddev->thread);
5407 if (mddev->queue)
5408 mddev->queue->backing_dev_info.congested_fn = NULL;
5409 free_conf(conf);
5410 mddev->private = NULL;
5411 mddev->to_remove = &raid5_attrs_group;
5412 return 0;
5413 }
5414
5415 static void status(struct seq_file *seq, struct mddev *mddev)
5416 {
5417 struct r5conf *conf = mddev->private;
5418 int i;
5419
5420 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
5421 mddev->chunk_sectors / 2, mddev->layout);
5422 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
5423 for (i = 0; i < conf->raid_disks; i++)
5424 seq_printf (seq, "%s",
5425 conf->disks[i].rdev &&
5426 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
5427 seq_printf (seq, "]");
5428 }
5429
5430 static void print_raid5_conf (struct r5conf *conf)
5431 {
5432 int i;
5433 struct disk_info *tmp;
5434
5435 printk(KERN_DEBUG "RAID conf printout:\n");
5436 if (!conf) {
5437 printk("(conf==NULL)\n");
5438 return;
5439 }
5440 printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
5441 conf->raid_disks,
5442 conf->raid_disks - conf->mddev->degraded);
5443
5444 for (i = 0; i < conf->raid_disks; i++) {
5445 char b[BDEVNAME_SIZE];
5446 tmp = conf->disks + i;
5447 if (tmp->rdev)
5448 printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
5449 i, !test_bit(Faulty, &tmp->rdev->flags),
5450 bdevname(tmp->rdev->bdev, b));
5451 }
5452 }
5453
5454 static int raid5_spare_active(struct mddev *mddev)
5455 {
5456 int i;
5457 struct r5conf *conf = mddev->private;
5458 struct disk_info *tmp;
5459 int count = 0;
5460 unsigned long flags;
5461
5462 for (i = 0; i < conf->raid_disks; i++) {
5463 tmp = conf->disks + i;
5464 if (tmp->replacement
5465 && tmp->replacement->recovery_offset == MaxSector
5466 && !test_bit(Faulty, &tmp->replacement->flags)
5467 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
5468 /* Replacement has just become active. */
5469 if (!tmp->rdev
5470 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
5471 count++;
5472 if (tmp->rdev) {
5473 /* Replaced device not technically faulty,
5474 * but we need to be sure it gets removed
5475 * and never re-added.
5476 */
5477 set_bit(Faulty, &tmp->rdev->flags);
5478 sysfs_notify_dirent_safe(
5479 tmp->rdev->sysfs_state);
5480 }
5481 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
5482 } else if (tmp->rdev
5483 && tmp->rdev->recovery_offset == MaxSector
5484 && !test_bit(Faulty, &tmp->rdev->flags)
5485 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
5486 count++;
5487 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
5488 }
5489 }
5490 spin_lock_irqsave(&conf->device_lock, flags);
5491 mddev->degraded = calc_degraded(conf);
5492 spin_unlock_irqrestore(&conf->device_lock, flags);
5493 print_raid5_conf(conf);
5494 return count;
5495 }
5496
5497 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
5498 {
5499 struct r5conf *conf = mddev->private;
5500 int err = 0;
5501 int number = rdev->raid_disk;
5502 struct md_rdev **rdevp;
5503 struct disk_info *p = conf->disks + number;
5504
5505 print_raid5_conf(conf);
5506 if (rdev == p->rdev)
5507 rdevp = &p->rdev;
5508 else if (rdev == p->replacement)
5509 rdevp = &p->replacement;
5510 else
5511 return 0;
5512
5513 if (number >= conf->raid_disks &&
5514 conf->reshape_progress == MaxSector)
5515 clear_bit(In_sync, &rdev->flags);
5516
5517 if (test_bit(In_sync, &rdev->flags) ||
5518 atomic_read(&rdev->nr_pending)) {
5519 err = -EBUSY;
5520 goto abort;
5521 }
5522 /* Only remove non-faulty devices if recovery
5523 * isn't possible.
5524 */
5525 if (!test_bit(Faulty, &rdev->flags) &&
5526 mddev->recovery_disabled != conf->recovery_disabled &&
5527 !has_failed(conf) &&
5528 (!p->replacement || p->replacement == rdev) &&
5529 number < conf->raid_disks) {
5530 err = -EBUSY;
5531 goto abort;
5532 }
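	/* Clear the pointer, then wait out an RCU grace period: any reader
	 * that picked up the old value under rcu_read_lock() has now
	 * finished, so a still-zero nr_pending means nobody can reach this
	 * rdev any more.
	 */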
5533 *rdevp = NULL;
5534 synchronize_rcu();
5535 if (atomic_read(&rdev->nr_pending)) {
5536 /* lost the race, try later */
5537 err = -EBUSY;
5538 *rdevp = rdev;
5539 } else if (p->replacement) {
5540 /* We must have just cleared 'rdev' */
5541 p->rdev = p->replacement;
5542 clear_bit(Replacement, &p->replacement->flags);
5543 		smp_mb(); /* Make sure other CPUs may see both pointers as identical
5544 			   * but will never see neither - provided they are careful.
5545 			   */
5546 p->replacement = NULL;
5547 clear_bit(WantReplacement, &rdev->flags);
5548 } else
5549 		/* We might have just removed the Replacement as faulty;
5550 		 * clear the bit just in case.
5551 		 */
5552 clear_bit(WantReplacement, &rdev->flags);
5553 abort:
5554
5555 print_raid5_conf(conf);
5556 return err;
5557 }
5558
5559 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
5560 {
5561 struct r5conf *conf = mddev->private;
5562 int err = -EEXIST;
5563 int disk;
5564 struct disk_info *p;
5565 int first = 0;
5566 int last = conf->raid_disks - 1;
5567
5568 if (mddev->recovery_disabled == conf->recovery_disabled)
5569 return -EBUSY;
5570
5571 if (rdev->saved_raid_disk < 0 && has_failed(conf))
5572 /* no point adding a device */
5573 return -EINVAL;
5574
5575 if (rdev->raid_disk >= 0)
5576 first = last = rdev->raid_disk;
5577
5578 /*
5579 * find the disk ... but prefer rdev->saved_raid_disk
5580 * if possible.
5581 */
5582 if (rdev->saved_raid_disk >= 0 &&
5583 rdev->saved_raid_disk >= first &&
5584 conf->disks[rdev->saved_raid_disk].rdev == NULL)
5585 first = rdev->saved_raid_disk;
5586
5587 for (disk = first; disk <= last; disk++) {
5588 p = conf->disks + disk;
5589 if (p->rdev == NULL) {
5590 clear_bit(In_sync, &rdev->flags);
5591 rdev->raid_disk = disk;
5592 err = 0;
5593 if (rdev->saved_raid_disk != disk)
5594 conf->fullsync = 1;
5595 rcu_assign_pointer(p->rdev, rdev);
5596 goto out;
5597 }
5598 }
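	/* No empty slot was found; see if an active device has asked for a
	 * replacement and does not have one yet.
	 */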
5599 for (disk = first; disk <= last; disk++) {
5600 p = conf->disks + disk;
5601 if (test_bit(WantReplacement, &p->rdev->flags) &&
5602 p->replacement == NULL) {
5603 clear_bit(In_sync, &rdev->flags);
5604 set_bit(Replacement, &rdev->flags);
5605 rdev->raid_disk = disk;
5606 err = 0;
5607 conf->fullsync = 1;
5608 rcu_assign_pointer(p->replacement, rdev);
5609 break;
5610 }
5611 }
5612 out:
5613 print_raid5_conf(conf);
5614 return err;
5615 }
5616
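/* Typically reached from user space after every member device has been
 * enlarged, e.g. (assuming mdadm):
 *   mdadm --grow /dev/md0 --size=max
 */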
5617 static int raid5_resize(struct mddev *mddev, sector_t sectors)
5618 {
5619 /* no resync is happening, and there is enough space
5620 * on all devices, so we can resize.
5621 * We need to make sure resync covers any new space.
5622 * If the array is shrinking we should possibly wait until
5623 * any io in the removed space completes, but it hardly seems
5624 * worth it.
5625 */
5626 sector_t newsize;
5627 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
5628 newsize = raid5_size(mddev, sectors, mddev->raid_disks);
5629 if (mddev->external_size &&
5630 mddev->array_sectors > newsize)
5631 return -EINVAL;
5632 if (mddev->bitmap) {
5633 int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
5634 if (ret)
5635 return ret;
5636 }
5637 md_set_array_sectors(mddev, newsize);
5638 set_capacity(mddev->gendisk, mddev->array_sectors);
5639 revalidate_disk(mddev->gendisk);
5640 if (sectors > mddev->dev_sectors &&
5641 mddev->recovery_cp > mddev->dev_sectors) {
5642 mddev->recovery_cp = mddev->dev_sectors;
5643 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5644 }
5645 mddev->dev_sectors = sectors;
5646 mddev->resync_max_sectors = sectors;
5647 return 0;
5648 }
5649
5650 static int check_stripe_cache(struct mddev *mddev)
5651 {
5652 /* Can only proceed if there are plenty of stripe_heads.
5653 	 * We need a minimum of one full stripe, and for sensible progress
5654 * it is best to have about 4 times that.
5655 * If we require 4 times, then the default 256 4K stripe_heads will
5656 * allow for chunk sizes up to 256K, which is probably OK.
5657 * If the chunk size is greater, user-space should request more
5658 * stripe_heads first.
5659 */
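	/* Worked example, assuming 4K pages: a 512K chunk needs
	 * (512K / 4K) * 4 = 512 stripe_heads, twice the default 256, so
	 * user space would first raise the cache, e.g. (assuming the
	 * usual md sysfs layout):
	 *   echo 512 > /sys/block/md0/md/stripe_cache_size
	 */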
5660 struct r5conf *conf = mddev->private;
5661 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
5662 > conf->max_nr_stripes ||
5663 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
5664 > conf->max_nr_stripes) {
5665 printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
5666 mdname(mddev),
5667 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
5668 / STRIPE_SIZE)*4);
5669 return 0;
5670 }
5671 return 1;
5672 }
5673
5674 static int check_reshape(struct mddev *mddev)
5675 {
5676 struct r5conf *conf = mddev->private;
5677
5678 if (mddev->delta_disks == 0 &&
5679 mddev->new_layout == mddev->layout &&
5680 mddev->new_chunk_sectors == mddev->chunk_sectors)
5681 return 0; /* nothing to do */
5682 if (has_failed(conf))
5683 return -EINVAL;
5684 if (mddev->delta_disks < 0) {
5685 /* We might be able to shrink, but the devices must
5686 * be made bigger first.
5687 		 * For raid6, 4 devices is the minimum.
5688 		 * Otherwise 2 is the minimum.
5689 */
5690 int min = 2;
5691 if (mddev->level == 6)
5692 min = 4;
5693 if (mddev->raid_disks + mddev->delta_disks < min)
5694 return -EINVAL;
5695 }
5696
5697 if (!check_stripe_cache(mddev))
5698 return -ENOSPC;
5699
5700 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
5701 }
5702
5703 static int raid5_start_reshape(struct mddev *mddev)
5704 {
5705 struct r5conf *conf = mddev->private;
5706 struct md_rdev *rdev;
5707 int spares = 0;
5708 unsigned long flags;
5709
5710 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5711 return -EBUSY;
5712
5713 if (!check_stripe_cache(mddev))
5714 return -ENOSPC;
5715
5716 if (has_failed(conf))
5717 return -EINVAL;
5718
5719 rdev_for_each(rdev, mddev) {
5720 if (!test_bit(In_sync, &rdev->flags)
5721 && !test_bit(Faulty, &rdev->flags))
5722 spares++;
5723 }
5724
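	/* Worked example: growing a clean raid5 (max_degraded == 1) by two
	 * disks (delta_disks == 2) needs at least one spare, as the
	 * reshaped array may proceed with one of the new slots missing.
	 */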
5725 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
5726 /* Not enough devices even to make a degraded array
5727 * of that size
5728 */
5729 return -EINVAL;
5730
5731 /* Refuse to reduce size of the array. Any reductions in
5732 * array size must be through explicit setting of array_size
5733 * attribute.
5734 */
5735 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
5736 < mddev->array_sectors) {
5737 printk(KERN_ERR "md/raid:%s: array size must be reduced "
5738 "before number of disks\n", mdname(mddev));
5739 return -EINVAL;
5740 }
5741
5742 atomic_set(&conf->reshape_stripes, 0);
5743 spin_lock_irq(&conf->device_lock);
5744 conf->previous_raid_disks = conf->raid_disks;
5745 conf->raid_disks += mddev->delta_disks;
5746 conf->prev_chunk_sectors = conf->chunk_sectors;
5747 conf->chunk_sectors = mddev->new_chunk_sectors;
5748 conf->prev_algo = conf->algorithm;
5749 conf->algorithm = mddev->new_layout;
5750 conf->generation++;
5751 /* Code that selects data_offset needs to see the generation update
5752 	 * if reshape_progress has been set - so a memory barrier is needed.
5753 */
5754 smp_mb();
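	/* A backwards reshape starts from the current end of the array and
	 * works down towards sector 0; a forwards one starts at sector 0.
	 */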
5755 if (mddev->reshape_backwards)
5756 conf->reshape_progress = raid5_size(mddev, 0, 0);
5757 else
5758 conf->reshape_progress = 0;
5759 conf->reshape_safe = conf->reshape_progress;
5760 spin_unlock_irq(&conf->device_lock);
5761
5762 /* Add some new drives, as many as will fit.
5763 * We know there are enough to make the newly sized array work.
5764 * Don't add devices if we are reducing the number of
5765 * devices in the array. This is because it is not possible
5766 * to correctly record the "partially reconstructed" state of
5767 * such devices during the reshape and confusion could result.
5768 */
5769 if (mddev->delta_disks >= 0) {
5770 rdev_for_each(rdev, mddev)
5771 if (rdev->raid_disk < 0 &&
5772 !test_bit(Faulty, &rdev->flags)) {
5773 if (raid5_add_disk(mddev, rdev) == 0) {
5774 if (rdev->raid_disk
5775 >= conf->previous_raid_disks)
5776 set_bit(In_sync, &rdev->flags);
5777 else
5778 rdev->recovery_offset = 0;
5779
5780 if (sysfs_link_rdev(mddev, rdev))
5781 /* Failure here is OK */;
5782 }
5783 } else if (rdev->raid_disk >= conf->previous_raid_disks
5784 && !test_bit(Faulty, &rdev->flags)) {
5785 /* This is a spare that was manually added */
5786 set_bit(In_sync, &rdev->flags);
5787 }
5788
5789 /* When a reshape changes the number of devices,
5790 * ->degraded is measured against the larger of the
5791 * pre and post number of devices.
5792 */
5793 spin_lock_irqsave(&conf->device_lock, flags);
5794 mddev->degraded = calc_degraded(conf);
5795 spin_unlock_irqrestore(&conf->device_lock, flags);
5796 }
5797 mddev->raid_disks = conf->raid_disks;
5798 mddev->reshape_position = conf->reshape_progress;
5799 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5800
5801 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5802 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5803 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5804 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5805 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5806 "reshape");
5807 if (!mddev->sync_thread) {
5808 mddev->recovery = 0;
5809 spin_lock_irq(&conf->device_lock);
5810 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
5811 rdev_for_each(rdev, mddev)
5812 rdev->new_data_offset = rdev->data_offset;
5813 smp_wmb();
5814 conf->reshape_progress = MaxSector;
5815 mddev->reshape_position = MaxSector;
5816 spin_unlock_irq(&conf->device_lock);
5817 return -EAGAIN;
5818 }
5819 conf->reshape_checkpoint = jiffies;
5820 md_wakeup_thread(mddev->sync_thread);
5821 md_new_event(mddev);
5822 return 0;
5823 }
5824
5825 /* This is called from the reshape thread and should make any
5826 * changes needed in 'conf'
5827 */
5828 static void end_reshape(struct r5conf *conf)
5829 {
5830
5831 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
5832 struct md_rdev *rdev;
5833
5834 spin_lock_irq(&conf->device_lock);
5835 conf->previous_raid_disks = conf->raid_disks;
5836 rdev_for_each(rdev, conf->mddev)
5837 rdev->data_offset = rdev->new_data_offset;
5838 smp_wmb();
5839 conf->reshape_progress = MaxSector;
5840 spin_unlock_irq(&conf->device_lock);
5841 wake_up(&conf->wait_for_overlap);
5842
5843 /* read-ahead size must cover two whole stripes, which is
5844 		 * 2 * (number of data disks) * chunksize
5845 */
5846 if (conf->mddev->queue) {
5847 int data_disks = conf->raid_disks - conf->max_degraded;
5848 int stripe = data_disks * ((conf->chunk_sectors << 9)
5849 / PAGE_SIZE);
5850 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5851 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
5852 }
5853 }
5854 }
5855
5856 /* This is called from the raid5d thread with mddev_lock held.
5857 * It makes config changes to the device.
5858 */
5859 static void raid5_finish_reshape(struct mddev *mddev)
5860 {
5861 struct r5conf *conf = mddev->private;
5862
5863 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5864
5865 if (mddev->delta_disks > 0) {
5866 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5867 set_capacity(mddev->gendisk, mddev->array_sectors);
5868 revalidate_disk(mddev->gendisk);
5869 } else {
5870 int d;
5871 spin_lock_irq(&conf->device_lock);
5872 mddev->degraded = calc_degraded(conf);
5873 spin_unlock_irq(&conf->device_lock);
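			/* delta_disks is non-positive on this path, so the
			 * loop below walks exactly the slots just dropped
			 * from the array (none when delta_disks == 0).
			 */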
5874 for (d = conf->raid_disks ;
5875 d < conf->raid_disks - mddev->delta_disks;
5876 d++) {
5877 struct md_rdev *rdev = conf->disks[d].rdev;
5878 if (rdev)
5879 clear_bit(In_sync, &rdev->flags);
5880 rdev = conf->disks[d].replacement;
5881 if (rdev)
5882 clear_bit(In_sync, &rdev->flags);
5883 }
5884 }
5885 mddev->layout = conf->algorithm;
5886 mddev->chunk_sectors = conf->chunk_sectors;
5887 mddev->reshape_position = MaxSector;
5888 mddev->delta_disks = 0;
5889 mddev->reshape_backwards = 0;
5890 }
5891 }
5892
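/* Quiesce protocol used by md: state 1 blocks new writes and drains all
 * active stripes, state 0 resumes normal operation, and state 2 is a
 * lighter "resume" that only lets a waiting resync/reshape continue.
 */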
5893 static void raid5_quiesce(struct mddev *mddev, int state)
5894 {
5895 struct r5conf *conf = mddev->private;
5896
5897 switch(state) {
5898 case 2: /* resume for a suspend */
5899 wake_up(&conf->wait_for_overlap);
5900 break;
5901
5902 case 1: /* stop all writes */
5903 spin_lock_irq(&conf->device_lock);
5904 /* '2' tells resync/reshape to pause so that all
5905 * active stripes can drain
5906 */
5907 conf->quiesce = 2;
5908 wait_event_lock_irq(conf->wait_for_stripe,
5909 atomic_read(&conf->active_stripes) == 0 &&
5910 atomic_read(&conf->active_aligned_reads) == 0,
5911 conf->device_lock, /* nothing */);
5912 conf->quiesce = 1;
5913 spin_unlock_irq(&conf->device_lock);
5914 /* allow reshape to continue */
5915 wake_up(&conf->wait_for_overlap);
5916 break;
5917
5918 case 0: /* re-enable writes */
5919 spin_lock_irq(&conf->device_lock);
5920 conf->quiesce = 0;
5921 wake_up(&conf->wait_for_stripe);
5922 wake_up(&conf->wait_for_overlap);
5923 spin_unlock_irq(&conf->device_lock);
5924 break;
5925 }
5926 }
5927
5928
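/* Convert a single-zone raid0 to a raid4/raid5 conf.  The result has one
 * extra slot for the parity disk, which starts out empty, so the new
 * array will typically run degraded until a spare is added.
 */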
5929 static void *raid45_takeover_raid0(struct mddev *mddev, int level)
5930 {
5931 struct r0conf *raid0_conf = mddev->private;
5932 sector_t sectors;
5933
5934 /* for raid0 takeover only one zone is supported */
5935 if (raid0_conf->nr_strip_zones > 1) {
5936 printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
5937 mdname(mddev));
5938 return ERR_PTR(-EINVAL);
5939 }
5940
5941 sectors = raid0_conf->strip_zone[0].zone_end;
5942 sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
5943 mddev->dev_sectors = sectors;
5944 mddev->new_level = level;
5945 mddev->new_layout = ALGORITHM_PARITY_N;
5946 mddev->new_chunk_sectors = mddev->chunk_sectors;
5947 mddev->raid_disks += 1;
5948 mddev->delta_disks = 1;
5949 	/* make sure it will not be marked as dirty */
5950 mddev->recovery_cp = MaxSector;
5951
5952 return setup_conf(mddev);
5953 }
5954
5955
5956 static void *raid5_takeover_raid1(struct mddev *mddev)
5957 {
5958 int chunksect;
5959
5960 if (mddev->raid_disks != 2 ||
5961 mddev->degraded > 1)
5962 return ERR_PTR(-EINVAL);
5963
5964 	/* Should we check whether there are write-behind devices? */
5965
5966 chunksect = 64*2; /* 64K by default */
5967
5968 /* The array must be an exact multiple of chunksize */
5969 while (chunksect && (mddev->array_sectors & (chunksect-1)))
5970 chunksect >>= 1;
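	/* e.g. array_sectors == 1000: not a multiple of 16 but a multiple
	 * of 8, so chunksect ends up as 8 sectors (4K) - exactly
	 * STRIPE_SIZE with 4K pages, which the check below still accepts.
	 */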
5971
5972 if ((chunksect<<9) < STRIPE_SIZE)
5973 /* array size does not allow a suitable chunk size */
5974 return ERR_PTR(-EINVAL);
5975
5976 mddev->new_level = 5;
5977 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
5978 mddev->new_chunk_sectors = chunksect;
5979
5980 return setup_conf(mddev);
5981 }
5982
5983 static void *raid5_takeover_raid6(struct mddev *mddev)
5984 {
5985 int new_layout;
5986
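	/* The *_6 layouts keep Q on the last device, so removing that
	 * device leaves a valid raid5 layout; map each one to its plain
	 * counterpart.
	 */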
5987 switch (mddev->layout) {
5988 case ALGORITHM_LEFT_ASYMMETRIC_6:
5989 new_layout = ALGORITHM_LEFT_ASYMMETRIC;
5990 break;
5991 case ALGORITHM_RIGHT_ASYMMETRIC_6:
5992 new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
5993 break;
5994 case ALGORITHM_LEFT_SYMMETRIC_6:
5995 new_layout = ALGORITHM_LEFT_SYMMETRIC;
5996 break;
5997 case ALGORITHM_RIGHT_SYMMETRIC_6:
5998 new_layout = ALGORITHM_RIGHT_SYMMETRIC;
5999 break;
6000 case ALGORITHM_PARITY_0_6:
6001 new_layout = ALGORITHM_PARITY_0;
6002 break;
6003 case ALGORITHM_PARITY_N:
6004 new_layout = ALGORITHM_PARITY_N;
6005 break;
6006 default:
6007 return ERR_PTR(-EINVAL);
6008 }
6009 mddev->new_level = 5;
6010 mddev->new_layout = new_layout;
6011 mddev->delta_disks = -1;
6012 mddev->raid_disks -= 1;
6013 return setup_conf(mddev);
6014 }
6015
6016
6017 static int raid5_check_reshape(struct mddev *mddev)
6018 {
6019 /* For a 2-drive array, the layout and chunk size can be changed
6020 	 * immediately, as no restriping is needed.
6021 * For larger arrays we record the new value - after validation
6022 * to be used by a reshape pass.
6023 */
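	/* Typically driven from user space, e.g. (assuming mdadm)
	 *   mdadm --grow /dev/md0 --chunk=128
	 * arrives here with mddev->new_chunk_sectors == 256
	 * (128K in 512-byte sectors).
	 */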
6024 struct r5conf *conf = mddev->private;
6025 int new_chunk = mddev->new_chunk_sectors;
6026
6027 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
6028 return -EINVAL;
6029 if (new_chunk > 0) {
6030 if (!is_power_of_2(new_chunk))
6031 return -EINVAL;
6032 if (new_chunk < (PAGE_SIZE>>9))
6033 return -EINVAL;
6034 if (mddev->array_sectors & (new_chunk-1))
6035 			/* not a factor of array size */
6036 return -EINVAL;
6037 }
6038
6039 /* They look valid */
6040
6041 if (mddev->raid_disks == 2) {
6042 /* can make the change immediately */
6043 if (mddev->new_layout >= 0) {
6044 conf->algorithm = mddev->new_layout;
6045 mddev->layout = mddev->new_layout;
6046 }
6047 if (new_chunk > 0) {
6048 			conf->chunk_sectors = new_chunk;
6049 mddev->chunk_sectors = new_chunk;
6050 }
6051 set_bit(MD_CHANGE_DEVS, &mddev->flags);
6052 md_wakeup_thread(mddev->thread);
6053 }
6054 return check_reshape(mddev);
6055 }
6056
6057 static int raid6_check_reshape(struct mddev *mddev)
6058 {
6059 int new_chunk = mddev->new_chunk_sectors;
6060
6061 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
6062 return -EINVAL;
6063 if (new_chunk > 0) {
6064 if (!is_power_of_2(new_chunk))
6065 return -EINVAL;
6066 if (new_chunk < (PAGE_SIZE >> 9))
6067 return -EINVAL;
6068 if (mddev->array_sectors & (new_chunk-1))
6069 			/* not a factor of array size */
6070 return -EINVAL;
6071 }
6072
6073 /* They look valid */
6074 return check_reshape(mddev);
6075 }
6076
6077 static void *raid5_takeover(struct mddev *mddev)
6078 {
6079 /* raid5 can take over:
6080 * raid0 - if there is only one strip zone - make it a raid4 layout
6081 	 *  raid1 - if there are two drives.  We need to choose a chunk size.
6082 	 *  raid4 - trivial - just use a raid4 layout.
6083 	 *  raid6 - provided it is a *_6 layout
6084 */
6085 if (mddev->level == 0)
6086 return raid45_takeover_raid0(mddev, 5);
6087 if (mddev->level == 1)
6088 return raid5_takeover_raid1(mddev);
6089 if (mddev->level == 4) {
6090 mddev->new_layout = ALGORITHM_PARITY_N;
6091 mddev->new_level = 5;
6092 return setup_conf(mddev);
6093 }
6094 if (mddev->level == 6)
6095 return raid5_takeover_raid6(mddev);
6096
6097 return ERR_PTR(-EINVAL);
6098 }
6099
6100 static void *raid4_takeover(struct mddev *mddev)
6101 {
6102 /* raid4 can take over:
6103 * raid0 - if there is only one strip zone
6104 * raid5 - if layout is right
6105 */
6106 if (mddev->level == 0)
6107 return raid45_takeover_raid0(mddev, 4);
6108 if (mddev->level == 5 &&
6109 mddev->layout == ALGORITHM_PARITY_N) {
6110 mddev->new_layout = 0;
6111 mddev->new_level = 4;
6112 return setup_conf(mddev);
6113 }
6114 return ERR_PTR(-EINVAL);
6115 }
6116
6117 static struct md_personality raid5_personality;
6118
6119 static void *raid6_takeover(struct mddev *mddev)
6120 {
6121 	/* Currently we can only take over a raid5.  We map its
6122 	 * layout to the equivalent raid6 layout
6123 	 * with the Q block at the end.
6124 	 */
6125 int new_layout;
6126
6127 if (mddev->pers != &raid5_personality)
6128 return ERR_PTR(-EINVAL);
6129 if (mddev->degraded > 1)
6130 return ERR_PTR(-EINVAL);
6131 if (mddev->raid_disks > 253)
6132 return ERR_PTR(-EINVAL);
6133 if (mddev->raid_disks < 3)
6134 return ERR_PTR(-EINVAL);
6135
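	/* The chosen *_6 layout places Q on the new last device; that slot
	 * starts out empty, so the raid6 will typically run degraded until
	 * a device is added and Q is reconstructed.
	 */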
6136 switch (mddev->layout) {
6137 case ALGORITHM_LEFT_ASYMMETRIC:
6138 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
6139 break;
6140 case ALGORITHM_RIGHT_ASYMMETRIC:
6141 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
6142 break;
6143 case ALGORITHM_LEFT_SYMMETRIC:
6144 new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
6145 break;
6146 case ALGORITHM_RIGHT_SYMMETRIC:
6147 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
6148 break;
6149 case ALGORITHM_PARITY_0:
6150 new_layout = ALGORITHM_PARITY_0_6;
6151 break;
6152 case ALGORITHM_PARITY_N:
6153 new_layout = ALGORITHM_PARITY_N;
6154 break;
6155 default:
6156 return ERR_PTR(-EINVAL);
6157 }
6158 mddev->new_level = 6;
6159 mddev->new_layout = new_layout;
6160 mddev->delta_disks = 1;
6161 mddev->raid_disks += 1;
6162 return setup_conf(mddev);
6163 }
6164
6165
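/* All three personalities below share this file's implementation; they
 * differ only in name/level and in the level-specific check_reshape and
 * takeover hooks.
 */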
6166 static struct md_personality raid6_personality =
6167 {
6168 .name = "raid6",
6169 .level = 6,
6170 .owner = THIS_MODULE,
6171 .make_request = make_request,
6172 .run = run,
6173 .stop = stop,
6174 .status = status,
6175 .error_handler = error,
6176 .hot_add_disk = raid5_add_disk,
6177 .hot_remove_disk= raid5_remove_disk,
6178 .spare_active = raid5_spare_active,
6179 .sync_request = sync_request,
6180 .resize = raid5_resize,
6181 .size = raid5_size,
6182 .check_reshape = raid6_check_reshape,
6183 .start_reshape = raid5_start_reshape,
6184 .finish_reshape = raid5_finish_reshape,
6185 .quiesce = raid5_quiesce,
6186 .takeover = raid6_takeover,
6187 };
6188 static struct md_personality raid5_personality =
6189 {
6190 .name = "raid5",
6191 .level = 5,
6192 .owner = THIS_MODULE,
6193 .make_request = make_request,
6194 .run = run,
6195 .stop = stop,
6196 .status = status,
6197 .error_handler = error,
6198 .hot_add_disk = raid5_add_disk,
6199 .hot_remove_disk= raid5_remove_disk,
6200 .spare_active = raid5_spare_active,
6201 .sync_request = sync_request,
6202 .resize = raid5_resize,
6203 .size = raid5_size,
6204 .check_reshape = raid5_check_reshape,
6205 .start_reshape = raid5_start_reshape,
6206 .finish_reshape = raid5_finish_reshape,
6207 .quiesce = raid5_quiesce,
6208 .takeover = raid5_takeover,
6209 };
6210
6211 static struct md_personality raid4_personality =
6212 {
6213 .name = "raid4",
6214 .level = 4,
6215 .owner = THIS_MODULE,
6216 .make_request = make_request,
6217 .run = run,
6218 .stop = stop,
6219 .status = status,
6220 .error_handler = error,
6221 .hot_add_disk = raid5_add_disk,
6222 .hot_remove_disk= raid5_remove_disk,
6223 .spare_active = raid5_spare_active,
6224 .sync_request = sync_request,
6225 .resize = raid5_resize,
6226 .size = raid5_size,
6227 .check_reshape = raid5_check_reshape,
6228 .start_reshape = raid5_start_reshape,
6229 .finish_reshape = raid5_finish_reshape,
6230 .quiesce = raid5_quiesce,
6231 .takeover = raid4_takeover,
6232 };
6233
6234 static int __init raid5_init(void)
6235 {
6236 register_md_personality(&raid6_personality);
6237 register_md_personality(&raid5_personality);
6238 register_md_personality(&raid4_personality);
6239 return 0;
6240 }
6241
6242 static void raid5_exit(void)
6243 {
6244 unregister_md_personality(&raid6_personality);
6245 unregister_md_personality(&raid5_personality);
6246 unregister_md_personality(&raid4_personality);
6247 }
6248
6249 module_init(raid5_init);
6250 module_exit(raid5_exit);
6251 MODULE_LICENSE("GPL");
6252 MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
6253 MODULE_ALIAS("md-personality-4"); /* RAID5 */
6254 MODULE_ALIAS("md-raid5");
6255 MODULE_ALIAS("md-raid4");
6256 MODULE_ALIAS("md-level-5");
6257 MODULE_ALIAS("md-level-4");
6258 MODULE_ALIAS("md-personality-8"); /* RAID6 */
6259 MODULE_ALIAS("md-raid6");
6260 MODULE_ALIAS("md-level-6");
6261
6262 /* This used to be two separate modules; they were: */
6263 MODULE_ALIAS("raid5");
6264 MODULE_ALIAS("raid6");