/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->bm_write is the number of the last batch successfully written.
 * conf->bm_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is bm_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *    we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *    batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
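
/*
 * Illustration of the batching described above (the numbers are
 * invented for the example, not taken from a real trace):
 *
 *   bm_write == 7, bm_flush == 7    batch 7 safely on disk, array idle
 *   add_stripe_bio()                sh->bm_seq = bm_flush + 1 = 8
 *   unplug                          bm_flush = 8, batch 8 now closed
 *   bm_flush (8) > bm_write (7)     write out pending bitmap updates,
 *                                   then advance bm_write to 8
 *   stripes with bm_seq <= 8        may now be written out safely
 */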
1da177e4 45
bff61975 46#include <linux/blkdev.h>
f6705578 47#include <linux/kthread.h>
f701d589 48#include <linux/raid/pq.h>
91c00924 49#include <linux/async_tx.h>
07a3b417 50#include <linux/async.h>
bff61975 51#include <linux/seq_file.h>
36d1c647 52#include <linux/cpu.h>
43b2e5d8 53#include "md.h"
bff61975 54#include "raid5.h"
ef740c37 55#include "bitmap.h"
72626685 56
1da177e4
LT
57/*
58 * Stripe cache
59 */
60
61#define NR_STRIPES 256
62#define STRIPE_SIZE PAGE_SIZE
63#define STRIPE_SHIFT (PAGE_SHIFT - 9)
64#define STRIPE_SECTORS (STRIPE_SIZE>>9)
65#define IO_THRESHOLD 1
8b3e6cdc 66#define BYPASS_THRESHOLD 1
fccddba0 67#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
1da177e4
LT
68#define HASH_MASK (NR_HASH - 1)
69
fccddba0 70#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
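
/*
 * Bucket example (assuming 4K pages and 8-byte hlist_head, so
 * STRIPE_SHIFT == 3, NR_HASH == 512 and HASH_MASK == 0x1ff): a stripe
 * starting at sector 0x12345 lands in bucket (0x12345 >> 3) & 0x1ff
 * == 0x68, and all stripes sharing those bits chain off that bucket.
 */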

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
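
/*
 * Worked example (illustrative numbers, assuming 4K pages so
 * STRIPE_SECTORS == 8): for a stripe+device starting at sect == 16, a
 * bio at sector 16 covering 7 sectors ends before sector 24, so
 * r5_next_bio() yields bio->bi_next; a bio of 8 or more sectors
 * reaches the end of this device's range, so NULL is returned and the
 * walk stops there.
 */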
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#ifdef DEBUG
#define inline
#define __inline__
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	/* bitwise OR is intended here; the original '||' collapsed the
	 * shifted count to 0 or 1 */
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from the first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid6 stripe, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot;

	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	slot = (*count)++;
	return slot;
}

static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;
	while (bi) {

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf (raid5_conf_t *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state)) {
				list_add_tail(&sh->lru, &conf->delayed_list);
				blk_plug_device(conf->mddev->queue);
			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				   sh->bm_seq - conf->seq_write > 0) {
				list_add_tail(&sh->lru, &conf->bitmap_list);
				blk_plug_device(conf->mddev->queue);
			} else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(stripe_operations_active(sh));
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}

static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}


/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}

static void shrink_buffers(struct stripe_head *sh, int num)
{
	struct page *p;
	int i;

	for (i=0; i<num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh, int num)
{
	int i;

	for (i=0; i<num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}

static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	CHECK_DEVLOCK();
	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;


	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	insert_hash(conf, sh);
}

static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(struct request_queue *q);

static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    raid5_unplug_device(conf->mddev->queue)
					);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru)
				       && !test_bit(STRIPE_EXPANDING, &sh->state));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}

static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = WRITE;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw == WRITE)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rw == WRITE &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS,
					&rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw == WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}

static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio, i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bio_iovec_idx(bio, i)->bv_offset;
			bio_page = bio_iovec_idx(bio, i)->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
						  b_offset, clen, &submit);
			else
				tx = async_memcpy(bio_page, page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset +=  len;
	}

	return tx;
}

static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	spin_lock_irq(&conf->device_lock);
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_phys_segments(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	spin_unlock_irq(&conf->device_lock);
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&conf->device_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
					dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}

static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}

static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu)
{
	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}

static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}

/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome. The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = (void *)raid6_empty_zero_page;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);
	BUG_ON(count != syndrome_disks);

	return count;
}

static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **blocks = percpu->scribble;
	int target;
	int qd_idx = sh->qd_idx;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct r5dev *tgt;
	struct page *dest;
	int i;
	int count;

	if (sh->ops.target < 0)
		target = sh->ops.target2;
	else if (sh->ops.target2 < 0)
		target = sh->ops.target;
	else
		/* we should only have one valid target */
		BUG();
	BUG_ON(target < 0);
	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	dest = tgt->page;

	atomic_inc(&sh->count);

	if (target == qd_idx) {
		count = set_syndrome_sources(blocks, sh);
		blocks[count] = NULL; /* regenerating p is not necessary */
		BUG_ON(blocks[count+1] != dest); /* q should already be set */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	} else {
		/* Compute any data- or p-drive using XOR */
		count = 0;
		for (i = disks; i-- ; ) {
			if (i == target || i == qd_idx)
				continue;
			blocks[count++] = sh->dev[i].page;
		}

		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
	}

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	int target = sh->ops.target;
	int target2 = sh->ops.target2;
	struct r5dev *tgt = &sh->dev[target];
	struct r5dev *tgt2 = &sh->dev[target2];
	struct dma_async_tx_descriptor *tx;
	struct page **blocks = percpu->scribble;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
		 __func__, (unsigned long long)sh->sector, target, target2);
	BUG_ON(target < 0 || target2 < 0);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

	/* we need to open-code set_syndrome_sources to handle the
	 * slot number conversion for 'faila' and 'failb'
	 */
	for (i = 0; i < disks ; i++)
		blocks[i] = (void *)raid6_empty_zero_page;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		blocks[slot] = sh->dev[i].page;

		if (i == target)
			faila = slot;
		if (i == target2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);
	BUG_ON(count != syndrome_disks);

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);
	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
		 __func__, (unsigned long long)sh->sector, faila, failb);

	atomic_inc(&sh->count);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		} else {
			struct page *dest;
			int data_target;
			int qd_idx = sh->qd_idx;

			/* Missing D+Q: recompute D from P, then recompute Q */
			if (target == qd_idx)
				data_target = target2;
			else
				data_target = target;

			count = 0;
			for (i = disks; i-- ; ) {
				if (i == data_target || i == qd_idx)
					continue;
				blocks[count++] = sh->dev[i].page;
			}
			dest = sh->dev[data_target].page;
			init_async_submit(&submit,
					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
					  NULL, NULL, NULL,
					  to_addr_conv(sh, percpu));
			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
				       &submit);

			count = set_syndrome_sources(blocks, sh);
			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		}
	} else {
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		if (failb == syndrome_disks) {
			/* We're missing D+P. */
			return async_raid6_datap_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila,
						       blocks, &submit);
		} else {
			/* We're missing D+D. */
			return async_raid6_2data_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila, failb,
						       blocks, &submit);
		}
	}
}


static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
	       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock(&sh->lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock(&sh->lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(1, wbi, dev->page,
					dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}

static void ops_complete_reconstruct(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->written || i == pd_idx || i == qd_idx)
			set_bit(R5_UPTODATE, &dev->flags);
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
			  to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}

static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct page **blocks = percpu->scribble;
	int count;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	count = set_syndrome_sources(blocks, sh);

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
			  sh, to_addr_conv(sh, percpu));
	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}

static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct page *xor_dest;
	struct page **xor_srcs = percpu->scribble;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int count;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	count = 0;
	xor_dest = sh->dev[pd_idx].page;
	xor_srcs[count++] = xor_dest;
	for (i = disks; i--; ) {
		if (i == pd_idx || i == qd_idx)
			continue;
		xor_srcs[count++] = sh->dev[i].page;
	}

	init_async_submit(&submit, 0, NULL, NULL, NULL,
			  to_addr_conv(sh, percpu));
	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, &submit);

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
	tx = async_trigger_callback(&submit);
}

static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
	struct page **srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count;

	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
		(unsigned long long)sh->sector, checkp);

	count = set_syndrome_sources(srcs, sh);
	if (!checkp)
		srcs[count] = NULL;

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
			  sh, to_addr_conv(sh, percpu));
	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}

static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int level = conf->level;
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu();
	percpu = per_cpu_ptr(conf->percpu, cpu);
	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		if (level < 6)
			tx = ops_run_compute5(sh, percpu);
		else {
			if (sh->ops.target2 < 0 || sh->ops.target < 0)
				tx = ops_run_compute6_1(sh, percpu);
			else
				tx = ops_run_compute6_2(sh, percpu);
		}
		/* terminate the chain if reconstruct is not set to be run */
		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, percpu, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
		if (level < 6)
			ops_run_reconstruct5(sh, percpu, tx);
		else
			ops_run_reconstruct6(sh, percpu, tx);
	}

	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
		if (sh->check_state == check_state_run)
			ops_run_check_p(sh, percpu);
		else if (sh->check_state == check_state_run_q)
			ops_run_check_pq(sh, percpu, 0);
		else if (sh->check_state == check_state_run_pq)
			ops_run_check_pq(sh, percpu, 1);
		else
			BUG();
	}

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
	put_cpu();
}

static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->disks = conf->raid_disks;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(raid5_conf_t *conf, int num)
{
	struct kmem_cache *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0],
		"raid%d-%s", conf->level, mdname(conf->mddev));
	sprintf(conf->cache_name[1],
		"raid%d-%s-alt", conf->level, mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}

/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}

static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	unsigned long cpu;
	int err;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    unplug_slaves(conf->mddev)
			);
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks and the scribble region
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	get_online_cpus();
	conf->scribble_len = scribble_len(newsize);
	for_each_present_cpu(cpu) {
		struct raid5_percpu *percpu;
		void *scribble;

		percpu = per_cpu_ptr(conf->percpu, cpu);
		scribble = kmalloc(conf->scribble_len, GFP_NOIO);

		if (scribble) {
			kfree(percpu->scribble);
			percpu->scribble = scribble;
		} else {
			err = -ENOMEM;
			break;
		}
	}
	put_online_cpus();

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);

		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section passed, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}

static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh, conf->pool_size);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}

static void raid5_end_read_request(struct bio * bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;


	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			rdev = conf->disks[i].rdev;
			printk_rl(KERN_INFO "raid5:%s: read error corrected"
				  " (%lu sectors at %llu on %s)\n",
				  mdname(conf->mddev), STRIPE_SECTORS,
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdevname(rdev->bdev, b));
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
		int retry = 0;
		rdev = conf->disks[i].rdev;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (conf->mddev->degraded)
			printk_rl(KERN_WARNING
				  "raid5:%s: read error not correctable "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			/* Oh, no!!! */
			printk_rl(KERN_WARNING
				  "raid5:%s: read error NOT corrected!! "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "raid5:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void raid5_end_write_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}


static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);

static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;

	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i, previous);
}

static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	pr_debug("raid5: error called\n");

	if (!test_bit(Faulty, &rdev->flags)) {
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (test_and_clear_bit(In_sync, &rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			/*
			 * if recovery was running, make sure it aborts.
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		}
		set_bit(Faulty, &rdev->flags);
		printk(KERN_ALERT
		       "raid5: Disk failure on %s, disabling device.\n"
		       "raid5: Operation continuing on %d devices.\n",
		       bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
	}
}
1606
1607/*
1608 * Input: a 'big' sector number,
1609 * Output: index of the data and parity disk, and the sector # in them.
1610 */
112bf897 1611static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
911d4ee8
N
1612 int previous, int *dd_idx,
1613 struct stripe_head *sh)
1da177e4
LT
1614{
1615 long stripe;
1616 unsigned long chunk_number;
1617 unsigned int chunk_offset;
911d4ee8 1618 int pd_idx, qd_idx;
67cc2b81 1619 int ddf_layout = 0;
1da177e4 1620 sector_t new_sector;
e183eaed
N
1621 int algorithm = previous ? conf->prev_algo
1622 : conf->algorithm;
09c9e5fa
AN
1623 int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1624 : conf->chunk_sectors;
112bf897
N
1625 int raid_disks = previous ? conf->previous_raid_disks
1626 : conf->raid_disks;
1627 int data_disks = raid_disks - conf->max_degraded;
1da177e4
LT
1628
1629 /* First compute the information on this sector */
1630
1631 /*
1632 * Compute the chunk number and the sector offset inside the chunk
1633 */
1634 chunk_offset = sector_div(r_sector, sectors_per_chunk);
1635 chunk_number = r_sector;
1636 BUG_ON(r_sector != chunk_number);
1637
1638 /*
1639 * Compute the stripe number
1640 */
1641 stripe = chunk_number / data_disks;
1642
1643 /*
1644 * Compute the data disk and parity disk indexes inside the stripe
1645 */
1646 *dd_idx = chunk_number % data_disks;
1647
1648 /*
1649 * Select the parity disk based on the user selected algorithm.
1650 */
	pd_idx = qd_idx = ~0;
	switch (conf->level) {
	case 4:
		pd_idx = data_disks;
		break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = data_disks - stripe % raid_disks;
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = stripe % raid_disks;
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = data_disks - stripe % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = stripe % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			(*dd_idx)++;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
			       algorithm);
			BUG();
		}
		break;
	case 6:

		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = stripe % raid_disks;
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = stripe % raid_disks;
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;

		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;

		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the
			 * order of blocks for computing Q is different.
			 */
			pd_idx = stripe % raid_disks;
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_RESTART:
			/* Same as left_asymmetric, but the first stripe is
			 * D D D P Q rather than
			 * Q D D D P
			 */
			pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			ddf_layout = 1;
			break;

		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on last device */
			pd_idx = data_disks - stripe % (raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = stripe % (raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - stripe % (raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = stripe % (raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		default:
			printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
			       algorithm);
			BUG();
		}
		break;
	}

	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
		sh->ddf_layout = ddf_layout;
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}

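/*
 * Worked example of the mapping above (illustrative only; the array
 * parameters are assumed, not taken from any real configuration):
 * a 4-disk RAID5 array, 64KiB chunks (sectors_per_chunk = 128),
 * ALGORITHM_LEFT_SYMMETRIC, so data_disks = 3.  For r_sector = 1000:
 *
 *	chunk_offset = 1000 % 128      = 104
 *	chunk_number = 1000 / 128      = 7
 *	stripe       = 7 / 3           = 2
 *	*dd_idx      = 7 % 3           = 1
 *	pd_idx       = 3 - (2 % 4)     = 1
 *	*dd_idx      = (1 + 1 + 1) % 4 = 3
 *	new_sector   = 2 * 128 + 104   = 360
 *
 * so logical sector 1000 lives at sector 360 of device 3, with the
 * parity for that stripe on device 1.
 */
#if 0	/* illustrative sketch only, not built: the typical caller pattern */
static sector_t example_map_lookup(raid5_conf_t *conf, sector_t logical)
{
	int dd_idx;
	/* passing a NULL stripe_head: only the index and offset are wanted */
	sector_t dev_sector = raid5_compute_sector(conf, logical, 0,
						   &dd_idx, NULL);
	pr_debug("logical %llu -> dev %d sector %llu\n",
		 (unsigned long long)logical, dd_idx,
		 (unsigned long long)dev_sector);
	return dev_sector;
}
#endif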
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	sector_t stripe;
	int chunk_offset;
	int chunk_number, dummy1, dd_idx = i;
	sector_t r_sector;
	struct stripe_head sh2;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;
	BUG_ON(new_sector != stripe);

	if (i == sh->pd_idx)
		return 0;
	switch (conf->level) {
	case 4: break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0:
			i -= 1;
			break;
		case ALGORITHM_PARITY_N:
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
			       algorithm);
			BUG();
		}
		break;
	case 6:
		if (i == sh->qd_idx)
			return 0; /* It is the Q disk */
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
		case ALGORITHM_ROTATING_ZERO_RESTART:
		case ALGORITHM_ROTATING_N_RESTART:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		case ALGORITHM_PARITY_0:
			i -= 2;
			break;
		case ALGORITHM_PARITY_N:
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			if (sh->pd_idx == 0)
				i--;	/* P D D D Q */
			else if (i > sh->pd_idx)
				i -= 2; /* D D Q P D */
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			if (i < sh->pd_idx)
				i += data_disks + 1;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0_6:
			i -= 1;
			break;
		default:
			printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
			       algorithm);
			BUG();
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector(conf, r_sector,
				     previous, &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
		|| sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "compute_blocknr: map not correct\n");
		return 0;
	}
	return r_sector;
}

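/*
 * Continuing the assumed 4-disk LEFT_SYMMETRIC example above:
 * compute_blocknr() is the inverse of raid5_compute_sector().  Given
 * sh->sector = 360 and i = 3 it derives chunk_offset = 104 and
 * stripe = 2, shifts i back past the parity disk (pd_idx = 1, so
 * i = 3 - (1 + 1) = 1), and reconstructs
 *
 *	chunk_number = 2 * 3 + 1     = 7
 *	r_sector     = 7 * 128 + 104 = 1000
 *
 * which is the logical sector we started from; the trailing call back
 * into raid5_compute_sector() asserts exactly that round trip.
 */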
static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
			 int rcw, int expand)
{
	int i, pd_idx = sh->pd_idx, disks = sh->disks;
	raid5_conf_t *conf = sh->raid_conf;
	int level = conf->level;

	if (rcw) {
		/* if we are not expanding this is a proper write request, and
		 * there will be bios with new data to be drained into the
		 * stripe cache
		 */
		if (!expand) {
			sh->reconstruct_state = reconstruct_state_drain_run;
			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		} else
			sh->reconstruct_state = reconstruct_state_run;

		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];

			if (dev->towrite) {
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantdrain, &dev->flags);
				if (!expand)
					clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
		if (s->locked + conf->max_degraded == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&conf->pending_full_writes);
	} else {
		BUG_ON(level == 6);
		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));

		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i == pd_idx)
				continue;

			if (dev->towrite &&
			    (test_bit(R5_UPTODATE, &dev->flags) ||
			     test_bit(R5_Wantcompute, &dev->flags))) {
				set_bit(R5_Wantdrain, &dev->flags);
				set_bit(R5_LOCKED, &dev->flags);
				clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
	}

	/* keep the parity disk(s) locked while asynchronous operations
	 * are in flight
	 */
	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
	s->locked++;

	if (level == 6) {
		int qd_idx = sh->qd_idx;
		struct r5dev *dev = &sh->dev[qd_idx];

		set_bit(R5_LOCKED, &dev->flags);
		clear_bit(R5_UPTODATE, &dev->flags);
		s->locked++;
	}

	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
		__func__, (unsigned long long)sh->sector,
		s->locked, s->ops_request);
}

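/*
 * Summary of the two paths above (descriptive, no new behaviour): with
 * rcw != 0 the stripe is rebuilt from the drained write data plus the
 * blocks that are already up to date, so only BIODRAIN + RECONSTRUCT
 * are scheduled.  With rcw == 0 the RAID5-only read-modify-write path
 * runs instead: PREXOR first subtracts the old contents of the blocks
 * being rewritten out of the parity, BIODRAIN copies in the new data,
 * and RECONSTRUCT xors it back in; that is why the old data must be
 * R5_UPTODATE (or R5_Wantcompute) before this function is called.
 */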
/*
 * Each stripe/dev can have one or more bios attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	raid5_conf_t *conf = sh->raid_conf;
	int firstwrite = 0;

	pr_debug("adding bh b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);

	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments++;
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);

	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		sh->bm_seq = conf->seq_flush+1;
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	return 0;
}

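/*
 * Illustration of the R5_OVERWRITE check above (values assumed): with
 * STRIPE_SECTORS == 8 and a stripe device starting at sector 16, two
 * queued write bios covering sectors 16-19 and 20-23 advance 'sector'
 * to 24 == 16 + STRIPE_SECTORS, so the page counts as fully covered
 * and its old contents never need to be read.  A chain covering 16-19
 * and 21-23 leaves a one-sector hole: 'sector' stops at 20 and
 * R5_OVERWRITE stays clear.
 */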
static void end_reshape(raid5_conf_t *conf);

static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh)
{
	int sectors_per_chunk =
		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
	int dd_idx;
	int chunk_offset = sector_div(stripe, sectors_per_chunk);
	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

	raid5_compute_sector(conf,
			     stripe * (disks - conf->max_degraded)
			     *sectors_per_chunk + chunk_offset,
			     previous,
			     &dd_idx, sh);
}

static void
handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks,
				struct bio **return_bi)
{
	int i;
	for (i = disks; i--; ) {
		struct bio *bi;
		int bitmap_end = 0;

		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			mdk_rdev_t *rdev;
			rcu_read_lock();
			rdev = rcu_dereference(conf->disks[i].rdev);
			if (rdev && test_bit(In_sync, &rdev->flags))
				/* multiple read failures in one stripe */
				md_error(conf->mddev, rdev);
			rcu_read_unlock();
		}
		spin_lock_irq(&conf->device_lock);
		/* fail all writes first */
		bi = sh->dev[i].towrite;
		sh->dev[i].towrite = NULL;
		if (bi) {
			s->to_write--;
			bitmap_end = 1;
		}

		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up(&conf->wait_for_overlap);

		while (bi && bi->bi_sector <
			sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = nextbi;
		}
		/* and fail all 'written' */
		bi = sh->dev[i].written;
		sh->dev[i].written = NULL;
		if (bi) bitmap_end = 1;
		while (bi && bi->bi_sector <
		       sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = bi2;
		}

		/* fail any reads if this device is non-operational and
		 * the data has not reached the cache yet.
		 */
		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
			bi = sh->dev[i].toread;
			sh->dev[i].toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);
			if (bi) s->to_read--;
			while (bi && bi->bi_sector <
			       sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *nextbi =
					r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (!raid5_dec_bi_phys_segments(bi)) {
					bi->bi_next = *return_bi;
					*return_bi = bi;
				}
				bi = nextbi;
			}
		}
		spin_unlock_irq(&conf->device_lock);
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
	}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}

/* fetch_block5 - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill5 to continue
 */
static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
			int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *failed_dev = &sh->dev[s->failed_num];

	/* is the data in this block needed, and can we get it? */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->failed &&
	      (failed_dev->toread ||
	       (failed_dev->towrite &&
		!test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
		/* We would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		if ((s->uptodate == disks - 1) &&
		    (s->failed && disk_idx == s->failed_num)) {
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1;
			s->req_compute = 1;
			/* Careful: from this point on 'uptodate' is in the eye
			 * of raid_run_ops which services 'compute' operations
			 * before writes. R5_Wantcompute flags a block that will
			 * be R5_UPTODATE by the time it is needed for a
			 * subsequent operation.
			 */
			s->uptodate++;
			return 1; /* uptodate + compute == disks */
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n", disk_idx,
				s->syncing);
		}
	}

	return 0;
}

/**
 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill5(struct stripe_head *sh,
			struct stripe_head_state *s, int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block5(sh, s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}

/* fetch_block6 - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill6 to continue
 */
static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
			 struct r6_state *r6s, int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]],
				  &sh->dev[r6s->failed_num[1]] };

	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->failed >= 1 &&
	      (fdev[0]->toread || s->to_write)) ||
	     (s->failed >= 2 &&
	      (fdev[1]->toread || s->to_write)))) {
		/* we would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
		BUG_ON(test_bit(R5_Wantread, &dev->flags));
		if ((s->uptodate == disks - 1) &&
		    (s->failed && (disk_idx == r6s->failed_num[0] ||
				   disk_idx == r6s->failed_num[1]))) {
			/* the disk has failed and we've been asked to
			 * fetch it, so compute it
			 */
			pr_debug("Computing stripe %llu block %d\n",
			       (unsigned long long)sh->sector, disk_idx);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1; /* no 2nd target */
			s->req_compute = 1;
			s->uptodate++;
			return 1;
		} else if (s->uptodate == disks-2 && s->failed >= 2) {
			/* Computing 2-failure is *very* expensive; only
			 * do it if failed >= 2
			 */
			int other;
			for (other = disks; other--; ) {
				if (other == disk_idx)
					continue;
				if (!test_bit(R5_UPTODATE,
				      &sh->dev[other].flags))
					break;
			}
			BUG_ON(other < 0);
			pr_debug("Computing stripe %llu blocks %d,%d\n",
			       (unsigned long long)sh->sector,
			       disk_idx, other);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
			set_bit(R5_Wantcompute, &sh->dev[other].flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = other;
			s->uptodate += 2;
			s->req_compute = 1;
			return 1;
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n",
				disk_idx, s->syncing);
		}
	}

	return 0;
}

2360
2361/**
2362 * handle_stripe_fill6 - read or compute data to satisfy pending requests.
2363 */
2364static void handle_stripe_fill6(struct stripe_head *sh,
2365 struct stripe_head_state *s, struct r6_state *r6s,
2366 int disks)
2367{
2368 int i;
2369
2370 /* look for blocks to read/compute, skip this if a compute
2371 * is already in flight, or if the stripe contents are in the
2372 * midst of changing due to a write
2373 */
2374 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2375 !sh->reconstruct_state)
2376 for (i = disks; i--; )
2377 if (fetch_block6(sh, s, r6s, i, disks))
2378 break;
a4456856
DW
2379 set_bit(STRIPE_HANDLE, &sh->state);
2380}
2381
2382
/* handle_stripe_clean_event
 * any written block on an uptodate or failed drive can be returned.
 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
 * never LOCKED, so we don't need to test 'failed' directly.
 */
static void handle_stripe_clean_event(raid5_conf_t *conf,
	struct stripe_head *sh, int disks, struct bio **return_bi)
{
	int i;
	struct r5dev *dev;

	for (i = disks; i--; )
		if (sh->dev[i].written) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) &&
				test_bit(R5_UPTODATE, &dev->flags)) {
				/* We can return any write requests */
				struct bio *wbi, *wbi2;
				int bitmap_end = 0;
				pr_debug("Return write for disc %d\n", i);
				spin_lock_irq(&conf->device_lock);
				wbi = dev->written;
				dev->written = NULL;
				while (wbi && wbi->bi_sector <
					dev->sector + STRIPE_SECTORS) {
					wbi2 = r5_next_bio(wbi, dev->sector);
					if (!raid5_dec_bi_phys_segments(wbi)) {
						md_write_end(conf->mddev);
						wbi->bi_next = *return_bi;
						*return_bi = wbi;
					}
					wbi = wbi2;
				}
				if (dev->towrite == NULL)
					bitmap_end = 1;
				spin_unlock_irq(&conf->device_lock);
				if (bitmap_end)
					bitmap_endwrite(conf->mddev->bitmap,
							sh->sector,
							STRIPE_SECTORS,
					 !test_bit(STRIPE_DEGRADED, &sh->state),
							0);
			}
		}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}

static void handle_stripe_dirtying5(raid5_conf_t *conf,
		struct stripe_head *sh, struct stripe_head_state *s, int disks)
{
	int rmw = 0, rcw = 0, i;
	for (i = disks; i--; ) {
		/* would I have to read this buffer for read_modify_write */
		struct r5dev *dev = &sh->dev[i];
		if ((dev->towrite || i == sh->pd_idx) &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rmw++;
			else
				rmw += 2*disks;  /* cannot read it */
		}
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rcw++;
			else
				rcw += 2*disks;
		}
	}
	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
		(unsigned long long)sh->sector, rmw, rcw);
	set_bit(STRIPE_HANDLE, &sh->state);
	if (rmw < rcw && rmw > 0)
		/* prefer read-modify-write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						"%d for r-m-w\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	if (rcw <= rmw && rcw > 0)
		/* want reconstruct write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
			    i != sh->pd_idx &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						"%d for Reconstruct\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	/* now if nothing is locked, and if we have enough data,
	 * we can start a write request
	 */
	/* since handle_stripe can be called at any time we need to handle the
	 * case where a compute block operation has been submitted and then a
	 * subsequent call wants to start a write request.  raid_run_ops only
	 * handles the case where compute block and reconstruct are requested
	 * simultaneously.  If this is not the case then new writes need to be
	 * held off until the compute completes.
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
		schedule_reconstruction(sh, s, rcw == 0, 0);
}

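/*
 * Worked example of the rmw/rcw costing above (illustrative numbers):
 * on a 5-disk RAID5 array, a write that fully overwrites one data block
 * can be serviced either by reading that block plus the parity block
 * (rmw = 2) or by reading the three untouched data blocks (rcw = 3), so
 * read-modify-write wins; once those two reads complete, rmw drops to 0
 * and schedule_reconstruction() is called with its rcw argument false,
 * selecting the prexor path.  A write covering all four data blocks
 * needs no reads at all for reconstruct-write (rcw = 0), so that path
 * is chosen instead.
 */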
static void handle_stripe_dirtying6(raid5_conf_t *conf,
		struct stripe_head *sh, struct stripe_head_state *s,
		struct r6_state *r6s, int disks)
{
	int rcw = 0, pd_idx = sh->pd_idx, i;
	int qd_idx = sh->qd_idx;

	set_bit(STRIPE_HANDLE, &sh->state);
	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* check whether we have enough data for this block */
		if (!test_bit(R5_OVERWRITE, &dev->flags) &&
		    i != pd_idx && i != qd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			rcw++;
			if (!test_bit(R5_Insync, &dev->flags))
				continue; /* it's a failed drive */

			if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				pr_debug("Read_old stripe %llu "
					"block %d for Reconstruct\n",
				     (unsigned long long)sh->sector, i);
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantread, &dev->flags);
				s->locked++;
			} else {
				pr_debug("Request delayed stripe %llu "
					"block %d for Reconstruct\n",
				     (unsigned long long)sh->sector, i);
				set_bit(STRIPE_DELAYED, &sh->state);
				set_bit(STRIPE_HANDLE, &sh->state);
			}
		}
	}
	/* now if nothing is locked, and if we have enough data, we can start a
	 * write request
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    s->locked == 0 && rcw == 0 &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
		schedule_reconstruction(sh, s, 1, 0);
	}
}

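/*
 * Note the asymmetry with handle_stripe_dirtying5() above: no rmw cost
 * is computed here because this driver has no prexor path for the Q
 * syndrome; every RAID6 write is serviced as a reconstruct-write, hence
 * the unconditional rcw argument of 1 in the call to
 * schedule_reconstruction().
 */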
static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks)
{
	struct r5dev *dev = NULL;

	set_bit(STRIPE_HANDLE, &sh->state);

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are no failures */
		if (s->failed == 0) {
			BUG_ON(s->uptodate != disks);
			sh->check_state = check_state_run;
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
			s->uptodate--;
			break;
		}
		dev = &sh->dev[s->failed_num];
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;
		if (!dev)
			dev = &sh->dev[sh->pd_idx];

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* either failed parity check, or recovery is happening */
		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
		BUG_ON(s->uptodate != disks);

		set_bit(R5_LOCKED, &dev->flags);
		s->locked++;
		set_bit(R5_Wantwrite, &dev->flags);

		clear_bit(STRIPE_DEGRADED, &sh->state);
		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* if a failure occurred during the check operation, leave
		 * STRIPE_INSYNC not set and let the stripe be handled again
		 */
		if (s->failed)
			break;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
			/* parity is correct (on disc,
			 * not in buffer any more)
			 */
			set_bit(STRIPE_INSYNC, &sh->state);
		else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				set_bit(R5_Wantcompute,
					&sh->dev[sh->pd_idx].flags);
				sh->ops.target = sh->pd_idx;
				sh->ops.target2 = -1;
				s->uptodate++;
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}

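/*
 * The check state machine above, condensed (descriptive only):
 * idle -> run (async zero-sum of P) -> check_result.  A clean result
 * sets STRIPE_INSYNC; a mismatch during a repairing resync goes
 * compute_run -> compute_result to regenerate the parity block and
 * write it back, while a plain "check" pass only bumps
 * resync_mismatches and leaves the disks untouched.
 */
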
static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
				  struct stripe_head_state *s,
				  struct r6_state *r6s, int disks)
{
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct r5dev *dev;

	set_bit(STRIPE_HANDLE, &sh->state);

	BUG_ON(s->failed > 2);

	/* Want to check and possibly repair P and Q.
	 * However there could be one 'failed' device, in which
	 * case we can only check one of them, possibly using the
	 * other to generate missing data
	 */

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are < 2 failures */
		if (s->failed == r6s->q_failed) {
			/* The only possible failed device holds Q, so it
			 * makes sense to check P (If anything else were failed,
			 * we would have used P to recreate it).
			 */
			sh->check_state = check_state_run;
		}
		if (!r6s->q_failed && s->failed < 2) {
			/* Q is not failed, and we didn't use it to generate
			 * anything, so it makes sense to check it
			 */
			if (sh->check_state == check_state_run)
				sh->check_state = check_state_run_pq;
			else
				sh->check_state = check_state_run_q;
		}

		/* discard potentially stale zero_sum_result */
		sh->ops.zero_sum_result = 0;

		if (sh->check_state == check_state_run) {
			/* async_xor_zero_sum destroys the contents of P */
			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
			s->uptodate--;
		}
		if (sh->check_state >= check_state_run &&
		    sh->check_state <= check_state_run_pq) {
			/* async_syndrome_zero_sum preserves P and Q, so
			 * no need to mark them !uptodate here
			 */
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			break;
		}

		/* we have 2-disk failure */
		BUG_ON(s->failed != 2);
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* now write out any block on a failed drive,
		 * or P or Q if they were recomputed
		 */
		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
		if (s->failed == 2) {
			dev = &sh->dev[r6s->failed_num[1]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (s->failed >= 1) {
			dev = &sh->dev[r6s->failed_num[0]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
			dev = &sh->dev[pd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
			dev = &sh->dev[qd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		clear_bit(STRIPE_DEGRADED, &sh->state);

		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
	case check_state_run_q:
	case check_state_run_pq:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if (sh->ops.zero_sum_result == 0) {
			/* both parities are correct */
			if (!s->failed)
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				/* in contrast to the raid5 case we can validate
				 * parity, but still have a failure to write
				 * back
				 */
				sh->check_state = check_state_compute_result;
				/* Returning at this point means that we may go
				 * off and bring p and/or q uptodate again so
				 * we make sure to check zero_sum_result again
				 * to verify if p or q need writeback
				 */
			}
		} else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				int *target = &sh->ops.target;

				sh->ops.target = -1;
				sh->ops.target2 = -1;
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[pd_idx].flags);
					*target = pd_idx;
					target = &sh->ops.target2;
					s->uptodate++;
				}
				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[qd_idx].flags);
					*target = qd_idx;
					s->uptodate++;
				}
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}

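/*
 * As implemented above, the RAID6 check runs in one of three flavours
 * depending on which of P and Q can be trusted: check_state_run
 * (P only), check_state_run_q (Q only) or check_state_run_pq (both).
 * SUM_CHECK_P_RESULT and SUM_CHECK_Q_RESULT in sh->ops.zero_sum_result
 * then select which of P and Q get recomputed and written back.
 */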
static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
				struct r6_state *r6s)
{
	int i;

	/* We have read all the blocks in this stripe and now we need to
	 * copy some of them into a target stripe for expand.
	 */
	struct dma_async_tx_descriptor *tx = NULL;
	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	for (i = 0; i < sh->disks; i++)
		if (i != sh->pd_idx && i != sh->qd_idx) {
			int dd_idx, j;
			struct stripe_head *sh2;
			struct async_submit_ctl submit;

			sector_t bn = compute_blocknr(sh, i, 1);
			sector_t s = raid5_compute_sector(conf, bn, 0,
							  &dd_idx, NULL);
			sh2 = get_active_stripe(conf, s, 0, 1, 1);
			if (sh2 == NULL)
				/* so far only the early blocks of this stripe
				 * have been requested.  When later blocks
				 * get requested, we will try again
				 */
				continue;
			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
			    test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
				/* must have already done this block */
				release_stripe(sh2);
				continue;
			}

			/* place all the copies on one channel */
			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
			tx = async_memcpy(sh2->dev[dd_idx].page,
					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
					  &submit);

			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
			for (j = 0; j < conf->raid_disks; j++)
				if (j != sh2->pd_idx &&
				    (!r6s || j != sh2->qd_idx) &&
				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
					break;
			if (j == conf->raid_disks) {
				set_bit(STRIPE_EXPAND_READY, &sh2->state);
				set_bit(STRIPE_HANDLE, &sh2->state);
			}
			release_stripe(sh2);
		}
	/* done submitting copies, wait for them to complete */
	if (tx) {
		async_tx_ack(tx);
		dma_wait_for_async_tx(tx);
	}
}

/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 *
 */

static void handle_stripe5(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	struct bio *return_bi = NULL;
	struct stripe_head_state s;
	struct r5dev *dev;
	mdk_rdev_t *blocked_rdev = NULL;
	int prexor;

	memset(&s, 0, sizeof(s));
	pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
		 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
		 atomic_read(&sh->count), sh->pd_idx, sh->check_state,
		 sh->reconstruct_state);

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
	s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);

	/* Now to look around and see what can be done */
	rcu_read_lock();
	for (i = disks; i--; ) {
		mdk_rdev_t *rdev;

		dev = &sh->dev[i];
		clear_bit(R5_Insync, &dev->flags);

		pr_debug("check %d: state 0x%lx toread %p read %p write %p "
			"written %p\n", i, dev->flags, dev->toread, dev->read,
			dev->towrite, dev->written);

		/* maybe we can request a biofill operation
		 *
		 * new wantfill requests are only permitted while
		 * ops_complete_biofill is guaranteed to be inactive
		 */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
			set_bit(R5_Wantfill, &dev->flags);

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
		if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;

		if (test_bit(R5_Wantfill, &dev->flags))
			s.to_fill++;
		else if (dev->toread)
			s.to_read++;
		if (dev->towrite) {
			s.to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				s.non_overwrite++;
		}
		if (dev->written)
			s.written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (blocked_rdev == NULL &&
		    rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			blocked_rdev = rdev;
			atomic_inc(&rdev->nr_pending);
		}
		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (!rdev || !test_bit(In_sync, &rdev->flags)
		    || test_bit(R5_ReadError, &dev->flags)) {
			s.failed++;
			s.failed_num = i;
		} else
			set_bit(R5_Insync, &dev->flags);
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		if (s.syncing || s.expanding || s.expanded ||
		    s.to_write || s.written) {
			set_bit(STRIPE_HANDLE, &sh->state);
			goto unlock;
		}
		/* There is nothing for the blocked_rdev to block */
		rdev_dec_pending(blocked_rdev, conf->mddev);
		blocked_rdev = NULL;
	}

	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
	}

	pr_debug("locked=%d uptodate=%d to_read=%d"
		" to_write=%d failed=%d failed_num=%d\n",
		s.locked, s.uptodate, s.to_read, s.to_write,
		s.failed, s.failed_num);
	/* check if the array has lost two devices and, if so, some requests might
	 * need to be failed
	 */
	if (s.failed > 1 && s.to_read+s.to_write+s.written)
		handle_failed_stripe(conf, sh, &s, disks, &return_bi);
	if (s.failed > 1 && s.syncing) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
		clear_bit(STRIPE_SYNCING, &sh->state);
		s.syncing = 0;
	}

	/* might be able to return some write requests if the parity block
	 * is safe, or on a failed drive
	 */
	dev = &sh->dev[sh->pd_idx];
	if ( s.written &&
	     ((test_bit(R5_Insync, &dev->flags) &&
	       !test_bit(R5_LOCKED, &dev->flags) &&
	       test_bit(R5_UPTODATE, &dev->flags)) ||
	       (s.failed == 1 && s.failed_num == sh->pd_idx)))
		handle_stripe_clean_event(conf, sh, disks, &return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (s.to_read || s.non_overwrite ||
	    (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
		handle_stripe_fill5(sh, &s, disks);

	/* Now we check to see if any write operations have recently
	 * completed
	 */
	prexor = 0;
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
		prexor = 1;
	if (sh->reconstruct_state == reconstruct_state_drain_result ||
	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
		sh->reconstruct_state = reconstruct_state_idle;

		/* All the 'written' buffers and the parity block are ready to
		 * be written back to disk
		 */
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
		for (i = disks; i--; ) {
			dev = &sh->dev[i];
			if (test_bit(R5_LOCKED, &dev->flags) &&
				(i == sh->pd_idx || dev->written)) {
				pr_debug("Writing block %d\n", i);
				set_bit(R5_Wantwrite, &dev->flags);
				if (prexor)
					continue;
				if (!test_bit(R5_Insync, &dev->flags) ||
				    (i == sh->pd_idx && s.failed == 0))
					set_bit(STRIPE_INSYNC, &sh->state);
			}
		}
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
			atomic_dec(&conf->preread_active_stripes);
			if (atomic_read(&conf->preread_active_stripes) <
				IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		}
	}

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+xor) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	 *    block.
	 */
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying5(conf, sh, &s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	 */
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state)))
		handle_parity_checks5(conf, sh, &s, disks);

	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drive is just a ReadError, then we might need to progress
	 * the repair/check process
	 */
	if (s.failed == 1 && !conf->mddev->ro &&
	    test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
	    && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
	    && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
		) {
		dev = &sh->dev[s.failed_num];
		if (!test_bit(R5_ReWrite, &dev->flags)) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_ReWrite, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			s.locked++;
		} else {
			/* let's read it back */
			set_bit(R5_Wantread, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			s.locked++;
		}
	}

	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		struct stripe_head *sh2
			= get_active_stripe(conf, sh->sector, 1, 1, 1);
		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
			/* sh cannot be written until sh2 has been read.
			 * so arrange for sh to be delayed a little
			 */
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
					      &sh2->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh2);
			goto unlock;
		}
		if (sh2)
			release_stripe(sh2);

		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			s.locked++;
		}
	}

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		/* Need to write out all blocks after computing parity */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh, NULL);

 unlock:
	spin_unlock(&sh->lock);

	/* wait for this device to become unblocked */
	if (unlikely(blocked_rdev))
		md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);

	if (s.ops_request)
		raid_run_ops(sh, s.ops_request);

	ops_run_io(sh, &s);

	return_io(return_bi);
}

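/*
 * Note on ordering in handle_stripe5() above: all state is gathered and
 * decided under sh->lock, but the actual work (raid_run_ops(),
 * ops_run_io(), return_io()) is issued only after the lock is dropped,
 * so no I/O or asynchronous compute is ever started while the stripe
 * lock is held.
 */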
1442577b 3172static void handle_stripe6(struct stripe_head *sh)
1da177e4 3173{
bff61975 3174 raid5_conf_t *conf = sh->raid_conf;
f416885e 3175 int disks = sh->disks;
a4456856 3176 struct bio *return_bi = NULL;
34e04e87 3177 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
a4456856
DW
3178 struct stripe_head_state s;
3179 struct r6_state r6s;
16a53ecc 3180 struct r5dev *dev, *pdev, *qdev;
6bfe0b49 3181 mdk_rdev_t *blocked_rdev = NULL;
1da177e4 3182
45b4233c 3183 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
6c0069c0 3184 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
a4456856 3185 (unsigned long long)sh->sector, sh->state,
6c0069c0
YT
3186 atomic_read(&sh->count), pd_idx, qd_idx,
3187 sh->check_state, sh->reconstruct_state);
a4456856 3188 memset(&s, 0, sizeof(s));
72626685 3189
16a53ecc
N
3190 spin_lock(&sh->lock);
3191 clear_bit(STRIPE_HANDLE, &sh->state);
3192 clear_bit(STRIPE_DELAYED, &sh->state);
3193
a4456856
DW
3194 s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
3195 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3196 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
16a53ecc 3197 /* Now to look around and see what can be done */
1da177e4
LT
3198
3199 rcu_read_lock();
16a53ecc
N
3200 for (i=disks; i--; ) {
3201 mdk_rdev_t *rdev;
3202 dev = &sh->dev[i];
3203 clear_bit(R5_Insync, &dev->flags);
1da177e4 3204
45b4233c 3205 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
16a53ecc 3206 i, dev->flags, dev->toread, dev->towrite, dev->written);
6c0069c0
YT
3207 /* maybe we can reply to a read
3208 *
3209 * new wantfill requests are only permitted while
3210 * ops_complete_biofill is guaranteed to be inactive
3211 */
3212 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
3213 !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
3214 set_bit(R5_Wantfill, &dev->flags);
1da177e4 3215
16a53ecc 3216 /* now count some things */
a4456856
DW
3217 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
3218 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
2d6e4ecc
DW
3219 if (test_bit(R5_Wantcompute, &dev->flags)) {
3220 s.compute++;
3221 BUG_ON(s.compute > 2);
3222 }
1da177e4 3223
6c0069c0
YT
3224 if (test_bit(R5_Wantfill, &dev->flags)) {
3225 s.to_fill++;
3226 } else if (dev->toread)
a4456856 3227 s.to_read++;
16a53ecc 3228 if (dev->towrite) {
a4456856 3229 s.to_write++;
16a53ecc 3230 if (!test_bit(R5_OVERWRITE, &dev->flags))
a4456856 3231 s.non_overwrite++;
16a53ecc 3232 }
a4456856
DW
3233 if (dev->written)
3234 s.written++;
16a53ecc 3235 rdev = rcu_dereference(conf->disks[i].rdev);
ac4090d2
N
3236 if (blocked_rdev == NULL &&
3237 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
6bfe0b49
DW
3238 blocked_rdev = rdev;
3239 atomic_inc(&rdev->nr_pending);
6bfe0b49 3240 }
16a53ecc
N
3241 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
3242 /* The ReadError flag will just be confusing now */
3243 clear_bit(R5_ReadError, &dev->flags);
3244 clear_bit(R5_ReWrite, &dev->flags);
1da177e4 3245 }
16a53ecc
N
3246 if (!rdev || !test_bit(In_sync, &rdev->flags)
3247 || test_bit(R5_ReadError, &dev->flags)) {
a4456856
DW
3248 if (s.failed < 2)
3249 r6s.failed_num[s.failed] = i;
3250 s.failed++;
16a53ecc
N
3251 } else
3252 set_bit(R5_Insync, &dev->flags);
1da177e4
LT
3253 }
3254 rcu_read_unlock();
6bfe0b49
DW
3255
3256 if (unlikely(blocked_rdev)) {
ac4090d2
N
3257 if (s.syncing || s.expanding || s.expanded ||
3258 s.to_write || s.written) {
3259 set_bit(STRIPE_HANDLE, &sh->state);
3260 goto unlock;
3261 }
3262 /* There is nothing for the blocked_rdev to block */
3263 rdev_dec_pending(blocked_rdev, conf->mddev);
3264 blocked_rdev = NULL;
6bfe0b49 3265 }
ac4090d2 3266
6c0069c0
YT
3267 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3268 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3269 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3270 }
3271
45b4233c 3272 pr_debug("locked=%d uptodate=%d to_read=%d"
16a53ecc 3273 " to_write=%d failed=%d failed_num=%d,%d\n",
a4456856
DW
3274 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3275 r6s.failed_num[0], r6s.failed_num[1]);
3276 /* check if the array has lost >2 devices and, if so, some requests
3277 * might need to be failed
16a53ecc 3278 */
a4456856 3279 if (s.failed > 2 && s.to_read+s.to_write+s.written)
1fe797e6 3280 handle_failed_stripe(conf, sh, &s, disks, &return_bi);
a4456856 3281 if (s.failed > 2 && s.syncing) {
16a53ecc
N
3282 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
3283 clear_bit(STRIPE_SYNCING, &sh->state);
a4456856 3284 s.syncing = 0;
16a53ecc
N
3285 }

	/*
	 * might be able to return some write requests if the parity blocks
	 * are safe, or on a failed drive
	 */
	pdev = &sh->dev[pd_idx];
	r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
		|| (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
	qdev = &sh->dev[qd_idx];
	r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx)
		|| (s.failed >= 2 && r6s.failed_num[1] == qd_idx);

	if (s.written &&
	    (r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
			       && !test_bit(R5_LOCKED, &pdev->flags)
			       && test_bit(R5_UPTODATE, &pdev->flags)))) &&
	    (r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
			       && !test_bit(R5_LOCKED, &qdev->flags)
			       && test_bit(R5_UPTODATE, &qdev->flags)))))
		handle_stripe_clean_event(conf, sh, disks, &return_bi);

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
	    (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
		handle_stripe_fill6(sh, &s, &r6s, disks);

	/* Now we check to see if any write operations have recently
	 * completed
	 */
	if (sh->reconstruct_state == reconstruct_state_drain_result) {
		int qd_idx = sh->qd_idx;

		sh->reconstruct_state = reconstruct_state_idle;
		/* All the 'written' buffers and the parity blocks are ready to
		 * be written back to disk
		 */
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags));
		for (i = disks; i--; ) {
			dev = &sh->dev[i];
			if (test_bit(R5_LOCKED, &dev->flags) &&
			    (i == sh->pd_idx || i == qd_idx ||
			     dev->written)) {
				pr_debug("Writing block %d\n", i);
				BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
				set_bit(R5_Wantwrite, &dev->flags);
				if (!test_bit(R5_Insync, &dev->flags) ||
				    ((i == sh->pd_idx || i == qd_idx) &&
				     s.failed == 0))
					set_bit(STRIPE_INSYNC, &sh->state);
			}
		}
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
			atomic_dec(&conf->preread_active_stripes);
			if (atomic_read(&conf->preread_active_stripes) <
			    IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		}
	}

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	 *    block.
	 */
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	 */
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state)))
		handle_parity_checks6(conf, sh, &s, &r6s, disks);

	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drives are just a ReadError, then we might need
	 * to progress the repair/check process
	 */
	if (s.failed <= 2 && !conf->mddev->ro)
		for (i = 0; i < s.failed; i++) {
			dev = &sh->dev[r6s.failed_num[i]];
			if (test_bit(R5_ReadError, &dev->flags)
			    && !test_bit(R5_LOCKED, &dev->flags)
			    && test_bit(R5_UPTODATE, &dev->flags)) {
				if (!test_bit(R5_ReWrite, &dev->flags)) {
					set_bit(R5_Wantwrite, &dev->flags);
					set_bit(R5_ReWrite, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				} else {
					/* let's read it back */
					set_bit(R5_Wantread, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				}
			}
		}

	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			s.locked++;
		}
	}

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		struct stripe_head *sh2
			= get_active_stripe(conf, sh->sector, 1, 1, 1);
		if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
			/* sh cannot be written until sh2 has been read.
			 * so arrange for sh to be delayed a little
			 */
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
					      &sh2->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh2);
			goto unlock;
		}
		if (sh2)
			release_stripe(sh2);

		/* Need to write out all blocks after computing P&Q */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh, &r6s);

 unlock:
	spin_unlock(&sh->lock);

	/* wait for this device to become unblocked */
	if (unlikely(blocked_rdev))
		md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);

	if (s.ops_request)
		raid_run_ops(sh, s.ops_request);

	ops_run_io(sh, &s);

	return_io(return_bi);
}

static void handle_stripe(struct stripe_head *sh)
{
	if (sh->raid_conf->level == 6)
		handle_stripe6(sh);
	else
		handle_stripe5(sh);
}

static void raid5_activate_delayed(raid5_conf_t *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->hold_list);
		}
	} else
		blk_plug_device(conf->mddev->queue);
}

static void activate_bit_delay(raid5_conf_t *conf)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
	}
}

static void unplug_slaves(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev->private;
	int i;

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			blk_unplug(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void raid5_unplug_device(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev->private;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	if (blk_remove_plug(q)) {
		conf->seq_flush++;
		raid5_activate_delayed(conf);
	}
	md_wakeup_thread(mddev->thread);

	spin_unlock_irqrestore(&conf->device_lock, flags);

	unplug_slaves(mddev);
}

static int raid5_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid5_conf_t *conf = mddev->private;

	/* No difference between reads and writes.  Just check
	 * how busy the stripe_cache is
	 */

	if (mddev_congested(mddev, bits))
		return 1;
	if (conf->inactive_blocked)
		return 1;
	if (conf->quiesce)
		return 1;
	if (list_empty_careful(&conf->inactive_list))
		return 1;

	return 0;
}

/* We want read requests to align with chunks where possible,
 * but write requests don't need to.
 */
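/*
 * Worked example, purely illustrative: with 64KiB chunks
 * (chunk_sectors == 128), a read bio already 4 sectors long that starts
 * at offset 120 within its chunk has 128 - (120 + 4) == 4 sectors left,
 * so at most 4 << 9 == 2048 more bytes may be merged; a larger bvec is
 * trimmed to that so the bio never crosses the chunk boundary.  The one
 * exception below lets an empty bio accept its first bvec in full.
 */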
static int raid5_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if ((bvm->bi_rw & 1) == WRITE)
		return biovec->bv_len; /* always allow writes to be mergeable */

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0)
		max = 0;
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}

static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
{
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bio->bi_size >> 9;

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	return chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
}

/*
 * add bio to the retry LIFO (in O(1) ... we are in interrupt),
 * later sampled by raid5d.
 */
static void add_bio_to_retry(struct bio *bi, raid5_conf_t *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;

	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(conf->mddev->thread);
}


static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
{
	struct bio *bi;

	bi = conf->retry_read_aligned;
	if (bi) {
		conf->retry_read_aligned = NULL;
		return bi;
	}
	bi = conf->retry_read_aligned_list;
	if (bi) {
		conf->retry_read_aligned_list = bi->bi_next;
		bi->bi_next = NULL;
		/*
		 * this sets the active stripe count to 1 and the processed
		 * stripe count to zero (upper 16 bits)
		 */
		bi->bi_phys_segments = 1; /* biased count of active stripes */
	}

	return bi;
}
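
/*
 * A note on the ->bi_phys_segments overload used above and in
 * make_request(): the low half of the field is a biased count of stripes
 * still processing the bio, the high half counts stripes already handled
 * (raid5_bi_hw_segments()).  A bio spanning three stripes starts at 1
 * (the bias), is raised to 4 as it is attached to each stripe, then the
 * bias is dropped and each completing stripe decrements it, so the bio
 * ends exactly when the count falls to zero.
 */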

/*
 * The "raid5_align_endio" should check if the read succeeded and if it
 * did, call bio_endio on the original bio (having bio_put the new bio
 * first).
 * If the read failed, hand the original bio back to raid5d for a retry.
 */
static void raid5_align_endio(struct bio *bi, int error)
{
	struct bio *raid_bi = bi->bi_private;
	mddev_t *mddev;
	raid5_conf_t *conf;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	mdk_rdev_t *rdev;

	bio_put(bi);

	mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
	conf = mddev->private;
	rdev = (void*)raid_bi->bi_next;
	raid_bi->bi_next = NULL;

	rdev_dec_pending(rdev, conf->mddev);

	if (!error && uptodate) {
		bio_endio(raid_bi, 0);
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_stripe);
		return;
	}

	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");

	add_bio_to_retry(raid_bi, conf);
}

static int bio_fits_rdev(struct bio *bi)
{
	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

	if ((bi->bi_size>>9) > queue_max_sectors(q))
		return 0;
	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > queue_max_phys_segments(q))
		return 0;

	if (q->merge_bvec_fn)
		/* it's too hard to apply the merge_bvec_fn at this stage,
		 * just give up
		 */
		return 0;

	return 1;
}

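/*
 * Bypass the stripe cache for reads that sit entirely inside one chunk on
 * one device: clone the bio, remap it onto the member disk and submit it
 * directly.  conf->active_aligned_reads tracks such reads so the rest of
 * the driver can wait for them (e.g. while quiescing).  Returns 1 if the
 * bio was handed off, 0 to fall back to the stripe-cache path.
 */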
static int chunk_aligned_read(struct request_queue *q, struct bio *raid_bio)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev->private;
	unsigned int dd_idx;
	struct bio *align_bi;
	mdk_rdev_t *rdev;

	if (!in_chunk_boundary(mddev, raid_bio)) {
		pr_debug("chunk_aligned_read : non aligned\n");
		return 0;
	}
	/*
	 * use bio_clone to make a copy of the bio
	 */
	align_bi = bio_clone(raid_bio, GFP_NOIO);
	if (!align_bi)
		return 0;
	/*
	 * set bi_end_io to a new function, and set bi_private to the
	 * original bio.
	 */
	align_bi->bi_end_io = raid5_align_endio;
	align_bi->bi_private = raid_bio;
	/*
	 * compute position
	 */
	align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
						   0,
						   &dd_idx, NULL);

	rcu_read_lock();
	rdev = rcu_dereference(conf->disks[dd_idx].rdev);
	if (rdev && test_bit(In_sync, &rdev->flags)) {
		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
		raid_bio->bi_next = (void*)rdev;
		align_bi->bi_bdev = rdev->bdev;
		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
		align_bi->bi_sector += rdev->data_offset;

		if (!bio_fits_rdev(align_bi)) {
			/* too big in some way */
			bio_put(align_bi);
			rdev_dec_pending(rdev, mddev);
			return 0;
		}

		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		atomic_inc(&conf->active_aligned_reads);
		spin_unlock_irq(&conf->device_lock);

		generic_make_request(align_bi);
		return 1;
	} else {
		rcu_read_unlock();
		bio_put(align_bi);
		return 0;
	}
}

/* __get_priority_stripe - get the next stripe to process
 *
 * Full stripe writes are allowed to pass preread active stripes up until
 * the bypass_threshold is exceeded. In general the bypass_count
 * increments when the handle_list is handled before the hold_list; however, it
 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
 * stripe with in flight i/o. The bypass_count will be reset when the
 * head of the hold_list has changed, i.e. the head was promoted to the
 * handle_list.
 */
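/*
 * Illustrative walk-through, assuming bypass_threshold == 1: while
 * handle_list keeps yielding stripes whose I/O has not started and the
 * head of hold_list stays put, bypass_count climbs 0 -> 1 -> 2.  Once
 * handle_list drains, bypass_count (2) exceeds the threshold, so the
 * full-stripe write at the head of hold_list is taken instead and
 * bypass_count drops back by the threshold.  A hold_list stripe is also
 * taken whenever no full-stripe writes are pending at all.
 */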
static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
		 __func__,
		 list_empty(&conf->handle_list) ? "empty" : "busy",
		 list_empty(&conf->hold_list) ? "empty" : "busy",
		 atomic_read(&conf->pending_full_writes), conf->bypass_count);

	if (!list_empty(&conf->handle_list)) {
		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);

		if (list_empty(&conf->hold_list))
			conf->bypass_count = 0;
		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
			if (conf->hold_list.next == conf->last_hold)
				conf->bypass_count++;
			else {
				conf->last_hold = conf->hold_list.next;
				conf->bypass_count -= conf->bypass_threshold;
				if (conf->bypass_count < 0)
					conf->bypass_count = 0;
			}
		}
	} else if (!list_empty(&conf->hold_list) &&
		   ((conf->bypass_threshold &&
		     conf->bypass_count > conf->bypass_threshold) ||
		    atomic_read(&conf->pending_full_writes) == 0)) {
		sh = list_entry(conf->hold_list.next,
				typeof(*sh), lru);
		conf->bypass_count -= conf->bypass_threshold;
		if (conf->bypass_count < 0)
			conf->bypass_count = 0;
	} else
		return NULL;

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);
	BUG_ON(atomic_read(&sh->count) != 1);
	return sh;
}

static int make_request(struct request_queue *q, struct bio *bi)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev->private;
	int dd_idx;
	sector_t new_sector;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	const int rw = bio_data_dir(bi);
	int cpu, remaining;

	if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
		bio_endio(bi, -EOPNOTSUPP);
		return 0;
	}

	md_write_start(mddev, bi);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bi));
	part_stat_unlock();

	if (rw == READ &&
	    mddev->reshape_position == MaxSector &&
	    chunk_aligned_read(q, bi))
		return 0;

	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);
	bi->bi_next = NULL;
	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
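	/* bi_phys_segments now carries a bias of 1 plus one count per stripe
	 * this bio is queued to below; the raid5_dec_bi_phys_segments() at
	 * the bottom drops the bias, so the bio only completes once every
	 * stripe that took a reference has finished with it.
	 */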

	for (; logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		DEFINE_WAIT(w);
		int disks, data_disks;
		int previous;

	retry:
		previous = 0;
		disks = conf->raid_disks;
		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
		if (unlikely(conf->reshape_progress != MaxSector)) {
			/* spinlock is needed as reshape_progress may be
			 * 64bit on a 32bit platform, and so it might be
			 * possible to see a half-updated value.
			 * Of course reshape_progress could change after
			 * the lock is dropped, so once we get a reference
			 * to the stripe that we think it is, we will have
			 * to check again.
			 */
			spin_lock_irq(&conf->device_lock);
			if (mddev->delta_disks < 0
			    ? logical_sector < conf->reshape_progress
			    : logical_sector >= conf->reshape_progress) {
				disks = conf->previous_raid_disks;
				previous = 1;
			} else {
				if (mddev->delta_disks < 0
				    ? logical_sector < conf->reshape_safe
				    : logical_sector >= conf->reshape_safe) {
					spin_unlock_irq(&conf->device_lock);
					schedule();
					goto retry;
				}
			}
			spin_unlock_irq(&conf->device_lock);
		}
		data_disks = disks - conf->max_degraded;

		new_sector = raid5_compute_sector(conf, logical_sector,
						  previous,
						  &dd_idx, NULL);
		pr_debug("raid5: make_request, sector %llu logical %llu\n",
			 (unsigned long long)new_sector,
			 (unsigned long long)logical_sector);

		sh = get_active_stripe(conf, new_sector, previous,
				       (bi->bi_rw&RWA_MASK), 0);
		if (sh) {
			if (unlikely(previous)) {
				/* expansion might have moved on while waiting for a
				 * stripe, so we must do the range check again.
				 * Expansion could still move past after this
				 * test, but as we are holding a reference to
				 * 'sh', we know that if that happens,
				 * STRIPE_EXPANDING will get set and the expansion
				 * won't proceed until we finish with the stripe.
				 */
				int must_retry = 0;
				spin_lock_irq(&conf->device_lock);
				if (mddev->delta_disks < 0
				    ? logical_sector >= conf->reshape_progress
				    : logical_sector < conf->reshape_progress)
					/* mismatch, need to try again */
					must_retry = 1;
				spin_unlock_irq(&conf->device_lock);
				if (must_retry) {
					release_stripe(sh);
					schedule();
					goto retry;
				}
			}

			if (bio_data_dir(bi) == WRITE &&
			    logical_sector >= mddev->suspend_lo &&
			    logical_sector < mddev->suspend_hi) {
				release_stripe(sh);
				/* As the suspend_* range is controlled by
				 * userspace, we want an interruptible
				 * wait.
				 */
				flush_signals(current);
				prepare_to_wait(&conf->wait_for_overlap,
						&w, TASK_INTERRUPTIBLE);
				if (logical_sector >= mddev->suspend_lo &&
				    logical_sector < mddev->suspend_hi)
					schedule();
				goto retry;
			}

			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
			    !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
				/* Stripe is busy expanding or
				 * add failed due to overlap.  Flush everything
				 * and wait a while
				 */
				raid5_unplug_device(mddev->queue);
				release_stripe(sh);
				schedule();
				goto retry;
			}
			finish_wait(&conf->wait_for_overlap, &w);
			set_bit(STRIPE_HANDLE, &sh->state);
			clear_bit(STRIPE_DELAYED, &sh->state);
			release_stripe(sh);
		} else {
			/* cannot get stripe for read-ahead, just give up */
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			finish_wait(&conf->wait_for_overlap, &w);
			break;
		}
	}
	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(bi);
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0) {
		if (rw == WRITE)
			md_write_end(mddev);

		bio_endio(bi, 0);
	}
	return 0;
}

static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);

static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
{
	/* reshaping is quite different to recovery/resync so it is
	 * handled quite separately ... here.
	 *
	 * On each call to sync_request, we gather one chunk worth of
	 * destination stripes and flag them as expanding.
	 * Then we find all the source stripes and request reads.
	 * As the reads complete, handle_stripe will copy the data
	 * into the destination stripe and release that stripe.
	 */
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	struct stripe_head *sh;
	sector_t first_sector, last_sector;
	int raid_disks = conf->previous_raid_disks;
	int data_disks = raid_disks - conf->max_degraded;
	int new_data_disks = conf->raid_disks - conf->max_degraded;
	int i;
	int dd_idx;
	sector_t writepos, readpos, safepos;
	sector_t stripe_addr;
	int reshape_sectors;
	struct list_head stripes;

	if (sector_nr == 0) {
		/* If restarting in the middle, skip the initial sectors */
		if (mddev->delta_disks < 0 &&
		    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
			sector_nr = raid5_size(mddev, 0, 0)
				- conf->reshape_progress;
		} else if (mddev->delta_disks >= 0 &&
			   conf->reshape_progress > 0)
			sector_nr = conf->reshape_progress;
		sector_div(sector_nr, new_data_disks);
		if (sector_nr) {
			*skipped = 1;
			return sector_nr;
		}
	}

	/* We need to process a full chunk at a time.
	 * If old and new chunk sizes differ, we need to process the
	 * largest of these
	 */
	if (mddev->new_chunk_sectors > mddev->chunk_sectors)
		reshape_sectors = mddev->new_chunk_sectors;
	else
		reshape_sectors = mddev->chunk_sectors;

	/* we update the metadata when there is more than 3Meg
	 * in the block range (that is rather arbitrary, should
	 * probably be time based) or when the data about to be
	 * copied would over-write the source of the data at
	 * the front of the range.
	 * i.e. one new_stripe along from reshape_progress new_maps
	 * to after where reshape_safe old_maps to
	 */
	writepos = conf->reshape_progress;
	sector_div(writepos, new_data_disks);
	readpos = conf->reshape_progress;
	sector_div(readpos, data_disks);
	safepos = conf->reshape_safe;
	sector_div(safepos, data_disks);
	if (mddev->delta_disks < 0) {
		writepos -= min_t(sector_t, reshape_sectors, writepos);
		readpos += reshape_sectors;
		safepos += reshape_sectors;
	} else {
		writepos += reshape_sectors;
		readpos -= min_t(sector_t, reshape_sectors, readpos);
		safepos -= min_t(sector_t, reshape_sectors, safepos);
	}
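	/* Illustrative numbers for a grow (delta_disks > 0), assuming
	 * reshape_progress == 10240 array sectors, data_disks == 3,
	 * new_data_disks == 4 and reshape_sectors == 128: writepos becomes
	 * 10240/4 + 128 == 2688 and readpos 10240/3 - 128 == 3285, with
	 * safepos derived the same way from reshape_safe.  All three are
	 * per-device addresses, which is why each is divided by the
	 * relevant number of data disks first.
	 */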

	/* 'writepos' is the most advanced device address we might write.
	 * 'readpos' is the least advanced device address we might read.
	 * 'safepos' is the least address recorded in the metadata as having
	 *     been reshaped.
	 * If 'readpos' is behind 'writepos', then there is no way that we can
	 * ensure safety in the face of a crash - that must be done by userspace
	 * making a backup of the data.  So in that case there is no particular
	 * rush to update metadata.
	 * Otherwise if 'safepos' is behind 'writepos', then we really need to
	 * update the metadata to advance 'safepos' to match 'readpos' so that
	 * we can be safe in the event of a crash.
	 * So we insist on updating metadata if safepos is behind writepos and
	 * readpos is beyond writepos.
	 * In any case, update the metadata every 10 seconds.
	 * Maybe that number should be configurable, but I'm not sure it is
	 * worth it.... maybe it could be a multiple of safemode_delay???
	 */
	if ((mddev->delta_disks < 0
	     ? (safepos > writepos && readpos < writepos)
	     : (safepos < writepos && readpos > writepos)) ||
	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes) == 0);
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = mddev->curr_resync;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait, mddev->flags == 0 ||
			   kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}

	if (mddev->delta_disks < 0) {
		BUG_ON(conf->reshape_progress == 0);
		stripe_addr = writepos;
		BUG_ON((mddev->dev_sectors &
			~((sector_t)reshape_sectors - 1))
		       - reshape_sectors - stripe_addr
		       != sector_nr);
	} else {
		BUG_ON(writepos != sector_nr + reshape_sectors);
		stripe_addr = sector_nr;
	}
	INIT_LIST_HEAD(&stripes);
	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
		int j;
		int skipped_disk = 0;
		sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
		set_bit(STRIPE_EXPANDING, &sh->state);
		atomic_inc(&conf->reshape_stripes);
		/* If any of this stripe is beyond the end of the old
		 * array, then we need to zero those blocks
		 */
		for (j = sh->disks; j--; ) {
			sector_t s;
			if (j == sh->pd_idx)
				continue;
			if (conf->level == 6 &&
			    j == sh->qd_idx)
				continue;
			s = compute_blocknr(sh, j, 0);
			if (s < raid5_size(mddev, 0, 0)) {
				skipped_disk = 1;
				continue;
			}
			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
			set_bit(R5_Expanded, &sh->dev[j].flags);
			set_bit(R5_UPTODATE, &sh->dev[j].flags);
		}
		if (!skipped_disk) {
			set_bit(STRIPE_EXPAND_READY, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
		list_add(&sh->lru, &stripes);
	}
	spin_lock_irq(&conf->device_lock);
	if (mddev->delta_disks < 0)
		conf->reshape_progress -= reshape_sectors * new_data_disks;
	else
		conf->reshape_progress += reshape_sectors * new_data_disks;
	spin_unlock_irq(&conf->device_lock);
	/* Ok, those stripes are ready. We can start scheduling
	 * reads on the source stripes.
	 * The source stripes are determined by mapping the first and last
	 * block on the destination stripes.
	 */
	first_sector =
		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
				     1, &dd_idx, NULL);
	last_sector =
		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
					    * new_data_disks - 1),
				     1, &dd_idx, NULL);
	if (last_sector >= mddev->dev_sectors)
		last_sector = mddev->dev_sectors - 1;
	while (first_sector <= last_sector) {
		sh = get_active_stripe(conf, first_sector, 1, 0, 1);
		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
		set_bit(STRIPE_HANDLE, &sh->state);
		release_stripe(sh);
		first_sector += STRIPE_SECTORS;
	}
	/* Now that the sources are clearly marked, we can release
	 * the destination stripes
	 */
	while (!list_empty(&stripes)) {
		sh = list_entry(stripes.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		release_stripe(sh);
	}
	/* If this takes us to the resync_max point where we have to pause,
	 * then we need to write out the superblock.
	 */
	sector_nr += reshape_sectors;
	if ((sector_nr - mddev->curr_resync_completed) * 2
	    >= mddev->resync_max - mddev->curr_resync_completed) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes) == 0);
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
			   || kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}
	return reshape_sectors;
}

/* FIXME go_faster isn't used */
static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	struct stripe_head *sh;
	sector_t max_sector = mddev->dev_sectors;
	int sync_blocks;
	int still_degraded = 0;
	int i;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */
		unplug_slaves(mddev);

		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
			end_reshape(conf);
			return 0;
		}

		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);

		return 0;
	}

	/* Allow raid5_quiesce to complete */
	wait_event(conf->wait_for_overlap, conf->quiesce != 2);

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		return reshape_request(mddev, sector_nr, skipped);

	/* No need to check resync_max as we never do more than one
	 * stripe, and as resync_max will always be on a chunk boundary,
	 * if the check in md_do_sync didn't fire, there is no chance
	 * of overstepping resync_max here
	 */

	/* if there are too many failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
	if (mddev->degraded >= conf->max_degraded &&
	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = mddev->dev_sectors - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		*skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}
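	/* e.g. with STRIPE_SECTORS == 8, a bitmap answer of
	 * sync_blocks == 100 skips 100/8 == 12 whole stripes,
	 * i.e. 96 sectors, keeping sector_nr stripe aligned.
	 */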

	bitmap_cond_end_sync(mddev->bitmap, sector_nr);

	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
	if (sh == NULL) {
		sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
		schedule_timeout_uninterruptible(1);
	}
	/* Need to check if array will still be degraded after recovery/resync
	 * We don't need to check the 'failed' flag as when that gets set,
	 * recovery aborts.
	 */
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->disks[i].rdev == NULL)
			still_degraded = 1;

	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);

	spin_lock(&sh->lock);
	set_bit(STRIPE_SYNCING, &sh->state);
	clear_bit(STRIPE_INSYNC, &sh->state);
	spin_unlock(&sh->lock);

	handle_stripe(sh);
	release_stripe(sh);

	return STRIPE_SECTORS;
}

static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
{
	/* We may not be able to submit a whole bio at once as there
	 * may not be enough stripe_heads available.
	 * We cannot pre-allocate enough stripe_heads as we may need
	 * more than exist in the cache (if we allow ever larger chunks).
	 * So we do one stripe head at a time and record in
	 * ->bi_hw_segments how many have been done.
	 *
	 * We *know* that this entire raid_bio is in one chunk, so
	 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
	 */
	struct stripe_head *sh;
	int dd_idx;
	sector_t sector, logical_sector, last_sector;
	int scnt = 0;
	int remaining;
	int handled = 0;

	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	sector = raid5_compute_sector(conf, logical_sector,
				      0, &dd_idx, NULL);
	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);

	for (; logical_sector < last_sector;
	     logical_sector += STRIPE_SECTORS,
	     sector += STRIPE_SECTORS,
	     scnt++) {

		if (scnt < raid5_bi_hw_segments(raid_bio))
			/* already done this stripe */
			continue;

		sh = get_active_stripe(conf, sector, 0, 1, 0);

		if (!sh) {
			/* failed to get a stripe - must wait */
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}

		set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
			release_stripe(sh);
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}

		handle_stripe(sh);
		release_stripe(sh);
		handled++;
	}
	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(raid_bio);
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0)
		bio_endio(raid_bio, 0);
	if (atomic_dec_and_test(&conf->active_aligned_reads))
		wake_up(&conf->wait_for_stripe);
	return handled;
}

#ifdef CONFIG_MULTICORE_RAID456
static void __process_stripe(void *param, async_cookie_t cookie)
{
	struct stripe_head *sh = param;

	handle_stripe(sh);
	release_stripe(sh);
}

static void process_stripe(struct stripe_head *sh, struct list_head *domain)
{
	async_schedule_domain(__process_stripe, sh, domain);
}

static void synchronize_stripe_processing(struct list_head *domain)
{
	async_synchronize_full_domain(domain);
}
#else
static void process_stripe(struct stripe_head *sh, struct list_head *domain)
{
	handle_stripe(sh);
	release_stripe(sh);
	cond_resched();
}

static void synchronize_stripe_processing(struct list_head *domain)
{
}
#endif
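
/*
 * With CONFIG_MULTICORE_RAID456 each stripe is farmed out to the async
 * thread pool and raid5d waits for the whole domain once per pass via
 * synchronize_stripe_processing(); otherwise stripes are handled inline
 * and the cond_resched() keeps a long run of stripes from monopolising
 * the CPU.
 */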

/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d(mddev_t *mddev)
{
	struct stripe_head *sh;
	raid5_conf_t *conf = mddev->private;
	int handled;
	LIST_HEAD(raid_domain);

	pr_debug("+++ raid5d active\n");

	md_check_recovery(mddev);

	handled = 0;
	spin_lock_irq(&conf->device_lock);
	while (1) {
		struct bio *bio;

		if (conf->seq_flush != conf->seq_write) {
			int seq = conf->seq_flush;
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = seq;
			activate_bit_delay(conf);
		}

		while ((bio = remove_bio_from_retry(conf))) {
			int ok;
			spin_unlock_irq(&conf->device_lock);
			ok = retry_aligned_read(conf, bio);
			spin_lock_irq(&conf->device_lock);
			if (!ok)
				break;
			handled++;
		}

		sh = __get_priority_stripe(conf);

		if (!sh)
			break;
		spin_unlock_irq(&conf->device_lock);

		handled++;
		process_stripe(sh, &raid_domain);

		spin_lock_irq(&conf->device_lock);
	}
	pr_debug("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);

	synchronize_stripe_processing(&raid_domain);
	async_tx_issue_pending_all();
	unplug_slaves(mddev);

	pr_debug("--- raid5d inactive\n");
}

static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", conf->max_nr_stripes);
	else
		return 0;
}

static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev->private;
	unsigned long new;
	int err;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	if (new <= 16 || new > 32768)
		return -EINVAL;
	while (new < conf->max_nr_stripes) {
		if (drop_one_stripe(conf))
			conf->max_nr_stripes--;
		else
			break;
	}
	err = md_allow_write(mddev);
	if (err)
		return err;
	while (new > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
		else
			break;
	}
	return len;
}

static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);

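/*
 * Tunable from user space; on a typical setup (assuming the array is
 * md0) that is:
 *	echo 4096 > /sys/block/md0/md/stripe_cache_size
 * Shrinking drops stripes one at a time; growing may stop part-way if
 * memory is tight, leaving the cache at whatever size was reached.
 */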
static ssize_t
raid5_show_preread_threshold(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", conf->bypass_threshold);
	else
		return 0;
}

static ssize_t
raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev->private;
	unsigned long new;
	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	if (new > conf->max_nr_stripes)
		return -EINVAL;
	conf->bypass_threshold = new;
	return len;
}

static struct md_sysfs_entry
raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
					S_IRUGO | S_IWUSR,
					raid5_show_preread_threshold,
					raid5_store_preread_threshold);

static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
	else
		return 0;
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] = {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	&raid5_preread_bypass_threshold.attr,
	NULL,
};
static struct attribute_group raid5_attrs_group = {
	.name = NULL,
	.attrs = raid5_attrs,
};

static sector_t
raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	raid5_conf_t *conf = mddev->private;

	if (!sectors)
		sectors = mddev->dev_sectors;
	if (!raid_disks) {
		/* size is defined by the smallest of previous and new size */
		if (conf->raid_disks < conf->previous_raid_disks)
			raid_disks = conf->raid_disks;
		else
			raid_disks = conf->previous_raid_disks;
	}

	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
	return sectors * (raid_disks - conf->max_degraded);
}
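
/*
 * For example, a 6-device RAID-6 (max_degraded == 2) built from 1TiB
 * members exports 4 * 1TiB: sectors is first rounded down to a multiple
 * of both the old and new chunk size so the array always ends on a whole
 * stripe, then multiplied by the number of data disks.
 */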

static void raid5_free_percpu(raid5_conf_t *conf)
{
	struct raid5_percpu *percpu;
	unsigned long cpu;

	if (!conf->percpu)
		return;

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		percpu = per_cpu_ptr(conf->percpu, cpu);
		safe_put_page(percpu->spare_page);
		kfree(percpu->scribble);
	}
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&conf->cpu_notify);
#endif
	put_online_cpus();

	free_percpu(conf->percpu);
}

static void free_conf(raid5_conf_t *conf)
{
	shrink_stripes(conf);
	raid5_free_percpu(conf);
	kfree(conf->disks);
	kfree(conf->stripe_hashtbl);
	kfree(conf);
}

4597
36d1c647
DW
4598#ifdef CONFIG_HOTPLUG_CPU
4599static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4600 void *hcpu)
4601{
4602 raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
4603 long cpu = (long)hcpu;
4604 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
4605
4606 switch (action) {
4607 case CPU_UP_PREPARE:
4608 case CPU_UP_PREPARE_FROZEN:
d6f38f31 4609 if (conf->level == 6 && !percpu->spare_page)
36d1c647 4610 percpu->spare_page = alloc_page(GFP_KERNEL);
d6f38f31
DW
4611 if (!percpu->scribble)
4612 percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4613
4614 if (!percpu->scribble ||
4615 (conf->level == 6 && !percpu->spare_page)) {
4616 safe_put_page(percpu->spare_page);
4617 kfree(percpu->scribble);
36d1c647
DW
4618 pr_err("%s: failed memory allocation for cpu%ld\n",
4619 __func__, cpu);
4620 return NOTIFY_BAD;
4621 }
4622 break;
4623 case CPU_DEAD:
4624 case CPU_DEAD_FROZEN:
4625 safe_put_page(percpu->spare_page);
d6f38f31 4626 kfree(percpu->scribble);
36d1c647 4627 percpu->spare_page = NULL;
d6f38f31 4628 percpu->scribble = NULL;
36d1c647
DW
4629 break;
4630 default:
4631 break;
4632 }
4633 return NOTIFY_OK;
4634}
4635#endif

static int raid5_alloc_percpu(raid5_conf_t *conf)
{
	unsigned long cpu;
	struct page *spare_page;
	struct raid5_percpu *allcpus;
	void *scribble;
	int err;

	allcpus = alloc_percpu(struct raid5_percpu);
	if (!allcpus)
		return -ENOMEM;
	conf->percpu = allcpus;

	get_online_cpus();
	err = 0;
	for_each_present_cpu(cpu) {
		if (conf->level == 6) {
			spare_page = alloc_page(GFP_KERNEL);
			if (!spare_page) {
				err = -ENOMEM;
				break;
			}
			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
		}
		scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL);
		if (!scribble) {
			err = -ENOMEM;
			break;
		}
		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
	}
#ifdef CONFIG_HOTPLUG_CPU
	conf->cpu_notify.notifier_call = raid456_cpu_notify;
	conf->cpu_notify.priority = 0;
	if (err == 0)
		err = register_cpu_notifier(&conf->cpu_notify);
#endif
	put_online_cpus();

	return err;
}

static raid5_conf_t *setup_conf(mddev_t *mddev)
{
	raid5_conf_t *conf;
	int raid_disk, memory;
	mdk_rdev_t *rdev;
	struct disk_info *disk;

	if (mddev->new_level != 5
	    && mddev->new_level != 4
	    && mddev->new_level != 6) {
		printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
		       mdname(mddev), mddev->new_level);
		return ERR_PTR(-EIO);
	}
	if ((mddev->new_level == 5
	     && !algorithm_valid_raid5(mddev->new_layout)) ||
	    (mddev->new_level == 6
	     && !algorithm_valid_raid6(mddev->new_layout))) {
		printk(KERN_ERR "raid5: %s: layout %d not supported\n",
		       mdname(mddev), mddev->new_layout);
		return ERR_PTR(-EIO);
	}
	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
		printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
		       mdname(mddev), mddev->raid_disks);
		return ERR_PTR(-EINVAL);
	}

	if (!mddev->new_chunk_sectors ||
	    (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
	    !is_power_of_2(mddev->new_chunk_sectors)) {
		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
		       mddev->new_chunk_sectors << 9, mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
	if (conf == NULL)
		goto abort;

	conf->raid_disks = mddev->raid_disks;
	conf->scribble_len = scribble_len(conf->raid_disks);
	if (mddev->reshape_position == MaxSector)
		conf->previous_raid_disks = mddev->raid_disks;
	else
		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;

	conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
			      GFP_KERNEL);
	if (!conf->disks)
		goto abort;

	conf->mddev = mddev;

	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
		goto abort;

	conf->level = mddev->new_level;
	if (raid5_alloc_percpu(conf) != 0)
		goto abort;

	spin_lock_init(&conf->device_lock);
	init_waitqueue_head(&conf->wait_for_stripe);
	init_waitqueue_head(&conf->wait_for_overlap);
	INIT_LIST_HEAD(&conf->handle_list);
	INIT_LIST_HEAD(&conf->hold_list);
	INIT_LIST_HEAD(&conf->delayed_list);
	INIT_LIST_HEAD(&conf->bitmap_list);
	INIT_LIST_HEAD(&conf->inactive_list);
	atomic_set(&conf->active_stripes, 0);
	atomic_set(&conf->preread_active_stripes, 0);
	atomic_set(&conf->active_aligned_reads, 0);
	conf->bypass_threshold = BYPASS_THRESHOLD;

	pr_debug("raid5: run(%s) called.\n", mdname(mddev));

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		raid_disk = rdev->raid_disk;
		if (raid_disk >= conf->raid_disks
		    || raid_disk < 0)
			continue;
		disk = conf->disks + raid_disk;

		disk->rdev = rdev;

		if (test_bit(In_sync, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "raid5: device %s operational as raid"
			       " disk %d\n", bdevname(rdev->bdev, b),
			       raid_disk);
		} else
			/* Cannot rely on bitmap to complete recovery */
			conf->fullsync = 1;
	}

	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->level = mddev->new_level;
	if (conf->level == 6)
		conf->max_degraded = 2;
	else
		conf->max_degraded = 1;
	conf->algorithm = mddev->new_layout;
	conf->max_nr_stripes = NR_STRIPES;
	conf->reshape_progress = mddev->reshape_position;
	if (conf->reshape_progress != MaxSector) {
		conf->prev_chunk_sectors = mddev->chunk_sectors;
		conf->prev_algo = mddev->layout;
	}

	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
		 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
	if (grow_stripes(conf, conf->max_nr_stripes)) {
		printk(KERN_ERR
		       "raid5: couldn't allocate %dkB for buffers\n", memory);
		goto abort;
	} else
		printk(KERN_INFO "raid5: allocated %dkB for %s\n",
		       memory, mdname(mddev));
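	/* Rough sizing, assuming the defaults: with NR_STRIPES == 256, four
	 * member disks and 4KiB pages the dominant term above is
	 * 256 * 4 * 4KiB == 4MiB of pages for stripe buffers, plus the
	 * stripe_head and bio headers also counted in 'memory'.
	 */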

	conf->thread = md_register_thread(raid5d, mddev, NULL);
	if (!conf->thread) {
		printk(KERN_ERR
		       "raid5: couldn't allocate thread for %s\n",
		       mdname(mddev));
		goto abort;
	}

	return conf;

 abort:
	if (conf) {
		free_conf(conf);
		return ERR_PTR(-EIO);
	} else
		return ERR_PTR(-ENOMEM);
}
4815
4816static int run(mddev_t *mddev)
4817{
4818 raid5_conf_t *conf;
8f6c2e4b 4819 int working_disks = 0, chunk_size;
91adb564
N
4820 mdk_rdev_t *rdev;
4821
8c6ac868
AN
4822 if (mddev->recovery_cp != MaxSector)
4823 printk(KERN_NOTICE "raid5: %s is not clean"
4824 " -- starting background reconstruction\n",
4825 mdname(mddev));
91adb564
N
4826 if (mddev->reshape_position != MaxSector) {
4827 /* Check that we can continue the reshape.
4828 * Currently only disks can change, it must
4829 * increase, and we must be past the point where
4830 * a stripe over-writes itself
4831 */
4832 sector_t here_new, here_old;
4833 int old_disks;
18b00334 4834 int max_degraded = (mddev->level == 6 ? 2 : 1);
91adb564 4835
88ce4930 4836 if (mddev->new_level != mddev->level) {
91adb564
N
4837 printk(KERN_ERR "raid5: %s: unsupported reshape "
4838 "required - aborting.\n",
4839 mdname(mddev));
4840 return -EINVAL;
4841 }
91adb564
N
4842 old_disks = mddev->raid_disks - mddev->delta_disks;
4843 /* reshape_position must be on a new-stripe boundary, and one
4844 * further up in new geometry must map after here in old
4845 * geometry.
4846 */
4847 here_new = mddev->reshape_position;
664e7c41 4848 if (sector_div(here_new, mddev->new_chunk_sectors *
91adb564
N
4849 (mddev->raid_disks - max_degraded))) {
4850 printk(KERN_ERR "raid5: reshape_position not "
4851 "on a stripe boundary\n");
4852 return -EINVAL;
4853 }
4854 /* here_new is the stripe we will write to */
4855 here_old = mddev->reshape_position;
9d8f0363 4856 sector_div(here_old, mddev->chunk_sectors *
91adb564
N
4857 (old_disks-max_degraded));
4858 /* here_old is the first stripe that we might need to read
4859 * from */
67ac6011
N
4860 if (mddev->delta_disks == 0) {
4861 /* We cannot be sure it is safe to start an in-place
4862 * reshape. It is only safe if user-space if monitoring
4863 * and taking constant backups.
4864 * mdadm always starts a situation like this in
4865 * readonly mode so it can take control before
4866 * allowing any writes. So just check for that.
4867 */
4868 if ((here_new * mddev->new_chunk_sectors !=
4869 here_old * mddev->chunk_sectors) ||
4870 mddev->ro == 0) {
4871 printk(KERN_ERR "raid5: in-place reshape must be started"
4872 " in read-only mode - aborting\n");
4873 return -EINVAL;
4874 }
4875 } else if (mddev->delta_disks < 0
4876 ? (here_new * mddev->new_chunk_sectors <=
4877 here_old * mddev->chunk_sectors)
4878 : (here_new * mddev->new_chunk_sectors >=
4879 here_old * mddev->chunk_sectors)) {
91adb564
N
4880 /* Reading from the same stripe as writing to - bad */
4881 printk(KERN_ERR "raid5: reshape_position too early for "
4882 "auto-recovery - aborting.\n");
4883 return -EINVAL;
4884 }
4885 printk(KERN_INFO "raid5: reshape will continue\n");
4886 /* OK, we should be able to continue; */
4887 } else {
4888 BUG_ON(mddev->level != mddev->new_level);
4889 BUG_ON(mddev->layout != mddev->new_layout);
664e7c41 4890 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
91adb564 4891 BUG_ON(mddev->delta_disks != 0);
1da177e4 4892 }
91adb564 4893
245f46c2
N
4894 if (mddev->private == NULL)
4895 conf = setup_conf(mddev);
4896 else
4897 conf = mddev->private;
4898
91adb564
N
4899 if (IS_ERR(conf))
4900 return PTR_ERR(conf);
4901
4902 mddev->thread = conf->thread;
4903 conf->thread = NULL;
4904 mddev->private = conf;
4905
4906 /*
4907 * 0 for a fully functional array, 1 or 2 for a degraded array.
4908 */
4909 list_for_each_entry(rdev, &mddev->disks, same_set)
4910 if (rdev->raid_disk >= 0 &&
4911 test_bit(In_sync, &rdev->flags))
4912 working_disks++;
4913
4914 mddev->degraded = conf->raid_disks - working_disks;
4915
16a53ecc 4916 if (mddev->degraded > conf->max_degraded) {
1da177e4
LT
4917 printk(KERN_ERR "raid5: not enough operational devices for %s"
4918 " (%d/%d failed)\n",
02c2de8c 4919 mdname(mddev), mddev->degraded, conf->raid_disks);
1da177e4
LT
4920 goto abort;
4921 }
4922
91adb564 4923 /* device size must be a multiple of chunk size */
9d8f0363 4924 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
91adb564
N
4925 mddev->resync_max_sectors = mddev->dev_sectors;
4926
16a53ecc 4927 if (mddev->degraded > 0 &&
1da177e4 4928 mddev->recovery_cp != MaxSector) {
6ff8d8ec
N
4929 if (mddev->ok_start_degraded)
4930 printk(KERN_WARNING
4931 "raid5: starting dirty degraded array: %s"
4932 "- data corruption possible.\n",
4933 mdname(mddev));
4934 else {
4935 printk(KERN_ERR
4936 "raid5: cannot start dirty degraded array for %s\n",
4937 mdname(mddev));
4938 goto abort;
4939 }
1da177e4
LT
4940 }
4941
1da177e4
LT
4942 if (mddev->degraded == 0)
4943 printk("raid5: raid level %d set %s active with %d out of %d"
e183eaed
N
4944 " devices, algorithm %d\n", conf->level, mdname(mddev),
4945 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
4946 mddev->new_layout);
1da177e4
LT
4947 else
4948 printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
4949 " out of %d devices, algorithm %d\n", conf->level,
4950 mdname(mddev), mddev->raid_disks - mddev->degraded,
e183eaed 4951 mddev->raid_disks, mddev->new_layout);
1da177e4
LT
4952
4953 print_raid5_conf(conf);
4954
fef9c61f 4955 if (conf->reshape_progress != MaxSector) {
f6705578 4956 printk(KERN_INFO "raid5: ok, starting reshape thread\n");
fef9c61f 4957 conf->reshape_safe = conf->reshape_progress;
f6705578
N
4958 atomic_set(&conf->reshape_stripes, 0);
4959 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4960 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4961 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4962 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4963 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
0da3c619 4964 "reshape");
f6705578
N
4965 }
4966
1da177e4 4967 /* read-ahead size must cover two whole stripes, which is
16a53ecc 4968 * 2 * (datadisks) * chunksize, where datadisks = raid_disks - max_degraded
1da177e4
LT
4969 */
4970 {
16a53ecc
N
4971 int data_disks = conf->previous_raid_disks - conf->max_degraded;
4972 int stripe = data_disks *
9d8f0363 4973 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
1da177e4
LT
4974 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4975 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4976 }
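/* Example with assumed numbers: a 6-disk raid5 (data_disks = 5),
 * 64KiB chunks and 4KiB pages gives stripe = 5 * (65536 / 4096) =
 * 80 pages, so ra_pages is raised to at least 160 pages (640KiB)
 * of read-ahead. */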
4977
4978 /* Ok, everything is just fine now */
5e55e2f5
N
4979 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
4980 printk(KERN_WARNING
4981 "raid5: failed to create sysfs attributes for %s\n",
4982 mdname(mddev));
7a5febe9 4983
91adb564
N
4984 mddev->queue->queue_lock = &conf->device_lock;
4985
7a5febe9 4986 mddev->queue->unplug_fn = raid5_unplug_device;
f022b2fd 4987 mddev->queue->backing_dev_info.congested_data = mddev;
041ae52e 4988 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
f022b2fd 4989
1f403624 4990 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
7a5febe9 4991
23032a0e 4992 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
8f6c2e4b
MP
4993 chunk_size = mddev->chunk_sectors << 9;
4994 blk_queue_io_min(mddev->queue, chunk_size);
4995 blk_queue_io_opt(mddev->queue, chunk_size *
4996 (conf->raid_disks - conf->max_degraded));
4997
4998 list_for_each_entry(rdev, &mddev->disks, same_set)
4999 disk_stack_limits(mddev->gendisk, rdev->bdev,
5000 rdev->data_offset << 9);
23032a0e 5001
1da177e4
LT
5002 return 0;
5003abort:
e0cf8f04 5004 md_unregister_thread(mddev->thread);
91adb564 5005 mddev->thread = NULL;
1da177e4
LT
5006 if (conf) {
5007 print_raid5_conf(conf);
95fc17aa 5008 free_conf(conf);
1da177e4
LT
5009 }
5010 mddev->private = NULL;
5011 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
5012 return -EIO;
5013}
5014
5015
5016
3f294f4f 5017static int stop(mddev_t *mddev)
1da177e4
LT
5018{
5019 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
5020
5021 md_unregister_thread(mddev->thread);
5022 mddev->thread = NULL;
041ae52e 5023 mddev->queue->backing_dev_info.congested_fn = NULL;
1da177e4 5024 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
007583c9 5025 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
95fc17aa 5026 free_conf(conf);
1da177e4
LT
5027 mddev->private = NULL;
5028 return 0;
5029}
5030
45b4233c 5031#ifdef DEBUG
d710e138 5032static void print_sh(struct seq_file *seq, struct stripe_head *sh)
1da177e4
LT
5033{
5034 int i;
5035
16a53ecc
N
5036 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
5037 (unsigned long long)sh->sector, sh->pd_idx, sh->state);
5038 seq_printf(seq, "sh %llu, count %d.\n",
5039 (unsigned long long)sh->sector, atomic_read(&sh->count));
5040 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
7ecaa1e6 5041 for (i = 0; i < sh->disks; i++) {
16a53ecc
N
5042 seq_printf(seq, "(cache%d: %p %ld) ",
5043 i, sh->dev[i].page, sh->dev[i].flags);
1da177e4 5044 }
16a53ecc 5045 seq_printf(seq, "\n");
1da177e4
LT
5046}
5047
d710e138 5048static void printall(struct seq_file *seq, raid5_conf_t *conf)
1da177e4
LT
5049{
5050 struct stripe_head *sh;
fccddba0 5051 struct hlist_node *hn;
1da177e4
LT
5052 int i;
5053
5054 spin_lock_irq(&conf->device_lock);
5055 for (i = 0; i < NR_HASH; i++) {
fccddba0 5056 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
1da177e4
LT
5057 if (sh->raid_conf != conf)
5058 continue;
16a53ecc 5059 print_sh(seq, sh);
1da177e4
LT
5060 }
5061 }
5062 spin_unlock_irq(&conf->device_lock);
5063}
5064#endif
5065
d710e138 5066static void status(struct seq_file *seq, mddev_t *mddev)
1da177e4
LT
5067{
5068 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
5069 int i;
5070
9d8f0363
AN
5071 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
5072 mddev->chunk_sectors / 2, mddev->layout);
02c2de8c 5073 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
1da177e4
LT
5074 for (i = 0; i < conf->raid_disks; i++)
5075 seq_printf (seq, "%s",
5076 conf->disks[i].rdev &&
b2d444d7 5077 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
1da177e4 5078 seq_printf (seq, "]");
45b4233c 5079#ifdef DEBUG
16a53ecc
N
5080 seq_printf (seq, "\n");
5081 printall(seq, conf);
1da177e4
LT
5082#endif
5083}
5084
5085static void print_raid5_conf (raid5_conf_t *conf)
5086{
5087 int i;
5088 struct disk_info *tmp;
5089
5090 printk("RAID5 conf printout:\n");
5091 if (!conf) {
5092 printk("(conf==NULL)\n");
5093 return;
5094 }
02c2de8c
N
5095 printk(" --- rd:%d wd:%d\n", conf->raid_disks,
5096 conf->raid_disks - conf->mddev->degraded);
1da177e4
LT
5097
5098 for (i = 0; i < conf->raid_disks; i++) {
5099 char b[BDEVNAME_SIZE];
5100 tmp = conf->disks + i;
5101 if (tmp->rdev)
5102 printk(" disk %d, o:%d, dev:%s\n",
b2d444d7 5103 i, !test_bit(Faulty, &tmp->rdev->flags),
1da177e4
LT
5104 bdevname(tmp->rdev->bdev,b));
5105 }
5106}
5107
5108static int raid5_spare_active(mddev_t *mddev)
5109{
5110 int i;
5111 raid5_conf_t *conf = mddev->private;
5112 struct disk_info *tmp;
5113
5114 for (i = 0; i < conf->raid_disks; i++) {
5115 tmp = conf->disks + i;
5116 if (tmp->rdev
b2d444d7 5117 && !test_bit(Faulty, &tmp->rdev->flags)
c04be0aa
N
5118 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
5119 unsigned long flags;
5120 spin_lock_irqsave(&conf->device_lock, flags);
1da177e4 5121 mddev->degraded--;
c04be0aa 5122 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4
LT
5123 }
5124 }
5125 print_raid5_conf(conf);
5126 return 0;
5127}
5128
5129static int raid5_remove_disk(mddev_t *mddev, int number)
5130{
5131 raid5_conf_t *conf = mddev->private;
5132 int err = 0;
5133 mdk_rdev_t *rdev;
5134 struct disk_info *p = conf->disks + number;
5135
5136 print_raid5_conf(conf);
5137 rdev = p->rdev;
5138 if (rdev) {
ec32a2bd
N
5139 if (number >= conf->raid_disks &&
5140 conf->reshape_progress == MaxSector)
5141 clear_bit(In_sync, &rdev->flags);
5142
b2d444d7 5143 if (test_bit(In_sync, &rdev->flags) ||
1da177e4
LT
5144 atomic_read(&rdev->nr_pending)) {
5145 err = -EBUSY;
5146 goto abort;
5147 }
dfc70645
N
5148 /* Only remove non-faulty devices if recovery
5149 * isn't possible.
5150 */
5151 if (!test_bit(Faulty, &rdev->flags) &&
ec32a2bd
N
5152 mddev->degraded <= conf->max_degraded &&
5153 number < conf->raid_disks) {
dfc70645
N
5154 err = -EBUSY;
5155 goto abort;
5156 }
1da177e4 5157 p->rdev = NULL;
fbd568a3 5158 synchronize_rcu();
1da177e4
LT
5159 if (atomic_read(&rdev->nr_pending)) {
5160 /* lost the race, try later */
5161 err = -EBUSY;
5162 p->rdev = rdev;
5163 }
5164 }
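/* The removal handshake: clear p->rdev, then synchronize_rcu() so
 * any reader inside an RCU read-side section has either seen the
 * old pointer (and raised nr_pending) or sees NULL. A non-zero
 * nr_pending after the grace period means a request won the race,
 * so the pointer is put back and the caller must retry later. */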
5165abort:
5166
5167 print_raid5_conf(conf);
5168 return err;
5169}
5170
5171static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
5172{
5173 raid5_conf_t *conf = mddev->private;
199050ea 5174 int err = -EEXIST;
1da177e4
LT
5175 int disk;
5176 struct disk_info *p;
6c2fce2e
NB
5177 int first = 0;
5178 int last = conf->raid_disks - 1;
1da177e4 5179
16a53ecc 5180 if (mddev->degraded > conf->max_degraded)
1da177e4 5181 /* no point adding a device */
199050ea 5182 return -EINVAL;
1da177e4 5183
6c2fce2e
NB
5184 if (rdev->raid_disk >= 0)
5185 first = last = rdev->raid_disk;
1da177e4
LT
5186
5187 /*
16a53ecc
N
5188 * find the disk ... but prefer rdev->saved_raid_disk
5189 * if possible.
1da177e4 5190 */
16a53ecc 5191 if (rdev->saved_raid_disk >= 0 &&
6c2fce2e 5192 rdev->saved_raid_disk >= first &&
16a53ecc
N
5193 conf->disks[rdev->saved_raid_disk].rdev == NULL)
5194 disk = rdev->saved_raid_disk;
5195 else
6c2fce2e
NB
5196 disk = first;
5197 for ( ; disk <= last ; disk++)
1da177e4 5198 if ((p=conf->disks + disk)->rdev == NULL) {
b2d444d7 5199 clear_bit(In_sync, &rdev->flags);
1da177e4 5200 rdev->raid_disk = disk;
199050ea 5201 err = 0;
72626685
N
5202 if (rdev->saved_raid_disk != disk)
5203 conf->fullsync = 1;
d6065f7b 5204 rcu_assign_pointer(p->rdev, rdev);
1da177e4
LT
5205 break;
5206 }
5207 print_raid5_conf(conf);
199050ea 5208 return err;
1da177e4
LT
5209}
5210
5211static int raid5_resize(mddev_t *mddev, sector_t sectors)
5212{
5213 /* no resync is happening, and there is enough space
5214 * on all devices, so we can resize.
5215 * We need to make sure resync covers any new space.
5216 * If the array is shrinking we should possibly wait until
5217 * any io in the removed space completes, but it hardly seems
5218 * worth it.
5219 */
9d8f0363 5220 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
1f403624
DW
5221 md_set_array_sectors(mddev, raid5_size(mddev, sectors,
5222 mddev->raid_disks));
b522adcd
DW
5223 if (mddev->array_sectors >
5224 raid5_size(mddev, sectors, mddev->raid_disks))
5225 return -EINVAL;
f233ea5c 5226 set_capacity(mddev->gendisk, mddev->array_sectors);
44ce6294 5227 mddev->changed = 1;
449aad3e 5228 revalidate_disk(mddev->gendisk);
58c0fed4
AN
5229 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
5230 mddev->recovery_cp = mddev->dev_sectors;
1da177e4
LT
5231 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5232 }
58c0fed4 5233 mddev->dev_sectors = sectors;
4b5c7ae8 5234 mddev->resync_max_sectors = sectors;
1da177e4
LT
5235 return 0;
5236}
5237
01ee22b4
N
5238static int check_stripe_cache(mddev_t *mddev)
5239{
5240 /* Can only proceed if there are plenty of stripe_heads.
5241 * We need a minimum of one full stripe, and for sensible progress
5242 * it is best to have about 4 times that.
5243 * If we require 4 times, then the default 256 4K stripe_heads will
5244 * allow for chunk sizes up to 256K, which is probably OK.
5245 * If the chunk size is greater, user-space should request more
5246 * stripe_heads first.
5247 */
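/* For instance, with the default 256 stripe_heads and 4KiB
 * STRIPE_SIZE, (chunk_bytes / 4096) * 4 <= 256 caps the chunk at
 * 256KiB; a 512KiB chunk needs stripe_cache_size raised to at
 * least 512 through sysfs before the reshape is attempted. */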
5248 raid5_conf_t *conf = mddev->private;
5249 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
5250 > conf->max_nr_stripes ||
5251 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
5252 > conf->max_nr_stripes) {
5253 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
5254 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
5255 / STRIPE_SIZE)*4);
5256 return 0;
5257 }
5258 return 1;
5259}
5260
50ac168a 5261static int check_reshape(mddev_t *mddev)
29269553 5262{
070ec55d 5263 raid5_conf_t *conf = mddev->private;
29269553 5264
88ce4930
N
5265 if (mddev->delta_disks == 0 &&
5266 mddev->new_layout == mddev->layout &&
664e7c41 5267 mddev->new_chunk_sectors == mddev->chunk_sectors)
50ac168a 5268 return 0; /* nothing to do */
dba034ee
N
5269 if (mddev->bitmap)
5270 /* Cannot grow a bitmap yet */
5271 return -EBUSY;
ec32a2bd
N
5272 if (mddev->degraded > conf->max_degraded)
5273 return -EINVAL;
5274 if (mddev->delta_disks < 0) {
5275 /* We might be able to shrink, but the devices must
5276 * be made bigger first.
5277 * For raid6, 4 is the minimum size.
5278 * Otherwise 2 is the minimum
5279 */
5280 int min = 2;
5281 if (mddev->level == 6)
5282 min = 4;
5283 if (mddev->raid_disks + mddev->delta_disks < min)
5284 return -EINVAL;
5285 }
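/* e.g. shrinking a 5-device raid6 by delta_disks = -2 would leave
 * three devices, below the raid6 minimum of four, so -EINVAL. */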
29269553 5286
01ee22b4 5287 if (!check_stripe_cache(mddev))
29269553 5288 return -ENOSPC;
29269553 5289
ec32a2bd 5290 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
63c70c4f
N
5291}
5292
5293static int raid5_start_reshape(mddev_t *mddev)
5294{
070ec55d 5295 raid5_conf_t *conf = mddev->private;
63c70c4f 5296 mdk_rdev_t *rdev;
63c70c4f
N
5297 int spares = 0;
5298 int added_devices = 0;
c04be0aa 5299 unsigned long flags;
63c70c4f 5300
f416885e 5301 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
63c70c4f
N
5302 return -EBUSY;
5303
01ee22b4
N
5304 if (!check_stripe_cache(mddev))
5305 return -ENOSPC;
5306
159ec1fc 5307 list_for_each_entry(rdev, &mddev->disks, same_set)
29269553
N
5308 if (rdev->raid_disk < 0 &&
5309 !test_bit(Faulty, &rdev->flags))
5310 spares++;
63c70c4f 5311
f416885e 5312 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
29269553
N
5313 /* Not enough devices even to make a degraded array
5314 * of that size
5315 */
5316 return -EINVAL;
5317
ec32a2bd
N
5318 /* Refuse to reduce size of the array. Any reductions in
5319 * array size must be through explicit setting of array_size
5320 * attribute.
5321 */
5322 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
5323 < mddev->array_sectors) {
5324 printk(KERN_ERR "md: %s: array size must be reduced "
5325 "before number of disks\n", mdname(mddev));
5326 return -EINVAL;
5327 }
5328
f6705578 5329 atomic_set(&conf->reshape_stripes, 0);
29269553
N
5330 spin_lock_irq(&conf->device_lock);
5331 conf->previous_raid_disks = conf->raid_disks;
63c70c4f 5332 conf->raid_disks += mddev->delta_disks;
09c9e5fa
AN
5333 conf->prev_chunk_sectors = conf->chunk_sectors;
5334 conf->chunk_sectors = mddev->new_chunk_sectors;
88ce4930
N
5335 conf->prev_algo = conf->algorithm;
5336 conf->algorithm = mddev->new_layout;
fef9c61f
N
5337 if (mddev->delta_disks < 0)
5338 conf->reshape_progress = raid5_size(mddev, 0, 0);
5339 else
5340 conf->reshape_progress = 0;
5341 conf->reshape_safe = conf->reshape_progress;
86b42c71 5342 conf->generation++;
29269553
N
5343 spin_unlock_irq(&conf->device_lock);
5344
5345 /* Add some new drives, as many as will fit.
5346 * We know there are enough to make the newly sized array work.
5347 */
159ec1fc 5348 list_for_each_entry(rdev, &mddev->disks, same_set)
29269553
N
5349 if (rdev->raid_disk < 0 &&
5350 !test_bit(Faulty, &rdev->flags)) {
199050ea 5351 if (raid5_add_disk(mddev, rdev) == 0) {
29269553
N
5352 char nm[20];
5353 set_bit(In_sync, &rdev->flags);
29269553 5354 added_devices++;
5fd6c1dc 5355 rdev->recovery_offset = 0;
29269553 5356 sprintf(nm, "rd%d", rdev->raid_disk);
5e55e2f5
N
5357 if (sysfs_create_link(&mddev->kobj,
5358 &rdev->kobj, nm))
5359 printk(KERN_WARNING
5360 "raid5: failed to create "
5361 " link %s for %s\n",
5362 nm, mdname(mddev));
29269553
N
5363 } else
5364 break;
5365 }
5366
ec32a2bd
N
5367 if (mddev->delta_disks > 0) {
5368 spin_lock_irqsave(&conf->device_lock, flags);
5369 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
5370 - added_devices;
5371 spin_unlock_irqrestore(&conf->device_lock, flags);
5372 }
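/* e.g. growing from 4 to 6 raid_disks when only one spare was
 * found: added_devices = 1, so degraded = (6 - 4) - 1 = 1 and the
 * reshape proceeds with one missing member. */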
63c70c4f 5373 mddev->raid_disks = conf->raid_disks;
e516402c 5374 mddev->reshape_position = conf->reshape_progress;
850b2b42 5375 set_bit(MD_CHANGE_DEVS, &mddev->flags);
f6705578 5376
29269553
N
5377 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5378 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5379 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5380 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5381 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
0da3c619 5382 "reshape");
29269553
N
5383 if (!mddev->sync_thread) {
5384 mddev->recovery = 0;
5385 spin_lock_irq(&conf->device_lock);
5386 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
fef9c61f 5387 conf->reshape_progress = MaxSector;
29269553
N
5388 spin_unlock_irq(&conf->device_lock);
5389 return -EAGAIN;
5390 }
c8f517c4 5391 conf->reshape_checkpoint = jiffies;
29269553
N
5392 md_wakeup_thread(mddev->sync_thread);
5393 md_new_event(mddev);
5394 return 0;
5395}
29269553 5396
ec32a2bd
N
5397/* This is called from the reshape thread and should make any
5398 * changes needed in 'conf'
5399 */
29269553
N
5400static void end_reshape(raid5_conf_t *conf)
5401{
29269553 5402
f6705578 5403 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
f6705578 5404
f6705578 5405 spin_lock_irq(&conf->device_lock);
cea9c228 5406 conf->previous_raid_disks = conf->raid_disks;
fef9c61f 5407 conf->reshape_progress = MaxSector;
f6705578 5408 spin_unlock_irq(&conf->device_lock);
b0f9ec04 5409 wake_up(&conf->wait_for_overlap);
16a53ecc
N
5410
5411 /* read-ahead size must cover two whole stripes, which is
5412 * 2 * (datadisks) * chunksize, where datadisks = raid_disks - max_degraded
5413 */
5414 {
cea9c228 5415 int data_disks = conf->raid_disks - conf->max_degraded;
09c9e5fa 5416 int stripe = data_disks * ((conf->chunk_sectors << 9)
cea9c228 5417 / PAGE_SIZE);
16a53ecc
N
5418 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5419 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
5420 }
29269553 5421 }
29269553
N
5422}
5423
ec32a2bd
N
5424/* This is called from the raid5d thread with mddev_lock held.
5425 * It makes config changes to the device.
5426 */
cea9c228
N
5427static void raid5_finish_reshape(mddev_t *mddev)
5428{
070ec55d 5429 raid5_conf_t *conf = mddev->private;
cea9c228
N
5430
5431 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5432
ec32a2bd
N
5433 if (mddev->delta_disks > 0) {
5434 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5435 set_capacity(mddev->gendisk, mddev->array_sectors);
5436 mddev->changed = 1;
449aad3e 5437 revalidate_disk(mddev->gendisk);
ec32a2bd
N
5438 } else {
5439 int d;
ec32a2bd
N
5440 mddev->degraded = conf->raid_disks;
5441 for (d = 0; d < conf->raid_disks ; d++)
5442 if (conf->disks[d].rdev &&
5443 test_bit(In_sync,
5444 &conf->disks[d].rdev->flags))
5445 mddev->degraded--;
5446 for (d = conf->raid_disks ;
5447 d < conf->raid_disks - mddev->delta_disks;
1a67dde0
N
5448 d++) {
5449 mdk_rdev_t *rdev = conf->disks[d].rdev;
5450 if (rdev && raid5_remove_disk(mddev, d) == 0) {
5451 char nm[20];
5452 sprintf(nm, "rd%d", rdev->raid_disk);
5453 sysfs_remove_link(&mddev->kobj, nm);
5454 rdev->raid_disk = -1;
5455 }
5456 }
cea9c228 5457 }
88ce4930 5458 mddev->layout = conf->algorithm;
09c9e5fa 5459 mddev->chunk_sectors = conf->chunk_sectors;
ec32a2bd
N
5460 mddev->reshape_position = MaxSector;
5461 mddev->delta_disks = 0;
cea9c228
N
5462 }
5463}
5464
72626685
N
5465static void raid5_quiesce(mddev_t *mddev, int state)
5466{
070ec55d 5467 raid5_conf_t *conf = mddev->private;
72626685
N
5468
5469 switch(state) {
e464eafd
N
5470 case 2: /* resume for a suspend */
5471 wake_up(&conf->wait_for_overlap);
5472 break;
5473
72626685
N
5474 case 1: /* stop all writes */
5475 spin_lock_irq(&conf->device_lock);
64bd660b
N
5476 /* '2' tells resync/reshape to pause so that all
5477 * active stripes can drain
5478 */
5479 conf->quiesce = 2;
72626685 5480 wait_event_lock_irq(conf->wait_for_stripe,
46031f9a
RBJ
5481 atomic_read(&conf->active_stripes) == 0 &&
5482 atomic_read(&conf->active_aligned_reads) == 0,
72626685 5483 conf->device_lock, /* nothing */);
64bd660b 5484 conf->quiesce = 1;
72626685 5485 spin_unlock_irq(&conf->device_lock);
64bd660b
N
5486 /* allow reshape to continue */
5487 wake_up(&conf->wait_for_overlap);
72626685
N
5488 break;
5489
5490 case 0: /* re-enable writes */
5491 spin_lock_irq(&conf->device_lock);
5492 conf->quiesce = 0;
5493 wake_up(&conf->wait_for_stripe);
e464eafd 5494 wake_up(&conf->wait_for_overlap);
72626685
N
5495 spin_unlock_irq(&conf->device_lock);
5496 break;
5497 }
72626685 5498}
b15c2e57 5499
d562b0c4
N
5500
5501static void *raid5_takeover_raid1(mddev_t *mddev)
5502{
5503 int chunksect;
5504
5505 if (mddev->raid_disks != 2 ||
5506 mddev->degraded > 1)
5507 return ERR_PTR(-EINVAL);
5508
5509 /* Should check if there are write-behind devices? */
5510
5511 chunksect = 64*2; /* 64K by default */
5512
5513 /* The array must be an exact multiple of chunksize */
5514 while (chunksect && (mddev->array_sectors & (chunksect-1)))
5515 chunksect >>= 1;
5516
5517 if ((chunksect<<9) < STRIPE_SIZE)
5518 /* array size does not allow a suitable chunk size */
5519 return ERR_PTR(-EINVAL);
5520
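/* Worked example (hypothetical size): array_sectors = 2000 is not
 * a multiple of 128, 64 or 32 sectors but is a multiple of 16, so
 * the loop settles on chunksect = 16 (8KiB), which still passes
 * the (chunksect << 9) >= STRIPE_SIZE test on 4KiB-page systems. */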
5521 mddev->new_level = 5;
5522 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
664e7c41 5523 mddev->new_chunk_sectors = chunksect;
d562b0c4
N
5524
5525 return setup_conf(mddev);
5526}
5527
fc9739c6
N
5528static void *raid5_takeover_raid6(mddev_t *mddev)
5529{
5530 int new_layout;
5531
5532 switch (mddev->layout) {
5533 case ALGORITHM_LEFT_ASYMMETRIC_6:
5534 new_layout = ALGORITHM_LEFT_ASYMMETRIC;
5535 break;
5536 case ALGORITHM_RIGHT_ASYMMETRIC_6:
5537 new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
5538 break;
5539 case ALGORITHM_LEFT_SYMMETRIC_6:
5540 new_layout = ALGORITHM_LEFT_SYMMETRIC;
5541 break;
5542 case ALGORITHM_RIGHT_SYMMETRIC_6:
5543 new_layout = ALGORITHM_RIGHT_SYMMETRIC;
5544 break;
5545 case ALGORITHM_PARITY_0_6:
5546 new_layout = ALGORITHM_PARITY_0;
5547 break;
5548 case ALGORITHM_PARITY_N:
5549 new_layout = ALGORITHM_PARITY_N;
5550 break;
5551 default:
5552 return ERR_PTR(-EINVAL);
5553 }
5554 mddev->new_level = 5;
5555 mddev->new_layout = new_layout;
5556 mddev->delta_disks = -1;
5557 mddev->raid_disks -= 1;
5558 return setup_conf(mddev);
5559}
5560
d562b0c4 5561
50ac168a 5562static int raid5_check_reshape(mddev_t *mddev)
b3546035 5563{
88ce4930
N
5564 /* For a 2-drive array, the layout and chunk size can be changed
5565 * immediately, as no restriping is needed.
5566 * For larger arrays we record the new value - after validation
5567 * to be used by a reshape pass.
b3546035 5568 */
070ec55d 5569 raid5_conf_t *conf = mddev->private;
597a711b 5570 int new_chunk = mddev->new_chunk_sectors;
b3546035 5571
597a711b 5572 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
b3546035
N
5573 return -EINVAL;
5574 if (new_chunk > 0) {
0ba459d2 5575 if (!is_power_of_2(new_chunk))
b3546035 5576 return -EINVAL;
597a711b 5577 if (new_chunk < (PAGE_SIZE>>9))
b3546035 5578 return -EINVAL;
597a711b 5579 if (mddev->array_sectors & (new_chunk-1))
b3546035
N
5580 /* not factor of array size */
5581 return -EINVAL;
5582 }
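/* e.g. new_chunk = 96 sectors fails is_power_of_2(); 4 sectors
 * (2KiB) is below the PAGE_SIZE>>9 = 8 floor on 4KiB pages; 128
 * passes both tests but is still rejected unless it divides
 * array_sectors exactly. */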
5583
5584 /* They look valid */
5585
88ce4930 5586 if (mddev->raid_disks == 2) {
597a711b
N
5587 /* can make the change immediately */
5588 if (mddev->new_layout >= 0) {
5589 conf->algorithm = mddev->new_layout;
5590 mddev->layout = mddev->new_layout;
88ce4930
N
5591 }
5592 if (new_chunk > 0) {
597a711b
N
5593 conf->chunk_sectors = new_chunk;
5594 mddev->chunk_sectors = new_chunk;
88ce4930
N
5595 }
5596 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5597 md_wakeup_thread(mddev->thread);
b3546035 5598 }
50ac168a 5599 return check_reshape(mddev);
88ce4930
N
5600}
5601
50ac168a 5602static int raid6_check_reshape(mddev_t *mddev)
88ce4930 5603{
597a711b 5604 int new_chunk = mddev->new_chunk_sectors;
50ac168a 5605
597a711b 5606 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
88ce4930 5607 return -EINVAL;
b3546035 5608 if (new_chunk > 0) {
0ba459d2 5609 if (!is_power_of_2(new_chunk))
88ce4930 5610 return -EINVAL;
597a711b 5611 if (new_chunk < (PAGE_SIZE >> 9))
88ce4930 5612 return -EINVAL;
597a711b 5613 if (mddev->array_sectors & (new_chunk-1))
88ce4930
N
5614 /* not factor of array size */
5615 return -EINVAL;
b3546035 5616 }
88ce4930
N
5617
5618 /* They look valid */
50ac168a 5619 return check_reshape(mddev);
b3546035
N
5620}
5621
d562b0c4
N
5622static void *raid5_takeover(mddev_t *mddev)
5623{
5624 /* raid5 can take over:
5625 * raid0 - if all devices are the same - make it a raid4 layout
5626 * raid1 - if there are two drives. We need to know the chunk size
5627 * raid4 - trivial - just use a raid4 layout.
5628 * raid6 - Providing it is a *_6 layout
d562b0c4
N
5629 */
5630
5631 if (mddev->level == 1)
5632 return raid5_takeover_raid1(mddev);
e9d4758f
N
5633 if (mddev->level == 4) {
5634 mddev->new_layout = ALGORITHM_PARITY_N;
5635 mddev->new_level = 5;
5636 return setup_conf(mddev);
5637 }
fc9739c6
N
5638 if (mddev->level == 6)
5639 return raid5_takeover_raid6(mddev);
d562b0c4
N
5640
5641 return ERR_PTR(-EINVAL);
5642}
5643
5644
245f46c2
N
5645static struct mdk_personality raid5_personality;
5646
5647static void *raid6_takeover(mddev_t *mddev)
5648{
5649 /* Currently can only take over a raid5. We map the
5650 * personality to an equivalent raid6 personality
5651 * with the Q block at the end.
5652 */
5653 int new_layout;
5654
5655 if (mddev->pers != &raid5_personality)
5656 return ERR_PTR(-EINVAL);
5657 if (mddev->degraded > 1)
5658 return ERR_PTR(-EINVAL);
5659 if (mddev->raid_disks > 253)
5660 return ERR_PTR(-EINVAL);
5661 if (mddev->raid_disks < 3)
5662 return ERR_PTR(-EINVAL);
5663
5664 switch (mddev->layout) {
5665 case ALGORITHM_LEFT_ASYMMETRIC:
5666 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
5667 break;
5668 case ALGORITHM_RIGHT_ASYMMETRIC:
5669 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
5670 break;
5671 case ALGORITHM_LEFT_SYMMETRIC:
5672 new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
5673 break;
5674 case ALGORITHM_RIGHT_SYMMETRIC:
5675 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
5676 break;
5677 case ALGORITHM_PARITY_0:
5678 new_layout = ALGORITHM_PARITY_0_6;
5679 break;
5680 case ALGORITHM_PARITY_N:
5681 new_layout = ALGORITHM_PARITY_N;
5682 break;
5683 default:
5684 return ERR_PTR(-EINVAL);
5685 }
5686 mddev->new_level = 6;
5687 mddev->new_layout = new_layout;
5688 mddev->delta_disks = 1;
5689 mddev->raid_disks += 1;
5690 return setup_conf(mddev);
5691}
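/* e.g. a 4-disk left-symmetric raid5 becomes a 5-disk raid6 with
 * layout ALGORITHM_LEFT_SYMMETRIC_6: P rotates exactly as before
 * and the added device carries Q on every stripe. */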
5692
5693
16a53ecc
N
5694static struct mdk_personality raid6_personality =
5695{
5696 .name = "raid6",
5697 .level = 6,
5698 .owner = THIS_MODULE,
5699 .make_request = make_request,
5700 .run = run,
5701 .stop = stop,
5702 .status = status,
5703 .error_handler = error,
5704 .hot_add_disk = raid5_add_disk,
5705 .hot_remove_disk= raid5_remove_disk,
5706 .spare_active = raid5_spare_active,
5707 .sync_request = sync_request,
5708 .resize = raid5_resize,
80c3a6ce 5709 .size = raid5_size,
50ac168a 5710 .check_reshape = raid6_check_reshape,
f416885e 5711 .start_reshape = raid5_start_reshape,
cea9c228 5712 .finish_reshape = raid5_finish_reshape,
16a53ecc 5713 .quiesce = raid5_quiesce,
245f46c2 5714 .takeover = raid6_takeover,
16a53ecc 5715};
2604b703 5716static struct mdk_personality raid5_personality =
1da177e4
LT
5717{
5718 .name = "raid5",
2604b703 5719 .level = 5,
1da177e4
LT
5720 .owner = THIS_MODULE,
5721 .make_request = make_request,
5722 .run = run,
5723 .stop = stop,
5724 .status = status,
5725 .error_handler = error,
5726 .hot_add_disk = raid5_add_disk,
5727 .hot_remove_disk= raid5_remove_disk,
5728 .spare_active = raid5_spare_active,
5729 .sync_request = sync_request,
5730 .resize = raid5_resize,
80c3a6ce 5731 .size = raid5_size,
63c70c4f
N
5732 .check_reshape = raid5_check_reshape,
5733 .start_reshape = raid5_start_reshape,
cea9c228 5734 .finish_reshape = raid5_finish_reshape,
72626685 5735 .quiesce = raid5_quiesce,
d562b0c4 5736 .takeover = raid5_takeover,
1da177e4
LT
5737};
5738
2604b703 5739static struct mdk_personality raid4_personality =
1da177e4 5740{
2604b703
N
5741 .name = "raid4",
5742 .level = 4,
5743 .owner = THIS_MODULE,
5744 .make_request = make_request,
5745 .run = run,
5746 .stop = stop,
5747 .status = status,
5748 .error_handler = error,
5749 .hot_add_disk = raid5_add_disk,
5750 .hot_remove_disk= raid5_remove_disk,
5751 .spare_active = raid5_spare_active,
5752 .sync_request = sync_request,
5753 .resize = raid5_resize,
80c3a6ce 5754 .size = raid5_size,
3d37890b
N
5755 .check_reshape = raid5_check_reshape,
5756 .start_reshape = raid5_start_reshape,
cea9c228 5757 .finish_reshape = raid5_finish_reshape,
2604b703
N
5758 .quiesce = raid5_quiesce,
5759};
5760
5761static int __init raid5_init(void)
5762{
16a53ecc 5763 register_md_personality(&raid6_personality);
2604b703
N
5764 register_md_personality(&raid5_personality);
5765 register_md_personality(&raid4_personality);
5766 return 0;
1da177e4
LT
5767}
5768
2604b703 5769static void raid5_exit(void)
1da177e4 5770{
16a53ecc 5771 unregister_md_personality(&raid6_personality);
2604b703
N
5772 unregister_md_personality(&raid5_personality);
5773 unregister_md_personality(&raid4_personality);
1da177e4
LT
5774}
5775
5776module_init(raid5_init);
5777module_exit(raid5_exit);
5778MODULE_LICENSE("GPL");
5779MODULE_ALIAS("md-personality-4"); /* RAID5 */
d9d166c2
N
5780MODULE_ALIAS("md-raid5");
5781MODULE_ALIAS("md-raid4");
2604b703
N
5782MODULE_ALIAS("md-level-5");
5783MODULE_ALIAS("md-level-4");
16a53ecc
N
5784MODULE_ALIAS("md-personality-8"); /* RAID6 */
5785MODULE_ALIAS("md-raid6");
5786MODULE_ALIAS("md-level-6");
5787
5788/* This used to be two separate modules, they were: */
5789MODULE_ALIAS("raid5");
5790MODULE_ALIAS("raid6");