md: remove CONFIG_MD_RAID_RESHAPE config option.
[GitHub/mt8127/android_kernel_alcatel_ttab.git] drivers/md/raid5.c
/*
 * raid5.c : Multiple Devices driver for Linux
 *	Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	Copyright (C) 1999, 2000 Ingo Molnar
 *	Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->bm_write is the number of the last batch successfully written.
 * conf->bm_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is bm_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *   batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
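
/*
 * Illustrative walk-through (example values, not from the original
 * comment): suppose bm_write == bm_flush == 5.  A write arriving now
 * gets sh->bm_seq = 6 (bm_flush+1) and its stripe is queued.  The next
 * unplug advances bm_flush to 6; since bm_flush > bm_write, the pending
 * bitmap updates are written out and bm_write becomes 6, after which
 * the queued stripe may safely be written to the data disks.
 */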

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/seq_file.h>
#include "md.h"
#include "raid5.h"
#include "bitmap.h"

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
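
/*
 * Sizing example (assuming 4K pages and 8-byte hlist_heads): NR_HASH
 * is 512 and HASH_MASK is 511.  STRIPE_SHIFT is then 3, so
 * stripe_hash() selects bucket (sector >> 3) & 511 -- one bucket per
 * whole stripe unit.
 */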

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
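
/*
 * Usage sketch (illustrative only; this mirrors the walk loops used
 * later in this file):
 *
 *	while (bi && bi->bi_sector < dev->sector + STRIPE_SECTORS) {
 *		... process bi against this stripe+device ...
 *		bi = r5_next_bio(bi, dev->sector);
 *	}
 */
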
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#ifdef DEBUG
#define inline
#define __inline__
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}
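
/*
 * Packing example (illustrative): raid5_set_bi_hw_segments(bio, 3)
 * while two stripes are active leaves bi_phys_segments == 0x00030002 --
 * the hw-segment count (3) in the upper half and the biased
 * phys-segment count (2) in the lower half.
 */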

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1. This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot;

	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	slot = (*count)++;
	return slot;
}
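
/*
 * Mapping example (illustrative): for a 5-device md-layout raid6 with
 * pd_idx == 3 and qd_idx == 4, walking from raid6_d0() puts the three
 * data disks in slots 0..2, P in slot 3 (syndrome_disks) and Q in
 * slot 4 (syndrome_disks + 1).
 */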

static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;
	while (bi) {
		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf (raid5_conf_t *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state)) {
				list_add_tail(&sh->lru, &conf->delayed_list);
				blk_plug_device(conf->mddev->queue);
			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				   sh->bm_seq - conf->seq_write > 0) {
				list_add_tail(&sh->lru, &conf->bitmap_list);
				blk_plug_device(conf->mddev->queue);
			} else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(stripe_operations_active(sh));
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}

static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}


/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}

static void shrink_buffers(struct stripe_head *sh, int num)
{
	struct page *p;
	int i;

	for (i=0; i<num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh, int num)
{
	int i;

	for (i=0; i<num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}

static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	CHECK_DEVLOCK();
	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	insert_hash(conf, sh);
}

static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(struct request_queue *q);

static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
		  int previous, int noblock)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    raid5_unplug_device(conf->mddev->queue)
					);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru)
				       && !test_bit(STRIPE_EXPANDING, &sh->state));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}
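
/*
 * The 3/4 threshold above provides hysteresis: once the cache has been
 * exhausted (inactive_blocked set), waiters are only released after a
 * quarter of max_nr_stripes has been freed, rather than one stripe at
 * a time.
 */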

static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = WRITE;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw == WRITE)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rw == WRITE &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS,
					&rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw == WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}

static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;
	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio, i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bio_iovec_idx(bio, i)->bv_offset;
			bio_page = bio_iovec_idx(bio, i)->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
						  b_offset, clen,
						  ASYNC_TX_DEP_ACK,
						  tx, NULL, NULL);
			else
				tx = async_memcpy(bio_page, page, b_offset,
						  page_offset, clen,
						  ASYNC_TX_DEP_ACK,
						  tx, NULL, NULL);
		}
		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}
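
/*
 * Offset example (illustrative): a bio beginning two sectors before
 * the stripe page (bi_sector == sector - 2) gives page_offset == -1024,
 * so the first 1024 bytes of the bio are skipped via b_offset and
 * copying starts at offset 0 of the stripe page.
 */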

static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	spin_lock_irq(&conf->device_lock);
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_phys_segments(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	spin_unlock_irq(&conf->device_lock);
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&conf->device_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
					dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
		ops_complete_biofill, sh);
}

static void ops_complete_compute5(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
			0, NULL, ops_complete_compute5, sh);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			ASYNC_TX_XOR_ZERO_DST, NULL,
			ops_complete_compute5, sh);

	return tx;
}
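
/*
 * Recovery identity (illustrative): with parity P = D0 ^ D1 ^ ... ^ Dn,
 * any single missing block equals the XOR of all the others, so the
 * target page is computed by XORing every other device page (or by a
 * plain copy when only one source remains).
 */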

static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	int count = 0, pd_idx = sh->pd_idx, i;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
		ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx,
		ops_complete_prexor, sh);

	return tx;
}
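
/*
 * Read-modify-write arithmetic (illustrative): XOR is its own inverse,
 * so XORing the old data into the parity "subtracts" it; the postxor
 * pass later folds in the new data, giving
 * P_new = P_old ^ D_old ^ D_new without reading the other data disks.
 */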

static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock(&sh->lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock(&sh->lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(1, wbi, dev->page,
					dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}

static void ops_complete_postxor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks, i, pd_idx = sh->pd_idx;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (dev->written || i == pd_idx)
			set_bit(R5_UPTODATE, &dev->flags);
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void
ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];

	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 *    set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 *    for the synchronous xor case
	 */
	flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	if (unlikely(count == 1)) {
		flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
			flags, tx, ops_complete_postxor, sh);
	} else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			flags, tx, ops_complete_postxor, sh);
}
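
/*
 * RMW vs. RCW (illustrative): after a prexor the parity page already
 * holds P_old ^ D_old, so only the freshly drained blocks are XORed in
 * (read-modify-write); without a prexor the parity is rebuilt from
 * every data disk (reconstruct-write).
 */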

static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_check(struct stripe_head *sh)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	struct dma_async_tx_descriptor *tx;

	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (i != pd_idx)
			xor_srcs[count++] = dev->page;
	}

	tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
		&sh->ops.zero_sum_result, 0, NULL, NULL, NULL);

	atomic_inc(&sh->count);
	tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
		ops_complete_check, sh);
}

static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;

	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		tx = ops_run_compute5(sh);
		/* terminate the chain if postxor is not set to be run */
		if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_POSTXOR, &ops_request))
		ops_run_postxor(sh, tx);

	if (test_bit(STRIPE_OP_CHECK, &ops_request))
		ops_run_check(sh);

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
}
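
/*
 * Chaining example (illustrative): a read-modify-write request sets
 * PREXOR, BIODRAIN and POSTXOR, so the returned descriptors thread
 * prexor -> biodrain -> postxor, each step starting only after its
 * predecessor's DMA has completed.
 */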

static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->disks = conf->raid_disks;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(raid5_conf_t *conf, int num)
{
	struct kmem_cache *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0],
		"raid%d-%s", conf->level, mdname(conf->mddev));
	sprintf(conf->cache_name[1],
		"raid%d-%s-alt", conf->level, mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}

static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	int err;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    unplug_slaves(conf->mddev)
			);
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks.
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);
		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section passed, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}

static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh, conf->pool_size);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}

static void raid5_end_read_request(struct bio * bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			rdev = conf->disks[i].rdev;
			printk_rl(KERN_INFO "raid5:%s: read error corrected"
				  " (%lu sectors at %llu on %s)\n",
				  mdname(conf->mddev), STRIPE_SECTORS,
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdevname(rdev->bdev, b));
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
		int retry = 0;
		rdev = conf->disks[i].rdev;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (conf->mddev->degraded)
			printk_rl(KERN_WARNING
				  "raid5:%s: read error not correctable "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			/* Oh, no!!! */
			printk_rl(KERN_WARNING
				  "raid5:%s: read error NOT corrected!! "
				  "(sector %llu on %s).\n",
				  mdname(conf->mddev),
				  (unsigned long long)(sh->sector
						       + rdev->data_offset),
				  bdn);
		else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "raid5:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void raid5_end_write_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}


static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);

static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;

	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;

	dev->flags = 0;
	dev->sector = compute_blocknr(sh, i, previous);
}

static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	pr_debug("raid5: error called\n");

	if (!test_bit(Faulty, &rdev->flags)) {
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (test_and_clear_bit(In_sync, &rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			/*
			 * if recovery was running, make sure it aborts.
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		}
		set_bit(Faulty, &rdev->flags);
		printk(KERN_ALERT
		       "raid5: Disk failure on %s, disabling device.\n"
		       "raid5: Operation continuing on %d devices.\n",
		       bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
	}
}
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh)
{
	long stripe;
	unsigned long chunk_number;
	unsigned int chunk_offset;
	int pd_idx, qd_idx;
	int ddf_layout = 0;
	sector_t new_sector;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
					 : (conf->chunk_size >> 9);
	int raid_disks = previous ? conf->previous_raid_disks
				  : conf->raid_disks;
	int data_disks = raid_disks - conf->max_degraded;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;
	BUG_ON(r_sector != chunk_number);

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number / data_disks;

	/*
	 * Compute the data disk and parity disk indexes inside the stripe
	 */
	*dd_idx = chunk_number % data_disks;

	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	pd_idx = qd_idx = ~0;
	switch(conf->level) {
	case 4:
		pd_idx = data_disks;
		break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = data_disks - stripe % raid_disks;
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = stripe % raid_disks;
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = data_disks - stripe % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = stripe % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			(*dd_idx)++;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
				algorithm);
			BUG();
		}
		break;
	case 6:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = stripe % raid_disks;
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = stripe % raid_disks;
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;

		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;

		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the
			 * order of blocks for computing Q is different.
			 */
			pd_idx = stripe % raid_disks;
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_RESTART:
			/* Same as left_asymmetric, but the first stripe is
			 * D D D P Q  rather than
			 * Q D D D P
			 */
			pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - (stripe % raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			ddf_layout = 1;
			break;

		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on last device */
			pd_idx = data_disks - stripe % (raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = stripe % (raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - stripe % (raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = stripe % (raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		default:
			printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
			       algorithm);
			BUG();
		}
		break;
	}

	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
		sh->ddf_layout = ddf_layout;
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
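
/*
 * Layout example (illustrative): RAID5 LEFT_SYMMETRIC over 4 disks.
 * Parity rotates one disk to the left each stripe and the data chunks
 * continue directly after it, modulo the disk count:
 *
 *	stripe 0:   D0   D1   D2   P
 *	stripe 1:   D4   D5   P    D3
 *	stripe 2:   D8   P    D6   D7
 *	stripe 3:   P    D9   D10  D11
 */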

static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
					 : (conf->chunk_size >> 9);
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	sector_t stripe;
	int chunk_offset;
	int chunk_number, dummy1, dd_idx = i;
	sector_t r_sector;
	struct stripe_head sh2;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;
	BUG_ON(new_sector != stripe);

	if (i == sh->pd_idx)
		return 0;
	switch(conf->level) {
	case 4: break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0:
			i -= 1;
			break;
		case ALGORITHM_PARITY_N:
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
			       algorithm);
			BUG();
		}
		break;
	case 6:
		if (i == sh->qd_idx)
			return 0; /* It is the Q disk */
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
		case ALGORITHM_ROTATING_ZERO_RESTART:
		case ALGORITHM_ROTATING_N_RESTART:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		case ALGORITHM_PARITY_0:
			i -= 2;
			break;
		case ALGORITHM_PARITY_N:
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			if (sh->pd_idx == 0)
				i--;	/* P D D D Q */
			else if (i > sh->pd_idx)
				i -= 2; /* D D Q P D */
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			if (i < sh->pd_idx)
				i += data_disks + 1;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0_6:
			i -= 1;
			break;
		default:
			printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
			       algorithm);
			BUG();
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector(conf, r_sector,
				     previous, &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
	    || sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "compute_blocknr: map not correct\n");
		return 0;
	}
	return r_sector;
}
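
/*
 * Note: compute_blocknr() is the inverse of raid5_compute_sector(), so
 * it re-runs the forward mapping on its own result and returns 0 if
 * the round trip does not land back on the same stripe/disk -- a cheap
 * self-check of the layout arithmetic.
 */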


/*
 * Copy data between a page in the stripe cache, and one or more bion
 * The page could align with the middle of the bio, or there could be
 * several bion, each with several bio_vecs, which cover part of the page
 * Multiple bion are linked together on bi_next.  There may be extras
 * at the end of this list.  We ignore them.
 */
static void copy_data(int frombio, struct bio *bio,
		     struct page *page,
		     sector_t sector)
{
	char *pa = page_address(page);
	struct bio_vec *bvl;
	int i;
	int page_offset;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;
	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio,i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else clen = len;

		if (clen > 0) {
			char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
			if (frombio)
				memcpy(pa+page_offset, ba+b_offset, clen);
			else
				memcpy(ba+b_offset, pa+page_offset, clen);
			__bio_kunmap_atomic(ba, KM_USER0);
		}
		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}
}

#define check_xor()	do {						  \
				if (count == MAX_XOR_BLOCKS) {		  \
					xor_blocks(count, STRIPE_SIZE, dest, ptr);\
					count = 0;			  \
				}					  \
			} while(0)
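
/*
 * Batching note (illustrative): callers accumulate source pages in
 * ptr[] and invoke check_xor() after each addition, so the xor is
 * issued in MAX_XOR_BLOCKS-sized batches, with a final xor_blocks()
 * call on whatever remains.
 */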

static void compute_parity6(struct stripe_head *sh, int method)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	struct bio *chosen;
	/**** FIX THIS: This could be very bad if disks is close to 256 ****/
	void *ptrs[syndrome_disks+2];

	pd_idx = sh->pd_idx;
	qd_idx = sh->qd_idx;
	d0_idx = raid6_d0(sh);

	pr_debug("compute_parity, stripe %llu, method %d\n",
		(unsigned long long)sh->sector, method);

	switch(method) {
	case READ_MODIFY_WRITE:
		BUG();		/* READ_MODIFY_WRITE N/A for RAID-6 */
	case RECONSTRUCT_WRITE:
		for (i = disks; i--; )
			if (i != pd_idx && i != qd_idx && sh->dev[i].towrite) {
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				BUG_ON(sh->dev[i].written);
				sh->dev[i].written = chosen;
			}
		break;
	case CHECK_PARITY:
		BUG();		/* Not implemented yet */
	}

	for (i = disks; i--;)
		if (sh->dev[i].written) {
			sector_t sector = sh->dev[i].sector;
			struct bio *wbi = sh->dev[i].written;
			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
				copy_data(1, wbi, sh->dev[i].page, sector);
				wbi = r5_next_bio(wbi, sector);
			}

			set_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
		}

	/* Note that unlike RAID-5, the ordering of the disks matters greatly.*/

	for (i = 0; i < disks; i++)
		ptrs[i] = (void *)raid6_empty_zero_page;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		ptrs[slot] = page_address(sh->dev[i].page);
		if (slot < syndrome_disks &&
		    !test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
			printk(KERN_ERR "block %d/%d not uptodate "
			       "on parity calc\n", i, count);
			BUG();
		}

		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);
	BUG_ON(count != syndrome_disks);

	raid6_call.gen_syndrome(syndrome_disks+2, STRIPE_SIZE, ptrs);

	switch(method) {
	case RECONSTRUCT_WRITE:
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
		set_bit(R5_LOCKED,   &sh->dev[qd_idx].flags);
		break;
	case UPDATE_PARITY:
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
		break;
	}
}
1740
1741
1742/* Compute one missing block */
1743static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
1744{
f416885e 1745 int i, count, disks = sh->disks;
9bc89cd8 1746 void *ptr[MAX_XOR_BLOCKS], *dest, *p;
d0dabf7e 1747 int qd_idx = sh->qd_idx;
16a53ecc 1748
45b4233c 1749 pr_debug("compute_block_1, stripe %llu, idx %d\n",
16a53ecc
N
1750 (unsigned long long)sh->sector, dd_idx);
1751
1752 if ( dd_idx == qd_idx ) {
1753 /* We're actually computing the Q drive */
1754 compute_parity6(sh, UPDATE_PARITY);
1755 } else {
9bc89cd8
DW
1756 dest = page_address(sh->dev[dd_idx].page);
1757 if (!nozero) memset(dest, 0, STRIPE_SIZE);
1758 count = 0;
16a53ecc
N
1759 for (i = disks ; i--; ) {
1760 if (i == dd_idx || i == qd_idx)
1761 continue;
1762 p = page_address(sh->dev[i].page);
1763 if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
1764 ptr[count++] = p;
1765 else
1766 printk("compute_block() %d, stripe %llu, %d"
1767 " not present\n", dd_idx,
1768 (unsigned long long)sh->sector, i);
1769
1770 check_xor();
1771 }
9bc89cd8
DW
1772 if (count)
1773 xor_blocks(count, STRIPE_SIZE, dest, ptr);
16a53ecc
N
1774 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
1775 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
1776 }
1777}
1778
1779/* Compute two missing blocks */
1780static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
1781{
f416885e 1782 int i, count, disks = sh->disks;
67cc2b81 1783 int syndrome_disks = sh->ddf_layout ? disks : disks-2;
d0dabf7e
N
1784 int d0_idx = raid6_d0(sh);
1785 int faila = -1, failb = -1;
1786 /**** FIX THIS: This could be very bad if disks is close to 256 ****/
67cc2b81 1787 void *ptrs[syndrome_disks+2];
16a53ecc 1788
67cc2b81
N
1789 for (i = 0; i < disks ; i++)
1790 ptrs[i] = (void *)raid6_empty_zero_page;
d0dabf7e
N
1791 count = 0;
1792 i = d0_idx;
1793 do {
67cc2b81
N
1794 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1795
d0dabf7e 1796 ptrs[slot] = page_address(sh->dev[i].page);
67cc2b81 1797
d0dabf7e
N
1798 if (i == dd_idx1)
1799 faila = slot;
1800 if (i == dd_idx2)
1801 failb = slot;
1802 i = raid6_next_disk(i, disks);
1803 } while (i != d0_idx);
67cc2b81 1804 BUG_ON(count != syndrome_disks);
16a53ecc
N
1805
1806 BUG_ON(faila == failb);
1807 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
1808
45b4233c 1809 pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
d0dabf7e
N
1810 (unsigned long long)sh->sector, dd_idx1, dd_idx2,
1811 faila, failb);
16a53ecc 1812
67cc2b81 1813 if (failb == syndrome_disks+1) {
16a53ecc 1814 /* Q disk is one of the missing disks */
67cc2b81 1815 if (faila == syndrome_disks) {
16a53ecc
N
1816 /* Missing P+Q, just recompute */
1817 compute_parity6(sh, UPDATE_PARITY);
1818 return;
1819 } else {
1820 /* We're missing D+Q; recompute D from P */
d0dabf7e
N
1821 compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ?
1822 dd_idx2 : dd_idx1),
1823 0);
16a53ecc
N
1824 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */
1825 return;
1826 }
1827 }
1828
d0dabf7e 1829 /* We're missing D+P or D+D; */
67cc2b81 1830 if (failb == syndrome_disks) {
d0dabf7e 1831 /* We're missing D+P. */
67cc2b81 1832 raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, faila, ptrs);
d0dabf7e
N
1833 } else {
1834 /* We're missing D+D. */
67cc2b81
N
1835 raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE, faila, failb,
1836 ptrs);
16a53ecc 1837 }
d0dabf7e
N
1838
1839 /* Both of the above calls update both missing blocks */
1840 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
1841 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
16a53ecc
N
1842}
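/*
 * The algebra behind the two recovery calls above (a sketch, not driver
 * code).  With '+' meaning XOR and all products over GF(2^8), generator
 * g = {02}:
 *
 *	P = D_0 + D_1 + ... + D_(n-1)
 *	Q = g^0*D_0 + g^1*D_1 + ... + g^(n-1)*D_(n-1)
 *
 * raid6_datap_recov() (D_x and P lost): recompute Q' treating D_x as
 * zero, then D_x = (Q + Q') * g^(-x), and P follows by plain XOR.
 *
 * raid6_2data_recov() (D_x and D_y lost, x < y): recompute P' and Q'
 * treating both as zero, then with
 *	A = g^(y-x) / (g^(y-x) + {01})
 *	B = g^(-x)  / (g^(y-x) + {01})
 * we get
 *	D_x = A*(P + P') + B*(Q + Q')
 *	D_y = (P + P') + D_x
 */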
1843
600aa109 1844static void
1fe797e6 1845schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s,
600aa109 1846 int rcw, int expand)
e33129d8
DW
1847{
1848 int i, pd_idx = sh->pd_idx, disks = sh->disks;
e33129d8
DW
1849
1850 if (rcw) {
1851 /* if we are not expanding this is a proper write request, and
1852 * there will be bios with new data to be drained into the
1853 * stripe cache
1854 */
1855 if (!expand) {
600aa109
DW
1856 sh->reconstruct_state = reconstruct_state_drain_run;
1857 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
1858 } else
1859 sh->reconstruct_state = reconstruct_state_run;
16a53ecc 1860
600aa109 1861 set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
e33129d8
DW
1862
1863 for (i = disks; i--; ) {
1864 struct r5dev *dev = &sh->dev[i];
1865
1866 if (dev->towrite) {
1867 set_bit(R5_LOCKED, &dev->flags);
d8ee0728 1868 set_bit(R5_Wantdrain, &dev->flags);
e33129d8
DW
1869 if (!expand)
1870 clear_bit(R5_UPTODATE, &dev->flags);
600aa109 1871 s->locked++;
e33129d8
DW
1872 }
1873 }
600aa109 1874 if (s->locked + 1 == disks)
8b3e6cdc
DW
1875 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
1876 atomic_inc(&sh->raid_conf->pending_full_writes);
e33129d8
DW
1877 } else {
1878 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
1879 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
1880
d8ee0728 1881 sh->reconstruct_state = reconstruct_state_prexor_drain_run;
600aa109
DW
1882 set_bit(STRIPE_OP_PREXOR, &s->ops_request);
1883 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
1884 set_bit(STRIPE_OP_POSTXOR, &s->ops_request);
e33129d8
DW
1885
1886 for (i = disks; i--; ) {
1887 struct r5dev *dev = &sh->dev[i];
1888 if (i == pd_idx)
1889 continue;
1890
e33129d8
DW
1891 if (dev->towrite &&
1892 (test_bit(R5_UPTODATE, &dev->flags) ||
d8ee0728
DW
1893 test_bit(R5_Wantcompute, &dev->flags))) {
1894 set_bit(R5_Wantdrain, &dev->flags);
e33129d8
DW
1895 set_bit(R5_LOCKED, &dev->flags);
1896 clear_bit(R5_UPTODATE, &dev->flags);
600aa109 1897 s->locked++;
e33129d8
DW
1898 }
1899 }
1900 }
1901
1902 /* keep the parity disk locked while asynchronous operations
1903 * are in flight
1904 */
1905 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
1906 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
600aa109 1907 s->locked++;
e33129d8 1908
600aa109 1909 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
e46b272b 1910 __func__, (unsigned long long)sh->sector,
600aa109 1911 s->locked, s->ops_request);
e33129d8 1912}
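/*
 * A summary of the control flow above (not driver code):
 * schedule_reconstruction5() queues one of two async pipelines for
 * raid5_run_ops().
 *
 *   rcw (reconstruct write):  BIODRAIN -> POSTXOR
 *	drain the new data into the stripe cache, then xor all data
 *	blocks into a freshly computed parity block.
 *
 *   rmw (read-modify-write):  PREXOR -> BIODRAIN -> POSTXOR
 *	xor the old contents of the target blocks out of parity, drain
 *	the new data, then xor it back in.
 *
 * In both cases the parity block is left R5_LOCKED until the chain
 * completes, which is why s->locked is bumped one last time above.
 */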
16a53ecc 1913
1da177e4
LT
1914/*
1915 * Each stripe/dev can have one or more bios attached.
16a53ecc 1916 * toread/towrite point to the first in a chain.
1da177e4
LT
1917 * The bi_next chain must be in order.
1918 */
1919static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
1920{
1921 struct bio **bip;
1922 raid5_conf_t *conf = sh->raid_conf;
72626685 1923 int firstwrite=0;
1da177e4 1924
45b4233c 1925 pr_debug("adding bh b#%llu to stripe s#%llu\n",
1da177e4
LT
1926 (unsigned long long)bi->bi_sector,
1927 (unsigned long long)sh->sector);
1928
1929
1930 spin_lock(&sh->lock);
1931 spin_lock_irq(&conf->device_lock);
72626685 1932 if (forwrite) {
1da177e4 1933 bip = &sh->dev[dd_idx].towrite;
72626685
N
1934 if (*bip == NULL && sh->dev[dd_idx].written == NULL)
1935 firstwrite = 1;
1936 } else
1da177e4
LT
1937 bip = &sh->dev[dd_idx].toread;
1938 while (*bip && (*bip)->bi_sector < bi->bi_sector) {
1939 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
1940 goto overlap;
1941 bip = & (*bip)->bi_next;
1942 }
1943 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
1944 goto overlap;
1945
78bafebd 1946 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
1da177e4
LT
1947 if (*bip)
1948 bi->bi_next = *bip;
1949 *bip = bi;
960e739d 1950 bi->bi_phys_segments++;
1da177e4
LT
1951 spin_unlock_irq(&conf->device_lock);
1952 spin_unlock(&sh->lock);
1953
45b4233c 1954 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
1da177e4
LT
1955 (unsigned long long)bi->bi_sector,
1956 (unsigned long long)sh->sector, dd_idx);
1957
72626685 1958 if (conf->mddev->bitmap && firstwrite) {
72626685
N
1959 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
1960 STRIPE_SECTORS, 0);
ae3c20cc 1961 sh->bm_seq = conf->seq_flush+1;
72626685
N
1962 set_bit(STRIPE_BIT_DELAY, &sh->state);
1963 }
1964
1da177e4
LT
1965 if (forwrite) {
1966 /* check if page is covered */
1967 sector_t sector = sh->dev[dd_idx].sector;
1968 for (bi=sh->dev[dd_idx].towrite;
1969 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
1970 bi && bi->bi_sector <= sector;
1971 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
1972 if (bi->bi_sector + (bi->bi_size>>9) >= sector)
1973 sector = bi->bi_sector + (bi->bi_size>>9);
1974 }
1975 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
1976 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
1977 }
1978 return 1;
1979
1980 overlap:
1981 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
1982 spin_unlock_irq(&conf->device_lock);
1983 spin_unlock(&sh->lock);
1984 return 0;
1985}
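/*
 * A standalone sketch (the toy_* names are ours, not the driver's) of
 * the sorted insert add_stripe_bio() performs on the bi_next chain.
 * 'start' and 'len' stand in for bi_sector and bi_size >> 9; the two
 * overlap tests mirror the ones that jump to the 'overlap' label above.
 */
struct toy_bio {
	unsigned long long start;	/* first sector covered */
	unsigned int len;		/* length in sectors */
	struct toy_bio *next;		/* models bi_next */
};

/* returns 1 on success, 0 (list unchanged) if 'bi' overlaps a neighbour */
static int toy_add_sorted(struct toy_bio **head, struct toy_bio *bi)
{
	struct toy_bio **bip = head;

	while (*bip && (*bip)->start < bi->start) {
		if ((*bip)->start + (*bip)->len > bi->start)
			return 0;	/* predecessor extends into us */
		bip = &(*bip)->next;
	}
	if (*bip && (*bip)->start < bi->start + bi->len)
		return 0;		/* we extend into our successor */

	bi->next = *bip;
	*bip = bi;
	return 1;
}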
1986
29269553
N
1987static void end_reshape(raid5_conf_t *conf);
1988
16a53ecc
N
1989static int page_is_zero(struct page *p)
1990{
1991 char *a = page_address(p);
1992 return ((*(u32*)a) == 0 &&
1993 memcmp(a, a+4, STRIPE_SIZE-4)==0);
1994}
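/*
 * A standalone sketch (toy_* is ours, not the driver's) of the
 * overlapping-compare idiom page_is_zero() uses.  If the first four
 * bytes are zero and every byte equals the byte four positions before
 * it, the buffer can only contain zeroes; memcmp() may legally compare
 * the two overlapping ranges since neither is written to.
 */
#include <string.h>		/* memcmp, size_t - for the sketch only */

static int toy_buf_is_zero(const void *buf, size_t len)
{
	const unsigned char *a = buf;

	return len >= 4 &&
	       a[0] == 0 && a[1] == 0 && a[2] == 0 && a[3] == 0 &&
	       memcmp(a, a + 4, len - 4) == 0;
}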
1995
911d4ee8
N
1996static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
1997 struct stripe_head *sh)
ccfcc3c1 1998{
784052ec
N
1999 int sectors_per_chunk =
2000 previous ? (conf->prev_chunk >> 9)
2001 : (conf->chunk_size >> 9);
911d4ee8 2002 int dd_idx;
2d2063ce 2003 int chunk_offset = sector_div(stripe, sectors_per_chunk);
112bf897 2004 int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
2d2063ce 2005
112bf897
N
2006 raid5_compute_sector(conf,
2007 stripe * (disks - conf->max_degraded)
b875e531 2008 *sectors_per_chunk + chunk_offset,
112bf897 2009 previous,
911d4ee8 2010 &dd_idx, sh);
ccfcc3c1
N
2011}
2012
a4456856 2013static void
1fe797e6 2014handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
a4456856
DW
2015 struct stripe_head_state *s, int disks,
2016 struct bio **return_bi)
2017{
2018 int i;
2019 for (i = disks; i--; ) {
2020 struct bio *bi;
2021 int bitmap_end = 0;
2022
2023 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2024 mdk_rdev_t *rdev;
2025 rcu_read_lock();
2026 rdev = rcu_dereference(conf->disks[i].rdev);
2027 if (rdev && test_bit(In_sync, &rdev->flags))
2028 /* multiple read failures in one stripe */
2029 md_error(conf->mddev, rdev);
2030 rcu_read_unlock();
2031 }
2032 spin_lock_irq(&conf->device_lock);
2033 /* fail all writes first */
2034 bi = sh->dev[i].towrite;
2035 sh->dev[i].towrite = NULL;
2036 if (bi) {
2037 s->to_write--;
2038 bitmap_end = 1;
2039 }
2040
2041 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2042 wake_up(&conf->wait_for_overlap);
2043
2044 while (bi && bi->bi_sector <
2045 sh->dev[i].sector + STRIPE_SECTORS) {
2046 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2047 clear_bit(BIO_UPTODATE, &bi->bi_flags);
960e739d 2048 if (!raid5_dec_bi_phys_segments(bi)) {
a4456856
DW
2049 md_write_end(conf->mddev);
2050 bi->bi_next = *return_bi;
2051 *return_bi = bi;
2052 }
2053 bi = nextbi;
2054 }
2055 /* and fail all 'written' */
2056 bi = sh->dev[i].written;
2057 sh->dev[i].written = NULL;
2058 if (bi) bitmap_end = 1;
2059 while (bi && bi->bi_sector <
2060 sh->dev[i].sector + STRIPE_SECTORS) {
2061 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2062 clear_bit(BIO_UPTODATE, &bi->bi_flags);
960e739d 2063 if (!raid5_dec_bi_phys_segments(bi)) {
a4456856
DW
2064 md_write_end(conf->mddev);
2065 bi->bi_next = *return_bi;
2066 *return_bi = bi;
2067 }
2068 bi = bi2;
2069 }
2070
b5e98d65
DW
2071 /* fail any reads if this device is non-operational and
2072 * the data has not reached the cache yet.
2073 */
2074 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
2075 (!test_bit(R5_Insync, &sh->dev[i].flags) ||
2076 test_bit(R5_ReadError, &sh->dev[i].flags))) {
a4456856
DW
2077 bi = sh->dev[i].toread;
2078 sh->dev[i].toread = NULL;
2079 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2080 wake_up(&conf->wait_for_overlap);
2081 if (bi) s->to_read--;
2082 while (bi && bi->bi_sector <
2083 sh->dev[i].sector + STRIPE_SECTORS) {
2084 struct bio *nextbi =
2085 r5_next_bio(bi, sh->dev[i].sector);
2086 clear_bit(BIO_UPTODATE, &bi->bi_flags);
960e739d 2087 if (!raid5_dec_bi_phys_segments(bi)) {
a4456856
DW
2088 bi->bi_next = *return_bi;
2089 *return_bi = bi;
2090 }
2091 bi = nextbi;
2092 }
2093 }
2094 spin_unlock_irq(&conf->device_lock);
2095 if (bitmap_end)
2096 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2097 STRIPE_SECTORS, 0, 0);
2098 }
2099
8b3e6cdc
DW
2100 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2101 if (atomic_dec_and_test(&conf->pending_full_writes))
2102 md_wakeup_thread(conf->mddev->thread);
a4456856
DW
2103}
2104
1fe797e6
DW
2105/* fetch_block5 - checks the given member device to see if its data needs
2106 * to be read or computed to satisfy a request.
2107 *
2108 * Returns 1 when no more member devices need to be checked, otherwise returns
2109 * 0 to tell the loop in handle_stripe_fill5 to continue
f38e1219 2110 */
1fe797e6
DW
2111static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
2112 int disk_idx, int disks)
f38e1219
DW
2113{
2114 struct r5dev *dev = &sh->dev[disk_idx];
2115 struct r5dev *failed_dev = &sh->dev[s->failed_num];
2116
f38e1219
DW
2117 /* is the data in this block needed, and can we get it? */
2118 if (!test_bit(R5_LOCKED, &dev->flags) &&
1fe797e6
DW
2119 !test_bit(R5_UPTODATE, &dev->flags) &&
2120 (dev->toread ||
2121 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2122 s->syncing || s->expanding ||
2123 (s->failed &&
2124 (failed_dev->toread ||
2125 (failed_dev->towrite &&
2126 !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
976ea8d4
DW
2127 /* We would like to get this block, possibly by computing it,
2128 * otherwise read it if the backing disk is insync
f38e1219
DW
2129 */
2130 if ((s->uptodate == disks - 1) &&
ecc65c9b 2131 (s->failed && disk_idx == s->failed_num)) {
976ea8d4
DW
2132 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2133 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
f38e1219
DW
2134 set_bit(R5_Wantcompute, &dev->flags);
2135 sh->ops.target = disk_idx;
2136 s->req_compute = 1;
f38e1219
DW
2137 /* Careful: from this point on 'uptodate' is in the eye
2138 * of raid5_run_ops which services 'compute' operations
2139 * before writes. R5_Wantcompute flags a block that will
2140 * be R5_UPTODATE by the time it is needed for a
2141 * subsequent operation.
2142 */
2143 s->uptodate++;
1fe797e6 2144 return 1; /* uptodate + compute == disks */
7a1fc53c 2145 } else if (test_bit(R5_Insync, &dev->flags)) {
f38e1219
DW
2146 set_bit(R5_LOCKED, &dev->flags);
2147 set_bit(R5_Wantread, &dev->flags);
f38e1219
DW
2148 s->locked++;
2149 pr_debug("Reading block %d (sync=%d)\n", disk_idx,
2150 s->syncing);
2151 }
2152 }
2153
1fe797e6 2154 return 0;
f38e1219
DW
2155}
2156
1fe797e6
DW
2157/**
2158 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
2159 */
2160static void handle_stripe_fill5(struct stripe_head *sh,
a4456856
DW
2161 struct stripe_head_state *s, int disks)
2162{
2163 int i;
f38e1219 2164
f38e1219
DW
2165 /* look for blocks to read/compute, skip this if a compute
2166 * is already in flight, or if the stripe contents are in the
2167 * midst of changing due to a write
2168 */
976ea8d4 2169 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
1fe797e6 2170 !sh->reconstruct_state)
f38e1219 2171 for (i = disks; i--; )
1fe797e6 2172 if (fetch_block5(sh, s, i, disks))
f38e1219 2173 break;
a4456856
DW
2174 set_bit(STRIPE_HANDLE, &sh->state);
2175}
2176
1fe797e6 2177static void handle_stripe_fill6(struct stripe_head *sh,
a4456856
DW
2178 struct stripe_head_state *s, struct r6_state *r6s,
2179 int disks)
2180{
2181 int i;
2182 for (i = disks; i--; ) {
2183 struct r5dev *dev = &sh->dev[i];
2184 if (!test_bit(R5_LOCKED, &dev->flags) &&
2185 !test_bit(R5_UPTODATE, &dev->flags) &&
2186 (dev->toread || (dev->towrite &&
2187 !test_bit(R5_OVERWRITE, &dev->flags)) ||
2188 s->syncing || s->expanding ||
2189 (s->failed >= 1 &&
2190 (sh->dev[r6s->failed_num[0]].toread ||
2191 s->to_write)) ||
2192 (s->failed >= 2 &&
2193 (sh->dev[r6s->failed_num[1]].toread ||
2194 s->to_write)))) {
2195 /* we would like to get this block, possibly
2196 * by computing it, but we might not be able to
2197 */
c337869d
DW
2198 if ((s->uptodate == disks - 1) &&
2199 (s->failed && (i == r6s->failed_num[0] ||
2200 i == r6s->failed_num[1]))) {
45b4233c 2201 pr_debug("Computing stripe %llu block %d\n",
a4456856
DW
2202 (unsigned long long)sh->sector, i);
2203 compute_block_1(sh, i, 0);
2204 s->uptodate++;
2205 } else if ( s->uptodate == disks-2 && s->failed >= 2 ) {
2206 /* Computing 2-failure is *very* expensive; only
2207 * do it if failed >= 2
2208 */
2209 int other;
2210 for (other = disks; other--; ) {
2211 if (other == i)
2212 continue;
2213 if (!test_bit(R5_UPTODATE,
2214 &sh->dev[other].flags))
2215 break;
2216 }
2217 BUG_ON(other < 0);
45b4233c 2218 pr_debug("Computing stripe %llu blocks %d,%d\n",
a4456856
DW
2219 (unsigned long long)sh->sector,
2220 i, other);
2221 compute_block_2(sh, i, other);
2222 s->uptodate += 2;
2223 } else if (test_bit(R5_Insync, &dev->flags)) {
2224 set_bit(R5_LOCKED, &dev->flags);
2225 set_bit(R5_Wantread, &dev->flags);
2226 s->locked++;
45b4233c 2227 pr_debug("Reading block %d (sync=%d)\n",
a4456856
DW
2228 i, s->syncing);
2229 }
2230 }
2231 }
2232 set_bit(STRIPE_HANDLE, &sh->state);
2233}
2234
2235
1fe797e6 2236/* handle_stripe_clean_event
a4456856
DW
2237 * any written block on an uptodate or failed drive can be returned.
2238 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2239 * never LOCKED, so we don't need to test 'failed' directly.
2240 */
1fe797e6 2241static void handle_stripe_clean_event(raid5_conf_t *conf,
a4456856
DW
2242 struct stripe_head *sh, int disks, struct bio **return_bi)
2243{
2244 int i;
2245 struct r5dev *dev;
2246
2247 for (i = disks; i--; )
2248 if (sh->dev[i].written) {
2249 dev = &sh->dev[i];
2250 if (!test_bit(R5_LOCKED, &dev->flags) &&
2251 test_bit(R5_UPTODATE, &dev->flags)) {
2252 /* We can return any write requests */
2253 struct bio *wbi, *wbi2;
2254 int bitmap_end = 0;
45b4233c 2255 pr_debug("Return write for disc %d\n", i);
a4456856
DW
2256 spin_lock_irq(&conf->device_lock);
2257 wbi = dev->written;
2258 dev->written = NULL;
2259 while (wbi && wbi->bi_sector <
2260 dev->sector + STRIPE_SECTORS) {
2261 wbi2 = r5_next_bio(wbi, dev->sector);
960e739d 2262 if (!raid5_dec_bi_phys_segments(wbi)) {
a4456856
DW
2263 md_write_end(conf->mddev);
2264 wbi->bi_next = *return_bi;
2265 *return_bi = wbi;
2266 }
2267 wbi = wbi2;
2268 }
2269 if (dev->towrite == NULL)
2270 bitmap_end = 1;
2271 spin_unlock_irq(&conf->device_lock);
2272 if (bitmap_end)
2273 bitmap_endwrite(conf->mddev->bitmap,
2274 sh->sector,
2275 STRIPE_SECTORS,
2276 !test_bit(STRIPE_DEGRADED, &sh->state),
2277 0);
2278 }
2279 }
8b3e6cdc
DW
2280
2281 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2282 if (atomic_dec_and_test(&conf->pending_full_writes))
2283 md_wakeup_thread(conf->mddev->thread);
a4456856
DW
2284}
2285
1fe797e6 2286static void handle_stripe_dirtying5(raid5_conf_t *conf,
a4456856
DW
2287 struct stripe_head *sh, struct stripe_head_state *s, int disks)
2288{
2289 int rmw = 0, rcw = 0, i;
2290 for (i = disks; i--; ) {
2291 /* would I have to read this buffer for read_modify_write */
2292 struct r5dev *dev = &sh->dev[i];
2293 if ((dev->towrite || i == sh->pd_idx) &&
2294 !test_bit(R5_LOCKED, &dev->flags) &&
f38e1219
DW
2295 !(test_bit(R5_UPTODATE, &dev->flags) ||
2296 test_bit(R5_Wantcompute, &dev->flags))) {
a4456856
DW
2297 if (test_bit(R5_Insync, &dev->flags))
2298 rmw++;
2299 else
2300 rmw += 2*disks; /* cannot read it */
2301 }
2302 /* Would I have to read this buffer for reconstruct_write */
2303 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
2304 !test_bit(R5_LOCKED, &dev->flags) &&
f38e1219
DW
2305 !(test_bit(R5_UPTODATE, &dev->flags) ||
2306 test_bit(R5_Wantcompute, &dev->flags))) {
2307 if (test_bit(R5_Insync, &dev->flags)) rcw++;
a4456856
DW
2308 else
2309 rcw += 2*disks;
2310 }
2311 }
45b4233c 2312 pr_debug("for sector %llu, rmw=%d rcw=%d\n",
a4456856
DW
2313 (unsigned long long)sh->sector, rmw, rcw);
2314 set_bit(STRIPE_HANDLE, &sh->state);
2315 if (rmw < rcw && rmw > 0)
2316 /* prefer read-modify-write, but need to get some data */
2317 for (i = disks; i--; ) {
2318 struct r5dev *dev = &sh->dev[i];
2319 if ((dev->towrite || i == sh->pd_idx) &&
2320 !test_bit(R5_LOCKED, &dev->flags) &&
f38e1219
DW
2321 !(test_bit(R5_UPTODATE, &dev->flags) ||
2322 test_bit(R5_Wantcompute, &dev->flags)) &&
a4456856
DW
2323 test_bit(R5_Insync, &dev->flags)) {
2324 if (
2325 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
45b4233c 2326 pr_debug("Read_old block "
a4456856
DW
2327 "%d for r-m-w\n", i);
2328 set_bit(R5_LOCKED, &dev->flags);
2329 set_bit(R5_Wantread, &dev->flags);
2330 s->locked++;
2331 } else {
2332 set_bit(STRIPE_DELAYED, &sh->state);
2333 set_bit(STRIPE_HANDLE, &sh->state);
2334 }
2335 }
2336 }
2337 if (rcw <= rmw && rcw > 0)
2338 /* want reconstruct write, but need to get some data */
2339 for (i = disks; i--; ) {
2340 struct r5dev *dev = &sh->dev[i];
2341 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2342 i != sh->pd_idx &&
2343 !test_bit(R5_LOCKED, &dev->flags) &&
f38e1219
DW
2344 !(test_bit(R5_UPTODATE, &dev->flags) ||
2345 test_bit(R5_Wantcompute, &dev->flags)) &&
a4456856
DW
2346 test_bit(R5_Insync, &dev->flags)) {
2347 if (
2348 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
45b4233c 2349 pr_debug("Read_old block "
a4456856
DW
2350 "%d for Reconstruct\n", i);
2351 set_bit(R5_LOCKED, &dev->flags);
2352 set_bit(R5_Wantread, &dev->flags);
2353 s->locked++;
2354 } else {
2355 set_bit(STRIPE_DELAYED, &sh->state);
2356 set_bit(STRIPE_HANDLE, &sh->state);
2357 }
2358 }
2359 }
2360 /* now if nothing is locked, and if we have enough data,
2361 * we can start a write request
2362 */
f38e1219
DW
2363 /* since handle_stripe can be called at any time we need to handle the
2364 * case where a compute block operation has been submitted and then a
2365 * subsequent call wants to start a write request. raid5_run_ops only
2366 * handles the case where compute block and postxor are requested
2367 * simultaneously. If this is not the case then new writes need to be
2368 * held off until the compute completes.
2369 */
976ea8d4
DW
2370 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2371 (s->locked == 0 && (rcw == 0 || rmw == 0) &&
2372 !test_bit(STRIPE_BIT_DELAY, &sh->state)))
1fe797e6 2373 schedule_reconstruction5(sh, s, rcw == 0, 0);
a4456856
DW
2374}
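/*
 * A standalone sketch (toy_* names are ours) of the read-cost comparison
 * handle_stripe_dirtying5() starts with.  rmw counts the stale blocks a
 * read-modify-write must read back (the write targets plus parity); rcw
 * counts the data blocks a reconstruct-write needs but which are not
 * fully overwritten.  A block that cannot be read at all (not in-sync)
 * makes its strategy effectively unusable - hence the 2*disks penalty.
 */
struct toy_rdev {
	int towrite;	/* a write is pending against this block */
	int overwrite;	/* ...and it covers the whole block */
	int uptodate;	/* a valid copy is already in the stripe cache */
	int insync;	/* the backing disk can be read */
};

/* returns 1 if read-modify-write needs strictly fewer reads */
static int toy_prefer_rmw(const struct toy_rdev *dev, int disks, int pd_idx)
{
	int rmw = 0, rcw = 0, i;

	for (i = 0; i < disks; i++) {
		if ((dev[i].towrite || i == pd_idx) && !dev[i].uptodate)
			rmw += dev[i].insync ? 1 : 2 * disks;
		if (!dev[i].overwrite && i != pd_idx && !dev[i].uptodate)
			rcw += dev[i].insync ? 1 : 2 * disks;
	}
	return rmw > 0 && rmw < rcw;
}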
2375
1fe797e6 2376static void handle_stripe_dirtying6(raid5_conf_t *conf,
a4456856
DW
2377 struct stripe_head *sh, struct stripe_head_state *s,
2378 struct r6_state *r6s, int disks)
2379{
2380 int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i;
34e04e87 2381 int qd_idx = sh->qd_idx;
a4456856
DW
2382 for (i = disks; i--; ) {
2383 struct r5dev *dev = &sh->dev[i];
2384 /* Would I have to read this buffer for reconstruct_write */
2385 if (!test_bit(R5_OVERWRITE, &dev->flags)
2386 && i != pd_idx && i != qd_idx
2387 && (!test_bit(R5_LOCKED, &dev->flags)
2388 ) &&
2389 !test_bit(R5_UPTODATE, &dev->flags)) {
2390 if (test_bit(R5_Insync, &dev->flags)) rcw++;
2391 else {
45b4233c 2392 pr_debug("raid6: must_compute: "
a4456856
DW
2393 "disk %d flags=%#lx\n", i, dev->flags);
2394 must_compute++;
2395 }
2396 }
2397 }
45b4233c 2398 pr_debug("for sector %llu, rcw=%d, must_compute=%d\n",
a4456856
DW
2399 (unsigned long long)sh->sector, rcw, must_compute);
2400 set_bit(STRIPE_HANDLE, &sh->state);
2401
2402 if (rcw > 0)
2403 /* want reconstruct write, but need to get some data */
2404 for (i = disks; i--; ) {
2405 struct r5dev *dev = &sh->dev[i];
2406 if (!test_bit(R5_OVERWRITE, &dev->flags)
2407 && !(s->failed == 0 && (i == pd_idx || i == qd_idx))
2408 && !test_bit(R5_LOCKED, &dev->flags) &&
2409 !test_bit(R5_UPTODATE, &dev->flags) &&
2410 test_bit(R5_Insync, &dev->flags)) {
2411 if (
2412 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
45b4233c 2413 pr_debug("Read_old stripe %llu "
a4456856
DW
2414 "block %d for Reconstruct\n",
2415 (unsigned long long)sh->sector, i);
2416 set_bit(R5_LOCKED, &dev->flags);
2417 set_bit(R5_Wantread, &dev->flags);
2418 s->locked++;
2419 } else {
45b4233c 2420 pr_debug("Request delayed stripe %llu "
a4456856
DW
2421 "block %d for Reconstruct\n",
2422 (unsigned long long)sh->sector, i);
2423 set_bit(STRIPE_DELAYED, &sh->state);
2424 set_bit(STRIPE_HANDLE, &sh->state);
2425 }
2426 }
2427 }
2428 /* now if nothing is locked, and if we have enough data, we can start a
2429 * write request
2430 */
2431 if (s->locked == 0 && rcw == 0 &&
2432 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
2433 if (must_compute > 0) {
2434 /* We have failed blocks and need to compute them */
2435 switch (s->failed) {
2436 case 0:
2437 BUG();
2438 case 1:
2439 compute_block_1(sh, r6s->failed_num[0], 0);
2440 break;
2441 case 2:
2442 compute_block_2(sh, r6s->failed_num[0],
2443 r6s->failed_num[1]);
2444 break;
2445 default: /* This request should have been failed? */
2446 BUG();
2447 }
2448 }
2449
45b4233c 2450 pr_debug("Computing parity for stripe %llu\n",
a4456856
DW
2451 (unsigned long long)sh->sector);
2452 compute_parity6(sh, RECONSTRUCT_WRITE);
2453 /* now every locked buffer is ready to be written */
2454 for (i = disks; i--; )
2455 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
45b4233c 2456 pr_debug("Writing stripe %llu block %d\n",
a4456856
DW
2457 (unsigned long long)sh->sector, i);
2458 s->locked++;
2459 set_bit(R5_Wantwrite, &sh->dev[i].flags);
2460 }
8b3e6cdc
DW
2461 if (s->locked == disks)
2462 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
2463 atomic_inc(&conf->pending_full_writes);
a4456856
DW
2464 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
2465 set_bit(STRIPE_INSYNC, &sh->state);
2466
2467 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2468 atomic_dec(&conf->preread_active_stripes);
2469 if (atomic_read(&conf->preread_active_stripes) <
2470 IO_THRESHOLD)
2471 md_wakeup_thread(conf->mddev->thread);
2472 }
2473 }
2474}
2475
2476static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2477 struct stripe_head_state *s, int disks)
2478{
ecc65c9b 2479 struct r5dev *dev = NULL;
bd2ab670 2480
a4456856 2481 set_bit(STRIPE_HANDLE, &sh->state);
e89f8962 2482
ecc65c9b
DW
2483 switch (sh->check_state) {
2484 case check_state_idle:
2485 /* start a new check operation if there are no failures */
bd2ab670 2486 if (s->failed == 0) {
bd2ab670 2487 BUG_ON(s->uptodate != disks);
ecc65c9b
DW
2488 sh->check_state = check_state_run;
2489 set_bit(STRIPE_OP_CHECK, &s->ops_request);
bd2ab670 2490 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
bd2ab670 2491 s->uptodate--;
ecc65c9b 2492 break;
bd2ab670 2493 }
ecc65c9b
DW
2494 dev = &sh->dev[s->failed_num];
2495 /* fall through */
2496 case check_state_compute_result:
2497 sh->check_state = check_state_idle;
2498 if (!dev)
2499 dev = &sh->dev[sh->pd_idx];
2500
2501 /* check that a write has not made the stripe insync */
2502 if (test_bit(STRIPE_INSYNC, &sh->state))
2503 break;
c8894419 2504
a4456856 2505 /* either failed parity check, or recovery is happening */
a4456856
DW
2506 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
2507 BUG_ON(s->uptodate != disks);
2508
2509 set_bit(R5_LOCKED, &dev->flags);
ecc65c9b 2510 s->locked++;
a4456856 2511 set_bit(R5_Wantwrite, &dev->flags);
830ea016 2512
a4456856 2513 clear_bit(STRIPE_DEGRADED, &sh->state);
a4456856 2514 set_bit(STRIPE_INSYNC, &sh->state);
ecc65c9b
DW
2515 break;
2516 case check_state_run:
2517 break; /* we will be called again upon completion */
2518 case check_state_check_result:
2519 sh->check_state = check_state_idle;
2520
2521 /* if a failure occurred during the check operation, leave
2522 * STRIPE_INSYNC not set and let the stripe be handled again
2523 */
2524 if (s->failed)
2525 break;
2526
2527 /* handle a successful check operation, if parity is correct
2528 * we are done. Otherwise update the mismatch count and repair
2529 * parity if !MD_RECOVERY_CHECK
2530 */
2531 if (sh->ops.zero_sum_result == 0)
2532 /* parity is correct (on disc,
2533 * not in buffer any more)
2534 */
2535 set_bit(STRIPE_INSYNC, &sh->state);
2536 else {
2537 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2538 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2539 /* don't try to repair!! */
2540 set_bit(STRIPE_INSYNC, &sh->state);
2541 else {
2542 sh->check_state = check_state_compute_run;
976ea8d4 2543 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
ecc65c9b
DW
2544 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2545 set_bit(R5_Wantcompute,
2546 &sh->dev[sh->pd_idx].flags);
2547 sh->ops.target = sh->pd_idx;
2548 s->uptodate++;
2549 }
2550 }
2551 break;
2552 case check_state_compute_run:
2553 break;
2554 default:
2555 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2556 __func__, sh->check_state,
2557 (unsigned long long) sh->sector);
2558 BUG();
a4456856
DW
2559 }
2560}
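/*
 * The check_state transitions driven above, assuming no device fails
 * while the check is in flight (a summary, not driver code):
 *
 *   check_state_idle
 *     -> check_state_run             xor-check of the parity block queued
 *     -> check_state_check_result    zero_sum_result is now valid
 *          parity correct, or MD_RECOVERY_CHECK set (check only, no
 *          repair): mark STRIPE_INSYNC and return to idle
 *          parity wrong and repair permitted:
 *     -> check_state_compute_run     recompute the parity block
 *     -> check_state_compute_result  write the repaired parity, idle
 */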
2561
2562
2563static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
2564 struct stripe_head_state *s,
2565 struct r6_state *r6s, struct page *tmp_page,
2566 int disks)
2567{
2568 int update_p = 0, update_q = 0;
2569 struct r5dev *dev;
2570 int pd_idx = sh->pd_idx;
34e04e87 2571 int qd_idx = sh->qd_idx;
a4456856
DW
2572
2573 set_bit(STRIPE_HANDLE, &sh->state);
2574
2575 BUG_ON(s->failed > 2);
2576 BUG_ON(s->uptodate < disks);
2577 /* Want to check and possibly repair P and Q.
2578 * However there could be one 'failed' device, in which
2579 * case we can only check one of them, possibly using the
2580 * other to generate missing data
2581 */
2582
2583 /* If !tmp_page, we cannot do the calculations,
2584 * but as we have set STRIPE_HANDLE, we will soon be called
2585 * by handle_stripe() with a tmp_page - just wait until then.
2586 */
2587 if (tmp_page) {
2588 if (s->failed == r6s->q_failed) {
2589 /* The only possible failed device holds 'Q', so it
2590 * makes sense to check P (If anything else were failed,
2591 * we would have used P to recreate it).
2592 */
2593 compute_block_1(sh, pd_idx, 1);
2594 if (!page_is_zero(sh->dev[pd_idx].page)) {
2595 compute_block_1(sh, pd_idx, 0);
2596 update_p = 1;
2597 }
2598 }
2599 if (!r6s->q_failed && s->failed < 2) {
2600 /* q is not failed, and we didn't use it to generate
2601 * anything, so it makes sense to check it
2602 */
2603 memcpy(page_address(tmp_page),
2604 page_address(sh->dev[qd_idx].page),
2605 STRIPE_SIZE);
2606 compute_parity6(sh, UPDATE_PARITY);
2607 if (memcmp(page_address(tmp_page),
2608 page_address(sh->dev[qd_idx].page),
2609 STRIPE_SIZE) != 0) {
2610 clear_bit(STRIPE_INSYNC, &sh->state);
2611 update_q = 1;
2612 }
2613 }
2614 if (update_p || update_q) {
2615 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2616 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2617 /* don't try to repair!! */
2618 update_p = update_q = 0;
2619 }
2620
2621 /* now write out any block on a failed drive,
2622 * or P or Q if they need it
2623 */
2624
2625 if (s->failed == 2) {
2626 dev = &sh->dev[r6s->failed_num[1]];
2627 s->locked++;
2628 set_bit(R5_LOCKED, &dev->flags);
2629 set_bit(R5_Wantwrite, &dev->flags);
2630 }
2631 if (s->failed >= 1) {
2632 dev = &sh->dev[r6s->failed_num[0]];
2633 s->locked++;
2634 set_bit(R5_LOCKED, &dev->flags);
2635 set_bit(R5_Wantwrite, &dev->flags);
2636 }
2637
2638 if (update_p) {
2639 dev = &sh->dev[pd_idx];
2640 s->locked++;
2641 set_bit(R5_LOCKED, &dev->flags);
2642 set_bit(R5_Wantwrite, &dev->flags);
2643 }
2644 if (update_q) {
2645 dev = &sh->dev[qd_idx];
2646 s->locked++;
2647 set_bit(R5_LOCKED, &dev->flags);
2648 set_bit(R5_Wantwrite, &dev->flags);
2649 }
2650 clear_bit(STRIPE_DEGRADED, &sh->state);
2651
2652 set_bit(STRIPE_INSYNC, &sh->state);
2653 }
2654}
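/*
 * A note on the verification trick above (summary, not driver code):
 * compute_block_1(sh, pd_idx, 1) is called with 'nozero' set, so the
 * freshly xor-ed parity lands on top of the parity already in the
 * buffer instead of replacing it.  If the on-disk parity was correct
 * the two cancel and page_is_zero() sees an all-zero page; only then
 * is the second compute_block_1() call needed to rebuild real parity.
 * Q has no equivalent shortcut, so the current Q is saved in tmp_page,
 * regenerated in place by compute_parity6(), and memcmp()'d.
 */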
2655
2656static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
2657 struct r6_state *r6s)
2658{
2659 int i;
2660
2661 /* We have read all the blocks in this stripe and now we need to
2662 * copy some of them into a target stripe for expand.
2663 */
f0a50d37 2664 struct dma_async_tx_descriptor *tx = NULL;
a4456856
DW
2665 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2666 for (i = 0; i < sh->disks; i++)
34e04e87 2667 if (i != sh->pd_idx && i != sh->qd_idx) {
911d4ee8 2668 int dd_idx, j;
a4456856
DW
2669 struct stripe_head *sh2;
2670
784052ec 2671 sector_t bn = compute_blocknr(sh, i, 1);
911d4ee8
N
2672 sector_t s = raid5_compute_sector(conf, bn, 0,
2673 &dd_idx, NULL);
b5663ba4 2674 sh2 = get_active_stripe(conf, s, 0, 1);
a4456856
DW
2675 if (sh2 == NULL)
2676 /* so far only the early blocks of this stripe
2677 * have been requested. When later blocks
2678 * get requested, we will try again
2679 */
2680 continue;
2681 if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
2682 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
2683 /* must have already done this block */
2684 release_stripe(sh2);
2685 continue;
2686 }
f0a50d37
DW
2687
2688 /* place all the copies on one channel */
2689 tx = async_memcpy(sh2->dev[dd_idx].page,
2690 sh->dev[i].page, 0, 0, STRIPE_SIZE,
2691 ASYNC_TX_DEP_ACK, tx, NULL, NULL);
2692
a4456856
DW
2693 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
2694 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
2695 for (j = 0; j < conf->raid_disks; j++)
2696 if (j != sh2->pd_idx &&
d0dabf7e 2697 (!r6s || j != sh2->qd_idx) &&
a4456856
DW
2698 !test_bit(R5_Expanded, &sh2->dev[j].flags))
2699 break;
2700 if (j == conf->raid_disks) {
2701 set_bit(STRIPE_EXPAND_READY, &sh2->state);
2702 set_bit(STRIPE_HANDLE, &sh2->state);
2703 }
2704 release_stripe(sh2);
f0a50d37 2705
a4456856 2706 }
a2e08551
N
2707 /* done submitting copies, wait for them to complete */
2708 if (tx) {
2709 async_tx_ack(tx);
2710 dma_wait_for_async_tx(tx);
2711 }
a4456856 2712}
1da177e4 2713
6bfe0b49 2714
1da177e4
LT
2715/*
2716 * handle_stripe - do things to a stripe.
2717 *
2718 * We lock the stripe and then examine the state of various bits
2719 * to see what needs to be done.
2720 * Possible results:
2721 * return some read requests which now have data
2722 * return some write requests which are safely on disc
2723 * schedule a read on some buffers
2724 * schedule a write of some buffers
2725 * return confirmation of parity correctness
2726 *
1da177e4
LT
2727 * buffers are taken off read_list or write_list, and bh_cache buffers
2728 * get BH_Lock set before the stripe lock is released.
2729 *
2730 */
a4456856 2731
df10cfbc 2732static bool handle_stripe5(struct stripe_head *sh)
1da177e4
LT
2733{
2734 raid5_conf_t *conf = sh->raid_conf;
a4456856
DW
2735 int disks = sh->disks, i;
2736 struct bio *return_bi = NULL;
2737 struct stripe_head_state s;
1da177e4 2738 struct r5dev *dev;
6bfe0b49 2739 mdk_rdev_t *blocked_rdev = NULL;
e0a115e5 2740 int prexor;
1da177e4 2741
a4456856 2742 memset(&s, 0, sizeof(s));
600aa109
DW
2743 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
2744 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
2745 atomic_read(&sh->count), sh->pd_idx, sh->check_state,
2746 sh->reconstruct_state);
1da177e4
LT
2747
2748 spin_lock(&sh->lock);
2749 clear_bit(STRIPE_HANDLE, &sh->state);
2750 clear_bit(STRIPE_DELAYED, &sh->state);
2751
a4456856
DW
2752 s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
2753 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2754 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
def6ae26 2755
83de75cc 2756 /* Now to look around and see what can be done */
9910f16a 2757 rcu_read_lock();
1da177e4
LT
2758 for (i=disks; i--; ) {
2759 mdk_rdev_t *rdev;
a4456856 2760 struct r5dev *dev = &sh->dev[i];
1da177e4 2761 clear_bit(R5_Insync, &dev->flags);
1da177e4 2762
b5e98d65
DW
2763 pr_debug("check %d: state 0x%lx toread %p read %p write %p "
2764 "written %p\n", i, dev->flags, dev->toread, dev->read,
2765 dev->towrite, dev->written);
2766
2767 /* maybe we can request a biofill operation
2768 *
2769 * new wantfill requests are only permitted while
83de75cc 2770 * ops_complete_biofill is guaranteed to be inactive
b5e98d65
DW
2771 */
2772 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
83de75cc 2773 !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
b5e98d65 2774 set_bit(R5_Wantfill, &dev->flags);
1da177e4
LT
2775
2776 /* now count some things */
a4456856
DW
2777 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
2778 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
f38e1219 2779 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;
1da177e4 2780
b5e98d65
DW
2781 if (test_bit(R5_Wantfill, &dev->flags))
2782 s.to_fill++;
2783 else if (dev->toread)
a4456856 2784 s.to_read++;
1da177e4 2785 if (dev->towrite) {
a4456856 2786 s.to_write++;
1da177e4 2787 if (!test_bit(R5_OVERWRITE, &dev->flags))
a4456856 2788 s.non_overwrite++;
1da177e4 2789 }
a4456856
DW
2790 if (dev->written)
2791 s.written++;
9910f16a 2792 rdev = rcu_dereference(conf->disks[i].rdev);
ac4090d2
N
2793 if (blocked_rdev == NULL &&
2794 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
6bfe0b49
DW
2795 blocked_rdev = rdev;
2796 atomic_inc(&rdev->nr_pending);
6bfe0b49 2797 }
b2d444d7 2798 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
14f8d26b 2799 /* The ReadError flag will just be confusing now */
4e5314b5
N
2800 clear_bit(R5_ReadError, &dev->flags);
2801 clear_bit(R5_ReWrite, &dev->flags);
2802 }
b2d444d7 2803 if (!rdev || !test_bit(In_sync, &rdev->flags)
4e5314b5 2804 || test_bit(R5_ReadError, &dev->flags)) {
a4456856
DW
2805 s.failed++;
2806 s.failed_num = i;
1da177e4
LT
2807 } else
2808 set_bit(R5_Insync, &dev->flags);
2809 }
9910f16a 2810 rcu_read_unlock();
b5e98d65 2811
6bfe0b49 2812 if (unlikely(blocked_rdev)) {
ac4090d2
N
2813 if (s.syncing || s.expanding || s.expanded ||
2814 s.to_write || s.written) {
2815 set_bit(STRIPE_HANDLE, &sh->state);
2816 goto unlock;
2817 }
2818 /* There is nothing for the blocked_rdev to block */
2819 rdev_dec_pending(blocked_rdev, conf->mddev);
2820 blocked_rdev = NULL;
6bfe0b49
DW
2821 }
2822
83de75cc
DW
2823 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
2824 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
2825 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
2826 }
b5e98d65 2827
45b4233c 2828 pr_debug("locked=%d uptodate=%d to_read=%d"
1da177e4 2829 " to_write=%d failed=%d failed_num=%d\n",
a4456856
DW
2830 s.locked, s.uptodate, s.to_read, s.to_write,
2831 s.failed, s.failed_num);
1da177e4
LT
2832 /* check if the array has lost two devices and, if so, some requests might
2833 * need to be failed
2834 */
a4456856 2835 if (s.failed > 1 && s.to_read+s.to_write+s.written)
1fe797e6 2836 handle_failed_stripe(conf, sh, &s, disks, &return_bi);
a4456856 2837 if (s.failed > 1 && s.syncing) {
1da177e4
LT
2838 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
2839 clear_bit(STRIPE_SYNCING, &sh->state);
a4456856 2840 s.syncing = 0;
1da177e4
LT
2841 }
2842
2843 /* might be able to return some write requests if the parity block
2844 * is safe, or on a failed drive
2845 */
2846 dev = &sh->dev[sh->pd_idx];
a4456856
DW
2847 if ( s.written &&
2848 ((test_bit(R5_Insync, &dev->flags) &&
2849 !test_bit(R5_LOCKED, &dev->flags) &&
2850 test_bit(R5_UPTODATE, &dev->flags)) ||
2851 (s.failed == 1 && s.failed_num == sh->pd_idx)))
1fe797e6 2852 handle_stripe_clean_event(conf, sh, disks, &return_bi);
1da177e4
LT
2853
2854 /* Now we might consider reading some blocks, either to check/generate
2855 * parity, or to satisfy requests
2856 * or to load a block that is being partially written.
2857 */
a4456856 2858 if (s.to_read || s.non_overwrite ||
976ea8d4 2859 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
1fe797e6 2860 handle_stripe_fill5(sh, &s, disks);
1da177e4 2861
e33129d8
DW
2862 /* Now we check to see if any write operations have recently
2863 * completed
2864 */
e0a115e5 2865 prexor = 0;
d8ee0728 2866 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
e0a115e5 2867 prexor = 1;
d8ee0728
DW
2868 if (sh->reconstruct_state == reconstruct_state_drain_result ||
2869 sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
600aa109 2870 sh->reconstruct_state = reconstruct_state_idle;
e33129d8
DW
2871
2872 /* All the 'written' buffers and the parity block are ready to
2873 * be written back to disk
2874 */
2875 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
2876 for (i = disks; i--; ) {
2877 dev = &sh->dev[i];
2878 if (test_bit(R5_LOCKED, &dev->flags) &&
2879 (i == sh->pd_idx || dev->written)) {
2880 pr_debug("Writing block %d\n", i);
2881 set_bit(R5_Wantwrite, &dev->flags);
e0a115e5
DW
2882 if (prexor)
2883 continue;
e33129d8
DW
2884 if (!test_bit(R5_Insync, &dev->flags) ||
2885 (i == sh->pd_idx && s.failed == 0))
2886 set_bit(STRIPE_INSYNC, &sh->state);
2887 }
2888 }
2889 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2890 atomic_dec(&conf->preread_active_stripes);
2891 if (atomic_read(&conf->preread_active_stripes) <
2892 IO_THRESHOLD)
2893 md_wakeup_thread(conf->mddev->thread);
2894 }
2895 }
2896
2897 /* Now to consider new write requests and what else, if anything
2898 * should be read. We do not handle new writes when:
2899 * 1/ A 'write' operation (copy+xor) is already in flight.
2900 * 2/ A 'check' operation is in flight, as it may clobber the parity
2901 * block.
2902 */
600aa109 2903 if (s.to_write && !sh->reconstruct_state && !sh->check_state)
1fe797e6 2904 handle_stripe_dirtying5(conf, sh, &s, disks);
1da177e4
LT
2905
2906 /* maybe we need to check and possibly fix the parity for this stripe
e89f8962
DW
2907 * Any reads will already have been scheduled, so we just see if enough
2908 * data is available. The parity check is held off while parity
2909 * dependent operations are in flight.
1da177e4 2910 */
ecc65c9b
DW
2911 if (sh->check_state ||
2912 (s.syncing && s.locked == 0 &&
976ea8d4 2913 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
ecc65c9b 2914 !test_bit(STRIPE_INSYNC, &sh->state)))
a4456856 2915 handle_parity_checks5(conf, sh, &s, disks);
e89f8962 2916
a4456856 2917 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
1da177e4
LT
2918 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
2919 clear_bit(STRIPE_SYNCING, &sh->state);
2920 }
4e5314b5
N
2921
2922 /* If the failed drive is just a ReadError, then we might need to progress
2923 * the repair/check process
2924 */
a4456856
DW
2925 if (s.failed == 1 && !conf->mddev->ro &&
2926 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
2927 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
2928 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
4e5314b5 2929 ) {
a4456856 2930 dev = &sh->dev[s.failed_num];
4e5314b5
N
2931 if (!test_bit(R5_ReWrite, &dev->flags)) {
2932 set_bit(R5_Wantwrite, &dev->flags);
2933 set_bit(R5_ReWrite, &dev->flags);
2934 set_bit(R5_LOCKED, &dev->flags);
a4456856 2935 s.locked++;
4e5314b5
N
2936 } else {
2937 /* let's read it back */
2938 set_bit(R5_Wantread, &dev->flags);
2939 set_bit(R5_LOCKED, &dev->flags);
a4456856 2940 s.locked++;
4e5314b5
N
2941 }
2942 }
2943
600aa109
DW
2944 /* Finish reconstruct operations initiated by the expansion process */
2945 if (sh->reconstruct_state == reconstruct_state_result) {
ab69ae12
N
2946 struct stripe_head *sh2
2947 = get_active_stripe(conf, sh->sector, 1, 1);
2948 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
2949 /* sh cannot be written until sh2 has been read.
2950 * so arrange for sh to be delayed a little
2951 */
2952 set_bit(STRIPE_DELAYED, &sh->state);
2953 set_bit(STRIPE_HANDLE, &sh->state);
2954 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
2955 &sh2->state))
2956 atomic_inc(&conf->preread_active_stripes);
2957 release_stripe(sh2);
2958 goto unlock;
2959 }
2960 if (sh2)
2961 release_stripe(sh2);
2962
600aa109 2963 sh->reconstruct_state = reconstruct_state_idle;
f0a50d37 2964 clear_bit(STRIPE_EXPANDING, &sh->state);
23397883 2965 for (i = conf->raid_disks; i--; ) {
ccfcc3c1 2966 set_bit(R5_Wantwrite, &sh->dev[i].flags);
23397883 2967 set_bit(R5_LOCKED, &sh->dev[i].flags);
efe31143 2968 s.locked++;
23397883 2969 }
f0a50d37
DW
2970 }
2971
2972 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
600aa109 2973 !sh->reconstruct_state) {
f0a50d37
DW
2974 /* Need to write out all blocks after computing parity */
2975 sh->disks = conf->raid_disks;
911d4ee8 2976 stripe_set_idx(sh->sector, conf, 0, sh);
1fe797e6 2977 schedule_reconstruction5(sh, &s, 1, 1);
600aa109 2978 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
ccfcc3c1 2979 clear_bit(STRIPE_EXPAND_READY, &sh->state);
f6705578 2980 atomic_dec(&conf->reshape_stripes);
ccfcc3c1
N
2981 wake_up(&conf->wait_for_overlap);
2982 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
2983 }
2984
0f94e87c 2985 if (s.expanding && s.locked == 0 &&
976ea8d4 2986 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
a4456856 2987 handle_stripe_expansion(conf, sh, NULL);
ccfcc3c1 2988
6bfe0b49 2989 unlock:
1da177e4
LT
2990 spin_unlock(&sh->lock);
2991
6bfe0b49
DW
2992 /* wait for this device to become unblocked */
2993 if (unlikely(blocked_rdev))
2994 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
2995
600aa109
DW
2996 if (s.ops_request)
2997 raid5_run_ops(sh, s.ops_request);
d84e0f10 2998
c4e5ac0a 2999 ops_run_io(sh, &s);
1da177e4 3000
a4456856 3001 return_io(return_bi);
df10cfbc
DW
3002
3003 return blocked_rdev == NULL;
1da177e4
LT
3004}
3005
df10cfbc 3006static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
1da177e4 3007{
bff61975 3008 raid5_conf_t *conf = sh->raid_conf;
f416885e 3009 int disks = sh->disks;
a4456856 3010 struct bio *return_bi = NULL;
34e04e87 3011 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
a4456856
DW
3012 struct stripe_head_state s;
3013 struct r6_state r6s;
16a53ecc 3014 struct r5dev *dev, *pdev, *qdev;
6bfe0b49 3015 mdk_rdev_t *blocked_rdev = NULL;
1da177e4 3016
45b4233c 3017 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
a4456856
DW
3018 "pd_idx=%d, qd_idx=%d\n",
3019 (unsigned long long)sh->sector, sh->state,
34e04e87 3020 atomic_read(&sh->count), pd_idx, qd_idx);
a4456856 3021 memset(&s, 0, sizeof(s));
72626685 3022
16a53ecc
N
3023 spin_lock(&sh->lock);
3024 clear_bit(STRIPE_HANDLE, &sh->state);
3025 clear_bit(STRIPE_DELAYED, &sh->state);
3026
a4456856
DW
3027 s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
3028 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3029 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
16a53ecc 3030 /* Now to look around and see what can be done */
1da177e4
LT
3031
3032 rcu_read_lock();
16a53ecc
N
3033 for (i=disks; i--; ) {
3034 mdk_rdev_t *rdev;
3035 dev = &sh->dev[i];
3036 clear_bit(R5_Insync, &dev->flags);
1da177e4 3037
45b4233c 3038 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
16a53ecc
N
3039 i, dev->flags, dev->toread, dev->towrite, dev->written);
3040 /* maybe we can reply to a read */
3041 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
3042 struct bio *rbi, *rbi2;
45b4233c 3043 pr_debug("Return read for disc %d\n", i);
16a53ecc
N
3044 spin_lock_irq(&conf->device_lock);
3045 rbi = dev->toread;
3046 dev->toread = NULL;
3047 if (test_and_clear_bit(R5_Overlap, &dev->flags))
3048 wake_up(&conf->wait_for_overlap);
3049 spin_unlock_irq(&conf->device_lock);
3050 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
3051 copy_data(0, rbi, dev->page, dev->sector);
3052 rbi2 = r5_next_bio(rbi, dev->sector);
3053 spin_lock_irq(&conf->device_lock);
960e739d 3054 if (!raid5_dec_bi_phys_segments(rbi)) {
16a53ecc
N
3055 rbi->bi_next = return_bi;
3056 return_bi = rbi;
3057 }
3058 spin_unlock_irq(&conf->device_lock);
3059 rbi = rbi2;
3060 }
3061 }
1da177e4 3062
16a53ecc 3063 /* now count some things */
a4456856
DW
3064 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
3065 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
1da177e4 3066
16a53ecc 3067
a4456856
DW
3068 if (dev->toread)
3069 s.to_read++;
16a53ecc 3070 if (dev->towrite) {
a4456856 3071 s.to_write++;
16a53ecc 3072 if (!test_bit(R5_OVERWRITE, &dev->flags))
a4456856 3073 s.non_overwrite++;
16a53ecc 3074 }
a4456856
DW
3075 if (dev->written)
3076 s.written++;
16a53ecc 3077 rdev = rcu_dereference(conf->disks[i].rdev);
ac4090d2
N
3078 if (blocked_rdev == NULL &&
3079 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
6bfe0b49
DW
3080 blocked_rdev = rdev;
3081 atomic_inc(&rdev->nr_pending);
6bfe0b49 3082 }
16a53ecc
N
3083 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
3084 /* The ReadError flag will just be confusing now */
3085 clear_bit(R5_ReadError, &dev->flags);
3086 clear_bit(R5_ReWrite, &dev->flags);
1da177e4 3087 }
16a53ecc
N
3088 if (!rdev || !test_bit(In_sync, &rdev->flags)
3089 || test_bit(R5_ReadError, &dev->flags)) {
a4456856
DW
3090 if (s.failed < 2)
3091 r6s.failed_num[s.failed] = i;
3092 s.failed++;
16a53ecc
N
3093 } else
3094 set_bit(R5_Insync, &dev->flags);
1da177e4
LT
3095 }
3096 rcu_read_unlock();
6bfe0b49
DW
3097
3098 if (unlikely(blocked_rdev)) {
ac4090d2
N
3099 if (s.syncing || s.expanding || s.expanded ||
3100 s.to_write || s.written) {
3101 set_bit(STRIPE_HANDLE, &sh->state);
3102 goto unlock;
3103 }
3104 /* There is nothing for the blocked_rdev to block */
3105 rdev_dec_pending(blocked_rdev, conf->mddev);
3106 blocked_rdev = NULL;
6bfe0b49 3107 }
ac4090d2 3108
45b4233c 3109 pr_debug("locked=%d uptodate=%d to_read=%d"
16a53ecc 3110 " to_write=%d failed=%d failed_num=%d,%d\n",
a4456856
DW
3111 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3112 r6s.failed_num[0], r6s.failed_num[1]);
3113 /* check if the array has lost >2 devices and, if so, some requests
3114 * might need to be failed
16a53ecc 3115 */
a4456856 3116 if (s.failed > 2 && s.to_read+s.to_write+s.written)
1fe797e6 3117 handle_failed_stripe(conf, sh, &s, disks, &return_bi);
a4456856 3118 if (s.failed > 2 && s.syncing) {
16a53ecc
N
3119 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
3120 clear_bit(STRIPE_SYNCING, &sh->state);
a4456856 3121 s.syncing = 0;
16a53ecc
N
3122 }
3123
3124 /*
3125 * might be able to return some write requests if the parity blocks
3126 * are safe, or on a failed drive
3127 */
3128 pdev = &sh->dev[pd_idx];
a4456856
DW
3129 r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
3130 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
34e04e87
N
3131 qdev = &sh->dev[qd_idx];
3132 r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx)
3133 || (s.failed >= 2 && r6s.failed_num[1] == qd_idx);
a4456856
DW
3134
3135 if ( s.written &&
3136 ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
16a53ecc 3137 && !test_bit(R5_LOCKED, &pdev->flags)
a4456856
DW
3138 && test_bit(R5_UPTODATE, &pdev->flags)))) &&
3139 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
16a53ecc 3140 && !test_bit(R5_LOCKED, &qdev->flags)
a4456856 3141 && test_bit(R5_UPTODATE, &qdev->flags)))))
1fe797e6 3142 handle_stripe_clean_event(conf, sh, disks, &return_bi);
16a53ecc
N
3143
3144 /* Now we might consider reading some blocks, either to check/generate
3145 * parity, or to satisfy requests
3146 * or to load a block that is being partially written.
3147 */
a4456856
DW
3148 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
3149 (s.syncing && (s.uptodate < disks)) || s.expanding)
1fe797e6 3150 handle_stripe_fill6(sh, &s, &r6s, disks);
16a53ecc
N
3151
3152 /* now to consider writing and what else, if anything should be read */
a4456856 3153 if (s.to_write)
1fe797e6 3154 handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);
16a53ecc
N
3155
3156 /* maybe we need to check and possibly fix the parity for this stripe
a4456856
DW
3157 * Any reads will already have been scheduled, so we just see if enough
3158 * data is available
16a53ecc 3159 */
a4456856
DW
3160 if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state))
3161 handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks);
16a53ecc 3162
a4456856 3163 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
16a53ecc
N
3164 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
3165 clear_bit(STRIPE_SYNCING, &sh->state);
3166 }
3167
3168 /* If the failed drives are just a ReadError, then we might need
3169 * to progress the repair/check process
3170 */
a4456856
DW
3171 if (s.failed <= 2 && !conf->mddev->ro)
3172 for (i = 0; i < s.failed; i++) {
3173 dev = &sh->dev[r6s.failed_num[i]];
16a53ecc
N
3174 if (test_bit(R5_ReadError, &dev->flags)
3175 && !test_bit(R5_LOCKED, &dev->flags)
3176 && test_bit(R5_UPTODATE, &dev->flags)
3177 ) {
3178 if (!test_bit(R5_ReWrite, &dev->flags)) {
3179 set_bit(R5_Wantwrite, &dev->flags);
3180 set_bit(R5_ReWrite, &dev->flags);
3181 set_bit(R5_LOCKED, &dev->flags);
3182 } else {
3183 /* let's read it back */
3184 set_bit(R5_Wantread, &dev->flags);
3185 set_bit(R5_LOCKED, &dev->flags);
3186 }
3187 }
3188 }
f416885e 3189
a4456856 3190 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
ab69ae12
N
3191 struct stripe_head *sh2
3192 = get_active_stripe(conf, sh->sector, 1, 1);
3193 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
3194 /* sh cannot be written until sh2 has been read.
3195 * so arrange for sh to be delayed a little
3196 */
3197 set_bit(STRIPE_DELAYED, &sh->state);
3198 set_bit(STRIPE_HANDLE, &sh->state);
3199 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3200 &sh2->state))
3201 atomic_inc(&conf->preread_active_stripes);
3202 release_stripe(sh2);
3203 goto unlock;
3204 }
3205 if (sh2)
3206 release_stripe(sh2);
3207
f416885e
N
3208 /* Need to write out all blocks after computing P&Q */
3209 sh->disks = conf->raid_disks;
911d4ee8 3210 stripe_set_idx(sh->sector, conf, 0, sh);
f416885e
N
3211 compute_parity6(sh, RECONSTRUCT_WRITE);
3212 for (i = conf->raid_disks ; i-- ; ) {
3213 set_bit(R5_LOCKED, &sh->dev[i].flags);
a4456856 3214 s.locked++;
f416885e
N
3215 set_bit(R5_Wantwrite, &sh->dev[i].flags);
3216 }
3217 clear_bit(STRIPE_EXPANDING, &sh->state);
a4456856 3218 } else if (s.expanded) {
f416885e
N
3219 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3220 atomic_dec(&conf->reshape_stripes);
3221 wake_up(&conf->wait_for_overlap);
3222 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3223 }
3224
0f94e87c 3225 if (s.expanding && s.locked == 0 &&
976ea8d4 3226 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
a4456856 3227 handle_stripe_expansion(conf, sh, &r6s);
f416885e 3228
6bfe0b49 3229 unlock:
16a53ecc
N
3230 spin_unlock(&sh->lock);
3231
6bfe0b49
DW
3232 /* wait for this device to become unblocked */
3233 if (unlikely(blocked_rdev))
3234 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
3235
f0e43bcd 3236 ops_run_io(sh, &s);
16a53ecc 3237
f0e43bcd 3238 return_io(return_bi);
df10cfbc
DW
3239
3240 return blocked_rdev == NULL;
16a53ecc
N
3241}
3242
df10cfbc
DW
3243/* returns true if the stripe was handled */
3244static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page)
16a53ecc
N
3245{
3246 if (sh->raid_conf->level == 6)
df10cfbc 3247 return handle_stripe6(sh, tmp_page);
16a53ecc 3248 else
df10cfbc 3249 return handle_stripe5(sh);
16a53ecc
N
3250}
3251
3252
3253
3254static void raid5_activate_delayed(raid5_conf_t *conf)
3255{
3256 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
3257 while (!list_empty(&conf->delayed_list)) {
3258 struct list_head *l = conf->delayed_list.next;
3259 struct stripe_head *sh;
3260 sh = list_entry(l, struct stripe_head, lru);
3261 list_del_init(l);
3262 clear_bit(STRIPE_DELAYED, &sh->state);
3263 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3264 atomic_inc(&conf->preread_active_stripes);
8b3e6cdc 3265 list_add_tail(&sh->lru, &conf->hold_list);
16a53ecc 3266 }
6ed3003c
N
3267 } else
3268 blk_plug_device(conf->mddev->queue);
16a53ecc
N
3269}
3270
3271static void activate_bit_delay(raid5_conf_t *conf)
3272{
3273 /* device_lock is held */
3274 struct list_head head;
3275 list_add(&head, &conf->bitmap_list);
3276 list_del_init(&conf->bitmap_list);
3277 while (!list_empty(&head)) {
3278 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
3279 list_del_init(&sh->lru);
3280 atomic_inc(&sh->count);
3281 __release_stripe(conf, sh);
3282 }
3283}
3284
3285static void unplug_slaves(mddev_t *mddev)
3286{
3287 raid5_conf_t *conf = mddev_to_conf(mddev);
3288 int i;
3289
3290 rcu_read_lock();
3291 for (i=0; i<mddev->raid_disks; i++) {
3292 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
3293 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
165125e1 3294 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
16a53ecc
N
3295
3296 atomic_inc(&rdev->nr_pending);
3297 rcu_read_unlock();
3298
2ad8b1ef 3299 blk_unplug(r_queue);
16a53ecc
N
3300
3301 rdev_dec_pending(rdev, mddev);
3302 rcu_read_lock();
3303 }
3304 }
3305 rcu_read_unlock();
3306}
3307
165125e1 3308static void raid5_unplug_device(struct request_queue *q)
16a53ecc
N
3309{
3310 mddev_t *mddev = q->queuedata;
3311 raid5_conf_t *conf = mddev_to_conf(mddev);
3312 unsigned long flags;
3313
3314 spin_lock_irqsave(&conf->device_lock, flags);
3315
3316 if (blk_remove_plug(q)) {
3317 conf->seq_flush++;
3318 raid5_activate_delayed(conf);
72626685 3319 }
1da177e4
LT
3320 md_wakeup_thread(mddev->thread);
3321
3322 spin_unlock_irqrestore(&conf->device_lock, flags);
3323
3324 unplug_slaves(mddev);
3325}
3326
f022b2fd
N
3327static int raid5_congested(void *data, int bits)
3328{
3329 mddev_t *mddev = data;
3330 raid5_conf_t *conf = mddev_to_conf(mddev);
3331
3332 /* No difference between reads and writes. Just check
3333 * how busy the stripe_cache is
3334 */
3335 if (conf->inactive_blocked)
3336 return 1;
3337 if (conf->quiesce)
3338 return 1;
3339 if (list_empty_careful(&conf->inactive_list))
3340 return 1;
3341
3342 return 0;
3343}
3344
23032a0e
RBJ
3345/* We want read requests to align with chunks where possible,
3346 * but write requests don't need to.
3347 */
cc371e66
AK
3348static int raid5_mergeable_bvec(struct request_queue *q,
3349 struct bvec_merge_data *bvm,
3350 struct bio_vec *biovec)
3351 {
3352 mddev_t *mddev = q->queuedata;
cc371e66 3353 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
3354 int max;
3355 unsigned int chunk_sectors = mddev->chunk_size >> 9;
cc371e66 3356 unsigned int bio_sectors = bvm->bi_size >> 9;
23032a0e 3357
cc371e66 3358 if ((bvm->bi_rw & 1) == WRITE)
3359 return biovec->bv_len; /* always allow writes to be mergeable */
3360
3361 if (mddev->new_chunk < mddev->chunk_size)
3362 chunk_sectors = mddev->new_chunk >> 9;
3363 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3364 if (max < 0) max = 0;
3365 if (max <= biovec->bv_len && bio_sectors == 0)
3366 return biovec->bv_len;
3367 else
3368 return max;
3369 }
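
/* Editor's sketch (not driver code) of the merge arithmetic above, reshaped
 * slightly so the negative case is clamped before shifting.  It assumes
 * chunk_sectors is a power of two, as md requires, so
 * "sector & (chunk_sectors - 1)" is the offset into the current chunk.
 */
#if 0
#include <assert.h>

/* Bytes a read ending at sector+bio_sectors may still grow by without
 * crossing a chunk boundary; all sizes are in 512-byte sectors. */
static int bytes_to_chunk_boundary(unsigned long long sector,
				   unsigned int chunk_sectors,
				   unsigned int bio_sectors)
{
	long long in_chunk = (long long)(sector & (chunk_sectors - 1));
	long long rem = (long long)chunk_sectors - (in_chunk + bio_sectors);

	return rem < 0 ? 0 : (int)(rem << 9);
}

int main(void)
{
	/* 128-sector (64K) chunks: a bio starting 120 sectors into a
	 * chunk may take at most 8 more sectors = 4096 bytes. */
	assert(bytes_to_chunk_boundary(120, 128, 0) == 4096);
	return 0;
}
#endif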
3370
3371
3372 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
3373 {
3374 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3375 unsigned int chunk_sectors = mddev->chunk_size >> 9;
3376 unsigned int bio_sectors = bio->bi_size >> 9;
3377
3378 if (mddev->new_chunk < mddev->chunk_size)
3379 chunk_sectors = mddev->new_chunk >> 9;
3380 return chunk_sectors >=
3381 ((sector & (chunk_sectors - 1)) + bio_sectors);
3382 }
3383
3384 /*
3385 * add bio to the retry LIFO ( in O(1) ... we are in interrupt )
3386 * later sampled by raid5d.
3387 */
3388 static void add_bio_to_retry(struct bio *bi, raid5_conf_t *conf)
3389 {
3390 unsigned long flags;
3391
3392 spin_lock_irqsave(&conf->device_lock, flags);
3393
3394 bi->bi_next = conf->retry_read_aligned_list;
3395 conf->retry_read_aligned_list = bi;
3396
3397 spin_unlock_irqrestore(&conf->device_lock, flags);
3398 md_wakeup_thread(conf->mddev->thread);
3399 }
3400
3401
3402 static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3403 {
3404 struct bio *bi;
3405
3406 bi = conf->retry_read_aligned;
3407 if (bi) {
3408 conf->retry_read_aligned = NULL;
3409 return bi;
3410 }
3411 bi = conf->retry_read_aligned_list;
3412 if (bi) {
387bb173 3413 conf->retry_read_aligned_list = bi->bi_next;
46031f9a 3414 bi->bi_next = NULL;
3415 /*
3416 * this sets the active stripe count to 1 and the processed
3417 * stripe count to zero (upper 8 bits)
3418 */
46031f9a 3419 bi->bi_phys_segments = 1; /* biased count of active stripes */
3420 }
3421
3422 return bi;
3423 }
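
/* Editor's model (not the driver's code) of the bi_phys_segments overloading
 * described above: the low bits count stripes the bio is still active on and
 * the upper bits count stripes already processed, per the comment above
 * ("upper 8 bits").  The helper names are invented for this sketch; the real
 * accessors are the raid5_*_bi_*segments() helpers used later in this file,
 * which fix the exact field widths.
 */
#if 0
#include <assert.h>

static unsigned int active_stripes(unsigned int v)    { return v & 0xff; }
static unsigned int processed_stripes(unsigned int v) { return (v >> 8) & 0xff; }

int main(void)
{
	unsigned int v = 1;	/* as initialised above: 1 active, 0 processed */

	assert(active_stripes(v) == 1 && processed_stripes(v) == 0);
	v += 1 << 8;		/* one stripe handled... */
	v -= 1;			/* ...so the bio is active on one fewer */
	assert(active_stripes(v) == 0 && processed_stripes(v) == 1);
	return 0;
}
#endif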
3424
3425
3426 /*
3427 * The "raid5_align_endio" should check if the read succeeded and if it
3428 * did, call bio_endio on the original bio (having bio_put the new bio
3429 * first).
3430 * If the read failed..
3431 * If the read failed, the original bio is queued for a retry via add_bio_to_retry().
6712ecf8 3432 static void raid5_align_endio(struct bio *bi, int error)
3433 {
3434 struct bio* raid_bi = bi->bi_private;
3435 mddev_t *mddev;
3436 raid5_conf_t *conf;
3437 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3438 mdk_rdev_t *rdev;
3439
f679623f 3440 bio_put(bi);
3441
3442 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
3443 conf = mddev_to_conf(mddev);
3444 rdev = (void*)raid_bi->bi_next;
3445 raid_bi->bi_next = NULL;
3446
3447 rdev_dec_pending(rdev, conf->mddev);
3448
3449 if (!error && uptodate) {
6712ecf8 3450 bio_endio(raid_bi, 0);
3451 if (atomic_dec_and_test(&conf->active_aligned_reads))
3452 wake_up(&conf->wait_for_stripe);
6712ecf8 3453 return;
3454 }
3455
3456
45b4233c 3457 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3458
3459 add_bio_to_retry(raid_bi, conf);
3460 }
3461
3462 static int bio_fits_rdev(struct bio *bi)
3463 {
165125e1 3464 struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3465
3466 if ((bi->bi_size>>9) > q->max_sectors)
3467 return 0;
3468 blk_recount_segments(q, bi);
960e739d 3469 if (bi->bi_phys_segments > q->max_phys_segments)
3470 return 0;
3471
3472 if (q->merge_bvec_fn)
3473 /* it's too hard to apply the merge_bvec_fn at this stage,
3474 * just give up
3475 */
3476 return 0;
3477
3478 return 1;
3479 }
3480
3481
165125e1 3482 static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
3483 {
3484 mddev_t *mddev = q->queuedata;
3485 raid5_conf_t *conf = mddev_to_conf(mddev);
911d4ee8 3486 unsigned int dd_idx;
3487 struct bio* align_bi;
3488 mdk_rdev_t *rdev;
3489
3490 if (!in_chunk_boundary(mddev, raid_bio)) {
45b4233c 3491 pr_debug("chunk_aligned_read : non aligned\n");
3492 return 0;
3493 }
3494 /*
99c0fb5f 3495 * use bio_clone to make a copy of the bio
3496 */
3497 align_bi = bio_clone(raid_bio, GFP_NOIO);
3498 if (!align_bi)
3499 return 0;
3500 /*
3501 * set bi_end_io to a new function, and set bi_private to the
3502 * original bio.
3503 */
3504 align_bi->bi_end_io = raid5_align_endio;
3505 align_bi->bi_private = raid_bio;
3506 /*
3507 * compute position
3508 */
3509 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
3510 0,
911d4ee8 3511 &dd_idx, NULL);
3512
3513 rcu_read_lock();
3514 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
3515 if (rdev && test_bit(In_sync, &rdev->flags)) {
3516 atomic_inc(&rdev->nr_pending);
3517 rcu_read_unlock();
3518 raid_bio->bi_next = (void*)rdev;
3519 align_bi->bi_bdev = rdev->bdev;
3520 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3521 align_bi->bi_sector += rdev->data_offset;
3522
3523 if (!bio_fits_rdev(align_bi)) {
3524 /* too big in some way */
3525 bio_put(align_bi);
3526 rdev_dec_pending(rdev, mddev);
3527 return 0;
3528 }
3529
3530 spin_lock_irq(&conf->device_lock);
3531 wait_event_lock_irq(conf->wait_for_stripe,
3532 conf->quiesce == 0,
3533 conf->device_lock, /* nothing */);
3534 atomic_inc(&conf->active_aligned_reads);
3535 spin_unlock_irq(&conf->device_lock);
3536
3537 generic_make_request(align_bi);
3538 return 1;
3539 } else {
3540 rcu_read_unlock();
46031f9a 3541 bio_put(align_bi);
3542 return 0;
3543 }
3544 }
3545
3546 /* __get_priority_stripe - get the next stripe to process
3547 *
3548 * Full stripe writes are allowed to pass preread active stripes up until
3549 * the bypass_threshold is exceeded. In general the bypass_count
3550 * increments when the handle_list is handled before the hold_list; however, it
3551 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
3552 * will not be incremented when STRIPE_IO_STARTED is sampled as set, signifying a
3553 * stripe with in-flight i/o. The bypass_count will be reset when the
3554 * handle_list.
3555 */
3556 static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
3557 {
3558 struct stripe_head *sh;
3559
3560 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3561 __func__,
3562 list_empty(&conf->handle_list) ? "empty" : "busy",
3563 list_empty(&conf->hold_list) ? "empty" : "busy",
3564 atomic_read(&conf->pending_full_writes), conf->bypass_count);
3565
3566 if (!list_empty(&conf->handle_list)) {
3567 sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
3568
3569 if (list_empty(&conf->hold_list))
3570 conf->bypass_count = 0;
3571 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
3572 if (conf->hold_list.next == conf->last_hold)
3573 conf->bypass_count++;
3574 else {
3575 conf->last_hold = conf->hold_list.next;
3576 conf->bypass_count -= conf->bypass_threshold;
3577 if (conf->bypass_count < 0)
3578 conf->bypass_count = 0;
3579 }
3580 }
3581 } else if (!list_empty(&conf->hold_list) &&
3582 ((conf->bypass_threshold &&
3583 conf->bypass_count > conf->bypass_threshold) ||
3584 atomic_read(&conf->pending_full_writes) == 0)) {
3585 sh = list_entry(conf->hold_list.next,
3586 typeof(*sh), lru);
3587 conf->bypass_count -= conf->bypass_threshold;
3588 if (conf->bypass_count < 0)
3589 conf->bypass_count = 0;
3590 } else
3591 return NULL;
3592
3593 list_del_init(&sh->lru);
3594 atomic_inc(&sh->count);
3595 BUG_ON(atomic_read(&sh->count) != 1);
3596 return sh;
3597 }
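
/* Editor's sketch of the scheduling policy implemented above, with the two
 * lists reduced to emptiness flags.  Purely illustrative; the enum and
 * function names are invented for the sketch.
 */
#if 0
#include <stdbool.h>

enum pick { PICK_NONE, PICK_HANDLE, PICK_HOLD };

static enum pick pick_next(bool handle_empty, bool hold_empty,
			   int bypass_count, int bypass_threshold,
			   int pending_full_writes)
{
	if (!handle_empty)
		return PICK_HANDLE;	/* ordinary work always goes first */
	if (!hold_empty &&
	    ((bypass_threshold && bypass_count > bypass_threshold) ||
	     pending_full_writes == 0))
		return PICK_HOLD;	/* promote a preread-active stripe */
	return PICK_NONE;
}
#endif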
f679623f 3598
165125e1 3599 static int make_request(struct request_queue *q, struct bio * bi)
3600 {
3601 mddev_t *mddev = q->queuedata;
3602 raid5_conf_t *conf = mddev_to_conf(mddev);
911d4ee8 3603 int dd_idx;
3604 sector_t new_sector;
3605 sector_t logical_sector, last_sector;
3606 struct stripe_head *sh;
a362357b 3607 const int rw = bio_data_dir(bi);
c9959059 3608 int cpu, remaining;
1da177e4 3609
e5dcdd80 3610 if (unlikely(bio_barrier(bi))) {
6712ecf8 3611 bio_endio(bi, -EOPNOTSUPP);
3612 return 0;
3613 }
3614
3d310eb7 3615 md_write_start(mddev, bi);
06d91a5f 3616
3617 cpu = part_stat_lock();
3618 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
3619 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
3620 bio_sectors(bi));
3621 part_stat_unlock();
1da177e4 3622
802ba064 3623 if (rw == READ &&
3624 mddev->reshape_position == MaxSector &&
3625 chunk_aligned_read(q,bi))
99c0fb5f 3626 return 0;
52488615 3627
3628 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
3629 last_sector = bi->bi_sector + (bi->bi_size>>9);
3630 bi->bi_next = NULL;
3631 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
06d91a5f 3632
3633 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
3634 DEFINE_WAIT(w);
16a53ecc 3635 int disks, data_disks;
b5663ba4 3636 int previous;
b578d55f 3637
7ecaa1e6 3638 retry:
b5663ba4 3639 previous = 0;
b578d55f 3640 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
fef9c61f 3641 if (likely(conf->reshape_progress == MaxSector))
3642 disks = conf->raid_disks;
3643 else {
fef9c61f 3644 /* spinlock is needed as reshape_progress may be
3645 * 64bit on a 32bit platform, and so it might be
3646 * possible to see a half-updated value
fef9c61f 3647 * Of course reshape_progress could change after
3648 * the lock is dropped, so once we get a reference
3649 * to the stripe that we think it is, we will have
3650 * to check again.
3651 */
3652 spin_lock_irq(&conf->device_lock);
3653 disks = conf->raid_disks;
3654 if (mddev->delta_disks < 0
3655 ? logical_sector < conf->reshape_progress
3656 : logical_sector >= conf->reshape_progress) {
7ecaa1e6 3657 disks = conf->previous_raid_disks;
3658 previous = 1;
3659 } else {
3660 if (mddev->delta_disks < 0
3661 ? logical_sector < conf->reshape_safe
3662 : logical_sector >= conf->reshape_safe) {
3663 spin_unlock_irq(&conf->device_lock);
3664 schedule();
3665 goto retry;
3666 }
3667 }
3668 spin_unlock_irq(&conf->device_lock);
3669 }
3670 data_disks = disks - conf->max_degraded;
3671
3672 new_sector = raid5_compute_sector(conf, logical_sector,
3673 previous,
911d4ee8 3674 &dd_idx, NULL);
45b4233c 3675 pr_debug("raid5: make_request, sector %llu logical %llu\n",
3676 (unsigned long long)new_sector,
3677 (unsigned long long)logical_sector);
3678
3679 sh = get_active_stripe(conf, new_sector, previous,
3680 (bi->bi_rw&RWA_MASK));
1da177e4 3681 if (sh) {
fef9c61f 3682 if (unlikely(conf->reshape_progress != MaxSector)) {
7ecaa1e6 3683 /* expansion might have moved on while waiting for a
3684 * stripe, so we must do the range check again.
3685 * Expansion could still move past after this
3686 * test, but as we are holding a reference to
3687 * 'sh', we know that if that happens,
3688 * STRIPE_EXPANDING will get set and the expansion
3689 * won't proceed until we finish with the stripe.
3690 */
3691 int must_retry = 0;
3692 spin_lock_irq(&conf->device_lock);
3693 if ((mddev->delta_disks < 0
3694 ? logical_sector >= conf->reshape_progress
3695 : logical_sector < conf->reshape_progress)
86b42c71 3696 && previous)
3697 /* mismatch, need to try again */
3698 must_retry = 1;
3699 spin_unlock_irq(&conf->device_lock);
3700 if (must_retry) {
3701 release_stripe(sh);
3702 goto retry;
3703 }
3704 }
3705 /* FIXME what if we get a false positive because these
3706 * are being updated.
3707 */
3708 if (logical_sector >= mddev->suspend_lo &&
3709 logical_sector < mddev->suspend_hi) {
3710 release_stripe(sh);
3711 schedule();
3712 goto retry;
3713 }
3714
3715 if (test_bit(STRIPE_EXPANDING, &sh->state) ||
3716 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
3717 /* Stripe is busy expanding or
3718 * add failed due to overlap. Flush everything
3719 * and wait a while
3720 */
3721 raid5_unplug_device(mddev->queue);
3722 release_stripe(sh);
3723 schedule();
3724 goto retry;
3725 }
3726 finish_wait(&conf->wait_for_overlap, &w);
3727 set_bit(STRIPE_HANDLE, &sh->state);
3728 clear_bit(STRIPE_DELAYED, &sh->state);
1da177e4 3729 release_stripe(sh);
3730 } else {
3731 /* cannot get stripe for read-ahead, just give-up */
3732 clear_bit(BIO_UPTODATE, &bi->bi_flags);
3733 finish_wait(&conf->wait_for_overlap, &w);
3734 break;
3735 }
3736
3737 }
3738 spin_lock_irq(&conf->device_lock);
960e739d 3739 remaining = raid5_dec_bi_phys_segments(bi);
3740 spin_unlock_irq(&conf->device_lock);
3741 if (remaining == 0) {
1da177e4 3742
16a53ecc 3743 if ( rw == WRITE )
1da177e4 3744 md_write_end(mddev);
6712ecf8 3745
0e13fe23 3746 bio_endio(bi, 0);
1da177e4 3747 }
3748 return 0;
3749 }
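
/* Editor's model of the geometry choice made in make_request() above: while
 * a reshape is in flight, a logical sector is addressed with the OLD disk
 * count when it lies on the not-yet-reshaped side of reshape_progress, and
 * which side that is depends on whether the array is growing or shrinking
 * (the reshape walks the array from opposite ends).  Names are invented.
 */
#if 0
#include <stdbool.h>

static int disks_for_sector(unsigned long long logical_sector,
			    unsigned long long reshape_progress,
			    int delta_disks, int old_disks, int new_disks)
{
	bool not_yet_reshaped = delta_disks < 0
		? logical_sector < reshape_progress
		: logical_sector >= reshape_progress;

	return not_yet_reshaped ? old_disks : new_disks;
}
#endif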
3750
3751 static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);
3752
52c03291 3753 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
1da177e4 3754 {
3755 /* reshaping is quite different to recovery/resync so it is
3756 * handled quite separately ... here.
3757 *
3758 * On each call to sync_request, we gather one chunk worth of
3759 * destination stripes and flag them as expanding.
3760 * Then we find all the source stripes and request reads.
3761 * As the reads complete, handle_stripe will copy the data
3762 * into the destination stripe and release that stripe.
3763 */
3764 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
3765 struct stripe_head *sh;
ccfcc3c1 3766 sector_t first_sector, last_sector;
3767 int raid_disks = conf->previous_raid_disks;
3768 int data_disks = raid_disks - conf->max_degraded;
3769 int new_data_disks = conf->raid_disks - conf->max_degraded;
3770 int i;
3771 int dd_idx;
3772 sector_t writepos, safepos, gap;
ec32a2bd 3773 sector_t stripe_addr;
7a661381 3774 int reshape_sectors;
ab69ae12 3775 struct list_head stripes;
52c03291 3776
3777 if (sector_nr == 0) {
3778 /* If restarting in the middle, skip the initial sectors */
3779 if (mddev->delta_disks < 0 &&
3780 conf->reshape_progress < raid5_size(mddev, 0, 0)) {
3781 sector_nr = raid5_size(mddev, 0, 0)
3782 - conf->reshape_progress;
3783 } else if (mddev->delta_disks > 0 &&
3784 conf->reshape_progress > 0)
3785 sector_nr = conf->reshape_progress;
f416885e 3786 sector_div(sector_nr, new_data_disks);
3787 if (sector_nr) {
3788 *skipped = 1;
3789 return sector_nr;
3790 }
3791 }
3792
3793 /* We need to process a full chunk at a time.
3794 * If old and new chunk sizes differ, we need to process the
3795 * largest of these
3796 */
3797 if (mddev->new_chunk > mddev->chunk_size)
3798 reshape_sectors = mddev->new_chunk / 512;
3799 else
3800 reshape_sectors = mddev->chunk_size / 512;
3801
3802 /* we update the metadata when there is more than 3Meg
3803 * in the block range (that is rather arbitrary, should
3804 * probably be time based) or when the data about to be
3805 * copied would over-write the source of the data at
3806 * the front of the range.
3807 * i.e. one new_stripe along from reshape_progress (using the new mapping)
3808 * lands after the point that reshape_safe maps to (using the old mapping)
52c03291 3809 */
fef9c61f 3810 writepos = conf->reshape_progress;
f416885e 3811 sector_div(writepos, new_data_disks);
fef9c61f 3812 safepos = conf->reshape_safe;
f416885e 3813 sector_div(safepos, data_disks);
fef9c61f 3814 if (mddev->delta_disks < 0) {
3815 writepos -= reshape_sectors;
3816 safepos += reshape_sectors;
3817 gap = conf->reshape_safe - conf->reshape_progress;
3818 } else {
3819 writepos += reshape_sectors;
3820 safepos -= reshape_sectors;
3821 gap = conf->reshape_progress - conf->reshape_safe;
3822 }
52c03291 3823
3824 if ((mddev->delta_disks < 0
3825 ? writepos < safepos
3826 : writepos > safepos) ||
f416885e 3827 gap > (new_data_disks)*3000*2 /*3Meg*/) {
3828 /* Cannot proceed until we've updated the superblock... */
3829 wait_event(conf->wait_for_overlap,
3830 atomic_read(&conf->reshape_stripes)==0);
fef9c61f 3831 mddev->reshape_position = conf->reshape_progress;
850b2b42 3832 set_bit(MD_CHANGE_DEVS, &mddev->flags);
52c03291 3833 md_wakeup_thread(mddev->thread);
850b2b42 3834 wait_event(mddev->sb_wait, mddev->flags == 0 ||
3835 kthread_should_stop());
3836 spin_lock_irq(&conf->device_lock);
fef9c61f 3837 conf->reshape_safe = mddev->reshape_position;
3838 spin_unlock_irq(&conf->device_lock);
3839 wake_up(&conf->wait_for_overlap);
3840 }
3841
3842 if (mddev->delta_disks < 0) {
3843 BUG_ON(conf->reshape_progress == 0);
3844 stripe_addr = writepos;
3845 BUG_ON((mddev->dev_sectors &
3846 ~((sector_t)reshape_sectors - 1))
3847 - reshape_sectors - stripe_addr
3848 != sector_nr);
3849 } else {
7a661381 3850 BUG_ON(writepos != sector_nr + reshape_sectors);
3851 stripe_addr = sector_nr;
3852 }
ab69ae12 3853 INIT_LIST_HEAD(&stripes);
7a661381 3854 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
3855 int j;
3856 int skipped = 0;
ec32a2bd 3857 sh = get_active_stripe(conf, stripe_addr+i, 0, 0);
3858 set_bit(STRIPE_EXPANDING, &sh->state);
3859 atomic_inc(&conf->reshape_stripes);
3860 /* If any of this stripe is beyond the end of the old
3861 * array, then we need to zero those blocks
3862 */
3863 for (j=sh->disks; j--;) {
3864 sector_t s;
3865 if (j == sh->pd_idx)
3866 continue;
f416885e 3867 if (conf->level == 6 &&
d0dabf7e 3868 j == sh->qd_idx)
f416885e 3869 continue;
784052ec 3870 s = compute_blocknr(sh, j, 0);
b522adcd 3871 if (s < raid5_size(mddev, 0, 0)) {
3872 skipped = 1;
3873 continue;
3874 }
3875 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
3876 set_bit(R5_Expanded, &sh->dev[j].flags);
3877 set_bit(R5_UPTODATE, &sh->dev[j].flags);
3878 }
3879 if (!skipped) {
3880 set_bit(STRIPE_EXPAND_READY, &sh->state);
3881 set_bit(STRIPE_HANDLE, &sh->state);
3882 }
ab69ae12 3883 list_add(&sh->lru, &stripes);
3884 }
3885 spin_lock_irq(&conf->device_lock);
fef9c61f 3886 if (mddev->delta_disks < 0)
7a661381 3887 conf->reshape_progress -= reshape_sectors * new_data_disks;
fef9c61f 3888 else
7a661381 3889 conf->reshape_progress += reshape_sectors * new_data_disks;
3890 spin_unlock_irq(&conf->device_lock);
3891 /* Ok, those stripes are ready. We can start scheduling
3892 * reads on the source stripes.
3893 * The source stripes are determined by mapping the first and last
3894 * block on the destination stripes.
3895 */
52c03291 3896 first_sector =
ec32a2bd 3897 raid5_compute_sector(conf, stripe_addr*(new_data_disks),
911d4ee8 3898 1, &dd_idx, NULL);
52c03291 3899 last_sector =
ec32a2bd 3900 raid5_compute_sector(conf, ((stripe_addr+conf->chunk_size/512)
112bf897 3901 *(new_data_disks) - 1),
911d4ee8 3902 1, &dd_idx, NULL);
3903 if (last_sector >= mddev->dev_sectors)
3904 last_sector = mddev->dev_sectors - 1;
52c03291 3905 while (first_sector <= last_sector) {
b5663ba4 3906 sh = get_active_stripe(conf, first_sector, 1, 0);
3907 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3908 set_bit(STRIPE_HANDLE, &sh->state);
3909 release_stripe(sh);
3910 first_sector += STRIPE_SECTORS;
3911 }
3912 /* Now that the sources are clearly marked, we can release
3913 * the destination stripes
3914 */
3915 while (!list_empty(&stripes)) {
3916 sh = list_entry(stripes.next, struct stripe_head, lru);
3917 list_del_init(&sh->lru);
3918 release_stripe(sh);
3919 }
3920 /* If this takes us to the resync_max point where we have to pause,
3921 * then we need to write out the superblock.
3922 */
7a661381 3923 sector_nr += reshape_sectors;
3924 if (sector_nr >= mddev->resync_max) {
3925 /* Cannot proceed until we've updated the superblock... */
3926 wait_event(conf->wait_for_overlap,
3927 atomic_read(&conf->reshape_stripes) == 0);
fef9c61f 3928 mddev->reshape_position = conf->reshape_progress;
3929 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3930 md_wakeup_thread(mddev->thread);
3931 wait_event(mddev->sb_wait,
3932 !test_bit(MD_CHANGE_DEVS, &mddev->flags)
3933 || kthread_should_stop());
3934 spin_lock_irq(&conf->device_lock);
fef9c61f 3935 conf->reshape_safe = mddev->reshape_position;
3936 spin_unlock_irq(&conf->device_lock);
3937 wake_up(&conf->wait_for_overlap);
3938 }
7a661381 3939 return reshape_sectors;
3940 }
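
/* Editor's sketch of the "must we write the superblock first?" test above,
 * for the growing (delta_disks > 0) case only.  writepos/safepos/gap are in
 * sectors, as computed above; the function name is invented.
 */
#if 0
#include <stdbool.h>

static bool must_update_sb(unsigned long long writepos,
			   unsigned long long safepos,
			   unsigned long long gap,
			   unsigned int new_data_disks)
{
	/* Either the copy is about to overwrite data that a crash-restart
	 * would still read, or roughly 3MB per data disk has gone by since
	 * the last superblock update. */
	return writepos > safepos || gap > new_data_disks * 3000ULL * 2;
}
#endif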
3941
3942 /* FIXME go_faster isn't used */
3943 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
3944 {
3945 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
3946 struct stripe_head *sh;
58c0fed4 3947 sector_t max_sector = mddev->dev_sectors;
72626685 3948 int sync_blocks;
3949 int still_degraded = 0;
3950 int i;
1da177e4 3951
72626685 3952 if (sector_nr >= max_sector) {
3953 /* just being told to finish up .. nothing much to do */
3954 unplug_slaves(mddev);
cea9c228 3955
3956 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
3957 end_reshape(conf);
3958 return 0;
3959 }
3960
3961 if (mddev->curr_resync < max_sector) /* aborted */
3962 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
3963 &sync_blocks, 1);
16a53ecc 3964 else /* completed sync */
3965 conf->fullsync = 0;
3966 bitmap_close_sync(mddev->bitmap);
3967
3968 return 0;
3969 }
ccfcc3c1 3970
3971 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3972 return reshape_request(mddev, sector_nr, skipped);
f6705578 3973
3974 /* No need to check resync_max as we never do more than one
3975 * stripe, and as resync_max will always be on a chunk boundary,
3976 * if the check in md_do_sync didn't fire, there is no chance
3977 * of overstepping resync_max here
3978 */
3979
16a53ecc 3980 /* if there are too many failed drives and we are trying
3981 * to resync, then assert that we are finished, because there is
3982 * nothing we can do.
3983 */
3285edf1 3984 if (mddev->degraded >= conf->max_degraded &&
16a53ecc 3985 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
58c0fed4 3986 sector_t rv = mddev->dev_sectors - sector_nr;
57afd89f 3987 *skipped = 1;
3988 return rv;
3989 }
72626685 3990 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
3855ad9f 3991 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
3992 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
3993 /* we can skip this block, and probably more */
3994 sync_blocks /= STRIPE_SECTORS;
3995 *skipped = 1;
3996 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
3997 }
1da177e4 3998
3999
4000 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
4001
b5663ba4 4002 sh = get_active_stripe(conf, sector_nr, 0, 1);
1da177e4 4003 if (sh == NULL) {
b5663ba4 4004 sh = get_active_stripe(conf, sector_nr, 0, 0);
1da177e4 4005 /* make sure we don't swamp the stripe cache if someone else
16a53ecc 4006 * is trying to get access
1da177e4 4007 */
66c006a5 4008 schedule_timeout_uninterruptible(1);
1da177e4 4009 }
4010 /* Need to check if array will still be degraded after recovery/resync
4011 * We don't need to check the 'failed' flag as when that gets set,
4012 * recovery aborts.
4013 */
4014 for (i=0; i<mddev->raid_disks; i++)
4015 if (conf->disks[i].rdev == NULL)
4016 still_degraded = 1;
4017
4018 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
4019
4020 spin_lock(&sh->lock);
4021 set_bit(STRIPE_SYNCING, &sh->state);
4022 clear_bit(STRIPE_INSYNC, &sh->state);
4023 spin_unlock(&sh->lock);
4024
4025 /* wait for any blocked device to be handled */
4026 while(unlikely(!handle_stripe(sh, NULL)))
4027 ;
4028 release_stripe(sh);
4029
4030 return STRIPE_SECTORS;
4031 }
4032
4033 static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
4034 {
4035 /* We may not be able to submit a whole bio at once as there
4036 * may not be enough stripe_heads available.
4037 * We cannot pre-allocate enough stripe_heads as we may need
4038 * more than exist in the cache (if we allow very large chunks).
4039 * So we do one stripe head at a time and record in
4040 * ->bi_hw_segments how many have been done.
4041 *
4042 * We *know* that this entire raid_bio is in one chunk, so
4043 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
4044 */
4045 struct stripe_head *sh;
911d4ee8 4046 int dd_idx;
4047 sector_t sector, logical_sector, last_sector;
4048 int scnt = 0;
4049 int remaining;
4050 int handled = 0;
4051
4052 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
112bf897 4053 sector = raid5_compute_sector(conf, logical_sector,
911d4ee8 4054 0, &dd_idx, NULL);
4055 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
4056
4057 for (; logical_sector < last_sector;
4058 logical_sector += STRIPE_SECTORS,
4059 sector += STRIPE_SECTORS,
4060 scnt++) {
46031f9a 4061
960e739d 4062 if (scnt < raid5_bi_hw_segments(raid_bio))
4063 /* already done this stripe */
4064 continue;
4065
b5663ba4 4066 sh = get_active_stripe(conf, sector, 0, 1);
4067
4068 if (!sh) {
4069 /* failed to get a stripe - must wait */
960e739d 4070 raid5_set_bi_hw_segments(raid_bio, scnt);
4071 conf->retry_read_aligned = raid_bio;
4072 return handled;
4073 }
4074
4075 set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
4076 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
4077 release_stripe(sh);
960e739d 4078 raid5_set_bi_hw_segments(raid_bio, scnt);
4079 conf->retry_read_aligned = raid_bio;
4080 return handled;
4081 }
4082
4083 handle_stripe(sh, NULL);
4084 release_stripe(sh);
4085 handled++;
4086 }
4087 spin_lock_irq(&conf->device_lock);
960e739d 4088 remaining = raid5_dec_bi_phys_segments(raid_bio);
46031f9a 4089 spin_unlock_irq(&conf->device_lock);
4090 if (remaining == 0)
4091 bio_endio(raid_bio, 0);
4092 if (atomic_dec_and_test(&conf->active_aligned_reads))
4093 wake_up(&conf->wait_for_stripe);
4094 return handled;
4095 }
4096
4097
4098
4099/*
4100 * This is our raid5 kernel thread.
4101 *
4102 * We scan the hash table for stripes which can be handled now.
4103 * During the scan, completed stripes are saved for us by the interrupt
4104 * handler, so that they will not have to wait for our next wakeup.
4105 */
6ed3003c 4106 static void raid5d(mddev_t *mddev)
4107 {
4108 struct stripe_head *sh;
4109 raid5_conf_t *conf = mddev_to_conf(mddev);
4110 int handled;
4111
45b4233c 4112 pr_debug("+++ raid5d active\n");
4113
4114 md_check_recovery(mddev);
4115
4116 handled = 0;
4117 spin_lock_irq(&conf->device_lock);
4118 while (1) {
46031f9a 4119 struct bio *bio;
1da177e4 4120
ae3c20cc 4121 if (conf->seq_flush != conf->seq_write) {
72626685 4122 int seq = conf->seq_flush;
700e432d 4123 spin_unlock_irq(&conf->device_lock);
72626685 4124 bitmap_unplug(mddev->bitmap);
700e432d 4125 spin_lock_irq(&conf->device_lock);
4126 conf->seq_write = seq;
4127 activate_bit_delay(conf);
4128 }
4129
4130 while ((bio = remove_bio_from_retry(conf))) {
4131 int ok;
4132 spin_unlock_irq(&conf->device_lock);
4133 ok = retry_aligned_read(conf, bio);
4134 spin_lock_irq(&conf->device_lock);
4135 if (!ok)
4136 break;
4137 handled++;
4138 }
4139
4140 sh = __get_priority_stripe(conf);
4141
c9f21aaf 4142 if (!sh)
1da177e4 4143 break;
4144 spin_unlock_irq(&conf->device_lock);
4145
4146 handled++;
16a53ecc 4147 handle_stripe(sh, conf->spare_page);
4148 release_stripe(sh);
4149
4150 spin_lock_irq(&conf->device_lock);
4151 }
45b4233c 4152 pr_debug("%d stripes handled\n", handled);
4153
4154 spin_unlock_irq(&conf->device_lock);
4155
c9f21aaf 4156 async_tx_issue_pending_all();
4157 unplug_slaves(mddev);
4158
45b4233c 4159 pr_debug("--- raid5d inactive\n");
4160 }
4161
3f294f4f 4162 static ssize_t
007583c9 4163 raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
3f294f4f 4164 {
007583c9 4165 raid5_conf_t *conf = mddev_to_conf(mddev);
4166 if (conf)
4167 return sprintf(page, "%d\n", conf->max_nr_stripes);
4168 else
4169 return 0;
4170 }
4171
4172 static ssize_t
007583c9 4173 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
3f294f4f 4174 {
007583c9 4175 raid5_conf_t *conf = mddev_to_conf(mddev);
4ef197d8 4176 unsigned long new;
4177 int err;
4178
4179 if (len >= PAGE_SIZE)
4180 return -EINVAL;
4181 if (!conf)
4182 return -ENODEV;
3f294f4f 4183
4ef197d8 4184 if (strict_strtoul(page, 10, &new))
4185 return -EINVAL;
4186 if (new <= 16 || new > 32768)
4187 return -EINVAL;
4188 while (new < conf->max_nr_stripes) {
4189 if (drop_one_stripe(conf))
4190 conf->max_nr_stripes--;
4191 else
4192 break;
4193 }
4194 err = md_allow_write(mddev);
4195 if (err)
4196 return err;
4197 while (new > conf->max_nr_stripes) {
4198 if (grow_one_stripe(conf))
4199 conf->max_nr_stripes++;
4200 else break;
4201 }
4202 return len;
4203 }
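
/* The attribute pair above is exposed through md's sysfs directory, so the
 * cache can be resized at runtime, e.g. (typical paths, assuming an array
 * named md0):
 *
 *	# cat /sys/block/md0/md/stripe_cache_size
 *	256
 *	# echo 4096 > /sys/block/md0/md/stripe_cache_size
 *
 * As the store routine above enforces, values of 16 or below and values
 * above 32768 are rejected with -EINVAL.
 */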
007583c9 4204
4205 static struct md_sysfs_entry
4206 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
4207 raid5_show_stripe_cache_size,
4208 raid5_store_stripe_cache_size);
3f294f4f 4209
4210 static ssize_t
4211 raid5_show_preread_threshold(mddev_t *mddev, char *page)
4212 {
4213 raid5_conf_t *conf = mddev_to_conf(mddev);
4214 if (conf)
4215 return sprintf(page, "%d\n", conf->bypass_threshold);
4216 else
4217 return 0;
4218 }
4219
4220 static ssize_t
4221 raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
4222 {
4223 raid5_conf_t *conf = mddev_to_conf(mddev);
4ef197d8 4224 unsigned long new;
4225 if (len >= PAGE_SIZE)
4226 return -EINVAL;
4227 if (!conf)
4228 return -ENODEV;
4229
4ef197d8 4230 if (strict_strtoul(page, 10, &new))
8b3e6cdc 4231 return -EINVAL;
4ef197d8 4232 if (new > conf->max_nr_stripes)
4233 return -EINVAL;
4234 conf->bypass_threshold = new;
4235 return len;
4236 }
4237
4238 static struct md_sysfs_entry
4239 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
4240 S_IRUGO | S_IWUSR,
4241 raid5_show_preread_threshold,
4242 raid5_store_preread_threshold);
4243
3f294f4f 4244 static ssize_t
96de1e66 4245 stripe_cache_active_show(mddev_t *mddev, char *page)
3f294f4f 4246 {
007583c9 4247 raid5_conf_t *conf = mddev_to_conf(mddev);
4248 if (conf)
4249 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
4250 else
4251 return 0;
4252 }
4253
4254 static struct md_sysfs_entry
4255 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
3f294f4f 4256
007583c9 4257 static struct attribute *raid5_attrs[] = {
4258 &raid5_stripecache_size.attr,
4259 &raid5_stripecache_active.attr,
8b3e6cdc 4260 &raid5_preread_bypass_threshold.attr,
4261 NULL,
4262 };
4263 static struct attribute_group raid5_attrs_group = {
4264 .name = NULL,
4265 .attrs = raid5_attrs,
4266 };
4267
4268 static sector_t
4269 raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4270 {
4271 raid5_conf_t *conf = mddev_to_conf(mddev);
4272
4273 if (!sectors)
4274 sectors = mddev->dev_sectors;
4275 if (!raid_disks) {
4276 /* size is defined by the smallest of previous and new size */
4277 if (conf->raid_disks < conf->previous_raid_disks)
4278 raid_disks = conf->raid_disks;
4279 else
4280 raid_disks = conf->previous_raid_disks;
4281 }
4282
4283 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
784052ec 4284 sectors &= ~((sector_t)mddev->new_chunk/512 - 1);
4285 return sectors * (raid_disks - conf->max_degraded);
4286 }
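
/* Editor's worked example of the computation above: a 5-device RAID-5
 * (max_degraded == 1) with 64K (128-sector) chunks and 1000000-sector
 * members.  The member size is first rounded down to a whole number of
 * chunks, then multiplied by the number of data disks.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned long long sectors = 1000000, chunk_sectors = 128;
	int raid_disks = 5, max_degraded = 1;

	sectors &= ~(chunk_sectors - 1);	/* 1000000 -> 999936 */
	assert(sectors * (raid_disks - max_degraded) == 3999744ULL);
	return 0;
}
#endif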
4287
91adb564 4288 static raid5_conf_t *setup_conf(mddev_t *mddev)
4289 {
4290 raid5_conf_t *conf;
4291 int raid_disk, memory;
4292 mdk_rdev_t *rdev;
4293 struct disk_info *disk;
1da177e4 4294
4295 if (mddev->new_level != 5
4296 && mddev->new_level != 4
4297 && mddev->new_level != 6) {
16a53ecc 4298 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
4299 mdname(mddev), mddev->new_level);
4300 return ERR_PTR(-EIO);
1da177e4 4301 }
4302 if ((mddev->new_level == 5
4303 && !algorithm_valid_raid5(mddev->new_layout)) ||
4304 (mddev->new_level == 6
4305 && !algorithm_valid_raid6(mddev->new_layout))) {
99c0fb5f 4306 printk(KERN_ERR "raid5: %s: layout %d not supported\n",
4307 mdname(mddev), mddev->new_layout);
4308 return ERR_PTR(-EIO);
99c0fb5f 4309 }
4310 if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4311 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
4312 mdname(mddev), mddev->raid_disks);
4313 return ERR_PTR(-EINVAL);
4314 }
4315
4316 if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) {
4317 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
4318 mddev->new_chunk, mdname(mddev));
4319 return ERR_PTR(-EINVAL);
4320 }
4321
4322 conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
4323 if (conf == NULL)
1da177e4 4324 goto abort;
4325
4326 conf->raid_disks = mddev->raid_disks;
4327 if (mddev->reshape_position == MaxSector)
4328 conf->previous_raid_disks = mddev->raid_disks;
4329 else
f6705578 4330 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4331
4332 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
4333 GFP_KERNEL);
4334 if (!conf->disks)
4335 goto abort;
9ffae0cf 4336
4337 conf->mddev = mddev;
4338
fccddba0 4339 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
1da177e4 4340 goto abort;
1da177e4 4341
91adb564 4342 if (mddev->new_level == 6) {
4343 conf->spare_page = alloc_page(GFP_KERNEL);
4344 if (!conf->spare_page)
4345 goto abort;
4346 }
4347 spin_lock_init(&conf->device_lock);
4348 init_waitqueue_head(&conf->wait_for_stripe);
4349 init_waitqueue_head(&conf->wait_for_overlap);
4350 INIT_LIST_HEAD(&conf->handle_list);
8b3e6cdc 4351 INIT_LIST_HEAD(&conf->hold_list);
1da177e4 4352 INIT_LIST_HEAD(&conf->delayed_list);
72626685 4353 INIT_LIST_HEAD(&conf->bitmap_list);
4354 INIT_LIST_HEAD(&conf->inactive_list);
4355 atomic_set(&conf->active_stripes, 0);
4356 atomic_set(&conf->preread_active_stripes, 0);
46031f9a 4357 atomic_set(&conf->active_aligned_reads, 0);
8b3e6cdc 4358 conf->bypass_threshold = BYPASS_THRESHOLD;
1da177e4 4359
45b4233c 4360 pr_debug("raid5: run(%s) called.\n", mdname(mddev));
1da177e4 4361
159ec1fc 4362 list_for_each_entry(rdev, &mddev->disks, same_set) {
1da177e4 4363 raid_disk = rdev->raid_disk;
f6705578 4364 if (raid_disk >= conf->raid_disks
4365 || raid_disk < 0)
4366 continue;
4367 disk = conf->disks + raid_disk;
4368
4369 disk->rdev = rdev;
4370
b2d444d7 4371 if (test_bit(In_sync, &rdev->flags)) {
4372 char b[BDEVNAME_SIZE];
4373 printk(KERN_INFO "raid5: device %s operational as raid"
4374 " disk %d\n", bdevname(rdev->bdev,b),
4375 raid_disk);
4376 } else
4377 /* Cannot rely on bitmap to complete recovery */
4378 conf->fullsync = 1;
4379 }
4380
4381 conf->chunk_size = mddev->new_chunk;
4382 conf->level = mddev->new_level;
4383 if (conf->level == 6)
4384 conf->max_degraded = 2;
4385 else
4386 conf->max_degraded = 1;
91adb564 4387 conf->algorithm = mddev->new_layout;
1da177e4 4388 conf->max_nr_stripes = NR_STRIPES;
fef9c61f 4389 conf->reshape_progress = mddev->reshape_position;
e183eaed 4390 if (conf->reshape_progress != MaxSector) {
784052ec 4391 conf->prev_chunk = mddev->chunk_size;
4392 conf->prev_algo = mddev->layout;
4393 }
1da177e4 4394
4395 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
4396 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
4397 if (grow_stripes(conf, conf->max_nr_stripes)) {
4398 printk(KERN_ERR
4399 "raid5: couldn't allocate %dkB for buffers\n", memory);
4400 goto abort;
4401 } else
4402 printk(KERN_INFO "raid5: allocated %dkB for %s\n",
4403 memory, mdname(mddev));
1da177e4 4404
4405 conf->thread = md_register_thread(raid5d, mddev, "%s_raid5");
4406 if (!conf->thread) {
4407 printk(KERN_ERR
4408 "raid5: couldn't allocate thread for %s\n",
4409 mdname(mddev));
4410 goto abort;
4411 }
4412
4413 return conf;
4414
4415 abort:
4416 if (conf) {
4417 shrink_stripes(conf);
4418 safe_put_page(conf->spare_page);
4419 kfree(conf->disks);
4420 kfree(conf->stripe_hashtbl);
4421 kfree(conf);
4422 return ERR_PTR(-EIO);
4423 } else
4424 return ERR_PTR(-ENOMEM);
4425 }
4426
4427 static int run(mddev_t *mddev)
4428 {
4429 raid5_conf_t *conf;
4430 int working_disks = 0;
4431 mdk_rdev_t *rdev;
4432
4433 if (mddev->reshape_position != MaxSector) {
4434 /* Check that we can continue the reshape.
4435 * Currently only disks can change, it must
4436 * increase, and we must be past the point where
4437 * a stripe over-writes itself
4438 */
4439 sector_t here_new, here_old;
4440 int old_disks;
18b00334 4441 int max_degraded = (mddev->level == 6 ? 2 : 1);
91adb564 4442
88ce4930 4443 if (mddev->new_level != mddev->level) {
91adb564
N
4444 printk(KERN_ERR "raid5: %s: unsupported reshape "
4445 "required - aborting.\n",
4446 mdname(mddev));
4447 return -EINVAL;
4448 }
4449 old_disks = mddev->raid_disks - mddev->delta_disks;
4450 /* reshape_position must be on a new-stripe boundary, and one
4451 * further up in new geometry must map after here in old
4452 * geometry.
4453 */
4454 here_new = mddev->reshape_position;
784052ec 4455 if (sector_div(here_new, (mddev->new_chunk>>9)*
4456 (mddev->raid_disks - max_degraded))) {
4457 printk(KERN_ERR "raid5: reshape_position not "
4458 "on a stripe boundary\n");
4459 return -EINVAL;
4460 }
4461 /* here_new is the stripe we will write to */
4462 here_old = mddev->reshape_position;
4463 sector_div(here_old, (mddev->chunk_size>>9)*
4464 (old_disks-max_degraded));
4465 /* here_old is the first stripe that we might need to read
4466 * from */
4467 if (here_new >= here_old) {
4468 /* Reading from the same stripe as writing to - bad */
4469 printk(KERN_ERR "raid5: reshape_position too early for "
4470 "auto-recovery - aborting.\n");
4471 return -EINVAL;
4472 }
4473 printk(KERN_INFO "raid5: reshape will continue\n");
4474 /* OK, we should be able to continue; */
4475 } else {
4476 BUG_ON(mddev->level != mddev->new_level);
4477 BUG_ON(mddev->layout != mddev->new_layout);
4478 BUG_ON(mddev->chunk_size != mddev->new_chunk);
4479 BUG_ON(mddev->delta_disks != 0);
1da177e4 4480 }
91adb564 4481
4482 if (mddev->private == NULL)
4483 conf = setup_conf(mddev);
4484 else
4485 conf = mddev->private;
4486
4487 if (IS_ERR(conf))
4488 return PTR_ERR(conf);
4489
4490 mddev->thread = conf->thread;
4491 conf->thread = NULL;
4492 mddev->private = conf;
4493
4494 /*
4495 * 0 for a fully functional array, 1 or 2 for a degraded array.
4496 */
4497 list_for_each_entry(rdev, &mddev->disks, same_set)
4498 if (rdev->raid_disk >= 0 &&
4499 test_bit(In_sync, &rdev->flags))
4500 working_disks++;
4501
4502 mddev->degraded = conf->raid_disks - working_disks;
4503
16a53ecc 4504 if (mddev->degraded > conf->max_degraded) {
4505 printk(KERN_ERR "raid5: not enough operational devices for %s"
4506 " (%d/%d failed)\n",
02c2de8c 4507 mdname(mddev), mddev->degraded, conf->raid_disks);
4508 goto abort;
4509 }
4510
4511 /* device size must be a multiple of chunk size */
4512 mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1);
4513 mddev->resync_max_sectors = mddev->dev_sectors;
4514
16a53ecc 4515 if (mddev->degraded > 0 &&
1da177e4 4516 mddev->recovery_cp != MaxSector) {
4517 if (mddev->ok_start_degraded)
4518 printk(KERN_WARNING
4519 "raid5: starting dirty degraded array: %s"
4520 "- data corruption possible.\n",
4521 mdname(mddev));
4522 else {
4523 printk(KERN_ERR
4524 "raid5: cannot start dirty degraded array for %s\n",
4525 mdname(mddev));
4526 goto abort;
4527 }
4528 }
4529
4530 if (mddev->degraded == 0)
4531 printk("raid5: raid level %d set %s active with %d out of %d"
4532 " devices, algorithm %d\n", conf->level, mdname(mddev),
4533 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
4534 mddev->new_layout);
4535 else
4536 printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
4537 " out of %d devices, algorithm %d\n", conf->level,
4538 mdname(mddev), mddev->raid_disks - mddev->degraded,
e183eaed 4539 mddev->raid_disks, mddev->new_layout);
4540
4541 print_raid5_conf(conf);
4542
fef9c61f 4543 if (conf->reshape_progress != MaxSector) {
f6705578 4544 printk("...ok start reshape thread\n");
fef9c61f 4545 conf->reshape_safe = conf->reshape_progress;
4546 atomic_set(&conf->reshape_stripes, 0);
4547 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4548 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4549 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4550 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4551 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4552 "%s_reshape");
4553 }
4554
1da177e4 4555 /* read-ahead size must cover two whole stripes, which is
16a53ecc 4556 * 2 * (datadisks) * chunksize, where datadisks is the number of data disks
4557 */
4558 {
4559 int data_disks = conf->previous_raid_disks - conf->max_degraded;
4560 int stripe = data_disks *
8932c2e0 4561 (mddev->chunk_size / PAGE_SIZE);
4562 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4563 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4564 }
4565
4566 /* Ok, everything is just fine now */
4567 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
4568 printk(KERN_WARNING
4569 "raid5: failed to create sysfs attributes for %s\n",
4570 mdname(mddev));
7a5febe9 4571
4572 mddev->queue->queue_lock = &conf->device_lock;
4573
7a5febe9 4574 mddev->queue->unplug_fn = raid5_unplug_device;
f022b2fd 4575 mddev->queue->backing_dev_info.congested_data = mddev;
041ae52e 4576 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
f022b2fd 4577
1f403624 4578 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
7a5febe9 4579
4580 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
4581
4582 return 0;
4583 abort:
e0cf8f04 4584 md_unregister_thread(mddev->thread);
91adb564 4585 mddev->thread = NULL;
1da177e4 4586 if (conf) {
91adb564 4587 shrink_stripes(conf);
1da177e4 4588 print_raid5_conf(conf);
16a53ecc 4589 safe_put_page(conf->spare_page);
b55e6bfc 4590 kfree(conf->disks);
fccddba0 4591 kfree(conf->stripe_hashtbl);
4592 kfree(conf);
4593 }
4594 mddev->private = NULL;
4595 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
4596 return -EIO;
4597 }
4598
4599
4600
3f294f4f 4601 static int stop(mddev_t *mddev)
4602 {
4603 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
4604
4605 md_unregister_thread(mddev->thread);
4606 mddev->thread = NULL;
4607 shrink_stripes(conf);
fccddba0 4608 kfree(conf->stripe_hashtbl);
041ae52e 4609 mddev->queue->backing_dev_info.congested_fn = NULL;
1da177e4 4610 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
007583c9 4611 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
b55e6bfc 4612 kfree(conf->disks);
96de1e66 4613 kfree(conf);
4614 mddev->private = NULL;
4615 return 0;
4616 }
4617
45b4233c 4618 #ifdef DEBUG
d710e138 4619 static void print_sh(struct seq_file *seq, struct stripe_head *sh)
4620 {
4621 int i;
4622
4623 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
4624 (unsigned long long)sh->sector, sh->pd_idx, sh->state);
4625 seq_printf(seq, "sh %llu, count %d.\n",
4626 (unsigned long long)sh->sector, atomic_read(&sh->count));
4627 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
7ecaa1e6 4628 for (i = 0; i < sh->disks; i++) {
4629 seq_printf(seq, "(cache%d: %p %ld) ",
4630 i, sh->dev[i].page, sh->dev[i].flags);
1da177e4 4631 }
16a53ecc 4632 seq_printf(seq, "\n");
4633 }
4634
d710e138 4635 static void printall(struct seq_file *seq, raid5_conf_t *conf)
4636 {
4637 struct stripe_head *sh;
fccddba0 4638 struct hlist_node *hn;
4639 int i;
4640
4641 spin_lock_irq(&conf->device_lock);
4642 for (i = 0; i < NR_HASH; i++) {
fccddba0 4643 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
4644 if (sh->raid_conf != conf)
4645 continue;
16a53ecc 4646 print_sh(seq, sh);
4647 }
4648 }
4649 spin_unlock_irq(&conf->device_lock);
4650 }
4651 #endif
4652
d710e138 4653 static void status(struct seq_file *seq, mddev_t *mddev)
4654 {
4655 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
4656 int i;
4657
4658 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
02c2de8c 4659 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
4660 for (i = 0; i < conf->raid_disks; i++)
4661 seq_printf (seq, "%s",
4662 conf->disks[i].rdev &&
b2d444d7 4663 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
1da177e4 4664 seq_printf (seq, "]");
45b4233c 4665 #ifdef DEBUG
4666 seq_printf (seq, "\n");
4667 printall(seq, conf);
4668 #endif
4669 }
4670
4671 static void print_raid5_conf (raid5_conf_t *conf)
4672 {
4673 int i;
4674 struct disk_info *tmp;
4675
4676 printk("RAID5 conf printout:\n");
4677 if (!conf) {
4678 printk("(conf==NULL)\n");
4679 return;
4680 }
4681 printk(" --- rd:%d wd:%d\n", conf->raid_disks,
4682 conf->raid_disks - conf->mddev->degraded);
4683
4684 for (i = 0; i < conf->raid_disks; i++) {
4685 char b[BDEVNAME_SIZE];
4686 tmp = conf->disks + i;
4687 if (tmp->rdev)
4688 printk(" disk %d, o:%d, dev:%s\n",
b2d444d7 4689 i, !test_bit(Faulty, &tmp->rdev->flags),
4690 bdevname(tmp->rdev->bdev,b));
4691 }
4692 }
4693
4694 static int raid5_spare_active(mddev_t *mddev)
4695 {
4696 int i;
4697 raid5_conf_t *conf = mddev->private;
4698 struct disk_info *tmp;
4699
4700 for (i = 0; i < conf->raid_disks; i++) {
4701 tmp = conf->disks + i;
4702 if (tmp->rdev
b2d444d7 4703 && !test_bit(Faulty, &tmp->rdev->flags)
4704 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
4705 unsigned long flags;
4706 spin_lock_irqsave(&conf->device_lock, flags);
1da177e4 4707 mddev->degraded--;
c04be0aa 4708 spin_unlock_irqrestore(&conf->device_lock, flags);
4709 }
4710 }
4711 print_raid5_conf(conf);
4712 return 0;
4713 }
4714
4715 static int raid5_remove_disk(mddev_t *mddev, int number)
4716 {
4717 raid5_conf_t *conf = mddev->private;
4718 int err = 0;
4719 mdk_rdev_t *rdev;
4720 struct disk_info *p = conf->disks + number;
4721
4722 print_raid5_conf(conf);
4723 rdev = p->rdev;
4724 if (rdev) {
4725 if (number >= conf->raid_disks &&
4726 conf->reshape_progress == MaxSector)
4727 clear_bit(In_sync, &rdev->flags);
4728
b2d444d7 4729 if (test_bit(In_sync, &rdev->flags) ||
4730 atomic_read(&rdev->nr_pending)) {
4731 err = -EBUSY;
4732 goto abort;
4733 }
4734 /* Only remove non-faulty devices if recovery
4735 * isn't possible.
4736 */
4737 if (!test_bit(Faulty, &rdev->flags) &&
4738 mddev->degraded <= conf->max_degraded &&
4739 number < conf->raid_disks) {
4740 err = -EBUSY;
4741 goto abort;
4742 }
1da177e4 4743 p->rdev = NULL;
fbd568a3 4744 synchronize_rcu();
4745 if (atomic_read(&rdev->nr_pending)) {
4746 /* lost the race, try later */
4747 err = -EBUSY;
4748 p->rdev = rdev;
4749 }
4750 }
4751 abort:
4752
4753 print_raid5_conf(conf);
4754 return err;
4755 }
4756
4757 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
4758 {
4759 raid5_conf_t *conf = mddev->private;
199050ea 4760 int err = -EEXIST;
4761 int disk;
4762 struct disk_info *p;
4763 int first = 0;
4764 int last = conf->raid_disks - 1;
1da177e4 4765
16a53ecc 4766 if (mddev->degraded > conf->max_degraded)
1da177e4 4767 /* no point adding a device */
199050ea 4768 return -EINVAL;
1da177e4 4769
4770 if (rdev->raid_disk >= 0)
4771 first = last = rdev->raid_disk;
4772
4773 /*
4774 * find the disk ... but prefer rdev->saved_raid_disk
4775 * if possible.
1da177e4 4776 */
16a53ecc 4777 if (rdev->saved_raid_disk >= 0 &&
6c2fce2e 4778 rdev->saved_raid_disk >= first &&
4779 conf->disks[rdev->saved_raid_disk].rdev == NULL)
4780 disk = rdev->saved_raid_disk;
4781 else
4782 disk = first;
4783 for ( ; disk <= last ; disk++)
1da177e4 4784 if ((p=conf->disks + disk)->rdev == NULL) {
b2d444d7 4785 clear_bit(In_sync, &rdev->flags);
1da177e4 4786 rdev->raid_disk = disk;
199050ea 4787 err = 0;
4788 if (rdev->saved_raid_disk != disk)
4789 conf->fullsync = 1;
d6065f7b 4790 rcu_assign_pointer(p->rdev, rdev);
4791 break;
4792 }
4793 print_raid5_conf(conf);
199050ea 4794 return err;
1da177e4
LT
4795}
4796
4797static int raid5_resize(mddev_t *mddev, sector_t sectors)
4798{
4799 /* no resync is happening, and there is enough space
4800 * on all devices, so we can resize.
4801 * We need to make sure resync covers any new space.
4802 * If the array is shrinking we should possibly wait until
4803 * any io in the removed space completes, but it hardly seems
4804 * worth it.
4805 */
4806 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
4807 md_set_array_sectors(mddev, raid5_size(mddev, sectors,
4808 mddev->raid_disks));
4809 if (mddev->array_sectors >
4810 raid5_size(mddev, sectors, mddev->raid_disks))
4811 return -EINVAL;
f233ea5c 4812 set_capacity(mddev->gendisk, mddev->array_sectors);
44ce6294 4813 mddev->changed = 1;
4814 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
4815 mddev->recovery_cp = mddev->dev_sectors;
1da177e4
LT
4816 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4817 }
58c0fed4 4818 mddev->dev_sectors = sectors;
4b5c7ae8 4819 mddev->resync_max_sectors = sectors;
4820 return 0;
4821 }
4822
63c70c4f 4823 static int raid5_check_reshape(mddev_t *mddev)
4824 {
4825 raid5_conf_t *conf = mddev_to_conf(mddev);
29269553 4826
4827 if (mddev->delta_disks == 0 &&
4828 mddev->new_layout == mddev->layout &&
4829 mddev->new_chunk == mddev->chunk_size)
4830 return -EINVAL; /* nothing to do */
4831 if (mddev->bitmap)
4832 /* Cannot grow a bitmap yet */
4833 return -EBUSY;
4834 if (mddev->degraded > conf->max_degraded)
4835 return -EINVAL;
4836 if (mddev->delta_disks < 0) {
4837 /* We might be able to shrink, but the devices must
4838 * be made bigger first.
4839 * For raid6, 4 is the minimum size.
4840 * Otherwise 2 is the minimum
4841 */
4842 int min = 2;
4843 if (mddev->level == 6)
4844 min = 4;
4845 if (mddev->raid_disks + mddev->delta_disks < min)
4846 return -EINVAL;
4847 }
4848
4849 /* Can only proceed if there are plenty of stripe_heads.
4850 * We need a minimum of one full stripe, and for sensible progress
4851 * it is best to have about 4 times that.
4852 * If we require 4 times, then the default 256 4K stripe_heads will
4853 * allow for chunk sizes up to 256K, which is probably OK.
4854 * If the chunk size is greater, user-space should request more
4855 * stripe_heads first.
4856 */
4857 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
4858 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
29269553 4859 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
4860 (max(mddev->chunk_size, mddev->new_chunk)
4861 / STRIPE_SIZE)*4);
4862 return -ENOSPC;
4863 }
4864
ec32a2bd 4865 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
4866 }
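
/* Editor's worked instance of the stripe_head requirement above: with 4K
 * stripes (STRIPE_SIZE == PAGE_SIZE on most configs), a 256K chunk needs
 * (262144 / 4096) * 4 = 256 stripe_heads -- exactly the default cache size,
 * which is why larger chunks require raising stripe_cache_size before a
 * reshape is accepted.
 */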
4867
4868 static int raid5_start_reshape(mddev_t *mddev)
4869 {
4870 raid5_conf_t *conf = mddev_to_conf(mddev);
4871 mdk_rdev_t *rdev;
63c70c4f
N
4872 int spares = 0;
4873 int added_devices = 0;
c04be0aa 4874 unsigned long flags;
63c70c4f 4875
f416885e 4876 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
63c70c4f
N
4877 return -EBUSY;
4878
159ec1fc 4879 list_for_each_entry(rdev, &mddev->disks, same_set)
4880 if (rdev->raid_disk < 0 &&
4881 !test_bit(Faulty, &rdev->flags))
4882 spares++;
63c70c4f 4883
f416885e 4884 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
4885 /* Not enough devices even to make a degraded array
4886 * of that size
4887 */
4888 return -EINVAL;
4889
4890 /* Refuse to reduce size of the array. Any reductions in
4891 * array size must be through explicit setting of array_size
4892 * attribute.
4893 */
4894 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
4895 < mddev->array_sectors) {
4896 printk(KERN_ERR "md: %s: array size must be reduced "
4897 "before number of disks\n", mdname(mddev));
4898 return -EINVAL;
4899 }
4900
f6705578 4901 atomic_set(&conf->reshape_stripes, 0);
4902 spin_lock_irq(&conf->device_lock);
4903 conf->previous_raid_disks = conf->raid_disks;
63c70c4f 4904 conf->raid_disks += mddev->delta_disks;
4905 conf->prev_chunk = conf->chunk_size;
4906 conf->chunk_size = mddev->new_chunk;
4907 conf->prev_algo = conf->algorithm;
4908 conf->algorithm = mddev->new_layout;
4909 if (mddev->delta_disks < 0)
4910 conf->reshape_progress = raid5_size(mddev, 0, 0);
4911 else
4912 conf->reshape_progress = 0;
4913 conf->reshape_safe = conf->reshape_progress;
86b42c71 4914 conf->generation++;
4915 spin_unlock_irq(&conf->device_lock);
4916
4917 /* Add some new drives, as many as will fit.
4918 * We know there are enough to make the newly sized array work.
4919 */
159ec1fc 4920 list_for_each_entry(rdev, &mddev->disks, same_set)
4921 if (rdev->raid_disk < 0 &&
4922 !test_bit(Faulty, &rdev->flags)) {
199050ea 4923 if (raid5_add_disk(mddev, rdev) == 0) {
4924 char nm[20];
4925 set_bit(In_sync, &rdev->flags);
29269553 4926 added_devices++;
5fd6c1dc 4927 rdev->recovery_offset = 0;
29269553 4928 sprintf(nm, "rd%d", rdev->raid_disk);
4929 if (sysfs_create_link(&mddev->kobj,
4930 &rdev->kobj, nm))
4931 printk(KERN_WARNING
4932 "raid5: failed to create "
4933 " link %s for %s\n",
4934 nm, mdname(mddev));
4935 } else
4936 break;
4937 }
4938
4939 if (mddev->delta_disks > 0) {
4940 spin_lock_irqsave(&conf->device_lock, flags);
4941 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
4942 - added_devices;
4943 spin_unlock_irqrestore(&conf->device_lock, flags);
4944 }
63c70c4f 4945 mddev->raid_disks = conf->raid_disks;
f6705578 4946 mddev->reshape_position = 0;
850b2b42 4947 set_bit(MD_CHANGE_DEVS, &mddev->flags);
f6705578 4948
4949 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4950 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4951 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4952 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4953 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4954 "%s_reshape");
4955 if (!mddev->sync_thread) {
4956 mddev->recovery = 0;
4957 spin_lock_irq(&conf->device_lock);
4958 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
fef9c61f 4959 conf->reshape_progress = MaxSector;
4960 spin_unlock_irq(&conf->device_lock);
4961 return -EAGAIN;
4962 }
4963 md_wakeup_thread(mddev->sync_thread);
4964 md_new_event(mddev);
4965 return 0;
4966 }
29269553 4967
4968 /* This is called from the reshape thread and should make any
4969 * changes needed in 'conf'
4970 */
4971 static void end_reshape(raid5_conf_t *conf)
4972 {
29269553 4973
f6705578 4974 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
f6705578 4975
f6705578 4976 spin_lock_irq(&conf->device_lock);
cea9c228 4977 conf->previous_raid_disks = conf->raid_disks;
fef9c61f 4978 conf->reshape_progress = MaxSector;
f6705578 4979 spin_unlock_irq(&conf->device_lock);
16a53ecc
N
4980
4981 /* read-ahead size must cover two whole stripes, which is
4982 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
4983 */
4984 {
cea9c228
N
4985 int data_disks = conf->raid_disks - conf->max_degraded;
4986 int stripe = data_disks * (conf->chunk_size
4987 / PAGE_SIZE);
16a53ecc
N
4988 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4989 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4990 }
29269553 4991 }
29269553
N
4992}
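
/* Worked example (illustrative only): a 6-disk raid6 (max_degraded = 2)
 * with 64KiB chunks and 4KiB pages has data_disks = 4, so
 * stripe = 4 * (65536 / 4096) = 64 pages and read-ahead is raised to at
 * least 128 pages (512KiB).
 */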

/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(mddev_t *mddev)
{
	struct block_device *bdev;
	raid5_conf_t *conf = mddev_to_conf(mddev);

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

		if (mddev->delta_disks > 0) {
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			set_capacity(mddev->gendisk, mddev->array_sectors);
			mddev->changed = 1;

			bdev = bdget_disk(mddev->gendisk, 0);
			if (bdev) {
				mutex_lock(&bdev->bd_inode->i_mutex);
				i_size_write(bdev->bd_inode,
					     (loff_t)mddev->array_sectors << 9);
				mutex_unlock(&bdev->bd_inode->i_mutex);
				bdput(bdev);
			}
		} else {
			int d;
			mddev->degraded = conf->raid_disks;
			for (d = 0; d < conf->raid_disks ; d++)
				if (conf->disks[d].rdev &&
				    test_bit(In_sync,
					     &conf->disks[d].rdev->flags))
					mddev->degraded--;
			for (d = conf->raid_disks ;
			     d < conf->raid_disks - mddev->delta_disks;
			     d++)
				raid5_remove_disk(mddev, d);
		}
		mddev->layout = conf->algorithm;
		mddev->chunk_size = conf->chunk_size;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
	}
}
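
/* Shrink example (illustrative only): finishing a 5 -> 4 disk reshape,
 * conf->raid_disks is already 4 and delta_disks = -1, so the removal
 * loop above runs for d = 4 only (4 < 4 - (-1) = 5), detaching the one
 * now-surplus device.
 */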

static void raid5_quiesce(mddev_t *mddev, int state)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);

	switch(state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 1;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    conf->device_lock, /* nothing */);
		spin_unlock_irq(&conf->device_lock);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}
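
/* Typical caller pattern (a sketch, not actual md core code): the
 * caller brackets a critical operation with raid5_quiesce(mddev, 1),
 * which drains all active stripes and aligned reads, and
 * raid5_quiesce(mddev, 0), which lets writes flow again; state 2 is
 * only used to kick waiters when resuming from a suspend.
 */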

static void *raid5_takeover_raid1(mddev_t *mddev)
{
	int chunksect;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;

	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk = chunksect << 9;

	return setup_conf(mddev);
}
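
/* Worked example (illustrative only): with array_sectors = 2097152
 * (1GiB), 2097152 & 127 == 0, so chunksect stays at 128 sectors and
 * new_chunk becomes 64KiB. An odd-sized array would halve chunksect
 * down to 1 (512 bytes), fail the STRIPE_SIZE test, and be rejected.
 */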

static void *raid5_takeover_raid6(mddev_t *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}
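
/* This works because each *_6 layout is the corresponding raid5 layout
 * with a dedicated Q-syndrome disk appended as the last device, so
 * dropping that one disk recovers an ordinary raid5 with identical data
 * placement; any other raid6 layout would need a full restripe.
 */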

static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
{
	/* For a 2-drive array, the layout and chunk size can be changed
	 * immediately as no restriping is needed.
	 * For larger arrays we record the new value - after validation -
	 * to be used by a later reshape pass.
	 */
	raid5_conf_t *conf = mddev_to_conf(mddev);

	if (new_layout >= 0 && !algorithm_valid_raid5(new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (new_chunk & (new_chunk-1))
			/* not a power of 2 */
			return -EINVAL;
		if (new_chunk < PAGE_SIZE)
			return -EINVAL;
		if (mddev->array_sectors & ((new_chunk>>9)-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */

	if (mddev->raid_disks == 2) {

		if (new_layout >= 0) {
			conf->algorithm = new_layout;
			mddev->layout = mddev->new_layout = new_layout;
		}
		if (new_chunk > 0) {
			conf->chunk_size = new_chunk;
			mddev->chunk_size = mddev->new_chunk = new_chunk;
		}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	} else {
		if (new_layout >= 0)
			mddev->new_layout = new_layout;
		if (new_chunk > 0)
			mddev->new_chunk = new_chunk;
	}
	return 0;
}
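
/* Validation example (illustrative only): new_chunk = 262144 (256KiB)
 * passes the power-of-two test (262144 & 262143 == 0) and exceeds
 * PAGE_SIZE, but is still rejected unless array_sectors is a multiple
 * of 262144 >> 9 = 512 sectors.
 */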

static int raid6_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
{
	if (new_layout >= 0 && !algorithm_valid_raid6(new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (new_chunk & (new_chunk-1))
			/* not a power of 2 */
			return -EINVAL;
		if (new_chunk < PAGE_SIZE)
			return -EINVAL;
		if (mddev->array_sectors & ((new_chunk>>9)-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */

	if (new_layout >= 0)
		mddev->new_layout = new_layout;
	if (new_chunk > 0)
		mddev->new_chunk = new_chunk;

	return 0;
}

static void *raid5_takeover(mddev_t *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if all devices are the same - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - provided it is a *_6 layout
	 *
	 * For now we handle raid1, raid4 and the *_6 raid6 layouts.
	 */

	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}

static struct mdk_personality raid5_personality;

static void *raid6_takeover(mddev_t *mddev)
{
	/* Currently can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	int new_layout;

	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
		break;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
}
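
/* Takeover sketch (illustrative assumption, not verified behaviour):
 * a clean 4-disk raid5 in ALGORITHM_LEFT_SYMMETRIC becomes a 5-disk
 * raid6 in ALGORITHM_LEFT_SYMMETRIC_6; data and P parity stay in place
 * and the added device is populated with Q by the subsequent recovery.
 */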

static struct mdk_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid6_takeover,
	.reconfig	= raid6_reconfig,
};
static struct mdk_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid5_takeover,
	.reconfig	= raid5_reconfig,
};

static struct mdk_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
};

static int __init raid5_init(void)
{
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");