[PATCH] md: teach raid5 the difference between 'check' and 'repair'.
drivers/md/raid5.c (GitHub/mt8127/android_kernel_alcatel_ttab.git)
1/*
2 * raid5.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar
5 *
6 * RAID-5 management functions.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * You should have received a copy of the GNU General Public License
14 * (for example /usr/src/linux/COPYING); if not, write to the Free
15 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
16 */
17
18
19#include <linux/config.h>
20#include <linux/module.h>
21#include <linux/slab.h>
22#include <linux/raid/raid5.h>
23#include <linux/highmem.h>
24#include <linux/bitops.h>
25#include <asm/atomic.h>
26
27#include <linux/raid/bitmap.h>
28
29/*
30 * Stripe cache
31 */
32
33#define NR_STRIPES 256
34#define STRIPE_SIZE PAGE_SIZE
35#define STRIPE_SHIFT (PAGE_SHIFT - 9)
36#define STRIPE_SECTORS (STRIPE_SIZE>>9)
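/* With 4KiB pages this gives STRIPE_SIZE = 4096 bytes and
 * STRIPE_SECTORS = 8 (512-byte sectors per cached stripe page).
 */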
37#define IO_THRESHOLD 1
38#define HASH_PAGES 1
39#define HASH_PAGES_ORDER 0
40#define NR_HASH (HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *))
41#define HASH_MASK (NR_HASH - 1)
42
43#define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])
44
45/* bio's attached to a stripe+device for I/O are linked together in bi_sector
46 * order without overlap. There may be several bio's per stripe+device, and
47 * a bio could span several devices.
48 * When walking this list for a particular stripe+device, we must never proceed
49 * beyond a bio that extends past this device, as the next bio might no longer
50 * be valid.
51 * This macro is used to determine the 'next' bio in the list, given the sector
52 * of the current stripe+device
53 */
54#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
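/*
 * A minimal sketch of the walk this macro supports, the pattern used when
 * draining a per-device chain in handle_stripe() and compute_parity():
 *
 *	while (bi && bi->bi_sector < dev->sector + STRIPE_SECTORS) {
 *		struct bio *next = r5_next_bio(bi, dev->sector);
 *		...
 *		bi = next;
 *	}
 */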
55/*
56 * The following can be used to debug the driver
57 */
58#define RAID5_DEBUG 0
59#define RAID5_PARANOIA 1
60#if RAID5_PARANOIA && defined(CONFIG_SMP)
61# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
62#else
63# define CHECK_DEVLOCK()
64#endif
65
66#define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
67#if RAID5_DEBUG
68#define inline
69#define __inline__
70#endif
71
72static void print_raid5_conf (raid5_conf_t *conf);
73
74static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
75{
76 if (atomic_dec_and_test(&sh->count)) {
77 if (!list_empty(&sh->lru))
78 BUG();
79 if (atomic_read(&conf->active_stripes)==0)
80 BUG();
81 if (test_bit(STRIPE_HANDLE, &sh->state)) {
82 if (test_bit(STRIPE_DELAYED, &sh->state))
83 list_add_tail(&sh->lru, &conf->delayed_list);
84 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
85 conf->seq_write == sh->bm_seq)
86 list_add_tail(&sh->lru, &conf->bitmap_list);
87 else {
88 clear_bit(STRIPE_BIT_DELAY, &sh->state);
89 list_add_tail(&sh->lru, &conf->handle_list);
90 }
91 md_wakeup_thread(conf->mddev->thread);
92 } else {
93 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
94 atomic_dec(&conf->preread_active_stripes);
95 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
96 md_wakeup_thread(conf->mddev->thread);
97 }
98 list_add_tail(&sh->lru, &conf->inactive_list);
99 atomic_dec(&conf->active_stripes);
100 if (!conf->inactive_blocked ||
101 atomic_read(&conf->active_stripes) < (NR_STRIPES*3/4))
102 wake_up(&conf->wait_for_stripe);
103 }
104 }
105}
106static void release_stripe(struct stripe_head *sh)
107{
108 raid5_conf_t *conf = sh->raid_conf;
109 unsigned long flags;
110
111 spin_lock_irqsave(&conf->device_lock, flags);
112 __release_stripe(conf, sh);
113 spin_unlock_irqrestore(&conf->device_lock, flags);
114}
115
116static void remove_hash(struct stripe_head *sh)
117{
118 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
119
120 if (sh->hash_pprev) {
121 if (sh->hash_next)
122 sh->hash_next->hash_pprev = sh->hash_pprev;
123 *sh->hash_pprev = sh->hash_next;
124 sh->hash_pprev = NULL;
125 }
126}
127
128static __inline__ void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
129{
130 struct stripe_head **shp = &stripe_hash(conf, sh->sector);
131
132 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
133
134 CHECK_DEVLOCK();
135 if ((sh->hash_next = *shp) != NULL)
136 (*shp)->hash_pprev = &sh->hash_next;
137 *shp = sh;
138 sh->hash_pprev = shp;
139}
140
141
142/* find an idle stripe, make sure it is unhashed, and return it. */
143static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
144{
145 struct stripe_head *sh = NULL;
146 struct list_head *first;
147
148 CHECK_DEVLOCK();
149 if (list_empty(&conf->inactive_list))
150 goto out;
151 first = conf->inactive_list.next;
152 sh = list_entry(first, struct stripe_head, lru);
153 list_del_init(first);
154 remove_hash(sh);
155 atomic_inc(&conf->active_stripes);
156out:
157 return sh;
158}
159
160static void shrink_buffers(struct stripe_head *sh, int num)
161{
162 struct page *p;
163 int i;
164
165 for (i=0; i<num ; i++) {
166 p = sh->dev[i].page;
167 if (!p)
168 continue;
169 sh->dev[i].page = NULL;
170 page_cache_release(p);
171 }
172}
173
174static int grow_buffers(struct stripe_head *sh, int num)
175{
176 int i;
177
178 for (i=0; i<num; i++) {
179 struct page *page;
180
181 if (!(page = alloc_page(GFP_KERNEL))) {
182 return 1;
183 }
184 sh->dev[i].page = page;
185 }
186 return 0;
187}
188
189static void raid5_build_block (struct stripe_head *sh, int i);
190
191static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
192{
193 raid5_conf_t *conf = sh->raid_conf;
194 int disks = conf->raid_disks, i;
195
196 if (atomic_read(&sh->count) != 0)
197 BUG();
198 if (test_bit(STRIPE_HANDLE, &sh->state))
199 BUG();
200
201 CHECK_DEVLOCK();
202 PRINTK("init_stripe called, stripe %llu\n",
203 (unsigned long long)sh->sector);
204
205 remove_hash(sh);
206
207 sh->sector = sector;
208 sh->pd_idx = pd_idx;
209 sh->state = 0;
210
211 for (i=disks; i--; ) {
212 struct r5dev *dev = &sh->dev[i];
213
214 if (dev->toread || dev->towrite || dev->written ||
215 test_bit(R5_LOCKED, &dev->flags)) {
216 printk("sector=%llx i=%d %p %p %p %d\n",
217 (unsigned long long)sh->sector, i, dev->toread,
218 dev->towrite, dev->written,
219 test_bit(R5_LOCKED, &dev->flags));
220 BUG();
221 }
222 dev->flags = 0;
223 raid5_build_block(sh, i);
224 }
225 insert_hash(conf, sh);
226}
227
228static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
229{
230 struct stripe_head *sh;
231
232 CHECK_DEVLOCK();
233 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
234 for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next)
235 if (sh->sector == sector)
236 return sh;
237 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
238 return NULL;
239}
240
241static void unplug_slaves(mddev_t *mddev);
242static void raid5_unplug_device(request_queue_t *q);
243
244static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector,
245 int pd_idx, int noblock)
246{
247 struct stripe_head *sh;
248
249 PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);
250
251 spin_lock_irq(&conf->device_lock);
252
253 do {
254 wait_event_lock_irq(conf->wait_for_stripe,
255 conf->quiesce == 0,
256 conf->device_lock, /* nothing */);
257 sh = __find_stripe(conf, sector);
258 if (!sh) {
259 if (!conf->inactive_blocked)
260 sh = get_free_stripe(conf);
261 if (noblock && sh == NULL)
262 break;
263 if (!sh) {
264 conf->inactive_blocked = 1;
265 wait_event_lock_irq(conf->wait_for_stripe,
266 !list_empty(&conf->inactive_list) &&
267 (atomic_read(&conf->active_stripes) < (NR_STRIPES *3/4)
268 || !conf->inactive_blocked),
269 conf->device_lock,
270 unplug_slaves(conf->mddev);
271 );
272 conf->inactive_blocked = 0;
273 } else
274 init_stripe(sh, sector, pd_idx);
275 } else {
276 if (atomic_read(&sh->count)) {
277 if (!list_empty(&sh->lru))
278 BUG();
279 } else {
280 if (!test_bit(STRIPE_HANDLE, &sh->state))
281 atomic_inc(&conf->active_stripes);
282 if (list_empty(&sh->lru))
283 BUG();
284 list_del_init(&sh->lru);
285 }
286 }
287 } while (sh == NULL);
288
289 if (sh)
290 atomic_inc(&sh->count);
291
292 spin_unlock_irq(&conf->device_lock);
293 return sh;
294}
295
296static int grow_one_stripe(raid5_conf_t *conf)
297{
298 struct stripe_head *sh;
299 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
300 if (!sh)
301 return 0;
302 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
303 sh->raid_conf = conf;
304 spin_lock_init(&sh->lock);
305
306 if (grow_buffers(sh, conf->raid_disks)) {
307 shrink_buffers(sh, conf->raid_disks);
308 kmem_cache_free(conf->slab_cache, sh);
309 return 0;
310 }
311 /* we just created an active stripe so... */
312 atomic_set(&sh->count, 1);
313 atomic_inc(&conf->active_stripes);
314 INIT_LIST_HEAD(&sh->lru);
315 release_stripe(sh);
316 return 1;
317}
318
319static int grow_stripes(raid5_conf_t *conf, int num)
320{
321 kmem_cache_t *sc;
322 int devs = conf->raid_disks;
323
324 sprintf(conf->cache_name, "raid5/%s", mdname(conf->mddev));
325
326 sc = kmem_cache_create(conf->cache_name,
327 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
328 0, 0, NULL, NULL);
329 if (!sc)
330 return 1;
331 conf->slab_cache = sc;
332 while (num--) {
333 if (!grow_one_stripe(conf))
334 return 1;
335 }
336 return 0;
337}
338
339static int drop_one_stripe(raid5_conf_t *conf)
340{
341 struct stripe_head *sh;
342
343 spin_lock_irq(&conf->device_lock);
344 sh = get_free_stripe(conf);
345 spin_unlock_irq(&conf->device_lock);
346 if (!sh)
347 return 0;
348 if (atomic_read(&sh->count))
349 BUG();
350 shrink_buffers(sh, conf->raid_disks);
351 kmem_cache_free(conf->slab_cache, sh);
352 atomic_dec(&conf->active_stripes);
353 return 1;
354}
355
356static void shrink_stripes(raid5_conf_t *conf)
357{
358 while (drop_one_stripe(conf))
359 ;
360
361 kmem_cache_destroy(conf->slab_cache);
362 conf->slab_cache = NULL;
363}
364
365static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
366 int error)
367{
368 struct stripe_head *sh = bi->bi_private;
369 raid5_conf_t *conf = sh->raid_conf;
370 int disks = conf->raid_disks, i;
371 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
372
373 if (bi->bi_size)
374 return 1;
375
376 for (i=0 ; i<disks; i++)
377 if (bi == &sh->dev[i].req)
378 break;
379
380 PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
381 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
382 uptodate);
383 if (i == disks) {
384 BUG();
385 return 0;
386 }
387
388 if (uptodate) {
389#if 0
390 struct bio *bio;
391 unsigned long flags;
392 spin_lock_irqsave(&conf->device_lock, flags);
393 /* we can return a buffer if we bypassed the cache or
394 * if the top buffer is not in highmem. If there are
395 * multiple buffers, leave the extra work to
396 * handle_stripe
397 */
398 buffer = sh->bh_read[i];
399 if (buffer &&
400 (!PageHighMem(buffer->b_page)
401 || buffer->b_page == bh->b_page )
402 ) {
403 sh->bh_read[i] = buffer->b_reqnext;
404 buffer->b_reqnext = NULL;
405 } else
406 buffer = NULL;
407 spin_unlock_irqrestore(&conf->device_lock, flags);
408 if (sh->bh_page[i]==bh->b_page)
409 set_buffer_uptodate(bh);
410 if (buffer) {
411 if (buffer->b_page != bh->b_page)
412 memcpy(buffer->b_data, bh->b_data, bh->b_size);
413 buffer->b_end_io(buffer, 1);
414 }
415#else
416 set_bit(R5_UPTODATE, &sh->dev[i].flags);
417#endif
418 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
419 printk("R5: read error corrected!!\n");
420 clear_bit(R5_ReadError, &sh->dev[i].flags);
421 clear_bit(R5_ReWrite, &sh->dev[i].flags);
422 }
423 } else {
424 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
425 if (conf->mddev->degraded) {
426 printk("R5: read error not correctable.\n");
427 clear_bit(R5_ReadError, &sh->dev[i].flags);
428 clear_bit(R5_ReWrite, &sh->dev[i].flags);
429 md_error(conf->mddev, conf->disks[i].rdev);
430 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
431 /* Oh, no!!! */
432 printk("R5: read error NOT corrected!!\n");
433 clear_bit(R5_ReadError, &sh->dev[i].flags);
434 clear_bit(R5_ReWrite, &sh->dev[i].flags);
435 md_error(conf->mddev, conf->disks[i].rdev);
436 } else
437 set_bit(R5_ReadError, &sh->dev[i].flags);
438 }
439 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
440#if 0
441 /* must restore b_page before unlocking buffer... */
442 if (sh->bh_page[i] != bh->b_page) {
443 bh->b_page = sh->bh_page[i];
444 bh->b_data = page_address(bh->b_page);
445 clear_buffer_uptodate(bh);
446 }
447#endif
448 clear_bit(R5_LOCKED, &sh->dev[i].flags);
449 set_bit(STRIPE_HANDLE, &sh->state);
450 release_stripe(sh);
451 return 0;
452}
453
454static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
455 int error)
456{
457 struct stripe_head *sh = bi->bi_private;
458 raid5_conf_t *conf = sh->raid_conf;
459 int disks = conf->raid_disks, i;
460 unsigned long flags;
461 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
462
463 if (bi->bi_size)
464 return 1;
465
466 for (i=0 ; i<disks; i++)
467 if (bi == &sh->dev[i].req)
468 break;
469
470 PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
471 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
472 uptodate);
473 if (i == disks) {
474 BUG();
475 return 0;
476 }
477
478 spin_lock_irqsave(&conf->device_lock, flags);
479 if (!uptodate)
480 md_error(conf->mddev, conf->disks[i].rdev);
481
482 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
483
484 clear_bit(R5_LOCKED, &sh->dev[i].flags);
485 set_bit(STRIPE_HANDLE, &sh->state);
486 __release_stripe(conf, sh);
487 spin_unlock_irqrestore(&conf->device_lock, flags);
488 return 0;
489}
490
491
492static sector_t compute_blocknr(struct stripe_head *sh, int i);
493
494static void raid5_build_block (struct stripe_head *sh, int i)
495{
496 struct r5dev *dev = &sh->dev[i];
497
498 bio_init(&dev->req);
499 dev->req.bi_io_vec = &dev->vec;
500 dev->req.bi_vcnt++;
501 dev->req.bi_max_vecs++;
502 dev->vec.bv_page = dev->page;
503 dev->vec.bv_len = STRIPE_SIZE;
504 dev->vec.bv_offset = 0;
505
506 dev->req.bi_sector = sh->sector;
507 dev->req.bi_private = sh;
508
509 dev->flags = 0;
510 if (i != sh->pd_idx)
511 dev->sector = compute_blocknr(sh, i);
512}
513
514static void error(mddev_t *mddev, mdk_rdev_t *rdev)
515{
516 char b[BDEVNAME_SIZE];
517 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
518 PRINTK("raid5: error called\n");
519
520 if (!rdev->faulty) {
521 mddev->sb_dirty = 1;
522 if (rdev->in_sync) {
523 conf->working_disks--;
524 mddev->degraded++;
525 conf->failed_disks++;
526 rdev->in_sync = 0;
527 /*
528 * if recovery was running, make sure it aborts.
529 */
530 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
531 }
532 rdev->faulty = 1;
533 printk (KERN_ALERT
534 "raid5: Disk failure on %s, disabling device."
535 " Operation continuing on %d devices\n",
536 bdevname(rdev->bdev,b), conf->working_disks);
537 }
538}
539
540/*
541 * Input: a 'big' sector number,
542 * Output: index of the data and parity disk, and the sector # in them.
543 */
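/*
 * Worked example (assuming a 4-disk array, 64KiB chunks = 128 sectors per
 * chunk, ALGORITHM_LEFT_ASYMMETRIC): r_sector = 1000 gives chunk_offset = 104
 * and chunk_number = 7, so stripe = 7/3 = 2 and dd_idx = 7%3 = 1;
 * pd_idx = 3 - (2 % 4) = 1, and since dd_idx >= pd_idx the data lands on
 * disk 2, at device sector 2*128 + 104 = 360.
 */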
544static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
545 unsigned int data_disks, unsigned int * dd_idx,
546 unsigned int * pd_idx, raid5_conf_t *conf)
547{
548 long stripe;
549 unsigned long chunk_number;
550 unsigned int chunk_offset;
551 sector_t new_sector;
552 int sectors_per_chunk = conf->chunk_size >> 9;
553
554 /* First compute the information on this sector */
555
556 /*
557 * Compute the chunk number and the sector offset inside the chunk
558 */
559 chunk_offset = sector_div(r_sector, sectors_per_chunk);
560 chunk_number = r_sector;
561 BUG_ON(r_sector != chunk_number);
562
563 /*
564 * Compute the stripe number
565 */
566 stripe = chunk_number / data_disks;
567
568 /*
569 * Compute the data disk and parity disk indexes inside the stripe
570 */
571 *dd_idx = chunk_number % data_disks;
572
573 /*
574 * Select the parity disk based on the user selected algorithm.
575 */
576 if (conf->level == 4)
577 *pd_idx = data_disks;
578 else switch (conf->algorithm) {
579 case ALGORITHM_LEFT_ASYMMETRIC:
580 *pd_idx = data_disks - stripe % raid_disks;
581 if (*dd_idx >= *pd_idx)
582 (*dd_idx)++;
583 break;
584 case ALGORITHM_RIGHT_ASYMMETRIC:
585 *pd_idx = stripe % raid_disks;
586 if (*dd_idx >= *pd_idx)
587 (*dd_idx)++;
588 break;
589 case ALGORITHM_LEFT_SYMMETRIC:
590 *pd_idx = data_disks - stripe % raid_disks;
591 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
592 break;
593 case ALGORITHM_RIGHT_SYMMETRIC:
594 *pd_idx = stripe % raid_disks;
595 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
596 break;
597 default:
598 printk("raid5: unsupported algorithm %d\n",
599 conf->algorithm);
600 }
601
602 /*
603 * Finally, compute the new sector number
604 */
605 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
606 return new_sector;
607}
608
609
610static sector_t compute_blocknr(struct stripe_head *sh, int i)
611{
612 raid5_conf_t *conf = sh->raid_conf;
613 int raid_disks = conf->raid_disks, data_disks = raid_disks - 1;
614 sector_t new_sector = sh->sector, check;
615 int sectors_per_chunk = conf->chunk_size >> 9;
616 sector_t stripe;
617 int chunk_offset;
618 int chunk_number, dummy1, dummy2, dd_idx = i;
619 sector_t r_sector;
620
621 chunk_offset = sector_div(new_sector, sectors_per_chunk);
622 stripe = new_sector;
623 BUG_ON(new_sector != stripe);
624
625
626 switch (conf->algorithm) {
627 case ALGORITHM_LEFT_ASYMMETRIC:
628 case ALGORITHM_RIGHT_ASYMMETRIC:
629 if (i > sh->pd_idx)
630 i--;
631 break;
632 case ALGORITHM_LEFT_SYMMETRIC:
633 case ALGORITHM_RIGHT_SYMMETRIC:
634 if (i < sh->pd_idx)
635 i += raid_disks;
636 i -= (sh->pd_idx + 1);
637 break;
638 default:
639 printk("raid5: unsupported algorithm %d\n",
640 conf->algorithm);
641 }
642
643 chunk_number = stripe * data_disks + i;
644 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
645
646 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
647 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
648 printk("compute_blocknr: map not correct\n");
649 return 0;
650 }
651 return r_sector;
652}
653
654
655
656/*
657 * Copy data between a page in the stripe cache, and a bio.
658 * There are no alignment or size guarantees between the page or the
659 * bio except that there is some overlap.
660 * All iovecs in the bio must be considered.
661 */
662static void copy_data(int frombio, struct bio *bio,
663 struct page *page,
664 sector_t sector)
665{
666 char *pa = page_address(page);
667 struct bio_vec *bvl;
668 int i;
669 int page_offset;
670
671 if (bio->bi_sector >= sector)
672 page_offset = (signed)(bio->bi_sector - sector) * 512;
673 else
674 page_offset = (signed)(sector - bio->bi_sector) * -512;
675 bio_for_each_segment(bvl, bio, i) {
676 int len = bio_iovec_idx(bio,i)->bv_len;
677 int clen;
678 int b_offset = 0;
679
680 if (page_offset < 0) {
681 b_offset = -page_offset;
682 page_offset += b_offset;
683 len -= b_offset;
684 }
685
686 if (len > 0 && page_offset + len > STRIPE_SIZE)
687 clen = STRIPE_SIZE - page_offset;
688 else clen = len;
689
690 if (clen > 0) {
691 char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
692 if (frombio)
693 memcpy(pa+page_offset, ba+b_offset, clen);
694 else
695 memcpy(ba+b_offset, pa+page_offset, clen);
696 __bio_kunmap_atomic(ba, KM_USER0);
697 }
698 if (clen < len) /* hit end of page */
699 break;
700 page_offset += len;
701 }
702}
703
704#define check_xor() do { \
705 if (count == MAX_XOR_BLOCKS) { \
706 xor_block(count, STRIPE_SIZE, ptr); \
707 count = 1; \
708 } \
709 } while(0)
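/* xor_block() folds at most MAX_XOR_BLOCKS source pages into ptr[0] per
 * call; check_xor() flushes a full batch and resets count to 1 so the
 * partial result in ptr[0] carries over, letting callers such as
 * compute_block() and compute_parity() simply keep appending pages.
 */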
710
711
712static void compute_block(struct stripe_head *sh, int dd_idx)
713{
714 raid5_conf_t *conf = sh->raid_conf;
715 int i, count, disks = conf->raid_disks;
716 void *ptr[MAX_XOR_BLOCKS], *p;
717
718 PRINTK("compute_block, stripe %llu, idx %d\n",
719 (unsigned long long)sh->sector, dd_idx);
720
721 ptr[0] = page_address(sh->dev[dd_idx].page);
722 memset(ptr[0], 0, STRIPE_SIZE);
723 count = 1;
724 for (i = disks ; i--; ) {
725 if (i == dd_idx)
726 continue;
727 p = page_address(sh->dev[i].page);
728 if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
729 ptr[count++] = p;
730 else
731 printk("compute_block() %d, stripe %llu, %d"
732 " not present\n", dd_idx,
733 (unsigned long long)sh->sector, i);
734
735 check_xor();
736 }
737 if (count != 1)
738 xor_block(count, STRIPE_SIZE, ptr);
739 set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
740}
741
742static void compute_parity(struct stripe_head *sh, int method)
743{
744 raid5_conf_t *conf = sh->raid_conf;
745 int i, pd_idx = sh->pd_idx, disks = conf->raid_disks, count;
746 void *ptr[MAX_XOR_BLOCKS];
747 struct bio *chosen;
748
749 PRINTK("compute_parity, stripe %llu, method %d\n",
750 (unsigned long long)sh->sector, method);
751
752 count = 1;
753 ptr[0] = page_address(sh->dev[pd_idx].page);
754 switch(method) {
755 case READ_MODIFY_WRITE:
756 if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
757 BUG();
758 for (i=disks ; i-- ;) {
759 if (i==pd_idx)
760 continue;
761 if (sh->dev[i].towrite &&
762 test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
763 ptr[count++] = page_address(sh->dev[i].page);
764 chosen = sh->dev[i].towrite;
765 sh->dev[i].towrite = NULL;
766
767 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
768 wake_up(&conf->wait_for_overlap);
769
770 if (sh->dev[i].written) BUG();
771 sh->dev[i].written = chosen;
772 check_xor();
773 }
774 }
775 break;
776 case RECONSTRUCT_WRITE:
777 memset(ptr[0], 0, STRIPE_SIZE);
778 for (i= disks; i-- ;)
779 if (i!=pd_idx && sh->dev[i].towrite) {
780 chosen = sh->dev[i].towrite;
781 sh->dev[i].towrite = NULL;
782
783 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
784 wake_up(&conf->wait_for_overlap);
785
786 if (sh->dev[i].written) BUG();
787 sh->dev[i].written = chosen;
788 }
789 break;
790 case CHECK_PARITY:
791 break;
792 }
793 if (count>1) {
794 xor_block(count, STRIPE_SIZE, ptr);
795 count = 1;
796 }
797
798 for (i = disks; i--;)
799 if (sh->dev[i].written) {
800 sector_t sector = sh->dev[i].sector;
801 struct bio *wbi = sh->dev[i].written;
802 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
803 copy_data(1, wbi, sh->dev[i].page, sector);
804 wbi = r5_next_bio(wbi, sector);
805 }
806
807 set_bit(R5_LOCKED, &sh->dev[i].flags);
808 set_bit(R5_UPTODATE, &sh->dev[i].flags);
809 }
810
811 switch(method) {
812 case RECONSTRUCT_WRITE:
813 case CHECK_PARITY:
814 for (i=disks; i--;)
815 if (i != pd_idx) {
816 ptr[count++] = page_address(sh->dev[i].page);
817 check_xor();
818 }
819 break;
820 case READ_MODIFY_WRITE:
821 for (i = disks; i--;)
822 if (sh->dev[i].written) {
823 ptr[count++] = page_address(sh->dev[i].page);
824 check_xor();
825 }
826 }
827 if (count != 1)
828 xor_block(count, STRIPE_SIZE, ptr);
829
830 if (method != CHECK_PARITY) {
831 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
832 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
833 } else
834 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
835}
836
837/*
838 * Each stripe/dev can have one or more bion attached.
839 * toread/towrite point to the first in a chain.
840 * The bi_next chain must be in order.
841 */
842static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
843{
844 struct bio **bip;
845 raid5_conf_t *conf = sh->raid_conf;
846 int firstwrite=0;
847
848 PRINTK("adding bh b#%llu to stripe s#%llu\n",
849 (unsigned long long)bi->bi_sector,
850 (unsigned long long)sh->sector);
851
852
853 spin_lock(&sh->lock);
854 spin_lock_irq(&conf->device_lock);
855 if (forwrite) {
856 bip = &sh->dev[dd_idx].towrite;
857 if (*bip == NULL && sh->dev[dd_idx].written == NULL)
858 firstwrite = 1;
859 } else
860 bip = &sh->dev[dd_idx].toread;
861 while (*bip && (*bip)->bi_sector < bi->bi_sector) {
862 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
863 goto overlap;
864 bip = & (*bip)->bi_next;
865 }
866 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
867 goto overlap;
868
869 if (*bip && bi->bi_next && (*bip) != bi->bi_next)
870 BUG();
871 if (*bip)
872 bi->bi_next = *bip;
873 *bip = bi;
874 bi->bi_phys_segments ++;
875 spin_unlock_irq(&conf->device_lock);
876 spin_unlock(&sh->lock);
877
878 PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
879 (unsigned long long)bi->bi_sector,
880 (unsigned long long)sh->sector, dd_idx);
881
882 if (conf->mddev->bitmap && firstwrite) {
883 sh->bm_seq = conf->seq_write;
884 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
885 STRIPE_SECTORS, 0);
886 set_bit(STRIPE_BIT_DELAY, &sh->state);
887 }
888
889 if (forwrite) {
890 /* check if page is covered */
891 sector_t sector = sh->dev[dd_idx].sector;
892 for (bi=sh->dev[dd_idx].towrite;
893 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
894 bi && bi->bi_sector <= sector;
895 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
896 if (bi->bi_sector + (bi->bi_size>>9) >= sector)
897 sector = bi->bi_sector + (bi->bi_size>>9);
898 }
899 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
900 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
901 }
902 return 1;
903
904 overlap:
905 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
906 spin_unlock_irq(&conf->device_lock);
907 spin_unlock(&sh->lock);
908 return 0;
909}
910
911
912/*
913 * handle_stripe - do things to a stripe.
914 *
915 * We lock the stripe and then examine the state of various bits
916 * to see what needs to be done.
917 * Possible results:
918 * return some read request which now have data
919 * return some write requests which are safely on disc
920 * schedule a read on some buffers
921 * schedule a write of some buffers
922 * return confirmation of parity correctness
923 *
924 * Parity calculations are done inside the stripe lock
925 * buffers are taken off read_list or write_list, and bh_cache buffers
926 * get BH_Lock set before the stripe lock is released.
927 *
928 */
929
930static void handle_stripe(struct stripe_head *sh)
931{
932 raid5_conf_t *conf = sh->raid_conf;
933 int disks = conf->raid_disks;
934 struct bio *return_bi= NULL;
935 struct bio *bi;
936 int i;
937 int syncing;
938 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
939 int non_overwrite = 0;
940 int failed_num=0;
941 struct r5dev *dev;
942
943 PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
944 (unsigned long long)sh->sector, atomic_read(&sh->count),
945 sh->pd_idx);
946
947 spin_lock(&sh->lock);
948 clear_bit(STRIPE_HANDLE, &sh->state);
949 clear_bit(STRIPE_DELAYED, &sh->state);
950
951 syncing = test_bit(STRIPE_SYNCING, &sh->state);
952 /* Now to look around and see what can be done */
953
954 for (i=disks; i--; ) {
955 mdk_rdev_t *rdev;
956 dev = &sh->dev[i];
957 clear_bit(R5_Insync, &dev->flags);
958 clear_bit(R5_Syncio, &dev->flags);
959
960 PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
961 i, dev->flags, dev->toread, dev->towrite, dev->written);
962 /* maybe we can reply to a read */
963 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
964 struct bio *rbi, *rbi2;
965 PRINTK("Return read for disc %d\n", i);
966 spin_lock_irq(&conf->device_lock);
967 rbi = dev->toread;
968 dev->toread = NULL;
969 if (test_and_clear_bit(R5_Overlap, &dev->flags))
970 wake_up(&conf->wait_for_overlap);
971 spin_unlock_irq(&conf->device_lock);
972 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
973 copy_data(0, rbi, dev->page, dev->sector);
974 rbi2 = r5_next_bio(rbi, dev->sector);
975 spin_lock_irq(&conf->device_lock);
976 if (--rbi->bi_phys_segments == 0) {
977 rbi->bi_next = return_bi;
978 return_bi = rbi;
979 }
980 spin_unlock_irq(&conf->device_lock);
981 rbi = rbi2;
982 }
983 }
984
985 /* now count some things */
986 if (test_bit(R5_LOCKED, &dev->flags)) locked++;
987 if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
988
989
990 if (dev->toread) to_read++;
991 if (dev->towrite) {
992 to_write++;
993 if (!test_bit(R5_OVERWRITE, &dev->flags))
994 non_overwrite++;
995 }
996 if (dev->written) written++;
997 rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */
998 if (!rdev || !rdev->in_sync) {
999 /* The ReadError flag will just be confusing now */
1000 clear_bit(R5_ReadError, &dev->flags);
1001 clear_bit(R5_ReWrite, &dev->flags);
1002 }
1003 if (!rdev || !rdev->in_sync
1004 || test_bit(R5_ReadError, &dev->flags)) {
1005 failed++;
1006 failed_num = i;
1007 } else
1008 set_bit(R5_Insync, &dev->flags);
1009 }
1010 PRINTK("locked=%d uptodate=%d to_read=%d"
1011 " to_write=%d failed=%d failed_num=%d\n",
1012 locked, uptodate, to_read, to_write, failed, failed_num);
1013 /* check if the array has lost two devices and, if so, some requests might
1014 * need to be failed
1015 */
1016 if (failed > 1 && to_read+to_write+written) {
1017 for (i=disks; i--; ) {
1018 int bitmap_end = 0;
1019
1020 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1021 mdk_rdev_t *rdev = conf->disks[i].rdev;
1022 if (rdev && rdev->in_sync)
1023 /* multiple read failures in one stripe */
1024 md_error(conf->mddev, rdev);
1025 }
1026
1027 spin_lock_irq(&conf->device_lock);
1028 /* fail all writes first */
1029 bi = sh->dev[i].towrite;
1030 sh->dev[i].towrite = NULL;
1031 if (bi) { to_write--; bitmap_end = 1; }
1032
1033 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1034 wake_up(&conf->wait_for_overlap);
1035
1036 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1037 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1038 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1039 if (--bi->bi_phys_segments == 0) {
1040 md_write_end(conf->mddev);
1041 bi->bi_next = return_bi;
1042 return_bi = bi;
1043 }
1044 bi = nextbi;
1045 }
1046 /* and fail all 'written' */
1047 bi = sh->dev[i].written;
1048 sh->dev[i].written = NULL;
1049 if (bi) bitmap_end = 1;
1050 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
1051 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
1052 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1053 if (--bi->bi_phys_segments == 0) {
1054 md_write_end(conf->mddev);
1055 bi->bi_next = return_bi;
1056 return_bi = bi;
1057 }
1058 bi = bi2;
1059 }
1060
1061 /* fail any reads if this device is non-operational */
1062 if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
1063 test_bit(R5_ReadError, &sh->dev[i].flags)) {
1064 bi = sh->dev[i].toread;
1065 sh->dev[i].toread = NULL;
1066 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1067 wake_up(&conf->wait_for_overlap);
1068 if (bi) to_read--;
1069 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1070 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1071 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1072 if (--bi->bi_phys_segments == 0) {
1073 bi->bi_next = return_bi;
1074 return_bi = bi;
1075 }
1076 bi = nextbi;
1077 }
1078 }
1079 spin_unlock_irq(&conf->device_lock);
1080 if (bitmap_end)
1081 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1082 STRIPE_SECTORS, 0, 0);
1083 }
1084 }
1085 if (failed > 1 && syncing) {
1086 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
1087 clear_bit(STRIPE_SYNCING, &sh->state);
1088 syncing = 0;
1089 }
1090
1091 /* might be able to return some write requests if the parity block
1092 * is safe, or on a failed drive
1093 */
1094 dev = &sh->dev[sh->pd_idx];
1095 if ( written &&
1096 ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) &&
1097 test_bit(R5_UPTODATE, &dev->flags))
1098 || (failed == 1 && failed_num == sh->pd_idx))
1099 ) {
1100 /* any written block on an uptodate or failed drive can be returned.
1101 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
1102 * never LOCKED, so we don't need to test 'failed' directly.
1103 */
1104 for (i=disks; i--; )
1105 if (sh->dev[i].written) {
1106 dev = &sh->dev[i];
1107 if (!test_bit(R5_LOCKED, &dev->flags) &&
1108 test_bit(R5_UPTODATE, &dev->flags) ) {
1109 /* We can return any write requests */
1110 struct bio *wbi, *wbi2;
1111 int bitmap_end = 0;
1112 PRINTK("Return write for disc %d\n", i);
1113 spin_lock_irq(&conf->device_lock);
1114 wbi = dev->written;
1115 dev->written = NULL;
1116 while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1117 wbi2 = r5_next_bio(wbi, dev->sector);
1118 if (--wbi->bi_phys_segments == 0) {
1119 md_write_end(conf->mddev);
1120 wbi->bi_next = return_bi;
1121 return_bi = wbi;
1122 }
1123 wbi = wbi2;
1124 }
1125 if (dev->towrite == NULL)
1126 bitmap_end = 1;
1127 spin_unlock_irq(&conf->device_lock);
1128 if (bitmap_end)
1129 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1130 STRIPE_SECTORS,
1131 !test_bit(STRIPE_DEGRADED, &sh->state), 0);
1132 }
1133 }
1134 }
1135
1136 /* Now we might consider reading some blocks, either to check/generate
1137 * parity, or to satisfy requests
1138 * or to load a block that is being partially written.
1139 */
1140 if (to_read || non_overwrite || (syncing && (uptodate < disks))) {
1141 for (i=disks; i--;) {
1142 dev = &sh->dev[i];
1143 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1144 (dev->toread ||
1145 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
1146 syncing ||
1147 (failed && (sh->dev[failed_num].toread ||
1148 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
1149 )
1150 ) {
1151 /* we would like to get this block, possibly
1152 * by computing it, but we might not be able to
1153 */
1154 if (uptodate == disks-1) {
1155 PRINTK("Computing block %d\n", i);
1156 compute_block(sh, i);
1157 uptodate++;
1158 } else if (test_bit(R5_Insync, &dev->flags)) {
1159 set_bit(R5_LOCKED, &dev->flags);
1160 set_bit(R5_Wantread, &dev->flags);
1161#if 0
1162 /* if I am just reading this block and we don't have
1163 a failed drive, or any pending writes then sidestep the cache */
1164 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
1165 ! syncing && !failed && !to_write) {
1166 sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
1167 sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
1168 }
1169#endif
1170 locked++;
1171 PRINTK("Reading block %d (sync=%d)\n",
1172 i, syncing);
1173 if (syncing)
1174 md_sync_acct(conf->disks[i].rdev->bdev,
1175 STRIPE_SECTORS);
1176 }
1177 }
1178 }
1179 set_bit(STRIPE_HANDLE, &sh->state);
1180 }
1181
1182 /* now to consider writing and what else, if anything should be read */
1183 if (to_write) {
1184 int rmw=0, rcw=0;
1185 for (i=disks ; i--;) {
1186 /* would I have to read this buffer for read_modify_write */
1187 dev = &sh->dev[i];
1188 if ((dev->towrite || i == sh->pd_idx) &&
1189 (!test_bit(R5_LOCKED, &dev->flags)
1190#if 0
1191|| sh->bh_page[i]!=bh->b_page
1192#endif
1193 ) &&
1194 !test_bit(R5_UPTODATE, &dev->flags)) {
1195 if (test_bit(R5_Insync, &dev->flags)
1196/* && !(!mddev->insync && i == sh->pd_idx) */
1197 )
1198 rmw++;
1199 else rmw += 2*disks; /* cannot read it */
1200 }
1201 /* Would I have to read this buffer for reconstruct_write */
1202 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1203 (!test_bit(R5_LOCKED, &dev->flags)
1204#if 0
1205|| sh->bh_page[i] != bh->b_page
1206#endif
1207 ) &&
1208 !test_bit(R5_UPTODATE, &dev->flags)) {
1209 if (test_bit(R5_Insync, &dev->flags)) rcw++;
1210 else rcw += 2*disks;
1211 }
1212 }
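/* rmw counts the reads needed for a read-modify-write (old data plus old
 * parity), rcw the reads needed for a reconstruct-write (every block not
 * being overwritten); a device that cannot be read adds 2*disks so that
 * strategy is effectively ruled out. The cheaper of the two is taken below.
 */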
1213 PRINTK("for sector %llu, rmw=%d rcw=%d\n",
1214 (unsigned long long)sh->sector, rmw, rcw);
1215 set_bit(STRIPE_HANDLE, &sh->state);
1216 if (rmw < rcw && rmw > 0)
1217 /* prefer read-modify-write, but need to get some data */
1218 for (i=disks; i--;) {
1219 dev = &sh->dev[i];
1220 if ((dev->towrite || i == sh->pd_idx) &&
1221 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1222 test_bit(R5_Insync, &dev->flags)) {
1223 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1224 {
1225 PRINTK("Read_old block %d for r-m-w\n", i);
1226 set_bit(R5_LOCKED, &dev->flags);
1227 set_bit(R5_Wantread, &dev->flags);
1228 locked++;
1229 } else {
1230 set_bit(STRIPE_DELAYED, &sh->state);
1231 set_bit(STRIPE_HANDLE, &sh->state);
1232 }
1233 }
1234 }
1235 if (rcw <= rmw && rcw > 0)
1236 /* want reconstruct write, but need to get some data */
1237 for (i=disks; i--;) {
1238 dev = &sh->dev[i];
1239 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1240 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1241 test_bit(R5_Insync, &dev->flags)) {
1242 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1243 {
1244 PRINTK("Read_old block %d for Reconstruct\n", i);
1245 set_bit(R5_LOCKED, &dev->flags);
1246 set_bit(R5_Wantread, &dev->flags);
1247 locked++;
1248 } else {
1249 set_bit(STRIPE_DELAYED, &sh->state);
1250 set_bit(STRIPE_HANDLE, &sh->state);
1251 }
1252 }
1253 }
1254 /* now if nothing is locked, and if we have enough data, we can start a write request */
1255 if (locked == 0 && (rcw == 0 ||rmw == 0) &&
1256 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
1257 PRINTK("Computing parity...\n");
1258 compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
1259 /* now every locked buffer is ready to be written */
1260 for (i=disks; i--;)
1261 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
1262 PRINTK("Writing block %d\n", i);
1263 locked++;
1264 set_bit(R5_Wantwrite, &sh->dev[i].flags);
1265 if (!test_bit(R5_Insync, &sh->dev[i].flags)
1266 || (i==sh->pd_idx && failed == 0))
1267 set_bit(STRIPE_INSYNC, &sh->state);
1268 }
1269 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
1270 atomic_dec(&conf->preread_active_stripes);
1271 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
1272 md_wakeup_thread(conf->mddev->thread);
1273 }
1274 }
1275 }
1276
1277 /* maybe we need to check and possibly fix the parity for this stripe
1278 * Any reads will already have been scheduled, so we just see if enough data
1279 * is available
1280 */
1281 if (syncing && locked == 0 &&
1282 !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 1) {
1283 set_bit(STRIPE_HANDLE, &sh->state);
1284 if (failed == 0) {
1285 char *pagea;
1286 if (uptodate != disks)
1287 BUG();
1288 compute_parity(sh, CHECK_PARITY);
1289 uptodate--;
1290 pagea = page_address(sh->dev[sh->pd_idx].page);
1291 if ((*(u32*)pagea) == 0 &&
1292 !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
1293 /* parity is correct (on disc, not in buffer any more) */
1294 set_bit(STRIPE_INSYNC, &sh->state);
1295 } else {
1296 conf->mddev->resync_mismatches += STRIPE_SECTORS;
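/* A 'check' pass (MD_RECOVERY_CHECK, requested through the md sync_action
 * interface) only records the mismatch; marking the stripe in-sync here
 * keeps the repair path below from rewriting parity. A 'repair' or normal
 * resync falls through and corrects the parity block.
 */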
1297 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
1298 /* don't try to repair!! */
1299 set_bit(STRIPE_INSYNC, &sh->state);
1300 }
1301 }
1302 if (!test_bit(STRIPE_INSYNC, &sh->state)) {
1303 if (failed==0)
1304 failed_num = sh->pd_idx;
1305 /* should be able to compute the missing block and write it to spare */
1306 if (!test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)) {
1307 if (uptodate+1 != disks)
1308 BUG();
1309 compute_block(sh, failed_num);
1310 uptodate++;
1311 }
1312 if (uptodate != disks)
1313 BUG();
1314 dev = &sh->dev[failed_num];
1315 set_bit(R5_LOCKED, &dev->flags);
1316 set_bit(R5_Wantwrite, &dev->flags);
1317 clear_bit(STRIPE_DEGRADED, &sh->state);
1318 locked++;
1319 set_bit(STRIPE_INSYNC, &sh->state);
1320 set_bit(R5_Syncio, &dev->flags);
1321 }
1322 }
1323 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
1324 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
1325 clear_bit(STRIPE_SYNCING, &sh->state);
1326 }
1327
1328 /* If the failed drive is just a ReadError, then we might need to progress
1329 * the repair/check process
1330 */
1331 if (failed == 1 && test_bit(R5_ReadError, &sh->dev[failed_num].flags)
1332 && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
1333 && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
1334 ) {
1335 dev = &sh->dev[failed_num];
1336 if (!test_bit(R5_ReWrite, &dev->flags)) {
1337 set_bit(R5_Wantwrite, &dev->flags);
1338 set_bit(R5_ReWrite, &dev->flags);
1339 set_bit(R5_LOCKED, &dev->flags);
1340 } else {
1341 /* let's read it back */
1342 set_bit(R5_Wantread, &dev->flags);
1343 set_bit(R5_LOCKED, &dev->flags);
1344 }
1345 }
1346
1347 spin_unlock(&sh->lock);
1348
1349 while ((bi=return_bi)) {
1350 int bytes = bi->bi_size;
1351
1352 return_bi = bi->bi_next;
1353 bi->bi_next = NULL;
1354 bi->bi_size = 0;
1355 bi->bi_end_io(bi, bytes, 0);
1356 }
1357 for (i=disks; i-- ;) {
1358 int rw;
1359 struct bio *bi;
1360 mdk_rdev_t *rdev;
1361 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
1362 rw = 1;
1363 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
1364 rw = 0;
1365 else
1366 continue;
1367
1368 bi = &sh->dev[i].req;
1369
1370 bi->bi_rw = rw;
1371 if (rw)
1372 bi->bi_end_io = raid5_end_write_request;
1373 else
1374 bi->bi_end_io = raid5_end_read_request;
1375
1376 rcu_read_lock();
1377 rdev = conf->disks[i].rdev;
1378 if (rdev && rdev->faulty)
1379 rdev = NULL;
1380 if (rdev)
1381 atomic_inc(&rdev->nr_pending);
1382 rcu_read_unlock();
1383
1384 if (rdev) {
1385 if (test_bit(R5_Syncio, &sh->dev[i].flags))
1386 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1387
1388 bi->bi_bdev = rdev->bdev;
1389 PRINTK("for %llu schedule op %ld on disc %d\n",
1390 (unsigned long long)sh->sector, bi->bi_rw, i);
1391 atomic_inc(&sh->count);
1392 bi->bi_sector = sh->sector + rdev->data_offset;
1393 bi->bi_flags = 1 << BIO_UPTODATE;
1394 bi->bi_vcnt = 1;
1395 bi->bi_max_vecs = 1;
1396 bi->bi_idx = 0;
1397 bi->bi_io_vec = &sh->dev[i].vec;
1398 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1399 bi->bi_io_vec[0].bv_offset = 0;
1400 bi->bi_size = STRIPE_SIZE;
1401 bi->bi_next = NULL;
1402 generic_make_request(bi);
1403 } else {
1404 if (rw == 1)
1405 set_bit(STRIPE_DEGRADED, &sh->state);
1406 PRINTK("skip op %ld on disc %d for sector %llu\n",
1407 bi->bi_rw, i, (unsigned long long)sh->sector);
1408 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1409 set_bit(STRIPE_HANDLE, &sh->state);
1410 }
1411 }
1412}
1413
1414static inline void raid5_activate_delayed(raid5_conf_t *conf)
1415{
1416 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
1417 while (!list_empty(&conf->delayed_list)) {
1418 struct list_head *l = conf->delayed_list.next;
1419 struct stripe_head *sh;
1420 sh = list_entry(l, struct stripe_head, lru);
1421 list_del_init(l);
1422 clear_bit(STRIPE_DELAYED, &sh->state);
1423 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1424 atomic_inc(&conf->preread_active_stripes);
1425 list_add_tail(&sh->lru, &conf->handle_list);
1426 }
1427 }
1428}
1429
1430static inline void activate_bit_delay(raid5_conf_t *conf)
1431{
1432 /* device_lock is held */
1433 struct list_head head;
1434 list_add(&head, &conf->bitmap_list);
1435 list_del_init(&conf->bitmap_list);
1436 while (!list_empty(&head)) {
1437 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
1438 list_del_init(&sh->lru);
1439 atomic_inc(&sh->count);
1440 __release_stripe(conf, sh);
1441 }
1442}
1443
1444static void unplug_slaves(mddev_t *mddev)
1445{
1446 raid5_conf_t *conf = mddev_to_conf(mddev);
1447 int i;
1448
1449 rcu_read_lock();
1450 for (i=0; i<mddev->raid_disks; i++) {
1451 mdk_rdev_t *rdev = conf->disks[i].rdev;
1452 if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
1453 request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
1454
1455 atomic_inc(&rdev->nr_pending);
1456 rcu_read_unlock();
1457
1458 if (r_queue->unplug_fn)
1459 r_queue->unplug_fn(r_queue);
1460
1461 rdev_dec_pending(rdev, mddev);
1462 rcu_read_lock();
1463 }
1464 }
1465 rcu_read_unlock();
1466}
1467
1468static void raid5_unplug_device(request_queue_t *q)
1469{
1470 mddev_t *mddev = q->queuedata;
1471 raid5_conf_t *conf = mddev_to_conf(mddev);
1472 unsigned long flags;
1473
1474 spin_lock_irqsave(&conf->device_lock, flags);
1475
1476 if (blk_remove_plug(q)) {
1477 conf->seq_flush++;
1478 raid5_activate_delayed(conf);
1479 }
1480 md_wakeup_thread(mddev->thread);
1481
1482 spin_unlock_irqrestore(&conf->device_lock, flags);
1483
1484 unplug_slaves(mddev);
1485}
1486
1487static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
1488 sector_t *error_sector)
1489{
1490 mddev_t *mddev = q->queuedata;
1491 raid5_conf_t *conf = mddev_to_conf(mddev);
1492 int i, ret = 0;
1493
1494 rcu_read_lock();
1495 for (i=0; i<mddev->raid_disks && ret == 0; i++) {
1496 mdk_rdev_t *rdev = conf->disks[i].rdev;
1497 if (rdev && !rdev->faulty) {
1498 struct block_device *bdev = rdev->bdev;
1499 request_queue_t *r_queue = bdev_get_queue(bdev);
1500
1501 if (!r_queue->issue_flush_fn)
1502 ret = -EOPNOTSUPP;
1503 else {
1504 atomic_inc(&rdev->nr_pending);
1505 rcu_read_unlock();
1506 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
1507 error_sector);
1508 rdev_dec_pending(rdev, mddev);
1509 rcu_read_lock();
1510 }
1511 }
1512 }
1513 rcu_read_unlock();
1514 return ret;
1515}
1516
1517static inline void raid5_plug_device(raid5_conf_t *conf)
1518{
1519 spin_lock_irq(&conf->device_lock);
1520 blk_plug_device(conf->mddev->queue);
1521 spin_unlock_irq(&conf->device_lock);
1522}
1523
1524static int make_request (request_queue_t *q, struct bio * bi)
1525{
1526 mddev_t *mddev = q->queuedata;
1527 raid5_conf_t *conf = mddev_to_conf(mddev);
1528 const unsigned int raid_disks = conf->raid_disks;
1529 const unsigned int data_disks = raid_disks - 1;
1530 unsigned int dd_idx, pd_idx;
1531 sector_t new_sector;
1532 sector_t logical_sector, last_sector;
1533 struct stripe_head *sh;
1534 const int rw = bio_data_dir(bi);
1535
1536 if (unlikely(bio_barrier(bi))) {
1537 bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
1538 return 0;
1539 }
1540
1541 md_write_start(mddev, bi);
1542
1543 disk_stat_inc(mddev->gendisk, ios[rw]);
1544 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
1545
1546 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
1547 last_sector = bi->bi_sector + (bi->bi_size>>9);
1548 bi->bi_next = NULL;
1549 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
1550
1551 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
1552 DEFINE_WAIT(w);
1553
1554 new_sector = raid5_compute_sector(logical_sector,
1555 raid_disks, data_disks, &dd_idx, &pd_idx, conf);
1556
1557 PRINTK("raid5: make_request, sector %llu logical %llu\n",
1558 (unsigned long long)new_sector,
1559 (unsigned long long)logical_sector);
1560
1561 retry:
1562 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
1563 sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
1564 if (sh) {
1565 if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
1566 /* Add failed due to overlap. Flush everything
1567 * and wait a while
1568 */
1569 raid5_unplug_device(mddev->queue);
1570 release_stripe(sh);
1571 schedule();
1572 goto retry;
1573 }
1574 finish_wait(&conf->wait_for_overlap, &w);
1575 raid5_plug_device(conf);
1576 handle_stripe(sh);
1577 release_stripe(sh);
1578
1579 } else {
1580 /* cannot get stripe for read-ahead, just give-up */
1581 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1582 finish_wait(&conf->wait_for_overlap, &w);
1583 break;
1584 }
1585
1586 }
1587 spin_lock_irq(&conf->device_lock);
1588 if (--bi->bi_phys_segments == 0) {
1589 int bytes = bi->bi_size;
1590
1591 if ( bio_data_dir(bi) == WRITE )
1592 md_write_end(mddev);
1593 bi->bi_size = 0;
1594 bi->bi_end_io(bi, bytes, 0);
1595 }
1596 spin_unlock_irq(&conf->device_lock);
1597 return 0;
1598}
1599
1600/* FIXME go_faster isn't used */
1601static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1602{
1603 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
1604 struct stripe_head *sh;
1605 int sectors_per_chunk = conf->chunk_size >> 9;
1606 sector_t x;
1607 unsigned long stripe;
1608 int chunk_offset;
1609 int dd_idx, pd_idx;
1610 sector_t first_sector;
1611 int raid_disks = conf->raid_disks;
1612 int data_disks = raid_disks-1;
1613 sector_t max_sector = mddev->size << 1;
1614 int sync_blocks;
1615
1616 if (sector_nr >= max_sector) {
1617 /* just being told to finish up .. nothing much to do */
1618 unplug_slaves(mddev);
1619
1620 if (mddev->curr_resync < max_sector) /* aborted */
1621 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1622 &sync_blocks, 1);
1623 else /* completed sync */
1624 conf->fullsync = 0;
1625 bitmap_close_sync(mddev->bitmap);
1626
1627 return 0;
1628 }
1629 /* if there is 1 or more failed drives and we are trying
1630 * to resync, then assert that we are finished, because there is
1631 * nothing we can do.
1632 */
1633 if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1634 sector_t rv = (mddev->size << 1) - sector_nr;
1635 *skipped = 1;
1636 return rv;
1637 }
1638 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
1639 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
1640 /* we can skip this block, and probably more */
1641 sync_blocks /= STRIPE_SECTORS;
1642 *skipped = 1;
1643 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
1644 }
1645
1646 x = sector_nr;
1647 chunk_offset = sector_div(x, sectors_per_chunk);
1648 stripe = x;
1649 BUG_ON(x != stripe);
1650
1651 first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
1652 + chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
1653 sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
1654 if (sh == NULL) {
1655 sh = get_active_stripe(conf, sector_nr, pd_idx, 0);
1656 /* make sure we don't swamp the stripe cache if someone else
1657 * is trying to get access
1658 */
1659 schedule_timeout_uninterruptible(1);
1660 }
1661 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
1662 spin_lock(&sh->lock);
1663 set_bit(STRIPE_SYNCING, &sh->state);
1664 clear_bit(STRIPE_INSYNC, &sh->state);
1665 spin_unlock(&sh->lock);
1666
1667 handle_stripe(sh);
1668 release_stripe(sh);
1669
1670 return STRIPE_SECTORS;
1671}
1672
1673/*
1674 * This is our raid5 kernel thread.
1675 *
1676 * We scan the hash table for stripes which can be handled now.
1677 * During the scan, completed stripes are saved for us by the interrupt
1678 * handler, so that they will not have to wait for our next wakeup.
1679 */
1680static void raid5d (mddev_t *mddev)
1681{
1682 struct stripe_head *sh;
1683 raid5_conf_t *conf = mddev_to_conf(mddev);
1684 int handled;
1685
1686 PRINTK("+++ raid5d active\n");
1687
1688 md_check_recovery(mddev);
1689
1690 handled = 0;
1691 spin_lock_irq(&conf->device_lock);
1692 while (1) {
1693 struct list_head *first;
1694
1695 if (conf->seq_flush - conf->seq_write > 0) {
1696 int seq = conf->seq_flush;
1697 bitmap_unplug(mddev->bitmap);
1698 conf->seq_write = seq;
1699 activate_bit_delay(conf);
1700 }
1701
1702 if (list_empty(&conf->handle_list) &&
1703 atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
1704 !blk_queue_plugged(mddev->queue) &&
1705 !list_empty(&conf->delayed_list))
1706 raid5_activate_delayed(conf);
1707
1708 if (list_empty(&conf->handle_list))
1709 break;
1710
1711 first = conf->handle_list.next;
1712 sh = list_entry(first, struct stripe_head, lru);
1713
1714 list_del_init(first);
1715 atomic_inc(&sh->count);
1716 if (atomic_read(&sh->count)!= 1)
1717 BUG();
1718 spin_unlock_irq(&conf->device_lock);
1719
1720 handled++;
1721 handle_stripe(sh);
1722 release_stripe(sh);
1723
1724 spin_lock_irq(&conf->device_lock);
1725 }
1726 PRINTK("%d stripes handled\n", handled);
1727
1728 spin_unlock_irq(&conf->device_lock);
1729
1730 unplug_slaves(mddev);
1731
1732 PRINTK("--- raid5d inactive\n");
1733}
1734
1735struct raid5_sysfs_entry {
1736 struct attribute attr;
1737 ssize_t (*show)(raid5_conf_t *, char *);
1738 ssize_t (*store)(raid5_conf_t *, const char *, ssize_t);
1739};
1740
1741static ssize_t
1742raid5_show_stripe_cache_size(raid5_conf_t *conf, char *page)
1743{
1744 return sprintf(page, "%d\n", conf->max_nr_stripes);
1745}
1746
1747static ssize_t
1748raid5_store_stripe_cache_size(raid5_conf_t *conf, const char *page, ssize_t len)
1749{
1750 char *end;
1751 int new;
1752 if (len >= PAGE_SIZE)
1753 return -EINVAL;
1754
1755 new = simple_strtoul(page, &end, 10);
1756 if (!*page || (*end && *end != '\n') )
1757 return -EINVAL;
1758 if (new <= 16 || new > 32768)
1759 return -EINVAL;
1760 while (new < conf->max_nr_stripes) {
1761 if (drop_one_stripe(conf))
1762 conf->max_nr_stripes--;
1763 else
1764 break;
1765 }
1766 while (new > conf->max_nr_stripes) {
1767 if (grow_one_stripe(conf))
1768 conf->max_nr_stripes++;
1769 else break;
1770 }
1771 return len;
1772}
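/* A usage sketch (path assumed from the kobject registered in run() below):
 *
 *	echo 512 > /sys/block/md0/md/raid5/stripe_cache_size
 *
 * grows or shrinks the cache one stripe at a time until max_nr_stripes
 * matches the new value, subject to the bounds checked above.
 */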
1773static struct raid5_sysfs_entry raid5_stripecache_size = {
1774 .attr = {.name = "stripe_cache_size", .mode = S_IRUGO | S_IWUSR },
1775 .show = raid5_show_stripe_cache_size,
1776 .store = raid5_store_stripe_cache_size,
1777};
1778
1779static ssize_t
1780raid5_show_stripe_cache_active(raid5_conf_t *conf, char *page)
1781{
1782 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
1783}
1784
1785static struct raid5_sysfs_entry raid5_stripecache_active = {
1786 .attr = {.name = "stripe_cache_active", .mode = S_IRUGO},
1787 .show = raid5_show_stripe_cache_active,
1788};
1789
1790static struct attribute *raid5_default_attrs[] = {
1791 &raid5_stripecache_size.attr,
1792 &raid5_stripecache_active.attr,
1793 NULL,
1794};
1795
1796static ssize_t
1797raid5_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1798{
1799 struct raid5_sysfs_entry *entry = container_of(attr, struct raid5_sysfs_entry, attr);
1800 raid5_conf_t *conf = container_of(kobj, raid5_conf_t, kobj);
1801
1802 if (!entry->show)
1803 return -EIO;
1804 return entry->show(conf, page);
1805}
1806
1807static ssize_t
1808raid5_attr_store(struct kobject *kobj, struct attribute *attr,
1809 const char *page, size_t length)
1810{
1811 struct raid5_sysfs_entry *entry = container_of(attr, struct raid5_sysfs_entry, attr);
1812 raid5_conf_t *conf = container_of(kobj, raid5_conf_t, kobj);
1813
1814 if (!entry->store)
1815 return -EIO;
1816 return entry->store(conf, page, length);
1817}
1818
1819static void raid5_free(struct kobject *ko)
1820{
1821 raid5_conf_t *conf = container_of(ko, raid5_conf_t, kobj);
1822 kfree(conf);
1823}
1824
1825
1826static struct sysfs_ops raid5_sysfs_ops = {
1827 .show = raid5_attr_show,
1828 .store = raid5_attr_store,
1829};
1830
1831static struct kobj_type raid5_ktype = {
1832 .release = raid5_free,
1833 .sysfs_ops = &raid5_sysfs_ops,
1834 .default_attrs = raid5_default_attrs,
1835};
1836
1837static int run(mddev_t *mddev)
1838{
1839 raid5_conf_t *conf;
1840 int raid_disk, memory;
1841 mdk_rdev_t *rdev;
1842 struct disk_info *disk;
1843 struct list_head *tmp;
1844
1845 if (mddev->level != 5 && mddev->level != 4) {
1846 printk("raid5: %s: raid level not set to 4/5 (%d)\n", mdname(mddev), mddev->level);
1847 return -EIO;
1848 }
1849
1850 mddev->private = kmalloc (sizeof (raid5_conf_t)
1851 + mddev->raid_disks * sizeof(struct disk_info),
1852 GFP_KERNEL);
1853 if ((conf = mddev->private) == NULL)
1854 goto abort;
1855 memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) );
1856 conf->mddev = mddev;
1857
1858 if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL)
1859 goto abort;
1860 memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE);
1861
1862 spin_lock_init(&conf->device_lock);
1863 init_waitqueue_head(&conf->wait_for_stripe);
1864 init_waitqueue_head(&conf->wait_for_overlap);
1865 INIT_LIST_HEAD(&conf->handle_list);
1866 INIT_LIST_HEAD(&conf->delayed_list);
1867 INIT_LIST_HEAD(&conf->bitmap_list);
1868 INIT_LIST_HEAD(&conf->inactive_list);
1869 atomic_set(&conf->active_stripes, 0);
1870 atomic_set(&conf->preread_active_stripes, 0);
1871
1872 PRINTK("raid5: run(%s) called.\n", mdname(mddev));
1873
1874 ITERATE_RDEV(mddev,rdev,tmp) {
1875 raid_disk = rdev->raid_disk;
1876 if (raid_disk >= mddev->raid_disks
1877 || raid_disk < 0)
1878 continue;
1879 disk = conf->disks + raid_disk;
1880
1881 disk->rdev = rdev;
1882
1883 if (rdev->in_sync) {
1884 char b[BDEVNAME_SIZE];
1885 printk(KERN_INFO "raid5: device %s operational as raid"
1886 " disk %d\n", bdevname(rdev->bdev,b),
1887 raid_disk);
1888 conf->working_disks++;
1889 }
1890 }
1891
1892 conf->raid_disks = mddev->raid_disks;
1893 /*
1894 * 0 for a fully functional array, 1 for a degraded array.
1895 */
1896 mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
1897 conf->mddev = mddev;
1898 conf->chunk_size = mddev->chunk_size;
1899 conf->level = mddev->level;
1900 conf->algorithm = mddev->layout;
1901 conf->max_nr_stripes = NR_STRIPES;
1902
1903 /* device size must be a multiple of chunk size */
1904 mddev->size &= ~(mddev->chunk_size/1024 -1);
b1581566 1905 mddev->resync_max_sectors = mddev->size << 1;
1da177e4
LT
1906
1907 if (!conf->chunk_size || conf->chunk_size % 4) {
1908 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
1909 conf->chunk_size, mdname(mddev));
1910 goto abort;
1911 }
1912 if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
1913 printk(KERN_ERR
1914 "raid5: unsupported parity algorithm %d for %s\n",
1915 conf->algorithm, mdname(mddev));
1916 goto abort;
1917 }
1918 if (mddev->degraded > 1) {
1919 printk(KERN_ERR "raid5: not enough operational devices for %s"
1920 " (%d/%d failed)\n",
1921 mdname(mddev), conf->failed_disks, conf->raid_disks);
1922 goto abort;
1923 }
1924
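	/* A degraded array that was not shut down cleanly may have parity
	 * that is inconsistent with the data, so blocks on the missing
	 * device cannot be reconstructed reliably; refuse to start it.
	 */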
1925 if (mddev->degraded == 1 &&
1926 mddev->recovery_cp != MaxSector) {
1927 printk(KERN_ERR
1928 "raid5: cannot start dirty degraded array for %s\n",
1929 mdname(mddev));
1930 goto abort;
1931 }
1932
1933 {
1934 mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
1935 if (!mddev->thread) {
1936 printk(KERN_ERR
1937 "raid5: couldn't allocate thread for %s\n",
1938 mdname(mddev));
1939 goto abort;
1940 }
1941 }
 1942	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
1943 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
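	/* With the default 256 stripes and, say, 4 member disks on 4KiB
	 * pages, the data pages alone come to 256 * 4 * 4KiB = 4MiB; the
	 * figure printed below also counts stripe_head and bio bookkeeping.
	 */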
1944 if (grow_stripes(conf, conf->max_nr_stripes)) {
1945 printk(KERN_ERR
1946 "raid5: couldn't allocate %dkB for buffers\n", memory);
1947 shrink_stripes(conf);
1948 md_unregister_thread(mddev->thread);
1949 goto abort;
1950 } else
1951 printk(KERN_INFO "raid5: allocated %dkB for %s\n",
1952 memory, mdname(mddev));
1953
1954 if (mddev->degraded == 0)
1955 printk("raid5: raid level %d set %s active with %d out of %d"
1956 " devices, algorithm %d\n", conf->level, mdname(mddev),
1957 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
1958 conf->algorithm);
1959 else
1960 printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
1961 " out of %d devices, algorithm %d\n", conf->level,
1962 mdname(mddev), mddev->raid_disks - mddev->degraded,
1963 mddev->raid_disks, conf->algorithm);
1964
1965 print_raid5_conf(conf);
1966
1967 /* read-ahead size must cover two whole stripes, which is
1968 * 2 * (n-1) * chunksize where 'n' is the number of raid devices
1969 */
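	/* For example, with 5 raid disks and a 64KiB chunk on 4KiB pages
	 * this is (5-1) * 64KiB / 4KiB = 64 pages per stripe, so ra_pages
	 * is raised to at least 128 pages (512KiB).
	 */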
1970 {
1971 int stripe = (mddev->raid_disks-1) * mddev->chunk_size
1972 / PAGE_CACHE_SIZE;
1973 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
1974 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
1975 }
1976
1977 /* Ok, everything is just fine now */
3f294f4f
N
1978 conf->kobj.parent = kobject_get(&mddev->kobj);
1979 strcpy(conf->kobj.name, "raid5");
1980 conf->kobj.ktype = &raid5_ktype;
1981 kobject_register(&conf->kobj);
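	/* This makes the attributes defined above visible under the array's
	 * md kobject, presumably as /sys/block/mdX/md/raid5/ (exact path
	 * depends on where mddev->kobj is registered).
	 */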
7a5febe9 1982
72626685
N
1983 if (mddev->bitmap)
1984 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
1985
7a5febe9
N
1986 mddev->queue->unplug_fn = raid5_unplug_device;
1987 mddev->queue->issue_flush_fn = raid5_issue_flush;
1988
1da177e4
LT
1989 mddev->array_size = mddev->size * (mddev->raid_disks - 1);
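	/* One disk's worth of space holds parity, so e.g. four 250GiB
	 * members yield roughly a 750GiB array.
	 */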
1990 return 0;
1991abort:
1992 if (conf) {
1993 print_raid5_conf(conf);
1994 if (conf->stripe_hashtbl)
1995 free_pages((unsigned long) conf->stripe_hashtbl,
1996 HASH_PAGES_ORDER);
1997 kfree(conf);
1998 }
1999 mddev->private = NULL;
2000 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
2001 return -EIO;
2002}
2003
2004
2005
3f294f4f 2006static int stop(mddev_t *mddev)
1da177e4
LT
2007{
2008 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
2009
2010 md_unregister_thread(mddev->thread);
2011 mddev->thread = NULL;
2012 shrink_stripes(conf);
2013 free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER);
2014 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
3f294f4f 2015 kobject_unregister(&conf->kobj);
1da177e4
LT
2016 mddev->private = NULL;
2017 return 0;
2018}
2019
2020#if RAID5_DEBUG
2021static void print_sh (struct stripe_head *sh)
2022{
2023 int i;
2024
2025 printk("sh %llu, pd_idx %d, state %ld.\n",
2026 (unsigned long long)sh->sector, sh->pd_idx, sh->state);
2027 printk("sh %llu, count %d.\n",
2028 (unsigned long long)sh->sector, atomic_read(&sh->count));
2029 printk("sh %llu, ", (unsigned long long)sh->sector);
2030 for (i = 0; i < sh->raid_conf->raid_disks; i++) {
2031 printk("(cache%d: %p %ld) ",
2032 i, sh->dev[i].page, sh->dev[i].flags);
2033 }
2034 printk("\n");
2035}
2036
2037static void printall (raid5_conf_t *conf)
2038{
2039 struct stripe_head *sh;
2040 int i;
2041
2042 spin_lock_irq(&conf->device_lock);
2043 for (i = 0; i < NR_HASH; i++) {
2044 sh = conf->stripe_hashtbl[i];
2045 for (; sh; sh = sh->hash_next) {
2046 if (sh->raid_conf != conf)
2047 continue;
2048 print_sh(sh);
2049 }
2050 }
2051 spin_unlock_irq(&conf->device_lock);
2052}
2053#endif
2054
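/* /proc/mdstat line for the array, e.g.:
 *   " level 5, 64k chunk, algorithm 2 [4/3] [UUU_]"
 * where [raid_disks/working_disks] is followed by one character per slot,
 * 'U' for an in-sync device and '_' for a missing or failed one.
 */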
2055static void status (struct seq_file *seq, mddev_t *mddev)
2056{
2057 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
2058 int i;
2059
2060 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
2061 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
2062 for (i = 0; i < conf->raid_disks; i++)
2063 seq_printf (seq, "%s",
2064 conf->disks[i].rdev &&
2065 conf->disks[i].rdev->in_sync ? "U" : "_");
2066 seq_printf (seq, "]");
2067#if RAID5_DEBUG
2068#define D(x) \
2069 seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x))
2070 printall(conf);
2071#endif
2072}
2073
2074static void print_raid5_conf (raid5_conf_t *conf)
2075{
2076 int i;
2077 struct disk_info *tmp;
2078
2079 printk("RAID5 conf printout:\n");
2080 if (!conf) {
2081 printk("(conf==NULL)\n");
2082 return;
2083 }
2084 printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks,
2085 conf->working_disks, conf->failed_disks);
2086
2087 for (i = 0; i < conf->raid_disks; i++) {
2088 char b[BDEVNAME_SIZE];
2089 tmp = conf->disks + i;
2090 if (tmp->rdev)
2091 printk(" disk %d, o:%d, dev:%s\n",
2092 i, !tmp->rdev->faulty,
2093 bdevname(tmp->rdev->bdev,b));
2094 }
2095}
2096
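/* Called by the md core once recovery has completed: any device that is
 * present, not faulty and not yet marked in_sync has just been rebuilt,
 * so promote it and update the degraded/failed/working counts.
 */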
2097static int raid5_spare_active(mddev_t *mddev)
2098{
2099 int i;
2100 raid5_conf_t *conf = mddev->private;
2101 struct disk_info *tmp;
2102
2103 for (i = 0; i < conf->raid_disks; i++) {
2104 tmp = conf->disks + i;
2105 if (tmp->rdev
2106 && !tmp->rdev->faulty
2107 && !tmp->rdev->in_sync) {
2108 mddev->degraded--;
2109 conf->failed_disks--;
2110 conf->working_disks++;
2111 tmp->rdev->in_sync = 1;
2112 }
2113 }
2114 print_raid5_conf(conf);
2115 return 0;
2116}
2117
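/* Detach the rdev in slot 'number'.  Refuse if it is still in_sync or has
 * I/O pending; after clearing p->rdev, synchronize_rcu() lets any reader
 * that already saw the pointer finish before the final nr_pending re-check.
 */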
2118static int raid5_remove_disk(mddev_t *mddev, int number)
2119{
2120 raid5_conf_t *conf = mddev->private;
2121 int err = 0;
2122 mdk_rdev_t *rdev;
2123 struct disk_info *p = conf->disks + number;
2124
2125 print_raid5_conf(conf);
2126 rdev = p->rdev;
2127 if (rdev) {
2128 if (rdev->in_sync ||
2129 atomic_read(&rdev->nr_pending)) {
2130 err = -EBUSY;
2131 goto abort;
2132 }
2133 p->rdev = NULL;
fbd568a3 2134 synchronize_rcu();
1da177e4
LT
2135 if (atomic_read(&rdev->nr_pending)) {
2136 /* lost the race, try later */
2137 err = -EBUSY;
2138 p->rdev = rdev;
2139 }
2140 }
2141abort:
2142
2143 print_raid5_conf(conf);
2144 return err;
2145}
2146
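/* Slot a spare into the first empty position.  It starts out-of-sync so
 * that recovery will rebuild it, and unless it lands back in the slot it
 * used to occupy (saved_raid_disk) a full resync is requested via
 * conf->fullsync.
 */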
2147static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
2148{
2149 raid5_conf_t *conf = mddev->private;
2150 int found = 0;
2151 int disk;
2152 struct disk_info *p;
2153
2154 if (mddev->degraded > 1)
2155 /* no point adding a device */
2156 return 0;
2157
2158 /*
2159 * find the disk ...
2160 */
2161 for (disk=0; disk < mddev->raid_disks; disk++)
2162 if ((p=conf->disks + disk)->rdev == NULL) {
2163 rdev->in_sync = 0;
2164 rdev->raid_disk = disk;
2165 found = 1;
72626685
N
2166 if (rdev->saved_raid_disk != disk)
2167 conf->fullsync = 1;
1da177e4
LT
2168 p->rdev = rdev;
2169 break;
2170 }
2171 print_raid5_conf(conf);
2172 return found;
2173}
2174
2175static int raid5_resize(mddev_t *mddev, sector_t sectors)
2176{
2177 /* no resync is happening, and there is enough space
2178 * on all devices, so we can resize.
2179 * We need to make sure resync covers any new space.
2180 * If the array is shrinking we should possibly wait until
2181 * any io in the removed space completes, but it hardly seems
2182 * worth it.
2183 */
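	/* Round down to a whole number of chunks: with a 64KiB chunk that
	 * is a multiple of 128 sectors.
	 */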
2184 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
2185 mddev->array_size = (sectors * (mddev->raid_disks-1))>>1;
2186 set_capacity(mddev->gendisk, mddev->array_size << 1);
2187 mddev->changed = 1;
2188 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
2189 mddev->recovery_cp = mddev->size << 1;
2190 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2191 }
2192 mddev->size = sectors /2;
4b5c7ae8 2193 mddev->resync_max_sectors = sectors;
1da177e4
LT
2194 return 0;
2195}
2196
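/* Quiesce writes: state 1 stops all writes by setting conf->quiesce and
 * waits for the count of active stripes to drain to zero; state 0 clears
 * it and wakes the waiters.  The raid5d wakeup timeout is also reset to
 * track the bitmap daemon interval, if a bitmap is present.
 */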
72626685
N
2197static void raid5_quiesce(mddev_t *mddev, int state)
2198{
2199 raid5_conf_t *conf = mddev_to_conf(mddev);
2200
2201 switch(state) {
2202 case 1: /* stop all writes */
2203 spin_lock_irq(&conf->device_lock);
2204 conf->quiesce = 1;
2205 wait_event_lock_irq(conf->wait_for_stripe,
2206 atomic_read(&conf->active_stripes) == 0,
2207 conf->device_lock, /* nothing */);
2208 spin_unlock_irq(&conf->device_lock);
2209 break;
2210
2211 case 0: /* re-enable writes */
2212 spin_lock_irq(&conf->device_lock);
2213 conf->quiesce = 0;
2214 wake_up(&conf->wait_for_stripe);
2215 spin_unlock_irq(&conf->device_lock);
2216 break;
2217 }
2218 if (mddev->thread) {
2219 if (mddev->bitmap)
2220 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
2221 else
2222 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
2223 md_wakeup_thread(mddev->thread);
2224 }
2225}
1da177e4
LT
2226static mdk_personality_t raid5_personality=
2227{
2228 .name = "raid5",
2229 .owner = THIS_MODULE,
2230 .make_request = make_request,
2231 .run = run,
2232 .stop = stop,
2233 .status = status,
2234 .error_handler = error,
2235 .hot_add_disk = raid5_add_disk,
2236 .hot_remove_disk= raid5_remove_disk,
2237 .spare_active = raid5_spare_active,
2238 .sync_request = sync_request,
2239 .resize = raid5_resize,
72626685 2240 .quiesce = raid5_quiesce,
1da177e4
LT
2241};
2242
2243static int __init raid5_init (void)
2244{
2245 return register_md_personality (RAID5, &raid5_personality);
2246}
2247
2248static void raid5_exit (void)
2249{
2250 unregister_md_personality (RAID5);
2251}
2252
2253module_init(raid5_init);
2254module_exit(raid5_exit);
2255MODULE_LICENSE("GPL");
2256MODULE_ALIAS("md-personality-4"); /* RAID5 */