[PATCH] md: Split disks array out of raid5 conf structure so it is easier to grow
drivers/md/raid5.c
/*
 * raid5.c : Multiple Devices driver for Linux
 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 * Copyright (C) 1999, 2000 Ingo Molnar
 *
 * RAID-5 management functions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/raid/raid5.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <asm/atomic.h>

#include <linux/raid/bitmap.h>

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap. There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
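
/*
 * To see the traversal rule with concrete numbers, here is a minimal
 * user-space sketch (illustration only, guarded out of the build:
 * struct bio is pared down to the three fields the macro reads, and
 * STRIPE_SECTORS is fixed at 8, i.e. a 4 KiB stripe of 512-byte
 * sectors). A bio whose end reaches past this stripe+device stops
 * the walk; everything before that chains through bi_next.
 */
#if 0
#include <stdio.h>

#define EX_STRIPE_SECTORS 8

struct ex_bio {
	unsigned long long bi_sector;		/* start sector */
	unsigned int bi_size;			/* length in bytes */
	struct ex_bio *bi_next;
};

#define ex_next_bio(bio, sect) \
	(((bio)->bi_sector + ((bio)->bi_size >> 9) < (sect) + EX_STRIPE_SECTORS) \
	 ? (bio)->bi_next : NULL)

int main(void)
{
	struct ex_bio b2 = { 12, 2 * 512, NULL };	/* sectors 12..13 */
	struct ex_bio b1 = {  8, 4 * 512, &b2 };	/* sectors  8..11 */
	struct ex_bio *bi;
	unsigned long long dev_sector = 8;	/* this stripe+device starts here */

	for (bi = &b1; bi; bi = ex_next_bio(bi, dev_sector))
		printf("bio at sector %llu, %u bytes\n", bi->bi_sector, bi->bi_size);
	return 0;
}
#endif
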
/*
 * The following can be used to debug the driver
 */
#define RAID5_DEBUG	0
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
#if RAID5_DEBUG
#define inline
#define __inline__
#endif

static void print_raid5_conf (raid5_conf_t *conf);

static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		if (!list_empty(&sh->lru))
			BUG();
		if (atomic_read(&conf->active_stripes)==0)
			BUG();
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state))
				list_add_tail(&sh->lru, &conf->delayed_list);
			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				 conf->seq_write == sh->bm_seq)
				list_add_tail(&sh->lru, &conf->bitmap_list);
			else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			list_add_tail(&sh->lru, &conf->inactive_list);
			atomic_dec(&conf->active_stripes);
			if (!conf->inactive_blocked ||
			    atomic_read(&conf->active_stripes) < (conf->max_nr_stripes*3/4))
				wake_up(&conf->wait_for_stripe);
		}
	}
}
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}


/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}

static void shrink_buffers(struct stripe_head *sh, int num)
{
	struct page *p;
	int i;

	for (i=0; i<num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh, int num)
{
	int i;

	for (i=0; i<num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}

static void raid5_build_block (struct stripe_head *sh, int i);

static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks, i;

	if (atomic_read(&sh->count) != 0)
		BUG();
	if (test_bit(STRIPE_HANDLE, &sh->state))
		BUG();

	CHECK_DEVLOCK();
	PRINTK("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->sector = sector;
	sh->pd_idx = pd_idx;
	sh->state = 0;

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk("sector=%llx i=%d %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i);
	}
	insert_hash(conf, sh);
}

static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector)
			return sh;
	PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(request_queue_t *q);

static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector,
					     int pd_idx, int noblock)
{
	struct stripe_head *sh;

	PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    unplug_slaves(conf->mddev);
					);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, pd_idx);
		} else {
			if (atomic_read(&sh->count)) {
				if (!list_empty(&sh->lru))
					BUG();
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}

static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(raid5_conf_t *conf, int num)
{
	kmem_cache_t *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name, "raid5/%s", mdname(conf->mddev));

	sc = kmem_cache_create(conf->cache_name,
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	while (num--) {
		if (!grow_one_stripe(conf))
			return 1;
	}
	return 0;
}

static int drop_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	if (atomic_read(&sh->count))
		BUG();
	shrink_buffers(sh, conf->raid_disks);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(raid5_conf_t *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}

static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
				  int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	if (bi->bi_size)
		return 1;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return 0;
	}

	if (uptodate) {
#if 0
		struct bio *bio;
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		/* we can return a buffer if we bypassed the cache or
		 * if the top buffer is not in highmem. If there are
		 * multiple buffers, leave the extra work to
		 * handle_stripe
		 */
		buffer = sh->bh_read[i];
		if (buffer &&
		    (!PageHighMem(buffer->b_page)
		     || buffer->b_page == bh->b_page )
			) {
			sh->bh_read[i] = buffer->b_reqnext;
			buffer->b_reqnext = NULL;
		} else
			buffer = NULL;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		if (sh->bh_page[i]==bh->b_page)
			set_buffer_uptodate(bh);
		if (buffer) {
			if (buffer->b_page != bh->b_page)
				memcpy(buffer->b_data, bh->b_data, bh->b_size);
			buffer->b_end_io(buffer, 1);
		}
#else
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
#endif
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			printk(KERN_INFO "raid5: read error corrected!!\n");
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		int retry = 0;
		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&conf->disks[i].rdev->read_errors);
		if (conf->mddev->degraded)
			printk(KERN_WARNING "raid5: read error not correctable.\n");
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			/* Oh, no!!! */
			printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
		else if (atomic_read(&conf->disks[i].rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "raid5: Too many read errors, failing device.\n");
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, conf->disks[i].rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
#if 0
	/* must restore b_page before unlocking buffer... */
	if (sh->bh_page[i] != bh->b_page) {
		bh->b_page = sh->bh_page[i];
		bh->b_data = page_address(bh->b_page);
		clear_buffer_uptodate(bh);
	}
#endif
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
	return 0;
}

static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
				    int error)
{
	struct stripe_head *sh = bi->bi_private;
	raid5_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks, i;
	unsigned long flags;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

	if (bi->bi_size)
		return 1;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return 0;
	}

	spin_lock_irqsave(&conf->device_lock, flags);
	if (!uptodate)
		md_error(conf->mddev, conf->disks[i].rdev);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	return 0;
}


static sector_t compute_blocknr(struct stripe_head *sh, int i);

static void raid5_build_block (struct stripe_head *sh, int i)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_vcnt++;
	dev->req.bi_max_vecs++;
	dev->vec.bv_page = dev->page;
	dev->vec.bv_len = STRIPE_SIZE;
	dev->vec.bv_offset = 0;

	dev->req.bi_sector = sh->sector;
	dev->req.bi_private = sh;

	dev->flags = 0;
	if (i != sh->pd_idx)
		dev->sector = compute_blocknr(sh, i);
}

static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	PRINTK("raid5: error called\n");

	if (!test_bit(Faulty, &rdev->flags)) {
		mddev->sb_dirty = 1;
		if (test_bit(In_sync, &rdev->flags)) {
			conf->working_disks--;
			mddev->degraded++;
			conf->failed_disks++;
			clear_bit(In_sync, &rdev->flags);
			/*
			 * if recovery was running, make sure it aborts.
			 */
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
		}
		set_bit(Faulty, &rdev->flags);
		printk (KERN_ALERT
			"raid5: Disk failure on %s, disabling device."
			" Operation continuing on %d devices\n",
			bdevname(rdev->bdev,b), conf->working_disks);
	}
}

/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
			unsigned int data_disks, unsigned int * dd_idx,
			unsigned int * pd_idx, raid5_conf_t *conf)
{
	long stripe;
	unsigned long chunk_number;
	unsigned int chunk_offset;
	sector_t new_sector;
	int sectors_per_chunk = conf->chunk_size >> 9;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;
	BUG_ON(r_sector != chunk_number);

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number / data_disks;

	/*
	 * Compute the data disk and parity disk indexes inside the stripe
	 */
	*dd_idx = chunk_number % data_disks;

	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	if (conf->level == 4)
		*pd_idx = data_disks;
	else switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			*pd_idx = data_disks - stripe % raid_disks;
			if (*dd_idx >= *pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			*pd_idx = stripe % raid_disks;
			if (*dd_idx >= *pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			*pd_idx = data_disks - stripe % raid_disks;
			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			*pd_idx = stripe % raid_disks;
			*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
				conf->algorithm);
	}

	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
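
/*
 * As a cross-check on the mapping above, a user-space restatement of
 * the ALGORITHM_LEFT_SYMMETRIC case (illustration only, guarded out of
 * the build; the 5-disk array, 64 KiB chunk and sample sector are
 * assumptions for the demo). For r_sector == 1000 it prints data disk
 * 2, parity disk 3, device sector 232.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int raid_disks = 5, data_disks = 4;
	unsigned int sectors_per_chunk = 128;		/* 64 KiB / 512 */
	unsigned long long r_sector = 1000;

	unsigned int chunk_offset = r_sector % sectors_per_chunk;
	unsigned long long chunk_number = r_sector / sectors_per_chunk;
	unsigned long long stripe = chunk_number / data_disks;
	unsigned int dd_idx = chunk_number % data_disks;

	/* ALGORITHM_LEFT_SYMMETRIC: parity rotates right-to-left and
	 * data starts just after the parity disk */
	unsigned int pd_idx = data_disks - stripe % raid_disks;
	dd_idx = (pd_idx + 1 + dd_idx) % raid_disks;

	printf("data disk %u, parity disk %u, device sector %llu\n",
	       dd_idx, pd_idx, stripe * sectors_per_chunk + chunk_offset);
	return 0;
}
#endif
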
static sector_t compute_blocknr(struct stripe_head *sh, int i)
{
	raid5_conf_t *conf = sh->raid_conf;
	int raid_disks = conf->raid_disks, data_disks = raid_disks - 1;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t stripe;
	int chunk_offset;
	int chunk_number, dummy1, dummy2, dd_idx = i;
	sector_t r_sector;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;
	BUG_ON(new_sector != stripe);


	switch (conf->algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		default:
			printk(KERN_ERR "raid5: unsupported algorithm %d\n",
				conf->algorithm);
	}

	chunk_number = stripe * data_disks + i;
	r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
	if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
		printk(KERN_ERR "compute_blocknr: map not correct\n");
		return 0;
	}
	return r_sector;
}

/*
 * Copy data between a page in the stripe cache, and a bio.
 * There are no alignment or size guarantees between the page or the
 * bio except that there is some overlap.
 * All iovecs in the bio must be considered.
 */
static void copy_data(int frombio, struct bio *bio,
		      struct page *page,
		      sector_t sector)
{
	char *pa = page_address(page);
	struct bio_vec *bvl;
	int i;
	int page_offset;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;
	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio,i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
			if (frombio)
				memcpy(pa+page_offset, ba+b_offset, clen);
			else
				memcpy(ba+b_offset, pa+page_offset, clen);
			__bio_kunmap_atomic(ba, KM_USER0);
		}
		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}
}
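
/*
 * A standalone check of the clipping arithmetic above (illustration
 * only, guarded out of the build): a bio starting two sectors before
 * the cache page, one 8 KiB segment, STRIPE_SIZE assumed 4096. The
 * first 1024 bytes of the segment are skipped (b_offset) and the copy
 * is clipped to the 4096 bytes that land inside the page.
 */
#if 0
#include <stdio.h>

int main(void)
{
	long long bi_sector = 6, sector = 8;	/* bio starts 2 sectors early */
	int len = 8192;				/* one 8 KiB iovec */
	int stripe_size = 4096;
	int page_offset, b_offset = 0, clen;

	if (bi_sector >= sector)
		page_offset = (bi_sector - sector) * 512;
	else
		page_offset = (sector - bi_sector) * -512;

	if (page_offset < 0) {			/* skip the part before the page */
		b_offset = -page_offset;
		page_offset += b_offset;
		len -= b_offset;
	}
	if (len > 0 && page_offset + len > stripe_size)
		clen = stripe_size - page_offset;	/* clip at end of page */
	else
		clen = len;

	printf("b_offset=%d page_offset=%d clen=%d\n", b_offset, page_offset, clen);
	return 0;
}
#endif
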
#define check_xor() do {					\
		if (count == MAX_XOR_BLOCKS) {			\
			xor_block(count, STRIPE_SIZE, ptr);	\
			count = 1;				\
		}						\
	} while(0)


static void compute_block(struct stripe_head *sh, int dd_idx)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, count, disks = conf->raid_disks;
	void *ptr[MAX_XOR_BLOCKS], *p;

	PRINTK("compute_block, stripe %llu, idx %d\n",
		(unsigned long long)sh->sector, dd_idx);

	ptr[0] = page_address(sh->dev[dd_idx].page);
	memset(ptr[0], 0, STRIPE_SIZE);
	count = 1;
	for (i = disks ; i--; ) {
		if (i == dd_idx)
			continue;
		p = page_address(sh->dev[i].page);
		if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
			ptr[count++] = p;
		else
			printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
				" not present\n", dd_idx,
				(unsigned long long)sh->sector, i);

		check_xor();
	}
	if (count != 1)
		xor_block(count, STRIPE_SIZE, ptr);
	set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
}
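
/*
 * compute_block() leans on the RAID-5 invariant that XOR-ing the
 * parity block with every surviving data block regenerates the missing
 * one. A minimal sketch of that identity (illustration only, guarded
 * out of the build; xor_block() is replaced by a plain byte loop):
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char d0[8] = "raid5ok", d1[8] = "blocks!", parity[8], out[8];
	int j;

	for (j = 0; j < 8; j++)			/* parity = d0 ^ d1 */
		parity[j] = d0[j] ^ d1[j];

	memset(out, 0, 8);			/* lose d1, rebuild it */
	for (j = 0; j < 8; j++)
		out[j] = d0[j] ^ parity[j];	/* d0 ^ (d0 ^ d1) == d1 */

	printf("recovered: %s\n", out);		/* prints "blocks!" */
	return 0;
}
#endif
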
static void compute_parity(struct stripe_head *sh, int method)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, pd_idx = sh->pd_idx, disks = conf->raid_disks, count;
	void *ptr[MAX_XOR_BLOCKS];
	struct bio *chosen;

	PRINTK("compute_parity, stripe %llu, method %d\n",
		(unsigned long long)sh->sector, method);

	count = 1;
	ptr[0] = page_address(sh->dev[pd_idx].page);
	switch(method) {
	case READ_MODIFY_WRITE:
		if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
			BUG();
		for (i=disks ; i-- ;) {
			if (i==pd_idx)
				continue;
			if (sh->dev[i].towrite &&
			    test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
				ptr[count++] = page_address(sh->dev[i].page);
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				if (sh->dev[i].written) BUG();
				sh->dev[i].written = chosen;
				check_xor();
			}
		}
		break;
	case RECONSTRUCT_WRITE:
		memset(ptr[0], 0, STRIPE_SIZE);
		for (i= disks; i-- ;)
			if (i!=pd_idx && sh->dev[i].towrite) {
				chosen = sh->dev[i].towrite;
				sh->dev[i].towrite = NULL;

				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);

				if (sh->dev[i].written) BUG();
				sh->dev[i].written = chosen;
			}
		break;
	case CHECK_PARITY:
		break;
	}
	if (count>1) {
		xor_block(count, STRIPE_SIZE, ptr);
		count = 1;
	}

	for (i = disks; i--;)
		if (sh->dev[i].written) {
			sector_t sector = sh->dev[i].sector;
			struct bio *wbi = sh->dev[i].written;
			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
				copy_data(1, wbi, sh->dev[i].page, sector);
				wbi = r5_next_bio(wbi, sector);
			}

			set_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
		}

	switch(method) {
	case RECONSTRUCT_WRITE:
	case CHECK_PARITY:
		for (i=disks; i--;)
			if (i != pd_idx) {
				ptr[count++] = page_address(sh->dev[i].page);
				check_xor();
			}
		break;
	case READ_MODIFY_WRITE:
		for (i = disks; i--;)
			if (sh->dev[i].written) {
				ptr[count++] = page_address(sh->dev[i].page);
				check_xor();
			}
	}
	if (count != 1)
		xor_block(count, STRIPE_SIZE, ptr);

	if (method != CHECK_PARITY) {
		set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
		set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	} else
		clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
}

/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	raid5_conf_t *conf = sh->raid_conf;
	int firstwrite=0;

	PRINTK("adding bh b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);


	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = & (*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	if (*bip && bi->bi_next && (*bip) != bi->bi_next)
		BUG();
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments ++;
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);

	PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		sh->bm_seq = conf->seq_write;
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi=sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
	return 0;
}
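
/*
 * Structurally the code above is a sorted singly-linked-list insert
 * that rejects overlapping intervals. The same pointer walk on a
 * reduced structure (illustration only, guarded out of the build;
 * [start, end) sector pairs stand in for bios):
 */
#if 0
#include <stdio.h>
#include <stddef.h>

struct ex_req {
	unsigned long long start, end;		/* [start, end) in sectors */
	struct ex_req *next;
};

static int ex_insert_sorted(struct ex_req **head, struct ex_req *r)
{
	struct ex_req **bip = head;

	while (*bip && (*bip)->start < r->start) {
		if ((*bip)->end > r->start)
			return 0;		/* overlaps a predecessor */
		bip = &(*bip)->next;
	}
	if (*bip && (*bip)->start < r->end)
		return 0;			/* overlaps the successor */
	r->next = *bip;
	*bip = r;
	return 1;
}

int main(void)
{
	struct ex_req a = { 0, 4, NULL }, b = { 8, 12, NULL }, c = { 2, 6, NULL };
	struct ex_req *head = NULL;

	printf("%d %d %d\n", ex_insert_sorted(&head, &a),
	       ex_insert_sorted(&head, &b),
	       ex_insert_sorted(&head, &c));	/* prints "1 1 0" */
	return 0;
}
#endif
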
/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read request which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * Parity calculations are done inside the stripe lock
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 *
 */

static void handle_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	int disks = conf->raid_disks;
	struct bio *return_bi= NULL;
	struct bio *bi;
	int i;
	int syncing;
	int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
	int non_overwrite = 0;
	int failed_num=0;
	struct r5dev *dev;

	PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
		(unsigned long long)sh->sector, atomic_read(&sh->count),
		sh->pd_idx);

	spin_lock(&sh->lock);
	clear_bit(STRIPE_HANDLE, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);

	syncing = test_bit(STRIPE_SYNCING, &sh->state);
	/* Now to look around and see what can be done */

	rcu_read_lock();
	for (i=disks; i--; ) {
		mdk_rdev_t *rdev;
		dev = &sh->dev[i];
		clear_bit(R5_Insync, &dev->flags);

		PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
			i, dev->flags, dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
			struct bio *rbi, *rbi2;
			PRINTK("Return read for disc %d\n", i);
			spin_lock_irq(&conf->device_lock);
			rbi = dev->toread;
			dev->toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&conf->wait_for_overlap);
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
				copy_data(0, rbi, dev->page, dev->sector);
				rbi2 = r5_next_bio(rbi, dev->sector);
				spin_lock_irq(&conf->device_lock);
				if (--rbi->bi_phys_segments == 0) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				spin_unlock_irq(&conf->device_lock);
				rbi = rbi2;
			}
		}

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags)) locked++;
		if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;


		if (dev->toread) to_read++;
		if (dev->towrite) {
			to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				non_overwrite++;
		}
		if (dev->written) written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || !test_bit(In_sync, &rdev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (!rdev || !test_bit(In_sync, &rdev->flags)
		    || test_bit(R5_ReadError, &dev->flags)) {
			failed++;
			failed_num = i;
		} else
			set_bit(R5_Insync, &dev->flags);
	}
	rcu_read_unlock();
	PRINTK("locked=%d uptodate=%d to_read=%d"
		" to_write=%d failed=%d failed_num=%d\n",
		locked, uptodate, to_read, to_write, failed, failed_num);
	/* check if the array has lost two devices and, if so, some requests might
	 * need to be failed
	 */
	if (failed > 1 && to_read+to_write+written) {
		for (i=disks; i--; ) {
			int bitmap_end = 0;

			if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
				mdk_rdev_t *rdev;
				rcu_read_lock();
				rdev = rcu_dereference(conf->disks[i].rdev);
				if (rdev && test_bit(In_sync, &rdev->flags))
					/* multiple read failures in one stripe */
					md_error(conf->mddev, rdev);
				rcu_read_unlock();
			}

			spin_lock_irq(&conf->device_lock);
			/* fail all writes first */
			bi = sh->dev[i].towrite;
			sh->dev[i].towrite = NULL;
			if (bi) { to_write--; bitmap_end = 1; }

			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);

			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (--bi->bi_phys_segments == 0) {
					md_write_end(conf->mddev);
					bi->bi_next = return_bi;
					return_bi = bi;
				}
				bi = nextbi;
			}
			/* and fail all 'written' */
			bi = sh->dev[i].written;
			sh->dev[i].written = NULL;
			if (bi) bitmap_end = 1;
			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (--bi->bi_phys_segments == 0) {
					md_write_end(conf->mddev);
					bi->bi_next = return_bi;
					return_bi = bi;
				}
				bi = bi2;
			}

			/* fail any reads if this device is non-operational */
			if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
			    test_bit(R5_ReadError, &sh->dev[i].flags)) {
				bi = sh->dev[i].toread;
				sh->dev[i].toread = NULL;
				if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
					wake_up(&conf->wait_for_overlap);
				if (bi) to_read--;
				while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
					struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
					clear_bit(BIO_UPTODATE, &bi->bi_flags);
					if (--bi->bi_phys_segments == 0) {
						bi->bi_next = return_bi;
						return_bi = bi;
					}
					bi = nextbi;
				}
			}
			spin_unlock_irq(&conf->device_lock);
			if (bitmap_end)
				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
						STRIPE_SECTORS, 0, 0);
		}
	}
	if (failed > 1 && syncing) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,0);
		clear_bit(STRIPE_SYNCING, &sh->state);
		syncing = 0;
	}

	/* might be able to return some write requests if the parity block
	 * is safe, or on a failed drive
	 */
	dev = &sh->dev[sh->pd_idx];
	if ( written &&
	     ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) &&
		test_bit(R5_UPTODATE, &dev->flags))
	       || (failed == 1 && failed_num == sh->pd_idx))
	    ) {
		/* any written block on an uptodate or failed drive can be returned.
		 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
		 * never LOCKED, so we don't need to test 'failed' directly.
		 */
		for (i=disks; i--; )
			if (sh->dev[i].written) {
				dev = &sh->dev[i];
				if (!test_bit(R5_LOCKED, &dev->flags) &&
				    test_bit(R5_UPTODATE, &dev->flags) ) {
					/* We can return any write requests */
					struct bio *wbi, *wbi2;
					int bitmap_end = 0;
					PRINTK("Return write for disc %d\n", i);
					spin_lock_irq(&conf->device_lock);
					wbi = dev->written;
					dev->written = NULL;
					while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
						wbi2 = r5_next_bio(wbi, dev->sector);
						if (--wbi->bi_phys_segments == 0) {
							md_write_end(conf->mddev);
							wbi->bi_next = return_bi;
							return_bi = wbi;
						}
						wbi = wbi2;
					}
					if (dev->towrite == NULL)
						bitmap_end = 1;
					spin_unlock_irq(&conf->device_lock);
					if (bitmap_end)
						bitmap_endwrite(conf->mddev->bitmap, sh->sector,
								STRIPE_SECTORS,
								!test_bit(STRIPE_DEGRADED, &sh->state), 0);
				}
			}
	}

	/* Now we might consider reading some blocks, either to check/generate
	 * parity, or to satisfy requests
	 * or to load a block that is being partially written.
	 */
	if (to_read || non_overwrite || (syncing && (uptodate < disks))) {
		for (i=disks; i--;) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
			    (dev->toread ||
			     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
			     syncing ||
			     (failed && (sh->dev[failed_num].toread ||
					 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
			     )
				) {
				/* we would like to get this block, possibly
				 * by computing it, but we might not be able to
				 */
				if (uptodate == disks-1) {
					PRINTK("Computing block %d\n", i);
					compute_block(sh, i);
					uptodate++;
				} else if (test_bit(R5_Insync, &dev->flags)) {
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
#if 0
					/* if I am just reading this block and we don't have
					   a failed drive, or any pending writes then sidestep the cache */
					if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
					    ! syncing && !failed && !to_write) {
						sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
						sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
					}
#endif
					locked++;
					PRINTK("Reading block %d (sync=%d)\n",
						i, syncing);
				}
			}
		}
		set_bit(STRIPE_HANDLE, &sh->state);
	}

	/* now to consider writing and what else, if anything should be read */
	if (to_write) {
		int rmw=0, rcw=0;
		for (i=disks ; i--;) {
			/* would I have to read this buffer for read_modify_write */
			dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    (!test_bit(R5_LOCKED, &dev->flags)
#if 0
|| sh->bh_page[i]!=bh->b_page
#endif
			     ) &&
			    !test_bit(R5_UPTODATE, &dev->flags)) {
				if (test_bit(R5_Insync, &dev->flags)
/*				    && !(!mddev->insync && i == sh->pd_idx) */
					)
					rmw++;
				else rmw += 2*disks;  /* cannot read it */
			}
			/* Would I have to read this buffer for reconstruct_write */
			if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
			    (!test_bit(R5_LOCKED, &dev->flags)
#if 0
|| sh->bh_page[i] != bh->b_page
#endif
			     ) &&
			    !test_bit(R5_UPTODATE, &dev->flags)) {
				if (test_bit(R5_Insync, &dev->flags)) rcw++;
				else rcw += 2*disks;
			}
		}
		PRINTK("for sector %llu, rmw=%d rcw=%d\n",
			(unsigned long long)sh->sector, rmw, rcw);
		set_bit(STRIPE_HANDLE, &sh->state);
		if (rmw < rcw && rmw > 0)
			/* prefer read-modify-write, but need to get some data */
			for (i=disks; i--;) {
				dev = &sh->dev[i];
				if ((dev->towrite || i == sh->pd_idx) &&
				    !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
				    test_bit(R5_Insync, &dev->flags)) {
					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
					{
						PRINTK("Read_old block %d for r-m-w\n", i);
						set_bit(R5_LOCKED, &dev->flags);
						set_bit(R5_Wantread, &dev->flags);
						locked++;
					} else {
						set_bit(STRIPE_DELAYED, &sh->state);
						set_bit(STRIPE_HANDLE, &sh->state);
					}
				}
			}
		if (rcw <= rmw && rcw > 0)
			/* want reconstruct write, but need to get some data */
			for (i=disks; i--;) {
				dev = &sh->dev[i];
				if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
				    !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
				    test_bit(R5_Insync, &dev->flags)) {
					if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
					{
						PRINTK("Read_old block %d for Reconstruct\n", i);
						set_bit(R5_LOCKED, &dev->flags);
						set_bit(R5_Wantread, &dev->flags);
						locked++;
					} else {
						set_bit(STRIPE_DELAYED, &sh->state);
						set_bit(STRIPE_HANDLE, &sh->state);
					}
				}
			}
		/* now if nothing is locked, and if we have enough data, we can start a write request */
		if (locked == 0 && (rcw == 0 ||rmw == 0) &&
		    !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
			PRINTK("Computing parity...\n");
			compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
			/* now every locked buffer is ready to be written */
			for (i=disks; i--;)
				if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
					PRINTK("Writing block %d\n", i);
					locked++;
					set_bit(R5_Wantwrite, &sh->dev[i].flags);
					if (!test_bit(R5_Insync, &sh->dev[i].flags)
					    || (i==sh->pd_idx && failed == 0))
						set_bit(STRIPE_INSYNC, &sh->state);
				}
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough data
	 * is available
	 */
	if (syncing && locked == 0 &&
	    !test_bit(STRIPE_INSYNC, &sh->state)) {
		set_bit(STRIPE_HANDLE, &sh->state);
		if (failed == 0) {
			char *pagea;
			if (uptodate != disks)
				BUG();
			compute_parity(sh, CHECK_PARITY);
			uptodate--;
			pagea = page_address(sh->dev[sh->pd_idx].page);
			if ((*(u32*)pagea) == 0 &&
			    !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
				/* parity is correct (on disc, not in buffer any more) */
				set_bit(STRIPE_INSYNC, &sh->state);
			} else {
				conf->mddev->resync_mismatches += STRIPE_SECTORS;
				if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
					/* don't try to repair!! */
					set_bit(STRIPE_INSYNC, &sh->state);
				else {
					compute_block(sh, sh->pd_idx);
					uptodate++;
				}
			}
		}
		if (!test_bit(STRIPE_INSYNC, &sh->state)) {
			/* either failed parity check, or recovery is happening */
			if (failed==0)
				failed_num = sh->pd_idx;
			dev = &sh->dev[failed_num];
			BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
			BUG_ON(uptodate != disks);

			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
			clear_bit(STRIPE_DEGRADED, &sh->state);
			locked++;
			set_bit(STRIPE_INSYNC, &sh->state);
		}
	}
	if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS,1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drive is just a ReadError, then we might need to progress
	 * the repair/check process
	 */
	if (failed == 1 && ! conf->mddev->ro &&
	    test_bit(R5_ReadError, &sh->dev[failed_num].flags)
	    && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
	    && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
		) {
		dev = &sh->dev[failed_num];
		if (!test_bit(R5_ReWrite, &dev->flags)) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_ReWrite, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
		} else {
			/* let's read it back */
			set_bit(R5_Wantread, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
		}
	}

	spin_unlock(&sh->lock);

	while ((bi=return_bi)) {
		int bytes = bi->bi_size;

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bi->bi_end_io(bi, bytes, 0);
	}
	for (i=disks; i-- ;) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = 1;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = 0;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (syncing)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			bi->bi_bdev = rdev->bdev;
			PRINTK("for %llu schedule op %ld on disc %d\n",
				(unsigned long long)sh->sector, bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rw == WRITE &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw == 1)
				set_bit(STRIPE_DEGRADED, &sh->state);
			PRINTK("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}
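
/*
 * The rmw/rcw weighing in handle_stripe() picks whichever write method
 * needs fewer pre-reads. A back-of-the-envelope count (illustration
 * only, guarded out of the build; one fully-overwritten data block on
 * an otherwise cold 5-disk stripe): read-modify-write needs the old
 * data block plus the old parity, reconstruct-write needs the three
 * untouched data blocks, so rmw wins here.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int disks = 5, pd_idx = 4, write_idx = 0;
	int rmw = 0, rcw = 0, i;

	for (i = 0; i < disks; i++) {
		if (i == write_idx || i == pd_idx)
			rmw++;		/* old data and old parity to read */
		if (i != write_idx && i != pd_idx)
			rcw++;		/* every block we are not writing */
	}
	printf("rmw=%d rcw=%d -> %s\n", rmw, rcw,
	       rmw < rcw ? "read-modify-write" : "reconstruct-write");
	return 0;
}
#endif
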
static void raid5_activate_delayed(raid5_conf_t *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->handle_list);
		}
	}
}

static void activate_bit_delay(raid5_conf_t *conf)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
	}
}

static void unplug_slaves(mddev_t *mddev)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void raid5_unplug_device(request_queue_t *q)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	if (blk_remove_plug(q)) {
		conf->seq_flush++;
		raid5_activate_delayed(conf);
	}
	md_wakeup_thread(mddev->thread);

	spin_unlock_irqrestore(&conf->device_lock, flags);

	unplug_slaves(mddev);
}

static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
			     sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn)
				ret = -EOPNOTSUPP;
			else {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
							      error_sector);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
	}
	rcu_read_unlock();
	return ret;
}

static inline void raid5_plug_device(raid5_conf_t *conf)
{
	spin_lock_irq(&conf->device_lock);
	blk_plug_device(conf->mddev->queue);
	spin_unlock_irq(&conf->device_lock);
}

static int make_request (request_queue_t *q, struct bio * bi)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	const unsigned int raid_disks = conf->raid_disks;
	const unsigned int data_disks = raid_disks - 1;
	unsigned int dd_idx, pd_idx;
	sector_t new_sector;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	const int rw = bio_data_dir(bi);

	if (unlikely(bio_barrier(bi))) {
		bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
		return 0;
	}

	md_write_start(mddev, bi);

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));

	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);
	bi->bi_next = NULL;
	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */

	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		DEFINE_WAIT(w);

		new_sector = raid5_compute_sector(logical_sector,
						  raid_disks, data_disks, &dd_idx, &pd_idx, conf);

		PRINTK("raid5: make_request, sector %llu logical %llu\n",
			(unsigned long long)new_sector,
			(unsigned long long)logical_sector);

	retry:
		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
		sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
		if (sh) {
			if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
				/* Add failed due to overlap. Flush everything
				 * and wait a while
				 */
				raid5_unplug_device(mddev->queue);
				release_stripe(sh);
				schedule();
				goto retry;
			}
			finish_wait(&conf->wait_for_overlap, &w);
			raid5_plug_device(conf);
			handle_stripe(sh);
			release_stripe(sh);

		} else {
			/* cannot get stripe for read-ahead, just give-up */
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			finish_wait(&conf->wait_for_overlap, &w);
			break;
		}

	}
	spin_lock_irq(&conf->device_lock);
	if (--bi->bi_phys_segments == 0) {
		int bytes = bi->bi_size;

		if ( bio_data_dir(bi) == WRITE )
			md_write_end(mddev);
		bi->bi_size = 0;
		bi->bi_end_io(bi, bytes, 0);
	}
	spin_unlock_irq(&conf->device_lock);
	return 0;
}

/* FIXME go_faster isn't used */
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	struct stripe_head *sh;
	int sectors_per_chunk = conf->chunk_size >> 9;
	sector_t x;
	unsigned long stripe;
	int chunk_offset;
	int dd_idx, pd_idx;
	sector_t first_sector;
	int raid_disks = conf->raid_disks;
	int data_disks = raid_disks-1;
	sector_t max_sector = mddev->size << 1;
	int sync_blocks;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */
		unplug_slaves(mddev);

		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);

		return 0;
	}
	/* if there is 1 or more failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
	if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = (mddev->size << 1) - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		*skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}

	x = sector_nr;
	chunk_offset = sector_div(x, sectors_per_chunk);
	stripe = x;
	BUG_ON(x != stripe);

	first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
		+ chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
	sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
	if (sh == NULL) {
		sh = get_active_stripe(conf, sector_nr, pd_idx, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
		schedule_timeout_uninterruptible(1);
	}
	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
	spin_lock(&sh->lock);
	set_bit(STRIPE_SYNCING, &sh->state);
	clear_bit(STRIPE_INSYNC, &sh->state);
	spin_unlock(&sh->lock);

	handle_stripe(sh);
	release_stripe(sh);

	return STRIPE_SECTORS;
}

/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d (mddev_t *mddev)
{
	struct stripe_head *sh;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	int handled;

	PRINTK("+++ raid5d active\n");

	md_check_recovery(mddev);

	handled = 0;
	spin_lock_irq(&conf->device_lock);
	while (1) {
		struct list_head *first;

		if (conf->seq_flush - conf->seq_write > 0) {
			int seq = conf->seq_flush;
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = seq;
			activate_bit_delay(conf);
		}

		if (list_empty(&conf->handle_list) &&
		    atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
		    !blk_queue_plugged(mddev->queue) &&
		    !list_empty(&conf->delayed_list))
			raid5_activate_delayed(conf);

		if (list_empty(&conf->handle_list))
			break;

		first = conf->handle_list.next;
		sh = list_entry(first, struct stripe_head, lru);

		list_del_init(first);
		atomic_inc(&sh->count);
		if (atomic_read(&sh->count)!= 1)
			BUG();
		spin_unlock_irq(&conf->device_lock);

		handled++;
		handle_stripe(sh);
		release_stripe(sh);

		spin_lock_irq(&conf->device_lock);
	}
	PRINTK("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);

	unplug_slaves(mddev);

	PRINTK("--- raid5d inactive\n");
}

static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	if (conf)
		return sprintf(page, "%d\n", conf->max_nr_stripes);
	else
		return 0;
}

static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	char *end;
	int new;
	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	new = simple_strtoul(page, &end, 10);
	if (!*page || (*end && *end != '\n') )
		return -EINVAL;
	if (new <= 16 || new > 32768)
		return -EINVAL;
	while (new < conf->max_nr_stripes) {
		if (drop_one_stripe(conf))
			conf->max_nr_stripes--;
		else
			break;
	}
	while (new > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
		else
			break;
	}
	return len;
}
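
/*
 * The attribute accepts values greater than 16 and at most 32768,
 * resizing the cache one stripe at a time. From user space it is
 * reached through sysfs; a small sketch (illustration only, guarded
 * out of the build; the md0 path is an assumption for the example):
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/md0/md/stripe_cache_size", "w");

	if (!f) {
		perror("open");
		return 1;
	}
	fprintf(f, "%d\n", 1024);	/* grow the cache to 1024 stripes */
	fclose(f);
	return 0;
}
#endif
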
static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);

static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	if (conf)
		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
	else
		return 0;
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] = {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	NULL,
};
static struct attribute_group raid5_attrs_group = {
	.name = NULL,
	.attrs = raid5_attrs,
};

static int run(mddev_t *mddev)
{
	raid5_conf_t *conf;
	int raid_disk, memory;
	mdk_rdev_t *rdev;
	struct disk_info *disk;
	struct list_head *tmp;

	if (mddev->level != 5 && mddev->level != 4) {
		printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n",
		       mdname(mddev), mddev->level);
		return -EIO;
	}

	mddev->private = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
	if ((conf = mddev->private) == NULL)
		goto abort;
	conf->disks = kzalloc(mddev->raid_disks * sizeof(struct disk_info),
			      GFP_KERNEL);
	if (!conf->disks)
		goto abort;

	conf->mddev = mddev;

	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
		goto abort;

	spin_lock_init(&conf->device_lock);
	init_waitqueue_head(&conf->wait_for_stripe);
	init_waitqueue_head(&conf->wait_for_overlap);
	INIT_LIST_HEAD(&conf->handle_list);
	INIT_LIST_HEAD(&conf->delayed_list);
	INIT_LIST_HEAD(&conf->bitmap_list);
	INIT_LIST_HEAD(&conf->inactive_list);
	atomic_set(&conf->active_stripes, 0);
	atomic_set(&conf->preread_active_stripes, 0);

	PRINTK("raid5: run(%s) called.\n", mdname(mddev));

	ITERATE_RDEV(mddev, rdev, tmp) {
		raid_disk = rdev->raid_disk;
		if (raid_disk >= mddev->raid_disks
		    || raid_disk < 0)
			continue;
		disk = conf->disks + raid_disk;

		disk->rdev = rdev;

		if (test_bit(In_sync, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "raid5: device %s operational as raid"
			       " disk %d\n", bdevname(rdev->bdev, b),
			       raid_disk);
			conf->working_disks++;
		}
	}

	conf->raid_disks = mddev->raid_disks;
	/*
	 * 0 for a fully functional array, 1 for a degraded array.
	 */
	mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
	conf->mddev = mddev;
	conf->chunk_size = mddev->chunk_size;
	conf->level = mddev->level;
	conf->algorithm = mddev->layout;
	conf->max_nr_stripes = NR_STRIPES;

	/* device size must be a multiple of chunk size */
	mddev->size &= ~(mddev->chunk_size/1024 - 1);
	mddev->resync_max_sectors = mddev->size << 1;

	if (!conf->chunk_size || conf->chunk_size % 4) {
		printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
		       conf->chunk_size, mdname(mddev));
		goto abort;
	}
	if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
		printk(KERN_ERR
		       "raid5: unsupported parity algorithm %d for %s\n",
		       conf->algorithm, mdname(mddev));
		goto abort;
	}
	if (mddev->degraded > 1) {
		printk(KERN_ERR "raid5: not enough operational devices for %s"
		       " (%d/%d failed)\n",
		       mdname(mddev), conf->failed_disks, conf->raid_disks);
		goto abort;
	}

	if (mddev->degraded == 1 &&
	    mddev->recovery_cp != MaxSector) {
		if (mddev->ok_start_degraded)
			printk(KERN_WARNING
			       "raid5: starting dirty degraded array: %s"
			       " - data corruption possible.\n",
			       mdname(mddev));
		else {
			printk(KERN_ERR
			       "raid5: cannot start dirty degraded array for %s\n",
			       mdname(mddev));
			goto abort;
		}
	}

	{
		mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
		if (!mddev->thread) {
			printk(KERN_ERR
			       "raid5: couldn't allocate thread for %s\n",
			       mdname(mddev));
			goto abort;
		}
	}
	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
		 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
	if (grow_stripes(conf, conf->max_nr_stripes)) {
		printk(KERN_ERR
		       "raid5: couldn't allocate %dkB for buffers\n", memory);
		shrink_stripes(conf);
		md_unregister_thread(mddev->thread);
		goto abort;
	} else
		printk(KERN_INFO "raid5: allocated %dkB for %s\n",
		       memory, mdname(mddev));

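	/*
	 * Worked example of the 'memory' estimate above, with illustrative
	 * numbers: at the default NR_STRIPES = 256, on a 4-device array
	 * with 4K pages each stripe costs one struct stripe_head plus, per
	 * device, a struct bio and a page of cache.  The page term
	 * dominates, so the message above reports a little over
	 * 256 * 4 * 4K / 1024 = 4096kB.
	 */
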
	if (mddev->degraded == 0)
		printk("raid5: raid level %d set %s active with %d out of %d"
		       " devices, algorithm %d\n", conf->level, mdname(mddev),
		       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
		       conf->algorithm);
	else
		printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
		       " out of %d devices, algorithm %d\n", conf->level,
		       mdname(mddev), mddev->raid_disks - mddev->degraded,
		       mddev->raid_disks, conf->algorithm);

	print_raid5_conf(conf);

	/* read-ahead size must cover two whole stripes, which is
	 * 2 * (n-1) * chunksize where 'n' is the number of raid devices
	 */
	{
		int stripe = (mddev->raid_disks-1) * mddev->chunk_size
			/ PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}
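
	/*
	 * Example of the read-ahead sizing above, with illustrative
	 * numbers: a 5-device array with 64K chunks has a data stripe of
	 * (5-1) * 64K = 256K, i.e. 64 pages of 4K, so ra_pages is raised
	 * to at least 128 pages (512K), covering two whole stripes.
	 */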

	/* Ok, everything is just fine now */
	sysfs_create_group(&mddev->kobj, &raid5_attrs_group);

	mddev->queue->unplug_fn = raid5_unplug_device;
	mddev->queue->issue_flush_fn = raid5_issue_flush;

	mddev->array_size = mddev->size * (mddev->raid_disks - 1);
	return 0;
abort:
	if (conf) {
		print_raid5_conf(conf);
		kfree(conf->disks);
		kfree(conf->stripe_hashtbl);
		kfree(conf);
	}
	mddev->private = NULL;
	printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
	return -EIO;
}


static int stop(mddev_t *mddev)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	shrink_stripes(conf);
	kfree(conf->stripe_hashtbl);
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
	kfree(conf->disks);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

#if RAID5_DEBUG
static void print_sh (struct stripe_head *sh)
{
	int i;

	printk("sh %llu, pd_idx %d, state %ld.\n",
	       (unsigned long long)sh->sector, sh->pd_idx, sh->state);
	printk("sh %llu, count %d.\n",
	       (unsigned long long)sh->sector, atomic_read(&sh->count));
	printk("sh %llu, ", (unsigned long long)sh->sector);
	for (i = 0; i < sh->raid_conf->raid_disks; i++) {
		printk("(cache%d: %p %ld) ",
		       i, sh->dev[i].page, sh->dev[i].flags);
	}
	printk("\n");
}

static void printall (raid5_conf_t *conf)
{
	struct stripe_head *sh;
	struct hlist_node *hn;
	int i;

	spin_lock_irq(&conf->device_lock);
	for (i = 0; i < NR_HASH; i++) {
		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
			if (sh->raid_conf != conf)
				continue;
			print_sh(sh);
		}
	}
	spin_unlock_irq(&conf->device_lock);
}
#endif

static void status (struct seq_file *seq, mddev_t *mddev)
{
	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
	int i;

	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
		   mddev->chunk_size >> 10, mddev->layout);
	seq_printf(seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->disks[i].rdev &&
			   test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
#if RAID5_DEBUG
#define D(x) \
	seq_printf(seq, "<"#x":%d>", atomic_read(&conf->x))
	printall(conf);
#endif
}
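
/*
 * For a healthy 4-device RAID5 with 64K chunks and layout 2, the calls
 * above would emit something like
 *	" level 5, 64k chunk, algorithm 2 [4/4] [UUUU]"
 * into the array's /proc/mdstat entry (illustrative output, not taken
 * from a real run).
 */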

static void print_raid5_conf (raid5_conf_t *conf)
{
	int i;
	struct disk_info *tmp;

	printk("RAID5 conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks,
	       conf->working_disks, conf->failed_disks);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->disks + i;
		if (tmp->rdev)
			printk(" disk %d, o:%d, dev:%s\n",
			       i, !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));
	}
}

static int raid5_spare_active(mddev_t *mddev)
{
	int i;
	raid5_conf_t *conf = mddev->private;
	struct disk_info *tmp;

	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->disks + i;
		if (tmp->rdev
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_bit(In_sync, &tmp->rdev->flags)) {
			mddev->degraded--;
			conf->failed_disks--;
			conf->working_disks++;
			set_bit(In_sync, &tmp->rdev->flags);
		}
	}
	print_raid5_conf(conf);
	return 0;
}

static int raid5_remove_disk(mddev_t *mddev, int number)
{
	raid5_conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	struct disk_info *p = conf->disks + number;

	print_raid5_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:

	print_raid5_conf(conf);
	return err;
}
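
/*
 * The removal protocol above leans on RCU: readers elsewhere in this
 * driver dereference p->rdev under rcu_read_lock() and raise nr_pending
 * before dropping it.  Clearing p->rdev and then calling
 * synchronize_rcu() guarantees every such reader has either finished or
 * taken a reference; if nr_pending is non-zero afterwards, a reader won
 * the race, so the pointer is restored and -EBUSY returned.
 */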

static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	raid5_conf_t *conf = mddev->private;
	int found = 0;
	int disk;
	struct disk_info *p;

	if (mddev->degraded > 1)
		/* no point adding a device */
		return 0;

	/*
	 * find the disk ...
	 */
	for (disk = 0; disk < mddev->raid_disks; disk++)
		if ((p = conf->disks + disk)->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			found = 1;
			if (rdev->saved_raid_disk != disk)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
	print_raid5_conf(conf);
	return found;
}
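
/*
 * Note on the hot-add path above: rcu_assign_pointer() publishes the
 * new rdev so lock-free readers only ever see a fully initialised slot.
 * conf->fullsync is set when the device is not returning to the slot
 * recorded in saved_raid_disk, as any bitmap-based shortcut for the
 * resync would then be unsafe and a full synchronisation is forced.
 */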

static int raid5_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
	mddev->array_size = (sectors * (mddev->raid_disks-1))>>1;
	set_capacity(mddev->gendisk, mddev->array_size << 1);
	mddev->changed = 1;
	if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
		mddev->recovery_cp = mddev->size << 1;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->size = sectors / 2;
	mddev->resync_max_sectors = sectors;
	return 0;
}
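
/*
 * Worked example of the resize arithmetic above, with illustrative
 * numbers: with 64K chunks (128 sectors) and 1,000,000 sectors per
 * device, 'sectors' rounds down to 999,936; on a 4-device array the
 * exported array_size becomes 999936 * 3 / 2 = 1,499,904 (kB) and
 * mddev->size (per-device kB) becomes 499,968.
 */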

static void raid5_quiesce(mddev_t *mddev, int state)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);

	switch(state) {
	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 1;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0,
				    conf->device_lock, /* nothing */);
		spin_unlock_irq(&conf->device_lock);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}
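
/*
 * Quiesce protocol: while conf->quiesce is set, get_active_stripe()
 * (earlier in this file) blocks new stripe activations on
 * wait_for_stripe, so waiting above for active_stripes to reach zero
 * drains all in-flight stripe work; clearing the flag and waking the
 * queue lets writers resume.
 */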

static struct mdk_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.quiesce	= raid5_quiesce,
};

static struct mdk_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.quiesce	= raid5_quiesce,
};

static int __init raid5_init(void)
{
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
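
/*
 * The md core pulls personalities in on demand via these aliases: when
 * an array of level 5 or 4 is assembled and the personality is not yet
 * registered, a request_module() on the matching "md-level-N" alias
 * (or the legacy "md-personality-N" name) resolves to this module.
 */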