disable some mediatek custom warnings
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers/md/raid10.c
1 /*
2 * raid10.c : Multiple Devices driver for Linux
3 *
4 * Copyright (C) 2000-2004 Neil Brown
5 *
6 * RAID-10 support for md.
7 *
8 * Based on code in raid1.c. See raid1.c for further copyright information.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/blkdev.h>
24 #include <linux/module.h>
25 #include <linux/seq_file.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include "md.h"
29 #include "raid10.h"
30 #include "raid0.h"
31 #include "bitmap.h"
32
33 /*
34 * RAID10 provides a combination of RAID0 and RAID1 functionality.
35 * The layout of data is defined by
36 * chunk_size
37 * raid_disks
38 * near_copies (stored in low byte of layout)
39 * far_copies (stored in second byte of layout)
40 * far_offset (stored in bit 16 of layout )
41 * use_far_sets (stored in bit 17 of layout )
42 *
43 * The data to be stored is divided into chunks using chunksize. Each device
44 * is divided into far_copies sections. In each section, chunks are laid out
45 * in a style similar to raid0, but near_copies copies of each chunk are stored
46 * (each on a different drive). The starting device for each section is offset
47 * near_copies from the starting device of the previous section. Thus there
48 * are (near_copies * far_copies) of each chunk, and each is on a different
49 * drive. near_copies and far_copies must be at least one, and their product
50 * is at most raid_disks.
51 *
52 * If far_offset is true, then the far_copies are handled a bit differently.
53 * The copies are still in different stripes, but instead of being very far
54 * apart on disk, they are adjacent stripes.
55 *
56 * The far and offset algorithms are handled slightly differently if
57 * 'use_far_sets' is true. In this case, the array's devices are grouped into
58 * sets that are (near_copies * far_copies) in size. The far copied stripes
59 * are still shifted by 'near_copies' devices, but this shifting stays confined
60 * to the set rather than the entire array. This is done to improve the number
61 * of device combinations that can fail without causing the array to fail.
62 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
63 * on a device):
64 * A B C D A B C D E
65 * ... ...
66 * D A B C E A B C D
67 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
68 * [A B] [C D] [A B] [C D E]
69 * |...| |...| |...| | ... |
70 * [B A] [D C] [B A] [E C D]
71 */
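/*
 * A rough, illustrative sketch (not part of the driver and kept under
 * #if 0 so it is never compiled): the arithmetic below mirrors the 'near'
 * layout described above for an assumed geometry of raid_disks = 4,
 * near_copies = 2, far_copies = 1 and 64K chunks (chunk_shift = 7,
 * chunk_mask = 127).
 */
#if 0
static void raid10_layout_example(void)
{
	unsigned long long sector = 1000;		/* logical sector */
	unsigned long long chunk = sector >> 7;		/* = 7 */
	unsigned long long offset = sector & 127;	/* = 104 */
	unsigned long long stripe = chunk * 2;		/* near_copies = 2, so 14 */
	int dev = stripe % 4;				/* first device = 2 */
	unsigned long long dev_sector = (stripe / 4) * 128 + offset; /* = 488 */

	/* copy 0 -> device 2, sector 488; copy 1 -> device 3, sector 488 */
	(void)dev;
	(void)dev_sector;
}
#endif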
72
73 /*
74 * Number of guaranteed r10bios in case of extreme VM load:
75 */
76 #define NR_RAID10_BIOS 256
77
78 /* when we get a read error on a read-only array, we redirect to another
79 * device without failing the first device, or trying to over-write to
80 * correct the read error. To keep track of bad blocks on a per-bio
81 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
82 */
83 #define IO_BLOCKED ((struct bio *)1)
84 /* When we successfully write to a known bad-block, we need to remove the
85 * bad-block marking which must be done from process context. So we record
86 * the success by setting devs[n].bio to IO_MADE_GOOD
87 */
88 #define IO_MADE_GOOD ((struct bio *)2)
89
90 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
91
92 /* When there are this many requests queued to be written by
93 * the raid10 thread, we become 'congested' to provide back-pressure
94 * for writeback.
95 */
96 static int max_queued_requests = 1024;
97
98 static void allow_barrier(struct r10conf *conf);
99 static void lower_barrier(struct r10conf *conf);
100 static int enough(struct r10conf *conf, int ignore);
101 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
102 int *skipped);
103 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
104 static void end_reshape_write(struct bio *bio, int error);
105 static void end_reshape(struct r10conf *conf);
106
107 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
108 {
109 struct r10conf *conf = data;
110 int size = offsetof(struct r10bio, devs[conf->copies]);
111
112 /* allocate an r10bio with room for conf->copies entries in the
113 * devs array */
114 return kzalloc(size, gfp_flags);
115 }
116
117 static void r10bio_pool_free(void *r10_bio, void *data)
118 {
119 kfree(r10_bio);
120 }
121
122 /* Maximum size of each resync request */
123 #define RESYNC_BLOCK_SIZE (64*1024)
124 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
125 /* amount of memory to reserve for resync requests */
126 #define RESYNC_WINDOW (1024*1024)
127 /* maximum number of concurrent requests, memory permitting */
128 #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
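/*
 * With the values above (and 4K pages) RESYNC_PAGES works out to 16 and
 * RESYNC_DEPTH to 512 concurrent resync requests.
 */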
129
130 /*
131 * When performing a resync, we need to read and compare, so
132 * we need as many pages as there are copies.
133 * When performing a recovery, we need 2 bios, one for read,
134 * one for write (we recover only one drive per r10buf)
135 *
136 */
137 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
138 {
139 struct r10conf *conf = data;
140 struct page *page;
141 struct r10bio *r10_bio;
142 struct bio *bio;
143 int i, j;
144 int nalloc;
145
146 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
147 if (!r10_bio)
148 return NULL;
149
150 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
151 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
152 nalloc = conf->copies; /* resync */
153 else
154 nalloc = 2; /* recovery */
155
156 /*
157 * Allocate bios.
158 */
159 for (j = nalloc ; j-- ; ) {
160 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
161 if (!bio)
162 goto out_free_bio;
163 r10_bio->devs[j].bio = bio;
164 if (!conf->have_replacement)
165 continue;
166 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
167 if (!bio)
168 goto out_free_bio;
169 r10_bio->devs[j].repl_bio = bio;
170 }
171 /*
172 * Allocate RESYNC_PAGES data pages and attach them
173 * where needed.
174 */
175 for (j = 0 ; j < nalloc; j++) {
176 struct bio *rbio = r10_bio->devs[j].repl_bio;
177 bio = r10_bio->devs[j].bio;
178 for (i = 0; i < RESYNC_PAGES; i++) {
179 if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
180 &conf->mddev->recovery)) {
181 /* we can share bv_page's during recovery
182 * and reshape */
183 struct bio *rbio = r10_bio->devs[0].bio;
184 page = rbio->bi_io_vec[i].bv_page;
185 get_page(page);
186 } else
187 page = alloc_page(gfp_flags);
188 if (unlikely(!page))
189 goto out_free_pages;
190
191 bio->bi_io_vec[i].bv_page = page;
192 if (rbio)
193 rbio->bi_io_vec[i].bv_page = page;
194 }
195 }
196
197 return r10_bio;
198
199 out_free_pages:
200 for ( ; i > 0 ; i--)
201 safe_put_page(bio->bi_io_vec[i-1].bv_page);
202 while (j--)
203 for (i = 0; i < RESYNC_PAGES ; i++)
204 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
205 j = 0;
206 out_free_bio:
207 for ( ; j < nalloc; j++) {
208 if (r10_bio->devs[j].bio)
209 bio_put(r10_bio->devs[j].bio);
210 if (r10_bio->devs[j].repl_bio)
211 bio_put(r10_bio->devs[j].repl_bio);
212 }
213 r10bio_pool_free(r10_bio, conf);
214 return NULL;
215 }
216
217 static void r10buf_pool_free(void *__r10_bio, void *data)
218 {
219 int i;
220 struct r10conf *conf = data;
221 struct r10bio *r10bio = __r10_bio;
222 int j;
223
224 for (j=0; j < conf->copies; j++) {
225 struct bio *bio = r10bio->devs[j].bio;
226 if (bio) {
227 for (i = 0; i < RESYNC_PAGES; i++) {
228 safe_put_page(bio->bi_io_vec[i].bv_page);
229 bio->bi_io_vec[i].bv_page = NULL;
230 }
231 bio_put(bio);
232 }
233 bio = r10bio->devs[j].repl_bio;
234 if (bio)
235 bio_put(bio);
236 }
237 r10bio_pool_free(r10bio, conf);
238 }
239
240 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
241 {
242 int i;
243
244 for (i = 0; i < conf->copies; i++) {
245 struct bio **bio = & r10_bio->devs[i].bio;
246 if (!BIO_SPECIAL(*bio))
247 bio_put(*bio);
248 *bio = NULL;
249 bio = &r10_bio->devs[i].repl_bio;
250 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
251 bio_put(*bio);
252 *bio = NULL;
253 }
254 }
255
256 static void free_r10bio(struct r10bio *r10_bio)
257 {
258 struct r10conf *conf = r10_bio->mddev->private;
259
260 put_all_bios(conf, r10_bio);
261 mempool_free(r10_bio, conf->r10bio_pool);
262 }
263
264 static void put_buf(struct r10bio *r10_bio)
265 {
266 struct r10conf *conf = r10_bio->mddev->private;
267
268 mempool_free(r10_bio, conf->r10buf_pool);
269
270 lower_barrier(conf);
271 }
272
273 static void reschedule_retry(struct r10bio *r10_bio)
274 {
275 unsigned long flags;
276 struct mddev *mddev = r10_bio->mddev;
277 struct r10conf *conf = mddev->private;
278
279 spin_lock_irqsave(&conf->device_lock, flags);
280 list_add(&r10_bio->retry_list, &conf->retry_list);
281 conf->nr_queued ++;
282 spin_unlock_irqrestore(&conf->device_lock, flags);
283
284 /* wake up frozen array... */
285 wake_up(&conf->wait_barrier);
286
287 md_wakeup_thread(mddev->thread);
288 }
289
290 /*
291 * raid_end_bio_io() is called when we have finished servicing a mirrored
292 * operation and are ready to return a success/failure code to the buffer
293 * cache layer.
294 */
295 static void raid_end_bio_io(struct r10bio *r10_bio)
296 {
297 struct bio *bio = r10_bio->master_bio;
298 int done;
299 struct r10conf *conf = r10_bio->mddev->private;
300
301 if (bio->bi_phys_segments) {
302 unsigned long flags;
303 spin_lock_irqsave(&conf->device_lock, flags);
304 bio->bi_phys_segments--;
305 done = (bio->bi_phys_segments == 0);
306 spin_unlock_irqrestore(&conf->device_lock, flags);
307 } else
308 done = 1;
309 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
310 clear_bit(BIO_UPTODATE, &bio->bi_flags);
311 if (done) {
312 bio_endio(bio, 0);
313 /*
314 * Wake up any possible resync thread that waits for the device
315 * to go idle.
316 */
317 allow_barrier(conf);
318 }
319 free_r10bio(r10_bio);
320 }
321
322 /*
323 * Update disk head position estimator based on IRQ completion info.
324 */
325 static inline void update_head_pos(int slot, struct r10bio *r10_bio)
326 {
327 struct r10conf *conf = r10_bio->mddev->private;
328
329 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
330 r10_bio->devs[slot].addr + (r10_bio->sectors);
331 }
332
333 /*
334 * Find the disk number which triggered the given bio
335 */
336 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
337 struct bio *bio, int *slotp, int *replp)
338 {
339 int slot;
340 int repl = 0;
341
342 for (slot = 0; slot < conf->copies; slot++) {
343 if (r10_bio->devs[slot].bio == bio)
344 break;
345 if (r10_bio->devs[slot].repl_bio == bio) {
346 repl = 1;
347 break;
348 }
349 }
350
351 BUG_ON(slot == conf->copies);
352 update_head_pos(slot, r10_bio);
353
354 if (slotp)
355 *slotp = slot;
356 if (replp)
357 *replp = repl;
358 return r10_bio->devs[slot].devnum;
359 }
360
361 static void raid10_end_read_request(struct bio *bio, int error)
362 {
363 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
364 struct r10bio *r10_bio = bio->bi_private;
365 int slot, dev;
366 struct md_rdev *rdev;
367 struct r10conf *conf = r10_bio->mddev->private;
368
369
370 slot = r10_bio->read_slot;
371 dev = r10_bio->devs[slot].devnum;
372 rdev = r10_bio->devs[slot].rdev;
373 /*
374 * this branch is our 'one mirror IO has finished' event handler:
375 */
376 update_head_pos(slot, r10_bio);
377
378 if (uptodate) {
379 /*
380 * Set R10BIO_Uptodate in our master bio, so that
381 * we will return a good error code to the higher
382 * levels even if IO on some other mirrored buffer fails.
383 *
384 * The 'master' represents the composite IO operation to
385 * user-side. So if something waits for IO, then it will
386 * wait for the 'master' bio.
387 */
388 set_bit(R10BIO_Uptodate, &r10_bio->state);
389 } else {
390 /* If all other devices that store this block have
391 * failed, we want to return the error upwards rather
392 * than fail the last device. Here we redefine
393 * "uptodate" to mean "Don't want to retry"
394 */
395 unsigned long flags;
396 spin_lock_irqsave(&conf->device_lock, flags);
397 if (!enough(conf, rdev->raid_disk))
398 uptodate = 1;
399 spin_unlock_irqrestore(&conf->device_lock, flags);
400 }
401 if (uptodate) {
402 raid_end_bio_io(r10_bio);
403 rdev_dec_pending(rdev, conf->mddev);
404 } else {
405 /*
406 * oops, read error - keep the refcount on the rdev
407 */
408 char b[BDEVNAME_SIZE];
409 printk_ratelimited(KERN_ERR
410 "md/raid10:%s: %s: rescheduling sector %llu\n",
411 mdname(conf->mddev),
412 bdevname(rdev->bdev, b),
413 (unsigned long long)r10_bio->sector);
414 set_bit(R10BIO_ReadError, &r10_bio->state);
415 reschedule_retry(r10_bio);
416 }
417 }
418
419 static void close_write(struct r10bio *r10_bio)
420 {
421 /* clear the bitmap if all writes complete successfully */
422 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
423 r10_bio->sectors,
424 !test_bit(R10BIO_Degraded, &r10_bio->state),
425 0);
426 md_write_end(r10_bio->mddev);
427 }
428
429 static void one_write_done(struct r10bio *r10_bio)
430 {
431 if (atomic_dec_and_test(&r10_bio->remaining)) {
432 if (test_bit(R10BIO_WriteError, &r10_bio->state))
433 reschedule_retry(r10_bio);
434 else {
435 close_write(r10_bio);
436 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
437 reschedule_retry(r10_bio);
438 else
439 raid_end_bio_io(r10_bio);
440 }
441 }
442 }
443
444 static void raid10_end_write_request(struct bio *bio, int error)
445 {
446 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
447 struct r10bio *r10_bio = bio->bi_private;
448 int dev;
449 int dec_rdev = 1;
450 struct r10conf *conf = r10_bio->mddev->private;
451 int slot, repl;
452 struct md_rdev *rdev = NULL;
453
454 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
455
456 if (repl)
457 rdev = conf->mirrors[dev].replacement;
458 if (!rdev) {
459 smp_rmb();
460 repl = 0;
461 rdev = conf->mirrors[dev].rdev;
462 }
463 /*
464 * this branch is our 'one mirror IO has finished' event handler:
465 */
466 if (!uptodate) {
467 if (repl)
468 /* Never record new bad blocks to replacement,
469 * just fail it.
470 */
471 md_error(rdev->mddev, rdev);
472 else {
473 set_bit(WriteErrorSeen, &rdev->flags);
474 if (!test_and_set_bit(WantReplacement, &rdev->flags))
475 set_bit(MD_RECOVERY_NEEDED,
476 &rdev->mddev->recovery);
477 set_bit(R10BIO_WriteError, &r10_bio->state);
478 dec_rdev = 0;
479 }
480 } else {
481 /*
482 * Set R10BIO_Uptodate in our master bio, so that
483 * we will return a good error code to the higher
484 * levels even if IO on some other mirrored buffer fails.
485 *
486 * The 'master' represents the composite IO operation to
487 * user-side. So if something waits for IO, then it will
488 * wait for the 'master' bio.
489 */
490 sector_t first_bad;
491 int bad_sectors;
492
493 /*
494 * Do not set R10BIO_Uptodate if the current device is
495 * rebuilding or Faulty. This is because we cannot use
496 * such a device for properly reading the data back (we could
497 * potentially use it, if the current write had fallen
498 * before rdev->recovery_offset, but for simplicity we don't
499 * check this here).
500 */
501 if (test_bit(In_sync, &rdev->flags) &&
502 !test_bit(Faulty, &rdev->flags))
503 set_bit(R10BIO_Uptodate, &r10_bio->state);
504
505 /* Maybe we can clear some bad blocks. */
506 if (is_badblock(rdev,
507 r10_bio->devs[slot].addr,
508 r10_bio->sectors,
509 &first_bad, &bad_sectors)) {
510 bio_put(bio);
511 if (repl)
512 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
513 else
514 r10_bio->devs[slot].bio = IO_MADE_GOOD;
515 dec_rdev = 0;
516 set_bit(R10BIO_MadeGood, &r10_bio->state);
517 }
518 }
519
520 /*
521 *
522 * Let's see if all mirrored write operations have finished
523 * already.
524 */
525 one_write_done(r10_bio);
526 if (dec_rdev)
527 rdev_dec_pending(rdev, conf->mddev);
528 }
529
530 /*
531 * RAID10 layout manager
532 * As well as the chunksize and raid_disks count, there are two
533 * parameters: near_copies and far_copies.
534 * near_copies * far_copies must be <= raid_disks.
535 * Normally one of these will be 1.
536 * If both are 1, we get raid0.
537 * If near_copies == raid_disks, we get raid1.
538 *
539 * Chunks are laid out in raid0 style with near_copies copies of the
540 * first chunk, followed by near_copies copies of the next chunk and
541 * so on.
542 * If far_copies > 1, then after 1/far_copies of the array has been assigned
543 * as described above, we start again with a device offset of near_copies.
544 * So we effectively have another copy of the whole array further down all
545 * the drives, but with blocks on different drives.
546 * With this layout, a block is never stored twice on the same device.
547 *
548 * raid10_find_phys finds the sector offset of a given virtual sector
549 * on each device that it is on.
550 *
551 * raid10_find_virt does the reverse mapping, from a device and a
552 * sector offset to a virtual address
553 */
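/*
 * Worked example (illustrative only; it assumes raid_disks = 4,
 * near_copies = 2, far_copies = 1 and 64K chunks): raid10_find_phys()
 * maps virtual sector 1000 to chunk 7, stripe 3, i.e. sector 488 on
 * devices 2 and 3.  raid10_find_virt() reverses this: on device 2,
 * sector 488 gives chunk 3, vchunk = 3 * 4 + 2 = 14, 14 / near_copies = 7,
 * and (7 << chunk_shift) + 104 = 1000 again.
 */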
554
555 static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
556 {
557 int n,f;
558 sector_t sector;
559 sector_t chunk;
560 sector_t stripe;
561 int dev;
562 int slot = 0;
563 int last_far_set_start, last_far_set_size;
564
565 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
566 last_far_set_start *= geo->far_set_size;
567
568 last_far_set_size = geo->far_set_size;
569 last_far_set_size += (geo->raid_disks % geo->far_set_size);
570
571 /* now calculate first sector/dev */
572 chunk = r10bio->sector >> geo->chunk_shift;
573 sector = r10bio->sector & geo->chunk_mask;
574
575 chunk *= geo->near_copies;
576 stripe = chunk;
577 dev = sector_div(stripe, geo->raid_disks);
578 if (geo->far_offset)
579 stripe *= geo->far_copies;
580
581 sector += stripe << geo->chunk_shift;
582
583 /* and calculate all the others */
584 for (n = 0; n < geo->near_copies; n++) {
585 int d = dev;
586 int set;
587 sector_t s = sector;
588 r10bio->devs[slot].devnum = d;
589 r10bio->devs[slot].addr = s;
590 slot++;
591
592 for (f = 1; f < geo->far_copies; f++) {
593 set = d / geo->far_set_size;
594 d += geo->near_copies;
595
596 if ((geo->raid_disks % geo->far_set_size) &&
597 (d > last_far_set_start)) {
598 d -= last_far_set_start;
599 d %= last_far_set_size;
600 d += last_far_set_start;
601 } else {
602 d %= geo->far_set_size;
603 d += geo->far_set_size * set;
604 }
605 s += geo->stride;
606 r10bio->devs[slot].devnum = d;
607 r10bio->devs[slot].addr = s;
608 slot++;
609 }
610 dev++;
611 if (dev >= geo->raid_disks) {
612 dev = 0;
613 sector += (geo->chunk_mask + 1);
614 }
615 }
616 }
617
618 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
619 {
620 struct geom *geo = &conf->geo;
621
622 if (conf->reshape_progress != MaxSector &&
623 ((r10bio->sector >= conf->reshape_progress) !=
624 conf->mddev->reshape_backwards)) {
625 set_bit(R10BIO_Previous, &r10bio->state);
626 geo = &conf->prev;
627 } else
628 clear_bit(R10BIO_Previous, &r10bio->state);
629
630 __raid10_find_phys(geo, r10bio);
631 }
632
633 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
634 {
635 sector_t offset, chunk, vchunk;
636 /* Never use conf->prev as this is only called during resync
637 * or recovery, so reshape isn't happening
638 */
639 struct geom *geo = &conf->geo;
640 int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
641 int far_set_size = geo->far_set_size;
642 int last_far_set_start;
643
644 if (geo->raid_disks % geo->far_set_size) {
645 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
646 last_far_set_start *= geo->far_set_size;
647
648 if (dev >= last_far_set_start) {
649 far_set_size = geo->far_set_size;
650 far_set_size += (geo->raid_disks % geo->far_set_size);
651 far_set_start = last_far_set_start;
652 }
653 }
654
655 offset = sector & geo->chunk_mask;
656 if (geo->far_offset) {
657 int fc;
658 chunk = sector >> geo->chunk_shift;
659 fc = sector_div(chunk, geo->far_copies);
660 dev -= fc * geo->near_copies;
661 if (dev < far_set_start)
662 dev += far_set_size;
663 } else {
664 while (sector >= geo->stride) {
665 sector -= geo->stride;
666 if (dev < (geo->near_copies + far_set_start))
667 dev += far_set_size - geo->near_copies;
668 else
669 dev -= geo->near_copies;
670 }
671 chunk = sector >> geo->chunk_shift;
672 }
673 vchunk = chunk * geo->raid_disks + dev;
674 sector_div(vchunk, geo->near_copies);
675 return (vchunk << geo->chunk_shift) + offset;
676 }
677
678 /**
679 * raid10_mergeable_bvec -- tell the bio layer whether two requests can be merged
680 * @q: request queue
681 * @bvm: properties of new bio
682 * @biovec: the request that could be merged to it.
683 *
684 * Return amount of bytes we can accept at this offset
685 * This requires checking for end-of-chunk if near_copies != raid_disks,
686 * and for subordinate merge_bvec_fns if merge_check_needed.
687 */
688 static int raid10_mergeable_bvec(struct request_queue *q,
689 struct bvec_merge_data *bvm,
690 struct bio_vec *biovec)
691 {
692 struct mddev *mddev = q->queuedata;
693 struct r10conf *conf = mddev->private;
694 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
695 int max;
696 unsigned int chunk_sectors;
697 unsigned int bio_sectors = bvm->bi_size >> 9;
698 struct geom *geo = &conf->geo;
699
700 chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
701 if (conf->reshape_progress != MaxSector &&
702 ((sector >= conf->reshape_progress) !=
703 conf->mddev->reshape_backwards))
704 geo = &conf->prev;
705
706 if (geo->near_copies < geo->raid_disks) {
707 max = (chunk_sectors - ((sector & (chunk_sectors - 1))
708 + bio_sectors)) << 9;
709 if (max < 0)
710 /* bio_add cannot handle a negative return */
711 max = 0;
712 if (max <= biovec->bv_len && bio_sectors == 0)
713 return biovec->bv_len;
714 } else
715 max = biovec->bv_len;
716
717 if (mddev->merge_check_needed) {
718 struct {
719 struct r10bio r10_bio;
720 struct r10dev devs[conf->copies];
721 } on_stack;
722 struct r10bio *r10_bio = &on_stack.r10_bio;
723 int s;
724 if (conf->reshape_progress != MaxSector) {
725 /* Cannot give any guidance during reshape */
726 if (max <= biovec->bv_len && bio_sectors == 0)
727 return biovec->bv_len;
728 return 0;
729 }
730 r10_bio->sector = sector;
731 raid10_find_phys(conf, r10_bio);
732 rcu_read_lock();
733 for (s = 0; s < conf->copies; s++) {
734 int disk = r10_bio->devs[s].devnum;
735 struct md_rdev *rdev = rcu_dereference(
736 conf->mirrors[disk].rdev);
737 if (rdev && !test_bit(Faulty, &rdev->flags)) {
738 struct request_queue *q =
739 bdev_get_queue(rdev->bdev);
740 if (q->merge_bvec_fn) {
741 bvm->bi_sector = r10_bio->devs[s].addr
742 + rdev->data_offset;
743 bvm->bi_bdev = rdev->bdev;
744 max = min(max, q->merge_bvec_fn(
745 q, bvm, biovec));
746 }
747 }
748 rdev = rcu_dereference(conf->mirrors[disk].replacement);
749 if (rdev && !test_bit(Faulty, &rdev->flags)) {
750 struct request_queue *q =
751 bdev_get_queue(rdev->bdev);
752 if (q->merge_bvec_fn) {
753 bvm->bi_sector = r10_bio->devs[s].addr
754 + rdev->data_offset;
755 bvm->bi_bdev = rdev->bdev;
756 max = min(max, q->merge_bvec_fn(
757 q, bvm, biovec));
758 }
759 }
760 }
761 rcu_read_unlock();
762 }
763 return max;
764 }
765
766 /*
767 * This routine returns the disk from which the requested read should
768 * be done. There is a per-array 'next expected sequential IO' sector
769 * number - if this matches on the next IO then we use the last disk.
770 * There is also a per-disk 'last known head position' sector that is
771 * maintained from IRQ contexts; both the normal and the resync IO
772 * completion handlers update this position correctly. If there is no
773 * perfect sequential match then we pick the disk whose head is closest.
774 *
775 * If there are 2 mirrors in the same 2 devices, performance degrades
776 * because position is mirror, not device based.
777 *
778 * The rdev for the device selected will have nr_pending incremented.
779 */
780
781 /*
782 * FIXME: possibly should rethink readbalancing and do it differently
783 * depending on near_copies / far_copies geometry.
784 */
785 static struct md_rdev *read_balance(struct r10conf *conf,
786 struct r10bio *r10_bio,
787 int *max_sectors)
788 {
789 const sector_t this_sector = r10_bio->sector;
790 int disk, slot;
791 int sectors = r10_bio->sectors;
792 int best_good_sectors;
793 sector_t new_distance, best_dist;
794 struct md_rdev *best_rdev, *rdev = NULL;
795 int do_balance;
796 int best_slot;
797 struct geom *geo = &conf->geo;
798
799 raid10_find_phys(conf, r10_bio);
800 rcu_read_lock();
801 retry:
802 sectors = r10_bio->sectors;
803 best_slot = -1;
804 best_rdev = NULL;
805 best_dist = MaxSector;
806 best_good_sectors = 0;
807 do_balance = 1;
808 /*
809 * Check if we can balance. We can balance on the whole
810 * device if no resync is going on (recovery is ok), or below
811 * the resync window. We take the first readable disk when
812 * above the resync window.
813 */
814 if (conf->mddev->recovery_cp < MaxSector
815 && (this_sector + sectors >= conf->next_resync))
816 do_balance = 0;
817
818 for (slot = 0; slot < conf->copies ; slot++) {
819 sector_t first_bad;
820 int bad_sectors;
821 sector_t dev_sector;
822
823 if (r10_bio->devs[slot].bio == IO_BLOCKED)
824 continue;
825 disk = r10_bio->devs[slot].devnum;
826 rdev = rcu_dereference(conf->mirrors[disk].replacement);
827 if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
828 test_bit(Unmerged, &rdev->flags) ||
829 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
830 rdev = rcu_dereference(conf->mirrors[disk].rdev);
831 if (rdev == NULL ||
832 test_bit(Faulty, &rdev->flags) ||
833 test_bit(Unmerged, &rdev->flags))
834 continue;
835 if (!test_bit(In_sync, &rdev->flags) &&
836 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
837 continue;
838
839 dev_sector = r10_bio->devs[slot].addr;
840 if (is_badblock(rdev, dev_sector, sectors,
841 &first_bad, &bad_sectors)) {
842 if (best_dist < MaxSector)
843 /* Already have a better slot */
844 continue;
845 if (first_bad <= dev_sector) {
846 /* Cannot read here. If this is the
847 * 'primary' device, then we must not read
848 * beyond 'bad_sectors' from another device.
849 */
850 bad_sectors -= (dev_sector - first_bad);
851 if (!do_balance && sectors > bad_sectors)
852 sectors = bad_sectors;
853 if (best_good_sectors > sectors)
854 best_good_sectors = sectors;
855 } else {
856 sector_t good_sectors =
857 first_bad - dev_sector;
858 if (good_sectors > best_good_sectors) {
859 best_good_sectors = good_sectors;
860 best_slot = slot;
861 best_rdev = rdev;
862 }
863 if (!do_balance)
864 /* Must read from here */
865 break;
866 }
867 continue;
868 } else
869 best_good_sectors = sectors;
870
871 if (!do_balance)
872 break;
873
874 /* This optimisation is debatable, and completely destroys
875 * sequential read speed for 'far copies' arrays. So only
876 * keep it for 'near' arrays, and review those later.
877 */
878 if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
879 break;
880
881 /* for far > 1 always use the lowest address */
882 if (geo->far_copies > 1)
883 new_distance = r10_bio->devs[slot].addr;
884 else
885 new_distance = abs(r10_bio->devs[slot].addr -
886 conf->mirrors[disk].head_position);
887 if (new_distance < best_dist) {
888 best_dist = new_distance;
889 best_slot = slot;
890 best_rdev = rdev;
891 }
892 }
893 if (slot >= conf->copies) {
894 slot = best_slot;
895 rdev = best_rdev;
896 }
897
898 if (slot >= 0) {
899 atomic_inc(&rdev->nr_pending);
900 if (test_bit(Faulty, &rdev->flags)) {
901 /* Cannot risk returning a device that failed
902 * before we inc'ed nr_pending
903 */
904 rdev_dec_pending(rdev, conf->mddev);
905 goto retry;
906 }
907 r10_bio->read_slot = slot;
908 } else
909 rdev = NULL;
910 rcu_read_unlock();
911 *max_sectors = best_good_sectors;
912
913 return rdev;
914 }
915
916 int md_raid10_congested(struct mddev *mddev, int bits)
917 {
918 struct r10conf *conf = mddev->private;
919 int i, ret = 0;
920
921 if ((bits & (1 << BDI_async_congested)) &&
922 conf->pending_count >= max_queued_requests)
923 return 1;
924
925 rcu_read_lock();
926 for (i = 0;
927 (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
928 && ret == 0;
929 i++) {
930 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
931 if (rdev && !test_bit(Faulty, &rdev->flags)) {
932 struct request_queue *q = bdev_get_queue(rdev->bdev);
933
934 ret |= bdi_congested(&q->backing_dev_info, bits);
935 }
936 }
937 rcu_read_unlock();
938 return ret;
939 }
940 EXPORT_SYMBOL_GPL(md_raid10_congested);
941
942 static int raid10_congested(void *data, int bits)
943 {
944 struct mddev *mddev = data;
945
946 return mddev_congested(mddev, bits) ||
947 md_raid10_congested(mddev, bits);
948 }
949
950 static void flush_pending_writes(struct r10conf *conf)
951 {
952 /* Any writes that have been queued but are awaiting
953 * bitmap updates get flushed here.
954 */
955 spin_lock_irq(&conf->device_lock);
956
957 if (conf->pending_bio_list.head) {
958 struct bio *bio;
959 bio = bio_list_get(&conf->pending_bio_list);
960 conf->pending_count = 0;
961 spin_unlock_irq(&conf->device_lock);
962 /* flush any pending bitmap writes to disk
963 * before proceeding w/ I/O */
964 bitmap_unplug(conf->mddev->bitmap);
965 wake_up(&conf->wait_barrier);
966
967 while (bio) { /* submit pending writes */
968 struct bio *next = bio->bi_next;
969 bio->bi_next = NULL;
970 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
971 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
972 /* Just ignore it */
973 bio_endio(bio, 0);
974 else
975 generic_make_request(bio);
976 bio = next;
977 }
978 } else
979 spin_unlock_irq(&conf->device_lock);
980 }
981
982 /* Barriers....
983 * Sometimes we need to suspend IO while we do something else,
984 * either some resync/recovery, or reconfigure the array.
985 * To do this we raise a 'barrier'.
986 * The 'barrier' is a counter that can be raised multiple times
987 * to count how many activities are happening which preclude
988 * normal IO.
989 * We can only raise the barrier if there is no pending IO.
990 * i.e. if nr_pending == 0.
991 * We choose only to raise the barrier if no-one is waiting for the
992 * barrier to go down. This means that as soon as an IO request
993 * is ready, no other operations which require a barrier will start
994 * until the IO request has had a chance.
995 *
996 * So: regular IO calls 'wait_barrier'. When that returns there
997 * is no background IO happening.  It must arrange to call
998 * allow_barrier when it has finished its IO.
999 * Background IO calls must call raise_barrier.  Once that returns
1000 * there is no normal IO happening.  It must arrange to call
1001 * lower_barrier when the particular background IO completes.
1002 */
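/*
 * A minimal sketch of the pairing described above (illustrative only):
 *
 *	regular IO				resync/recovery IO
 *	----------				------------------
 *	wait_barrier(conf);			raise_barrier(conf, force);
 *	... submit the bio ...			... issue sync/recovery IO ...
 *	allow_barrier(conf);			lower_barrier(conf);
 *
 * freeze_array() and unfreeze_array() below combine both sides so the
 * whole array can be quiesced while an error is handled.
 */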
1003
1004 static void raise_barrier(struct r10conf *conf, int force)
1005 {
1006 BUG_ON(force && !conf->barrier);
1007 spin_lock_irq(&conf->resync_lock);
1008
1009 /* Wait until no block IO is waiting (unless 'force') */
1010 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
1011 conf->resync_lock);
1012
1013 /* block any new IO from starting */
1014 conf->barrier++;
1015
1016 /* Now wait for all pending IO to complete */
1017 wait_event_lock_irq(conf->wait_barrier,
1018 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
1019 conf->resync_lock);
1020
1021 spin_unlock_irq(&conf->resync_lock);
1022 }
1023
1024 static void lower_barrier(struct r10conf *conf)
1025 {
1026 unsigned long flags;
1027 spin_lock_irqsave(&conf->resync_lock, flags);
1028 conf->barrier--;
1029 spin_unlock_irqrestore(&conf->resync_lock, flags);
1030 wake_up(&conf->wait_barrier);
1031 }
1032
1033 static void wait_barrier(struct r10conf *conf)
1034 {
1035 spin_lock_irq(&conf->resync_lock);
1036 if (conf->barrier) {
1037 conf->nr_waiting++;
1038 /* Wait for the barrier to drop.
1039 * However if there are already pending
1040 * requests (preventing the barrier from
1041 * rising completely), and the
1042 * pre-process bio queue isn't empty,
1043 * then don't wait, as we need to empty
1044 * that queue to get the nr_pending
1045 * count down.
1046 */
1047 wait_event_lock_irq(conf->wait_barrier,
1048 !conf->barrier ||
1049 (conf->nr_pending &&
1050 current->bio_list &&
1051 !bio_list_empty(current->bio_list)),
1052 conf->resync_lock);
1053 conf->nr_waiting--;
1054 }
1055 conf->nr_pending++;
1056 spin_unlock_irq(&conf->resync_lock);
1057 }
1058
1059 static void allow_barrier(struct r10conf *conf)
1060 {
1061 unsigned long flags;
1062 spin_lock_irqsave(&conf->resync_lock, flags);
1063 conf->nr_pending--;
1064 spin_unlock_irqrestore(&conf->resync_lock, flags);
1065 wake_up(&conf->wait_barrier);
1066 }
1067
1068 static void freeze_array(struct r10conf *conf, int extra)
1069 {
1070 /* stop syncio and normal IO and wait for everything to
1071 * go quiet.
1072 * We increment barrier and nr_waiting, and then
1073 * wait until nr_pending matches nr_queued+extra.
1074 * This is called in the context of one normal IO request
1075 * that has failed. Thus any sync request that might be pending
1076 * will be blocked by nr_pending, and we need to wait for
1077 * pending IO requests to complete or be queued for re-try.
1078 * Thus the number queued (nr_queued) plus this request (extra)
1079 * must match the number of pending IOs (nr_pending) before
1080 * we continue.
1081 */
1082 spin_lock_irq(&conf->resync_lock);
1083 conf->barrier++;
1084 conf->nr_waiting++;
1085 wait_event_lock_irq_cmd(conf->wait_barrier,
1086 conf->nr_pending == conf->nr_queued+extra,
1087 conf->resync_lock,
1088 flush_pending_writes(conf));
1089
1090 spin_unlock_irq(&conf->resync_lock);
1091 }
1092
1093 static void unfreeze_array(struct r10conf *conf)
1094 {
1095 /* reverse the effect of the freeze */
1096 spin_lock_irq(&conf->resync_lock);
1097 conf->barrier--;
1098 conf->nr_waiting--;
1099 wake_up(&conf->wait_barrier);
1100 spin_unlock_irq(&conf->resync_lock);
1101 }
1102
1103 static sector_t choose_data_offset(struct r10bio *r10_bio,
1104 struct md_rdev *rdev)
1105 {
1106 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1107 test_bit(R10BIO_Previous, &r10_bio->state))
1108 return rdev->data_offset;
1109 else
1110 return rdev->new_data_offset;
1111 }
1112
1113 struct raid10_plug_cb {
1114 struct blk_plug_cb cb;
1115 struct bio_list pending;
1116 int pending_cnt;
1117 };
1118
1119 static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1120 {
1121 struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
1122 cb);
1123 struct mddev *mddev = plug->cb.data;
1124 struct r10conf *conf = mddev->private;
1125 struct bio *bio;
1126
1127 if (from_schedule || current->bio_list) {
1128 spin_lock_irq(&conf->device_lock);
1129 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1130 conf->pending_count += plug->pending_cnt;
1131 spin_unlock_irq(&conf->device_lock);
1132 wake_up(&conf->wait_barrier);
1133 md_wakeup_thread(mddev->thread);
1134 kfree(plug);
1135 return;
1136 }
1137
1138 /* we aren't scheduling, so we can do the write-out directly. */
1139 bio = bio_list_get(&plug->pending);
1140 bitmap_unplug(mddev->bitmap);
1141 wake_up(&conf->wait_barrier);
1142
1143 while (bio) { /* submit pending writes */
1144 struct bio *next = bio->bi_next;
1145 bio->bi_next = NULL;
1146 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
1147 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
1148 /* Just ignore it */
1149 bio_endio(bio, 0);
1150 else
1151 generic_make_request(bio);
1152 bio = next;
1153 }
1154 kfree(plug);
1155 }
1156
1157 static void make_request(struct mddev *mddev, struct bio * bio)
1158 {
1159 struct r10conf *conf = mddev->private;
1160 struct r10bio *r10_bio;
1161 struct bio *read_bio;
1162 int i;
1163 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1164 int chunk_sects = chunk_mask + 1;
1165 const int rw = bio_data_dir(bio);
1166 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1167 const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
1168 const unsigned long do_discard = (bio->bi_rw
1169 & (REQ_DISCARD | REQ_SECURE));
1170 const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
1171 unsigned long flags;
1172 struct md_rdev *blocked_rdev;
1173 struct blk_plug_cb *cb;
1174 struct raid10_plug_cb *plug = NULL;
1175 int sectors_handled;
1176 int max_sectors;
1177 int sectors;
1178
1179 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
1180 md_flush_request(mddev, bio);
1181 return;
1182 }
1183
1184 /* If this request crosses a chunk boundary, we need to
1185 * split it. This will only happen for 1 PAGE (or less) requests.
1186 */
1187 if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
1188 > chunk_sects
1189 && (conf->geo.near_copies < conf->geo.raid_disks
1190 || conf->prev.near_copies < conf->prev.raid_disks))) {
1191 struct bio_pair *bp;
1192 /* Sanity check -- queue functions should prevent this happening */
1193 if (bio_segments(bio) > 1)
1194 goto bad_map;
1195 /* This is a one page bio that upper layers
1196 * refuse to split for us, so we need to split it.
1197 */
1198 bp = bio_split(bio,
1199 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
1200
1201 /* Each of these 'make_request' calls will call 'wait_barrier'.
1202 * If the first succeeds but the second blocks due to the resync
1203 * thread raising the barrier, we will deadlock because the
1204 * IO to the underlying device will be queued in generic_make_request
1205 * and will never complete, so will never reduce nr_pending.
1206 * So increment nr_waiting here so no new raise_barriers will
1207 * succeed, and so the second wait_barrier cannot block.
1208 */
1209 spin_lock_irq(&conf->resync_lock);
1210 conf->nr_waiting++;
1211 spin_unlock_irq(&conf->resync_lock);
1212
1213 make_request(mddev, &bp->bio1);
1214 make_request(mddev, &bp->bio2);
1215
1216 spin_lock_irq(&conf->resync_lock);
1217 conf->nr_waiting--;
1218 wake_up(&conf->wait_barrier);
1219 spin_unlock_irq(&conf->resync_lock);
1220
1221 bio_pair_release(bp);
1222 return;
1223 bad_map:
1224 printk("md/raid10:%s: make_request bug: can't convert block across chunks"
1225 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
1226 (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
1227
1228 bio_io_error(bio);
1229 return;
1230 }
1231
1232 md_write_start(mddev, bio);
1233
1234 /*
1235 * Register the new request and wait if the reconstruction
1236 * thread has put up a bar for new requests.
1237 * Continue immediately if no resync is active currently.
1238 */
1239 wait_barrier(conf);
1240
1241 sectors = bio_sectors(bio);
1242 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1243 bio->bi_sector < conf->reshape_progress &&
1244 bio->bi_sector + sectors > conf->reshape_progress) {
1245 /* IO spans the reshape position. Need to wait for
1246 * reshape to pass
1247 */
1248 allow_barrier(conf);
1249 wait_event(conf->wait_barrier,
1250 conf->reshape_progress <= bio->bi_sector ||
1251 conf->reshape_progress >= bio->bi_sector + sectors);
1252 wait_barrier(conf);
1253 }
1254 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1255 bio_data_dir(bio) == WRITE &&
1256 (mddev->reshape_backwards
1257 ? (bio->bi_sector < conf->reshape_safe &&
1258 bio->bi_sector + sectors > conf->reshape_progress)
1259 : (bio->bi_sector + sectors > conf->reshape_safe &&
1260 bio->bi_sector < conf->reshape_progress))) {
1261 /* Need to update reshape_position in metadata */
1262 mddev->reshape_position = conf->reshape_progress;
1263 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1264 set_bit(MD_CHANGE_PENDING, &mddev->flags);
1265 md_wakeup_thread(mddev->thread);
1266 wait_event(mddev->sb_wait,
1267 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
1268
1269 conf->reshape_safe = mddev->reshape_position;
1270 }
1271
1272 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1273
1274 r10_bio->master_bio = bio;
1275 r10_bio->sectors = sectors;
1276
1277 r10_bio->mddev = mddev;
1278 r10_bio->sector = bio->bi_sector;
1279 r10_bio->state = 0;
1280
1281 /* We might need to issue multiple reads to different
1282 * devices if there are bad blocks around, so we keep
1283 * track of the number of reads in bio->bi_phys_segments.
1284 * If this is 0, there is only one r10_bio and no locking
1285 * will be needed when the request completes. If it is
1286 * non-zero, then it is the number of not-completed requests.
1287 */
1288 bio->bi_phys_segments = 0;
1289 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1290
1291 if (rw == READ) {
1292 /*
1293 * read balancing logic:
1294 */
1295 struct md_rdev *rdev;
1296 int slot;
1297
1298 read_again:
1299 rdev = read_balance(conf, r10_bio, &max_sectors);
1300 if (!rdev) {
1301 raid_end_bio_io(r10_bio);
1302 return;
1303 }
1304 slot = r10_bio->read_slot;
1305
1306 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1307 md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
1308 max_sectors);
1309
1310 r10_bio->devs[slot].bio = read_bio;
1311 r10_bio->devs[slot].rdev = rdev;
1312
1313 read_bio->bi_sector = r10_bio->devs[slot].addr +
1314 choose_data_offset(r10_bio, rdev);
1315 read_bio->bi_bdev = rdev->bdev;
1316 read_bio->bi_end_io = raid10_end_read_request;
1317 read_bio->bi_rw = READ | do_sync;
1318 read_bio->bi_private = r10_bio;
1319
1320 if (max_sectors < r10_bio->sectors) {
1321 /* Could not read all from this device, so we will
1322 * need another r10_bio.
1323 */
1324 sectors_handled = (r10_bio->sector + max_sectors
1325 - bio->bi_sector);
1326 r10_bio->sectors = max_sectors;
1327 spin_lock_irq(&conf->device_lock);
1328 if (bio->bi_phys_segments == 0)
1329 bio->bi_phys_segments = 2;
1330 else
1331 bio->bi_phys_segments++;
1332 spin_unlock_irq(&conf->device_lock);
1333 /* Cannot call generic_make_request directly
1334 * as that will be queued in __generic_make_request
1335 * and subsequent mempool_alloc might block
1336 * waiting for it. so hand bio over to raid10d.
1337 */
1338 reschedule_retry(r10_bio);
1339
1340 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1341
1342 r10_bio->master_bio = bio;
1343 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1344 r10_bio->state = 0;
1345 r10_bio->mddev = mddev;
1346 r10_bio->sector = bio->bi_sector + sectors_handled;
1347 goto read_again;
1348 } else
1349 generic_make_request(read_bio);
1350 return;
1351 }
1352
1353 /*
1354 * WRITE:
1355 */
1356 if (conf->pending_count >= max_queued_requests) {
1357 md_wakeup_thread(mddev->thread);
1358 wait_event(conf->wait_barrier,
1359 conf->pending_count < max_queued_requests);
1360 }
1361 /* first select target devices under rcu_lock and
1362 * inc refcount on their rdev. Record them by setting
1363 * bios[x] to bio
1364 * If there are known/acknowledged bad blocks on any device
1365 * on which we have seen a write error, we want to avoid
1366 * writing to those blocks. This potentially requires several
1367 * writes to write around the bad blocks. Each set of writes
1368 * gets its own r10_bio with a set of bios attached. The number
1369 * of r10_bios is recorded in bio->bi_phys_segments just as with
1370 * the read case.
1371 */
1372
1373 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1374 raid10_find_phys(conf, r10_bio);
1375 retry_write:
1376 blocked_rdev = NULL;
1377 rcu_read_lock();
1378 max_sectors = r10_bio->sectors;
1379
1380 for (i = 0; i < conf->copies; i++) {
1381 int d = r10_bio->devs[i].devnum;
1382 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
1383 struct md_rdev *rrdev = rcu_dereference(
1384 conf->mirrors[d].replacement);
1385 if (rdev == rrdev)
1386 rrdev = NULL;
1387 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1388 atomic_inc(&rdev->nr_pending);
1389 blocked_rdev = rdev;
1390 break;
1391 }
1392 if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1393 atomic_inc(&rrdev->nr_pending);
1394 blocked_rdev = rrdev;
1395 break;
1396 }
1397 if (rdev && (test_bit(Faulty, &rdev->flags)
1398 || test_bit(Unmerged, &rdev->flags)))
1399 rdev = NULL;
1400 if (rrdev && (test_bit(Faulty, &rrdev->flags)
1401 || test_bit(Unmerged, &rrdev->flags)))
1402 rrdev = NULL;
1403
1404 r10_bio->devs[i].bio = NULL;
1405 r10_bio->devs[i].repl_bio = NULL;
1406
1407 if (!rdev && !rrdev) {
1408 set_bit(R10BIO_Degraded, &r10_bio->state);
1409 continue;
1410 }
1411 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1412 sector_t first_bad;
1413 sector_t dev_sector = r10_bio->devs[i].addr;
1414 int bad_sectors;
1415 int is_bad;
1416
1417 is_bad = is_badblock(rdev, dev_sector,
1418 max_sectors,
1419 &first_bad, &bad_sectors);
1420 if (is_bad < 0) {
1421 /* Mustn't write here until the bad block
1422 * is acknowledged
1423 */
1424 atomic_inc(&rdev->nr_pending);
1425 set_bit(BlockedBadBlocks, &rdev->flags);
1426 blocked_rdev = rdev;
1427 break;
1428 }
1429 if (is_bad && first_bad <= dev_sector) {
1430 /* Cannot write here at all */
1431 bad_sectors -= (dev_sector - first_bad);
1432 if (bad_sectors < max_sectors)
1433 /* Mustn't write more than bad_sectors
1434 * to other devices yet
1435 */
1436 max_sectors = bad_sectors;
1437 /* We don't set R10BIO_Degraded as that
1438 * only applies if the disk is missing,
1439 * so it might be re-added, and we want to
1440 * know to recover this chunk.
1441 * In this case the device is here, and the
1442 * fact that this chunk is not in-sync is
1443 * recorded in the bad block log.
1444 */
1445 continue;
1446 }
1447 if (is_bad) {
1448 int good_sectors = first_bad - dev_sector;
1449 if (good_sectors < max_sectors)
1450 max_sectors = good_sectors;
1451 }
1452 }
1453 if (rdev) {
1454 r10_bio->devs[i].bio = bio;
1455 atomic_inc(&rdev->nr_pending);
1456 }
1457 if (rrdev) {
1458 r10_bio->devs[i].repl_bio = bio;
1459 atomic_inc(&rrdev->nr_pending);
1460 }
1461 }
1462 rcu_read_unlock();
1463
1464 if (unlikely(blocked_rdev)) {
1465 /* Have to wait for this device to get unblocked, then retry */
1466 int j;
1467 int d;
1468
1469 for (j = 0; j < i; j++) {
1470 if (r10_bio->devs[j].bio) {
1471 d = r10_bio->devs[j].devnum;
1472 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1473 }
1474 if (r10_bio->devs[j].repl_bio) {
1475 struct md_rdev *rdev;
1476 d = r10_bio->devs[j].devnum;
1477 rdev = conf->mirrors[d].replacement;
1478 if (!rdev) {
1479 /* Race with remove_disk */
1480 smp_mb();
1481 rdev = conf->mirrors[d].rdev;
1482 }
1483 rdev_dec_pending(rdev, mddev);
1484 }
1485 }
1486 allow_barrier(conf);
1487 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1488 wait_barrier(conf);
1489 goto retry_write;
1490 }
1491
1492 if (max_sectors < r10_bio->sectors) {
1493 /* We are splitting this into multiple parts, so
1494 * we need to prepare for allocating another r10_bio.
1495 */
1496 r10_bio->sectors = max_sectors;
1497 spin_lock_irq(&conf->device_lock);
1498 if (bio->bi_phys_segments == 0)
1499 bio->bi_phys_segments = 2;
1500 else
1501 bio->bi_phys_segments++;
1502 spin_unlock_irq(&conf->device_lock);
1503 }
1504 sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
1505
1506 atomic_set(&r10_bio->remaining, 1);
1507 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1508
1509 for (i = 0; i < conf->copies; i++) {
1510 struct bio *mbio;
1511 int d = r10_bio->devs[i].devnum;
1512 if (r10_bio->devs[i].bio) {
1513 struct md_rdev *rdev = conf->mirrors[d].rdev;
1514 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1515 md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1516 max_sectors);
1517 r10_bio->devs[i].bio = mbio;
1518
1519 mbio->bi_sector = (r10_bio->devs[i].addr+
1520 choose_data_offset(r10_bio,
1521 rdev));
1522 mbio->bi_bdev = rdev->bdev;
1523 mbio->bi_end_io = raid10_end_write_request;
1524 mbio->bi_rw =
1525 WRITE | do_sync | do_fua | do_discard | do_same;
1526 mbio->bi_private = r10_bio;
1527
1528 atomic_inc(&r10_bio->remaining);
1529
1530 cb = blk_check_plugged(raid10_unplug, mddev,
1531 sizeof(*plug));
1532 if (cb)
1533 plug = container_of(cb, struct raid10_plug_cb,
1534 cb);
1535 else
1536 plug = NULL;
1537 spin_lock_irqsave(&conf->device_lock, flags);
1538 if (plug) {
1539 bio_list_add(&plug->pending, mbio);
1540 plug->pending_cnt++;
1541 } else {
1542 bio_list_add(&conf->pending_bio_list, mbio);
1543 conf->pending_count++;
1544 }
1545 spin_unlock_irqrestore(&conf->device_lock, flags);
1546 if (!plug)
1547 md_wakeup_thread(mddev->thread);
1548 }
1549
1550 if (r10_bio->devs[i].repl_bio) {
1551 struct md_rdev *rdev = conf->mirrors[d].replacement;
1552 if (rdev == NULL) {
1553 /* Replacement just got moved to main 'rdev' */
1554 smp_mb();
1555 rdev = conf->mirrors[d].rdev;
1556 }
1557 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1558 md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1559 max_sectors);
1560 r10_bio->devs[i].repl_bio = mbio;
1561
1562 mbio->bi_sector = (r10_bio->devs[i].addr +
1563 choose_data_offset(
1564 r10_bio, rdev));
1565 mbio->bi_bdev = rdev->bdev;
1566 mbio->bi_end_io = raid10_end_write_request;
1567 mbio->bi_rw =
1568 WRITE | do_sync | do_fua | do_discard | do_same;
1569 mbio->bi_private = r10_bio;
1570
1571 atomic_inc(&r10_bio->remaining);
1572 spin_lock_irqsave(&conf->device_lock, flags);
1573 bio_list_add(&conf->pending_bio_list, mbio);
1574 conf->pending_count++;
1575 spin_unlock_irqrestore(&conf->device_lock, flags);
1576 if (!mddev_check_plugged(mddev))
1577 md_wakeup_thread(mddev->thread);
1578 }
1579 }
1580
1581 /* Don't remove the bias on 'remaining' (one_write_done) until
1582 * after checking if we need to go around again.
1583 */
1584
1585 if (sectors_handled < bio_sectors(bio)) {
1586 one_write_done(r10_bio);
1587 /* We need another r10_bio. It has already been counted
1588 * in bio->bi_phys_segments.
1589 */
1590 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1591
1592 r10_bio->master_bio = bio;
1593 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1594
1595 r10_bio->mddev = mddev;
1596 r10_bio->sector = bio->bi_sector + sectors_handled;
1597 r10_bio->state = 0;
1598 goto retry_write;
1599 }
1600 one_write_done(r10_bio);
1601
1602 /* In case raid10d snuck in to freeze_array */
1603 wake_up(&conf->wait_barrier);
1604 }
1605
1606 static void status(struct seq_file *seq, struct mddev *mddev)
1607 {
1608 struct r10conf *conf = mddev->private;
1609 int i;
1610
1611 if (conf->geo.near_copies < conf->geo.raid_disks)
1612 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1613 if (conf->geo.near_copies > 1)
1614 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1615 if (conf->geo.far_copies > 1) {
1616 if (conf->geo.far_offset)
1617 seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1618 else
1619 seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1620 }
1621 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1622 conf->geo.raid_disks - mddev->degraded);
1623 for (i = 0; i < conf->geo.raid_disks; i++)
1624 seq_printf(seq, "%s",
1625 conf->mirrors[i].rdev &&
1626 test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
1627 seq_printf(seq, "]");
1628 }
1629
1630 /* check if there are enough drives for
1631 * every block to appear on at least one.
1632 * Don't consider the device numbered 'ignore'
1633 * as we might be about to remove it.
1634 */
1635 static int _enough(struct r10conf *conf, struct geom *geo, int ignore)
1636 {
1637 int first = 0;
1638
1639 do {
1640 int n = conf->copies;
1641 int cnt = 0;
1642 int this = first;
1643 while (n--) {
1644 if (conf->mirrors[this].rdev &&
1645 this != ignore)
1646 cnt++;
1647 this = (this+1) % geo->raid_disks;
1648 }
1649 if (cnt == 0)
1650 return 0;
1651 first = (first + geo->near_copies) % geo->raid_disks;
1652 } while (first != 0);
1653 return 1;
1654 }
1655
1656 static int enough(struct r10conf *conf, int ignore)
1657 {
1658 return _enough(conf, &conf->geo, ignore) &&
1659 _enough(conf, &conf->prev, ignore);
1660 }
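/*
 * Illustrative example (assuming a 'near 2' layout over four disks, so
 * copies = 2 and near_copies = 2): _enough() walks the device sets {0,1}
 * and {2,3}.  Losing disks 0 and 2 still leaves one copy of every block,
 * so enough() returns 1; losing both disks 0 and 1 empties the first set
 * and enough() returns 0.
 */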
1661
1662 static void error(struct mddev *mddev, struct md_rdev *rdev)
1663 {
1664 char b[BDEVNAME_SIZE];
1665 struct r10conf *conf = mddev->private;
1666
1667 /*
1668 * If it is not operational, then we have already marked it as dead
1669 * else if it is the last working disk, ignore the error, let the
1670 * next level up know.
1671 * else mark the drive as failed
1672 */
1673 if (test_bit(In_sync, &rdev->flags)
1674 && !enough(conf, rdev->raid_disk))
1675 /*
1676 * Don't fail the drive, just return an IO error.
1677 */
1678 return;
1679 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1680 unsigned long flags;
1681 spin_lock_irqsave(&conf->device_lock, flags);
1682 mddev->degraded++;
1683 spin_unlock_irqrestore(&conf->device_lock, flags);
1684 /*
1685 * if recovery is running, make sure it aborts.
1686 */
1687 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1688 }
1689 set_bit(Blocked, &rdev->flags);
1690 set_bit(Faulty, &rdev->flags);
1691 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1692 printk(KERN_ALERT
1693 "md/raid10:%s: Disk failure on %s, disabling device.\n"
1694 "md/raid10:%s: Operation continuing on %d devices.\n",
1695 mdname(mddev), bdevname(rdev->bdev, b),
1696 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
1697 }
1698
1699 static void print_conf(struct r10conf *conf)
1700 {
1701 int i;
1702 struct raid10_info *tmp;
1703
1704 printk(KERN_DEBUG "RAID10 conf printout:\n");
1705 if (!conf) {
1706 printk(KERN_DEBUG "(!conf)\n");
1707 return;
1708 }
1709 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1710 conf->geo.raid_disks);
1711
1712 for (i = 0; i < conf->geo.raid_disks; i++) {
1713 char b[BDEVNAME_SIZE];
1714 tmp = conf->mirrors + i;
1715 if (tmp->rdev)
1716 printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1717 i, !test_bit(In_sync, &tmp->rdev->flags),
1718 !test_bit(Faulty, &tmp->rdev->flags),
1719 bdevname(tmp->rdev->bdev,b));
1720 }
1721 }
1722
1723 static void close_sync(struct r10conf *conf)
1724 {
1725 wait_barrier(conf);
1726 allow_barrier(conf);
1727
1728 mempool_destroy(conf->r10buf_pool);
1729 conf->r10buf_pool = NULL;
1730 }
1731
1732 static int raid10_spare_active(struct mddev *mddev)
1733 {
1734 int i;
1735 struct r10conf *conf = mddev->private;
1736 struct raid10_info *tmp;
1737 int count = 0;
1738 unsigned long flags;
1739
1740 /*
1741 * Find all non-in_sync disks within the RAID10 configuration
1742 * and mark them in_sync
1743 */
1744 for (i = 0; i < conf->geo.raid_disks; i++) {
1745 tmp = conf->mirrors + i;
1746 if (tmp->replacement
1747 && tmp->replacement->recovery_offset == MaxSector
1748 && !test_bit(Faulty, &tmp->replacement->flags)
1749 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
1750 /* Replacement has just become active */
1751 if (!tmp->rdev
1752 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1753 count++;
1754 if (tmp->rdev) {
1755 /* Replaced device not technically faulty,
1756 * but we need to be sure it gets removed
1757 * and never re-added.
1758 */
1759 set_bit(Faulty, &tmp->rdev->flags);
1760 sysfs_notify_dirent_safe(
1761 tmp->rdev->sysfs_state);
1762 }
1763 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1764 } else if (tmp->rdev
1765 && tmp->rdev->recovery_offset == MaxSector
1766 && !test_bit(Faulty, &tmp->rdev->flags)
1767 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1768 count++;
1769 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
1770 }
1771 }
1772 spin_lock_irqsave(&conf->device_lock, flags);
1773 mddev->degraded -= count;
1774 spin_unlock_irqrestore(&conf->device_lock, flags);
1775
1776 print_conf(conf);
1777 return count;
1778 }
1779
1780
1781 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1782 {
1783 struct r10conf *conf = mddev->private;
1784 int err = -EEXIST;
1785 int mirror;
1786 int first = 0;
1787 int last = conf->geo.raid_disks - 1;
1788 struct request_queue *q = bdev_get_queue(rdev->bdev);
1789
1790 if (mddev->recovery_cp < MaxSector)
1791 /* only hot-add to in-sync arrays, as recovery is
1792 * very different from resync
1793 */
1794 return -EBUSY;
1795 if (rdev->saved_raid_disk < 0 && !_enough(conf, &conf->prev, -1))
1796 return -EINVAL;
1797
1798 if (rdev->raid_disk >= 0)
1799 first = last = rdev->raid_disk;
1800
1801 if (q->merge_bvec_fn) {
1802 set_bit(Unmerged, &rdev->flags);
1803 mddev->merge_check_needed = 1;
1804 }
1805
1806 if (rdev->saved_raid_disk >= first &&
1807 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1808 mirror = rdev->saved_raid_disk;
1809 else
1810 mirror = first;
1811 for ( ; mirror <= last ; mirror++) {
1812 struct raid10_info *p = &conf->mirrors[mirror];
1813 if (p->recovery_disabled == mddev->recovery_disabled)
1814 continue;
1815 if (p->rdev) {
1816 if (!test_bit(WantReplacement, &p->rdev->flags) ||
1817 p->replacement != NULL)
1818 continue;
1819 clear_bit(In_sync, &rdev->flags);
1820 set_bit(Replacement, &rdev->flags);
1821 rdev->raid_disk = mirror;
1822 err = 0;
1823 disk_stack_limits(mddev->gendisk, rdev->bdev,
1824 rdev->data_offset << 9);
1825 conf->fullsync = 1;
1826 rcu_assign_pointer(p->replacement, rdev);
1827 break;
1828 }
1829
1830 disk_stack_limits(mddev->gendisk, rdev->bdev,
1831 rdev->data_offset << 9);
1832
1833 p->head_position = 0;
1834 p->recovery_disabled = mddev->recovery_disabled - 1;
1835 rdev->raid_disk = mirror;
1836 err = 0;
1837 if (rdev->saved_raid_disk != mirror)
1838 conf->fullsync = 1;
1839 rcu_assign_pointer(p->rdev, rdev);
1840 break;
1841 }
1842 if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
1843 /* Some requests might not have seen this new
1844 * merge_bvec_fn. We must wait for them to complete
1845 * before merging the device fully.
1846 * First we make sure any code which has tested
1847 * our function has submitted the request, then
1848 * we wait for all outstanding requests to complete.
1849 */
1850 synchronize_sched();
1851 freeze_array(conf, 0);
1852 unfreeze_array(conf);
1853 clear_bit(Unmerged, &rdev->flags);
1854 }
1855 md_integrity_add_rdev(rdev, mddev);
1856 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1857 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1858
1859 print_conf(conf);
1860 return err;
1861 }
1862
1863 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1864 {
1865 struct r10conf *conf = mddev->private;
1866 int err = 0;
1867 int number = rdev->raid_disk;
1868 struct md_rdev **rdevp;
1869 struct raid10_info *p = conf->mirrors + number;
1870
1871 print_conf(conf);
1872 if (rdev == p->rdev)
1873 rdevp = &p->rdev;
1874 else if (rdev == p->replacement)
1875 rdevp = &p->replacement;
1876 else
1877 return 0;
1878
1879 if (test_bit(In_sync, &rdev->flags) ||
1880 atomic_read(&rdev->nr_pending)) {
1881 err = -EBUSY;
1882 goto abort;
1883 }
1884 /* Only remove non-faulty devices if recovery
1885 * is not possible.
1886 */
1887 if (!test_bit(Faulty, &rdev->flags) &&
1888 mddev->recovery_disabled != p->recovery_disabled &&
1889 (!p->replacement || p->replacement == rdev) &&
1890 number < conf->geo.raid_disks &&
1891 enough(conf, -1)) {
1892 err = -EBUSY;
1893 goto abort;
1894 }
1895 *rdevp = NULL;
1896 synchronize_rcu();
1897 if (atomic_read(&rdev->nr_pending)) {
1898 /* lost the race, try later */
1899 err = -EBUSY;
1900 *rdevp = rdev;
1901 goto abort;
1902 } else if (p->replacement) {
1903 /* We must have just cleared 'rdev' */
1904 p->rdev = p->replacement;
1905 clear_bit(Replacement, &p->replacement->flags);
1906 smp_mb(); /* Make sure other CPUs may see both as identical
1907 * but will never see neither -- if they are careful.
1908 */
1909 p->replacement = NULL;
1910 clear_bit(WantReplacement, &rdev->flags);
1911 } else
1912 /* We might have just removed the Replacement as faulty.
1913 * Clear the flag just in case.
1914 */
1915 clear_bit(WantReplacement, &rdev->flags);
1916
1917 err = md_integrity_register(mddev);
1918
1919 abort:
1920
1921 print_conf(conf);
1922 return err;
1923 }
1924
1925
1926 static void end_sync_read(struct bio *bio, int error)
1927 {
1928 struct r10bio *r10_bio = bio->bi_private;
1929 struct r10conf *conf = r10_bio->mddev->private;
1930 int d;
1931
1932 if (bio == r10_bio->master_bio) {
1933 /* this is a reshape read */
1934 d = r10_bio->read_slot; /* really the read dev */
1935 } else
1936 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
1937
1938 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1939 set_bit(R10BIO_Uptodate, &r10_bio->state);
1940 else
1941 /* The write handler will notice the lack of
1942 * R10BIO_Uptodate and record any errors etc.
1943 */
1944 atomic_add(r10_bio->sectors,
1945 &conf->mirrors[d].rdev->corrected_errors);
1946
1947 /* for reconstruct, we always reschedule after a read.
1948 * for resync, only after all reads
1949 */
1950 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1951 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1952 atomic_dec_and_test(&r10_bio->remaining)) {
1953 /* we have read all the blocks,
1954 * do the comparison in process context in raid10d
1955 */
1956 reschedule_retry(r10_bio);
1957 }
1958 }
1959
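/*
 * Drop one reference on a recovery/resync r10_bio.  When the last reference
 * goes, either hand the r10_bio to raid10d (if MadeGood/WriteError needs
 * handling) or free the buffer; the primary r10_bio additionally reports
 * completion via md_done_sync().  Recovery r10_bios may be chained through
 * master_bio, so walk up the chain applying the same step to each.
 */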
1960 static void end_sync_request(struct r10bio *r10_bio)
1961 {
1962 struct mddev *mddev = r10_bio->mddev;
1963
1964 while (atomic_dec_and_test(&r10_bio->remaining)) {
1965 if (r10_bio->master_bio == NULL) {
1966 /* the primary of several recovery bios */
1967 sector_t s = r10_bio->sectors;
1968 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1969 test_bit(R10BIO_WriteError, &r10_bio->state))
1970 reschedule_retry(r10_bio);
1971 else
1972 put_buf(r10_bio);
1973 md_done_sync(mddev, s, 1);
1974 break;
1975 } else {
1976 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
1977 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1978 test_bit(R10BIO_WriteError, &r10_bio->state))
1979 reschedule_retry(r10_bio);
1980 else
1981 put_buf(r10_bio);
1982 r10_bio = r10_bio2;
1983 }
1984 }
1985 }
1986
1987 static void end_sync_write(struct bio *bio, int error)
1988 {
1989 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1990 struct r10bio *r10_bio = bio->bi_private;
1991 struct mddev *mddev = r10_bio->mddev;
1992 struct r10conf *conf = mddev->private;
1993 int d;
1994 sector_t first_bad;
1995 int bad_sectors;
1996 int slot;
1997 int repl;
1998 struct md_rdev *rdev = NULL;
1999
2000 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2001 if (repl)
2002 rdev = conf->mirrors[d].replacement;
2003 else
2004 rdev = conf->mirrors[d].rdev;
2005
2006 if (!uptodate) {
2007 if (repl)
2008 md_error(mddev, rdev);
2009 else {
2010 set_bit(WriteErrorSeen, &rdev->flags);
2011 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2012 set_bit(MD_RECOVERY_NEEDED,
2013 &rdev->mddev->recovery);
2014 set_bit(R10BIO_WriteError, &r10_bio->state);
2015 }
2016 } else if (is_badblock(rdev,
2017 r10_bio->devs[slot].addr,
2018 r10_bio->sectors,
2019 &first_bad, &bad_sectors))
2020 set_bit(R10BIO_MadeGood, &r10_bio->state);
2021
2022 rdev_dec_pending(rdev, mddev);
2023
2024 end_sync_request(r10_bio);
2025 }
2026
2027 /*
2028 * Note: sync and recovery are handled very differently for raid10.
2029 * This code is for resync.
2030 * For resync, we read through virtual addresses and read all blocks.
2031 * If there is any error, we schedule a write. The lowest numbered
2032 * drive is authoritative.
2033 * However, requests come in terms of physical addresses, so we need to map.
2034 * For every physical address there are raid_disks/copies virtual addresses,
2035 * which is always at least one, but is not necessarily an integer.
2036 * This means that a physical address can span multiple chunks, so we may
2037 * have to submit multiple io requests for a single sync request.
2038 */
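/*
 * Worked example (illustrative only): with raid_disks = 5 and
 * near_copies * far_copies = 2, each physical address maps to 5/2 = 2.5
 * virtual addresses, so one resync request can cross a chunk boundary and
 * require more than one I/O.
 */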
2039 /*
2040 * We check if all blocks are in-sync and only write to blocks that
2041 * aren't in sync
2042 */
2043 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2044 {
2045 struct r10conf *conf = mddev->private;
2046 int i, first;
2047 struct bio *tbio, *fbio;
2048 int vcnt;
2049
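	/* Bias 'remaining' by one so the r10_bio cannot complete until all
	 * writes below have been submitted; the bias is dropped at 'done:'.
	 */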
2050 atomic_set(&r10_bio->remaining, 1);
2051
2052 /* find the first device with a successful read */
2053 for (i=0; i<conf->copies; i++)
2054 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
2055 break;
2056
2057 if (i == conf->copies)
2058 goto done;
2059
2060 first = i;
2061 fbio = r10_bio->devs[i].bio;
2062
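	/* Number of PAGE_SIZE bio_vec entries needed to cover r10_bio->sectors,
	 * rounded up (sectors are 512 bytes, PAGE_SIZE >> 9 sectors per page).
	 */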
2063 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
2064 /* now find blocks with errors */
2065 for (i=0 ; i < conf->copies ; i++) {
2066 int j, d;
2067
2068 tbio = r10_bio->devs[i].bio;
2069
2070 if (tbio->bi_end_io != end_sync_read)
2071 continue;
2072 if (i == first)
2073 continue;
2074 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
2075 /* We know that the bi_io_vec layout is the same for
2076 * both 'first' and 'i', so we just compare them.
2077 * All vec entries are PAGE_SIZE.
2078 */
2079 int sectors = r10_bio->sectors;
2080 for (j = 0; j < vcnt; j++) {
2081 int len = PAGE_SIZE;
2082 if (sectors < (len / 512))
2083 len = sectors * 512;
2084 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
2085 page_address(tbio->bi_io_vec[j].bv_page),
2086 len))
2087 break;
2088 sectors -= len/512;
2089 }
2090 if (j == vcnt)
2091 continue;
2092 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2093 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2094 /* Don't fix anything. */
2095 continue;
2096 }
2097 /* Ok, we need to write this bio, either to correct an
2098 * inconsistency or to correct an unreadable block.
2099 * First we need to fixup bv_offset, bv_len and
2100 * bi_vecs, as the read request might have corrupted these
2101 */
2102 bio_reset(tbio);
2103
2104 tbio->bi_vcnt = vcnt;
2105 tbio->bi_size = r10_bio->sectors << 9;
2106 tbio->bi_rw = WRITE;
2107 tbio->bi_private = r10_bio;
2108 tbio->bi_sector = r10_bio->devs[i].addr;
2109
2110 for (j=0; j < vcnt ; j++) {
2111 tbio->bi_io_vec[j].bv_offset = 0;
2112 tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
2113
2114 memcpy(page_address(tbio->bi_io_vec[j].bv_page),
2115 page_address(fbio->bi_io_vec[j].bv_page),
2116 PAGE_SIZE);
2117 }
2118 tbio->bi_end_io = end_sync_write;
2119
2120 d = r10_bio->devs[i].devnum;
2121 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2122 atomic_inc(&r10_bio->remaining);
2123 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2124
2125 tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
2126 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
2127 generic_make_request(tbio);
2128 }
2129
2130 /* Now write out to any replacement devices
2131 * that are active
2132 */
2133 for (i = 0; i < conf->copies; i++) {
2134 int j, d;
2135
2136 tbio = r10_bio->devs[i].repl_bio;
2137 if (!tbio || !tbio->bi_end_io)
2138 continue;
2139 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2140 && r10_bio->devs[i].bio != fbio)
2141 for (j = 0; j < vcnt; j++)
2142 memcpy(page_address(tbio->bi_io_vec[j].bv_page),
2143 page_address(fbio->bi_io_vec[j].bv_page),
2144 PAGE_SIZE);
2145 d = r10_bio->devs[i].devnum;
2146 atomic_inc(&r10_bio->remaining);
2147 md_sync_acct(conf->mirrors[d].replacement->bdev,
2148 bio_sectors(tbio));
2149 generic_make_request(tbio);
2150 }
2151
2152 done:
2153 if (atomic_dec_and_test(&r10_bio->remaining)) {
2154 md_done_sync(mddev, r10_bio->sectors, 1);
2155 put_buf(r10_bio);
2156 }
2157 }
2158
2159 /*
2160 * Now for the recovery code.
2161 * Recovery happens across physical sectors.
2162 * We recover all non-in_sync drives by finding the virtual address of
2163 * each, and then choose a working drive that also has that virt address.
2164 * There is a separate r10_bio for each non-in_sync drive.
2165 * Only the first two slots are in use: the first for reading,
2166 * the second for writing.
2167 *
2168 */
2169 static void fix_recovery_read_error(struct r10bio *r10_bio)
2170 {
2171 /* We got a read error during recovery.
2172 * We repeat the read in smaller page-sized sections.
2173 * If a read succeeds, write it to the new device or record
2174 * a bad block if we cannot.
2175 * If a read fails, record a bad block on both old and
2176 * new devices.
2177 */
2178 struct mddev *mddev = r10_bio->mddev;
2179 struct r10conf *conf = mddev->private;
2180 struct bio *bio = r10_bio->devs[0].bio;
2181 sector_t sect = 0;
2182 int sectors = r10_bio->sectors;
2183 int idx = 0;
2184 int dr = r10_bio->devs[0].devnum;
2185 int dw = r10_bio->devs[1].devnum;
2186
2187 while (sectors) {
2188 int s = sectors;
2189 struct md_rdev *rdev;
2190 sector_t addr;
2191 int ok;
2192
2193 if (s > (PAGE_SIZE>>9))
2194 s = PAGE_SIZE >> 9;
2195
2196 rdev = conf->mirrors[dr].rdev;
2197 addr = r10_bio->devs[0].addr + sect;
2198 ok = sync_page_io(rdev,
2199 addr,
2200 s << 9,
2201 bio->bi_io_vec[idx].bv_page,
2202 READ, false);
2203 if (ok) {
2204 rdev = conf->mirrors[dw].rdev;
2205 addr = r10_bio->devs[1].addr + sect;
2206 ok = sync_page_io(rdev,
2207 addr,
2208 s << 9,
2209 bio->bi_io_vec[idx].bv_page,
2210 WRITE, false);
2211 if (!ok) {
2212 set_bit(WriteErrorSeen, &rdev->flags);
2213 if (!test_and_set_bit(WantReplacement,
2214 &rdev->flags))
2215 set_bit(MD_RECOVERY_NEEDED,
2216 &rdev->mddev->recovery);
2217 }
2218 }
2219 if (!ok) {
2220 /* We don't worry if we cannot set a bad block -
2221 * it really is bad so there is no loss in not
2222 * recording it yet
2223 */
2224 rdev_set_badblocks(rdev, addr, s, 0);
2225
2226 if (rdev != conf->mirrors[dw].rdev) {
2227 /* need bad block on destination too */
2228 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2229 addr = r10_bio->devs[1].addr + sect;
2230 ok = rdev_set_badblocks(rdev2, addr, s, 0);
2231 if (!ok) {
2232 /* just abort the recovery */
2233 printk(KERN_NOTICE
2234 "md/raid10:%s: recovery aborted"
2235 " due to read error\n",
2236 mdname(mddev));
2237
2238 conf->mirrors[dw].recovery_disabled
2239 = mddev->recovery_disabled;
2240 set_bit(MD_RECOVERY_INTR,
2241 &mddev->recovery);
2242 break;
2243 }
2244 }
2245 }
2246
2247 sectors -= s;
2248 sect += s;
2249 idx++;
2250 }
2251 }
2252
2253 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2254 {
2255 struct r10conf *conf = mddev->private;
2256 int d;
2257 struct bio *wbio, *wbio2;
2258
2259 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2260 fix_recovery_read_error(r10_bio);
2261 end_sync_request(r10_bio);
2262 return;
2263 }
2264
2265 /*
2266 * share the pages with the first bio
2267 * and submit the write request
2268 */
2269 d = r10_bio->devs[1].devnum;
2270 wbio = r10_bio->devs[1].bio;
2271 wbio2 = r10_bio->devs[1].repl_bio;
2272 /* Need to test wbio2->bi_end_io before we call
2273 * generic_make_request on wbio: if wbio2->bi_end_io is NULL,
2274 * submitting wbio is free to free wbio2.
2275 */
2276 if (wbio2 && !wbio2->bi_end_io)
2277 wbio2 = NULL;
2278 if (wbio->bi_end_io) {
2279 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2280 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2281 generic_make_request(wbio);
2282 }
2283 if (wbio2) {
2284 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2285 md_sync_acct(conf->mirrors[d].replacement->bdev,
2286 bio_sectors(wbio2));
2287 generic_make_request(wbio2);
2288 }
2289 }
2290
2291
2292 /*
2293 * Used by fix_read_error() to decay the per rdev read_errors.
2294 * We halve the read error count for every hour that has elapsed
2295 * since the last recorded read error.
2296 *
2297 */
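/* Example: 20 recorded read errors and two elapsed hours decay to 20 >> 2 = 5. */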
2298 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2299 {
2300 struct timespec cur_time_mon;
2301 unsigned long hours_since_last;
2302 unsigned int read_errors = atomic_read(&rdev->read_errors);
2303
2304 ktime_get_ts(&cur_time_mon);
2305
2306 if (rdev->last_read_error.tv_sec == 0 &&
2307 rdev->last_read_error.tv_nsec == 0) {
2308 /* first time we've seen a read error */
2309 rdev->last_read_error = cur_time_mon;
2310 return;
2311 }
2312
2313 hours_since_last = (cur_time_mon.tv_sec -
2314 rdev->last_read_error.tv_sec) / 3600;
2315
2316 rdev->last_read_error = cur_time_mon;
2317
2318 /*
2319 * if hours_since_last is > the number of bits in read_errors
2320 * just set read errors to 0. We do this to avoid
2321 * overflowing the shift of read_errors by hours_since_last.
2322 */
2323 if (hours_since_last >= 8 * sizeof(read_errors))
2324 atomic_set(&rdev->read_errors, 0);
2325 else
2326 atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2327 }
2328
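/*
 * Synchronous page I/O with bad-block awareness.  Returns 1 if the I/O
 * succeeded, -1 if the range overlaps a known bad block (and, for a write,
 * WriteErrorSeen is already set) so no I/O is attempted, and 0 if the I/O
 * failed, in which case a bad block is recorded or the device is failed.
 */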
2329 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2330 int sectors, struct page *page, int rw)
2331 {
2332 sector_t first_bad;
2333 int bad_sectors;
2334
2335 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2336 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2337 return -1;
2338 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
2339 /* success */
2340 return 1;
2341 if (rw == WRITE) {
2342 set_bit(WriteErrorSeen, &rdev->flags);
2343 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2344 set_bit(MD_RECOVERY_NEEDED,
2345 &rdev->mddev->recovery);
2346 }
2347 /* need to record an error - either for the block or the device */
2348 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2349 md_error(rdev->mddev, rdev);
2350 return 0;
2351 }
2352
2353 /*
2354 * This is a kernel thread which:
2355 *
2356 * 1. Retries failed read operations on working mirrors.
2357 * 2. Updates the raid superblock when problems are encountered.
2358 * 3. Performs writes following reads for array synchronising.
2359 */
2360
2361 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2362 {
2363 int sect = 0; /* Offset from r10_bio->sector */
2364 int sectors = r10_bio->sectors;
2365 struct md_rdev*rdev;
2366 int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2367 int d = r10_bio->devs[r10_bio->read_slot].devnum;
2368
2369 /* still own a reference to this rdev, so it cannot
2370 * have been cleared recently.
2371 */
2372 rdev = conf->mirrors[d].rdev;
2373
2374 if (test_bit(Faulty, &rdev->flags))
2375 /* drive has already been failed, just ignore any
2376 * more fix_read_error() attempts */
2377 return;
2378
2379 check_decay_read_errors(mddev, rdev);
2380 atomic_inc(&rdev->read_errors);
2381 if (atomic_read(&rdev->read_errors) > max_read_errors) {
2382 char b[BDEVNAME_SIZE];
2383 bdevname(rdev->bdev, b);
2384
2385 printk(KERN_NOTICE
2386 "md/raid10:%s: %s: Raid device exceeded "
2387 "read_error threshold [cur %d:max %d]\n",
2388 mdname(mddev), b,
2389 atomic_read(&rdev->read_errors), max_read_errors);
2390 printk(KERN_NOTICE
2391 "md/raid10:%s: %s: Failing raid device\n",
2392 mdname(mddev), b);
2393 md_error(mddev, conf->mirrors[d].rdev);
2394 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
2395 return;
2396 }
2397
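	/* Work through the failed range one page (PAGE_SIZE >> 9 sectors) at a
	 * time: find a copy that still reads cleanly, write that data back over
	 * the other in-sync copies, then re-read them to verify the correction.
	 */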
2398 while(sectors) {
2399 int s = sectors;
2400 int sl = r10_bio->read_slot;
2401 int success = 0;
2402 int start;
2403
2404 if (s > (PAGE_SIZE>>9))
2405 s = PAGE_SIZE >> 9;
2406
2407 rcu_read_lock();
2408 do {
2409 sector_t first_bad;
2410 int bad_sectors;
2411
2412 d = r10_bio->devs[sl].devnum;
2413 rdev = rcu_dereference(conf->mirrors[d].rdev);
2414 if (rdev &&
2415 !test_bit(Unmerged, &rdev->flags) &&
2416 test_bit(In_sync, &rdev->flags) &&
2417 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2418 &first_bad, &bad_sectors) == 0) {
2419 atomic_inc(&rdev->nr_pending);
2420 rcu_read_unlock();
2421 success = sync_page_io(rdev,
2422 r10_bio->devs[sl].addr +
2423 sect,
2424 s<<9,
2425 conf->tmppage, READ, false);
2426 rdev_dec_pending(rdev, mddev);
2427 rcu_read_lock();
2428 if (success)
2429 break;
2430 }
2431 sl++;
2432 if (sl == conf->copies)
2433 sl = 0;
2434 } while (!success && sl != r10_bio->read_slot);
2435 rcu_read_unlock();
2436
2437 if (!success) {
2438 /* Cannot read from anywhere, just mark the block
2439 * as bad on the first device to discourage future
2440 * reads.
2441 */
2442 int dn = r10_bio->devs[r10_bio->read_slot].devnum;
2443 rdev = conf->mirrors[dn].rdev;
2444
2445 if (!rdev_set_badblocks(
2446 rdev,
2447 r10_bio->devs[r10_bio->read_slot].addr
2448 + sect,
2449 s, 0)) {
2450 md_error(mddev, rdev);
2451 r10_bio->devs[r10_bio->read_slot].bio
2452 = IO_BLOCKED;
2453 }
2454 break;
2455 }
2456
2457 start = sl;
2458 /* write it back and re-read */
2459 rcu_read_lock();
2460 while (sl != r10_bio->read_slot) {
2461 char b[BDEVNAME_SIZE];
2462
2463 if (sl==0)
2464 sl = conf->copies;
2465 sl--;
2466 d = r10_bio->devs[sl].devnum;
2467 rdev = rcu_dereference(conf->mirrors[d].rdev);
2468 if (!rdev ||
2469 test_bit(Unmerged, &rdev->flags) ||
2470 !test_bit(In_sync, &rdev->flags))
2471 continue;
2472
2473 atomic_inc(&rdev->nr_pending);
2474 rcu_read_unlock();
2475 if (r10_sync_page_io(rdev,
2476 r10_bio->devs[sl].addr +
2477 sect,
2478 s, conf->tmppage, WRITE)
2479 == 0) {
2480 /* Well, this device is dead */
2481 printk(KERN_NOTICE
2482 "md/raid10:%s: read correction "
2483 "write failed"
2484 " (%d sectors at %llu on %s)\n",
2485 mdname(mddev), s,
2486 (unsigned long long)(
2487 sect +
2488 choose_data_offset(r10_bio,
2489 rdev)),
2490 bdevname(rdev->bdev, b));
2491 printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2492 "drive\n",
2493 mdname(mddev),
2494 bdevname(rdev->bdev, b));
2495 }
2496 rdev_dec_pending(rdev, mddev);
2497 rcu_read_lock();
2498 }
2499 sl = start;
2500 while (sl != r10_bio->read_slot) {
2501 char b[BDEVNAME_SIZE];
2502
2503 if (sl==0)
2504 sl = conf->copies;
2505 sl--;
2506 d = r10_bio->devs[sl].devnum;
2507 rdev = rcu_dereference(conf->mirrors[d].rdev);
2508 if (!rdev ||
2509 !test_bit(In_sync, &rdev->flags))
2510 continue;
2511
2512 atomic_inc(&rdev->nr_pending);
2513 rcu_read_unlock();
2514 switch (r10_sync_page_io(rdev,
2515 r10_bio->devs[sl].addr +
2516 sect,
2517 s, conf->tmppage,
2518 READ)) {
2519 case 0:
2520 /* Well, this device is dead */
2521 printk(KERN_NOTICE
2522 "md/raid10:%s: unable to read back "
2523 "corrected sectors"
2524 " (%d sectors at %llu on %s)\n",
2525 mdname(mddev), s,
2526 (unsigned long long)(
2527 sect +
2528 choose_data_offset(r10_bio, rdev)),
2529 bdevname(rdev->bdev, b));
2530 printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2531 "drive\n",
2532 mdname(mddev),
2533 bdevname(rdev->bdev, b));
2534 break;
2535 case 1:
2536 printk(KERN_INFO
2537 "md/raid10:%s: read error corrected"
2538 " (%d sectors at %llu on %s)\n",
2539 mdname(mddev), s,
2540 (unsigned long long)(
2541 sect +
2542 choose_data_offset(r10_bio, rdev)),
2543 bdevname(rdev->bdev, b));
2544 atomic_add(s, &rdev->corrected_errors);
2545 }
2546
2547 rdev_dec_pending(rdev, mddev);
2548 rcu_read_lock();
2549 }
2550 rcu_read_unlock();
2551
2552 sectors -= s;
2553 sect += s;
2554 }
2555 }
2556
2557 static int narrow_write_error(struct r10bio *r10_bio, int i)
2558 {
2559 struct bio *bio = r10_bio->master_bio;
2560 struct mddev *mddev = r10_bio->mddev;
2561 struct r10conf *conf = mddev->private;
2562 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2563 /* bio has the data to be written to slot 'i' where
2564 * we just recently had a write error.
2565 * We repeatedly clone the bio and trim down to one block,
2566 * then try the write. Where the write fails we record
2567 * a bad block.
2568 * It is conceivable that the bio doesn't exactly align with
2569 * blocks. We must handle this.
2570 *
2571 * We currently own a reference to the rdev.
2572 */
2573
2574 int block_sectors;
2575 sector_t sector;
2576 int sectors;
2577 int sect_to_write = r10_bio->sectors;
2578 int ok = 1;
2579
2580 if (rdev->badblocks.shift < 0)
2581 return 0;
2582
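	/* The first write covers from 'sector' up to the next bad-block-shift
	 * boundary; e.g. with block_sectors = 8 and sector = 1003 it is 5
	 * sectors, after which writes proceed in whole 8-sector blocks.
	 */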
2583 block_sectors = 1 << rdev->badblocks.shift;
2584 sector = r10_bio->sector;
2585 sectors = ((r10_bio->sector + block_sectors)
2586 & ~(sector_t)(block_sectors - 1))
2587 - sector;
2588
2589 while (sect_to_write) {
2590 struct bio *wbio;
2591 if (sectors > sect_to_write)
2592 sectors = sect_to_write;
2593 /* Write at 'sector' for 'sectors' */
2594 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2595 md_trim_bio(wbio, sector - bio->bi_sector, sectors);
2596 wbio->bi_sector = (r10_bio->devs[i].addr+
2597 choose_data_offset(r10_bio, rdev) +
2598 (sector - r10_bio->sector));
2599 wbio->bi_bdev = rdev->bdev;
2600 if (submit_bio_wait(WRITE, wbio) == 0)
2601 /* Failure! */
2602 ok = rdev_set_badblocks(rdev, sector,
2603 sectors, 0)
2604 && ok;
2605
2606 bio_put(wbio);
2607 sect_to_write -= sectors;
2608 sector += sectors;
2609 sectors = block_sectors;
2610 }
2611 return ok;
2612 }
2613
2614 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2615 {
2616 int slot = r10_bio->read_slot;
2617 struct bio *bio;
2618 struct r10conf *conf = mddev->private;
2619 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2620 char b[BDEVNAME_SIZE];
2621 unsigned long do_sync;
2622 int max_sectors;
2623
2624 /* we got a read error. Maybe the drive is bad. Maybe just
2625 * the block and we can fix it.
2626 * We freeze all other IO, and try reading the block from
2627 * other devices. When we find one, we re-write
2628 * and check it that fixes the read error.
2629 * This is all done synchronously while the array is
2630 * frozen.
2631 */
2632 bio = r10_bio->devs[slot].bio;
2633 bdevname(bio->bi_bdev, b);
2634 bio_put(bio);
2635 r10_bio->devs[slot].bio = NULL;
2636
2637 if (mddev->ro == 0) {
2638 freeze_array(conf, 1);
2639 fix_read_error(conf, mddev, r10_bio);
2640 unfreeze_array(conf);
2641 } else
2642 r10_bio->devs[slot].bio = IO_BLOCKED;
2643
2644 rdev_dec_pending(rdev, mddev);
2645
2646 read_more:
2647 rdev = read_balance(conf, r10_bio, &max_sectors);
2648 if (rdev == NULL) {
2649 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
2650 " read error for block %llu\n",
2651 mdname(mddev), b,
2652 (unsigned long long)r10_bio->sector);
2653 raid_end_bio_io(r10_bio);
2654 return;
2655 }
2656
2657 do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
2658 slot = r10_bio->read_slot;
2659 printk_ratelimited(
2660 KERN_ERR
2661 "md/raid10:%s: %s: redirecting "
2662 "sector %llu to another mirror\n",
2663 mdname(mddev),
2664 bdevname(rdev->bdev, b),
2665 (unsigned long long)r10_bio->sector);
2666 bio = bio_clone_mddev(r10_bio->master_bio,
2667 GFP_NOIO, mddev);
2668 md_trim_bio(bio,
2669 r10_bio->sector - bio->bi_sector,
2670 max_sectors);
2671 r10_bio->devs[slot].bio = bio;
2672 r10_bio->devs[slot].rdev = rdev;
2673 bio->bi_sector = r10_bio->devs[slot].addr
2674 + choose_data_offset(r10_bio, rdev);
2675 bio->bi_bdev = rdev->bdev;
2676 bio->bi_rw = READ | do_sync;
2677 bio->bi_private = r10_bio;
2678 bio->bi_end_io = raid10_end_read_request;
2679 if (max_sectors < r10_bio->sectors) {
2680 /* Drat - have to split this up more */
2681 struct bio *mbio = r10_bio->master_bio;
2682 int sectors_handled =
2683 r10_bio->sector + max_sectors
2684 - mbio->bi_sector;
2685 r10_bio->sectors = max_sectors;
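		/* md re-uses bi_phys_segments as a count of r10_bios outstanding
		 * against the master bio; account for the extra r10_bio allocated
		 * below for the rest of the request.
		 */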
2686 spin_lock_irq(&conf->device_lock);
2687 if (mbio->bi_phys_segments == 0)
2688 mbio->bi_phys_segments = 2;
2689 else
2690 mbio->bi_phys_segments++;
2691 spin_unlock_irq(&conf->device_lock);
2692 generic_make_request(bio);
2693
2694 r10_bio = mempool_alloc(conf->r10bio_pool,
2695 GFP_NOIO);
2696 r10_bio->master_bio = mbio;
2697 r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
2698 r10_bio->state = 0;
2699 set_bit(R10BIO_ReadError,
2700 &r10_bio->state);
2701 r10_bio->mddev = mddev;
2702 r10_bio->sector = mbio->bi_sector
2703 + sectors_handled;
2704
2705 goto read_more;
2706 } else
2707 generic_make_request(bio);
2708 }
2709
2710 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2711 {
2712 /* Some sort of write request has finished and it
2713 * succeeded in writing where we thought there was a
2714 * bad block. So forget the bad block.
2715 * Or possibly it failed and we need to record
2716 * a bad block.
2717 */
2718 int m;
2719 struct md_rdev *rdev;