md/raid1: clear bad-block record when write succeeds.
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers/md/raid1.c
/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"

#define DEBUG 0
#define PRINTK(x...) do { if (DEBUG) printk(x); } while (0)

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define NR_RAID1_BIOS 256

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(r1bio_t, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
//#define RESYNC_BLOCK_SIZE PAGE_SIZE
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)
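
/*
 * For reference, assuming 4 KiB pages: RESYNC_BLOCK_SIZE is 64 KiB,
 * so RESYNC_SECTORS = 64*1024 >> 9 = 128 sectors and RESYNC_PAGES =
 * (64 KiB + 4 KiB - 1) / 4 KiB = 16 pages per resync bio.
 */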

static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct page *page;
	r1bio_t *r1_bio;
	struct bio *bio;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		j = pi->raid_disks;
	else
		j = 1;
	while (j--) {
		bio = r1_bio->bios[j];
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
			bio->bi_vcnt = i+1;
		}
	}
	/* If not user-requested, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i=0; i<RESYNC_PAGES ; i++)
			for (j=1; j<pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	for (j=0 ; j < pi->raid_disks; j++)
		for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
			put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while ( ++j < pi->raid_disks )
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}
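
/*
 * Resulting layout: for a plain resync, every bio in the r1bio shares
 * the pages of bios[0] -- data is read once and written out from the
 * same memory.  For a user-requested check/repair, each bio gets
 * private pages so process_checks() can memcmp() the copies.
 */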

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i,j;
	r1bio_t *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i=0 ; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(r1bio_t *r1_bio)
{
	conf_t *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(r1bio_t *r1_bio)
{
	conf_t *conf = r1_bio->mddev->private;
	int i;

	for (i=0; i<conf->raid_disks; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(r1bio_t *r1_bio)
{
	unsigned long flags;
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(r1bio_t *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	int done;
	conf_t *conf = r1_bio->mddev->private;

	if (bio->bi_phys_segments) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	} else
		done = 1;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (done) {
		bio_endio(bio, 0);
		/*
		 * Wake up any possible resync thread that waits for the device
		 * to go idle.
		 */
		allow_barrier(conf);
	}
}

static void raid_end_bio_io(r1bio_t *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n",
			(bio_data_dir(bio) == WRITE) ? "write" : "read",
			(unsigned long long) bio->bi_sector,
			(unsigned long long) bio->bi_sector +
				(bio->bi_size >> 9) - 1);

		call_bio_endio(r1_bio);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, r1bio_t *r1_bio)
{
	conf_t *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

static void raid1_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t *r1_bio = bio->bi_private;
	int mirror;
	conf_t *conf = r1_bio->mddev->private;

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate)
		raid_end_bio_io(r1_bio);
	else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		printk_ratelimited(
			KERN_ERR "md/raid1:%s: %s: "
			"rescheduling sector %llu\n",
			mdname(conf->mddev),
			bdevname(conf->mirrors[mirror].rdev->bdev,
				 b),
			(unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
	}

	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}

static void r1_bio_write_done(r1bio_t *r1_bio)
{
	if (atomic_dec_and_test(&r1_bio->remaining))
	{
		/* it really is the end of this request */
		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
			/* free extra copy of the data pages */
			int i = r1_bio->behind_page_count;
			while (i--)
				safe_put_page(r1_bio->behind_pages[i]);
			kfree(r1_bio->behind_pages);
			r1_bio->behind_pages = NULL;
		}
		/* clear the bitmap if all writes complete successfully */
		bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
				r1_bio->sectors,
				!test_bit(R1BIO_Degraded, &r1_bio->state),
				test_bit(R1BIO_BehindIO, &r1_bio->state));
		md_write_end(r1_bio->mddev);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}
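
/*
 * Note: when R1BIO_MadeGood is set, the r1_bio is handed back to
 * raid1d via reschedule_retry() instead of being completed here, so
 * that rdev_clear_badblocks() can run from process context.  That is
 * the path by which a recorded bad-block range is cleared once a
 * write to it succeeds.
 */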

static void raid1_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t *r1_bio = bio->bi_private;
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	conf_t *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;

	for (mirror = 0; mirror < conf->raid_disks; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	r1_bio->bios[mirror] = NULL;
	to_put = bio;
	if (!uptodate) {
		md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
		/* an I/O failed, we can't clear the bitmap */
		set_bit(R1BIO_Degraded, &r1_bio->state);
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(conf->mirrors[mirror].rdev,
				r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors)) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	update_head_pos(mirror, r1_bio);

	if (behind) {
		if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
				       (unsigned long long) mbio->bi_sector,
				       (unsigned long long) mbio->bi_sector +
				       (mbio->bi_size >> 9) - 1);
				call_bio_endio(r1_bio);
			}
		}
	}
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(conf->mirrors[mirror].rdev,
				 conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}


/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int start_disk;
	int best_disk;
	int i;
	sector_t best_dist;
	mdk_rdev_t *rdev;
	int choose_first;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist = MaxSector;
	best_good_sectors = 0;

	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync)) {
		choose_first = 1;
		start_disk = 0;
	} else {
		choose_first = 0;
		start_disk = conf->last_used;
	}

	for (i = 0 ; i < conf->raid_disks ; i++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;

		int disk = start_disk + i;
		if (disk >= conf->raid_disks)
			disk -= conf->raid_disks;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_disk < 0)
				best_disk = disk;
			continue;
		}
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device..
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first
		    /* Don't change to another disk for sequential reads */
		    || conf->next_seq_sect == this_sector
		    || dist == 0
		    /* If device is idle, use it */
		    || atomic_read(&rdev->nr_pending) == 0) {
			best_disk = disk;
			break;
		}
		if (dist < best_dist) {
			best_dist = dist;
			best_disk = disk;
		}
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		sectors = best_good_sectors;
		conf->next_seq_sect = this_sector + sectors;
		conf->last_used = best_disk;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}
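
/*
 * Worked example (illustrative, not from the original source): with
 * mirror head positions at sectors 1000 and 5000, a read at sector
 * 1008 gives dist 8 vs 3992, so the first mirror wins on distance --
 * unless the request continues a sequential stream (next_seq_sect
 * match), lands exactly on a head (dist == 0), or some mirror is
 * completely idle, each of which ends the scan immediately.
 */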

int md_raid1_congested(mddev_t *mddev, int bits)
{
	conf_t *conf = mddev->private;
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			BUG_ON(!q);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1<<BDI_async_congested)) || 1)
				ret |= bdi_congested(&q->backing_dev_info, bits);
			else
				ret &= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(md_raid1_congested);

static int raid1_congested(void *data, int bits)
{
	mddev_t *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid1_congested(mddev, bits);
}

static void flush_pending_writes(conf_t *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to
		 * disk before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}
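
/*
 * Ordering note: bitmap_unplug() pushes the dirty-bitmap updates to
 * disk before the queued data writes are submitted, so a bit covering
 * a region always reaches stable storage no later than the data write
 * it describes.
 */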

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
#define RESYNC_DEPTH 32

static void raise_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock, );

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock, );

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(conf_t *conf)
{
	unsigned long flags;
	BUG_ON(conf->barrier <= 0);
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock,
				    );
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}
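
/*
 * Typical pairing, shown side by side (illustrative only):
 *
 *	regular IO:			background (resync/recovery) IO:
 *		wait_barrier(conf);		raise_barrier(conf);
 *		... submit bios ...		... submit sync bios ...
 *		allow_barrier(conf);		lower_barrier(conf);
 *
 * While any raise_barrier() is outstanding, wait_barrier() blocks,
 * and vice versa: the two kinds of IO never overlap.
 */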

static void freeze_array(conf_t *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+1.
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (1)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->nr_pending == conf->nr_queued+1,
			    conf->resync_lock,
			    flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}
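
/*
 * Passing flush_pending_writes() as the wait's side-effect matters:
 * writes parked on pending_bio_list still count in nr_pending, so
 * they must be submitted or nr_pending could never drop to
 * nr_queued+1 and freeze_array() would wait forever.
 */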

/* duplicate the data pages for behind I/O
 */
static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio)
{
	int i;
	struct bio_vec *bvec;
	struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page*),
				      GFP_NOIO);
	if (unlikely(!pages))
		return;

	bio_for_each_segment(bvec, bio, i) {
		pages[i] = alloc_page(GFP_NOIO);
		if (unlikely(!pages[i]))
			goto do_sync_io;
		memcpy(kmap(pages[i]) + bvec->bv_offset,
		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(pages[i]);
		kunmap(bvec->bv_page);
	}
	r1_bio->behind_pages = pages;
	r1_bio->behind_page_count = bio->bi_vcnt;
	set_bit(R1BIO_BehindIO, &r1_bio->state);
	return;

do_sync_io:
	for (i = 0; i < bio->bi_vcnt; i++)
		if (pages[i])
			put_page(pages[i]);
	kfree(pages);
	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
}
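
/*
 * Once a private copy exists in r1_bio->behind_pages, the master bio
 * can be ended as soon as all non-WriteMostly mirrors have the data;
 * the WriteMostly devices keep writing "behind" from the copy, which
 * r1_bio_write_done() frees when the last write completes.
 */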

static int make_request(mddev_t *mddev, struct bio * bio)
{
	conf_t *conf = mddev->private;
	mirror_info_t *mirror;
	r1bio_t *r1_bio;
	struct bio *read_bio;
	int i, disks;
	struct bitmap *bitmap;
	unsigned long flags;
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
	mdk_rdev_t *blocked_rdev;
	int plugged;
	int first_clone;
	int sectors_handled;
	int max_sectors;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */

	md_write_start(mddev, bio); /* wait on superblock update early */

	if (bio_data_dir(bio) == WRITE &&
	    bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
	    bio->bi_sector < mddev->suspend_hi) {
		/* As the suspend_* range is controlled by
		 * userspace, we want an interruptible
		 * wait.
		 */
		DEFINE_WAIT(w);
		for (;;) {
			flush_signals(current);
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_INTERRUPTIBLE);
			if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
			    bio->bi_sector >= mddev->suspend_hi)
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}

	wait_barrier(conf);

	bitmap = mddev->bitmap;

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio->bi_size >> 9;
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_sector;

	/* We might need to issue multiple reads to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of reads in bio->bi_phys_segments.
	 * If this is 0, there is only one r1_bio and no locking
	 * will be needed when requests complete.  If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk;

read_again:
		rdisk = read_balance(conf, r1_bio, &max_sectors);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return 0;
		}
		mirror = conf->mirrors + rdisk;

		if (test_bit(WriteMostly, &mirror->rdev->flags) &&
		    bitmap) {
			/* Reading from a write-mostly device must
			 * take care not to over-take any writes
			 * that are 'behind'
			 */
			wait_event(bitmap->behind_wait,
				   atomic_read(&bitmap->behind_writes) == 0);
		}
		r1_bio->read_disk = rdisk;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
			    max_sectors);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r1_bio;

		if (max_sectors < r1_bio->sectors) {
			/* could not read all from this device, so we will
			 * need another r1_bio.
			 */

			sectors_handled = (r1_bio->sector + max_sectors
					   - bio->bi_sector);
			r1_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (bio->bi_phys_segments == 0)
				bio->bi_phys_segments = 2;
			else
				bio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			/* Cannot call generic_make_request directly
			 * as that will be queued in __make_request
			 * and subsequent mempool_alloc might block waiting
			 * for it.  So hand bio over to raid1d.
			 */
			reschedule_retry(r1_bio);

			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

			r1_bio->master_bio = bio;
			r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
			r1_bio->state = 0;
			r1_bio->mddev = mddev;
			r1_bio->sector = bio->bi_sector + sectors_handled;
			goto read_again;
		} else
			generic_make_request(read_bio);
		return 0;
	}

	/*
	 * WRITE:
	 */
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device on
	 * which we have seen a write error, we want to avoid writing those
	 * blocks.
	 * This potentially requires several writes to write around
	 * the bad blocks.  Each set of writes gets its own r1bio
	 * with a set of bios attached.
	 */
	plugged = mddev_check_plugged(mddev);

	disks = conf->raid_disks;
 retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	max_sectors = r1_bio->sectors;
	for (i = 0; i < disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		r1_bio->bios[i] = NULL;
		if (!rdev || test_bit(Faulty, &rdev->flags)) {
			set_bit(R1BIO_Degraded, &r1_bio->state);
			continue;
		}

		atomic_inc(&rdev->nr_pending);
		if (test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, r1_bio->sector,
					     max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* mustn't write here until the bad block is
				 * acknowledged*/
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
			if (is_bad && first_bad <= r1_bio->sector) {
				/* Cannot write here at all */
				bad_sectors -= (r1_bio->sector - first_bad);
				if (bad_sectors < max_sectors)
					/* mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				rdev_dec_pending(rdev, mddev);
				/* We don't set R1BIO_Degraded as that
				 * only applies if the disk is
				 * missing, so it might be re-added,
				 * and we want to know to recover this
				 * chunk.
				 * In this case the device is here,
				 * and the fact that this chunk is not
				 * in-sync is recorded in the bad
				 * block log
				 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - r1_bio->sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		r1_bio->bios[i] = bio;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
		r1_bio->state = 0;
		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

	if (max_sectors < r1_bio->sectors) {
		/* We are splitting this write into multiple parts, so
		 * we need to prepare for allocating another r1_bio.
		 */
		r1_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;
		else
			bio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
	}
	sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;

	atomic_set(&r1_bio->remaining, 1);
	atomic_set(&r1_bio->behind_remaining, 0);

	first_clone = 1;
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);

		if (first_clone) {
			/* do behind I/O ?
			 * Not if there are too many, or cannot
			 * allocate memory, or a reader on WriteMostly
			 * is waiting for behind writes to flush */
			if (bitmap &&
			    (atomic_read(&bitmap->behind_writes)
			     < mddev->bitmap_info.max_write_behind) &&
			    !waitqueue_active(&bitmap->behind_wait))
				alloc_behind_pages(mbio, r1_bio);

			bitmap_startwrite(bitmap, r1_bio->sector,
					  r1_bio->sectors,
					  test_bit(R1BIO_BehindIO,
						   &r1_bio->state));
			first_clone = 0;
		}
		if (r1_bio->behind_pages) {
			struct bio_vec *bvec;
			int j;

			/* Yes, I really want the '__' version so that
			 * we clear any unused pointer in the io_vec, rather
			 * than leave them unchanged.  This is important
			 * because when we come to free the pages, we won't
			 * know the original bi_idx, so we just free
			 * them all
			 */
			__bio_for_each_segment(bvec, mbio, j, 0)
				bvec->bv_page = r1_bio->behind_pages[j];
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		r1_bio->bios[i] = mbio;

		mbio->bi_sector	= (r1_bio->sector +
				   conf->mirrors[i].rdev->data_offset);
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io	= raid1_end_write_request;
		mbio->bi_rw = WRITE | do_flush_fua | do_sync;
		mbio->bi_private = r1_bio;

		atomic_inc(&r1_bio->remaining);
		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	r1_bio_write_done(r1_bio);

	/* In case raid1d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);

	if (sectors_handled < (bio->bi_size >> 9)) {
		/* We need another r1_bio.  It has already been counted
		 * in bio->bi_phys_segments
		 */
		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
		r1_bio->master_bio = bio;
		r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
		r1_bio->state = 0;
		r1_bio->mddev = mddev;
		r1_bio->sector = bio->bi_sector + sectors_handled;
		goto retry_write;
	}

	if (do_sync || !bitmap || !plugged)
		md_wakeup_thread(mddev->thread);

	return 0;
}
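
/*
 * Splitting example (illustrative): a 256-sector write that hits a
 * known bad block 100 sectors in is clipped to max_sectors = 100; the
 * master bio's bi_phys_segments is set to 2 and we loop back to
 * retry_write to build a second r1_bio for the remaining 156 sectors.
 * Each completed r1_bio decrements bi_phys_segments in
 * call_bio_endio(), and the master bio finishes when it hits zero.
 */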

static void status(struct seq_file *seq, mddev_t *mddev)
{
	conf_t *conf = mddev->private;
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}


static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	conf_t *conf = mddev->private;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1) {
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 * However don't try a recovery from this drive as
		 * it is very likely to fail.
		 */
		conf->recovery_disabled = mddev->recovery_disabled;
		return;
	}
	set_bit(Blocked, &rdev->flags);
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		set_bit(Faulty, &rdev->flags);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	} else
		set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
	       "md/raid1:%s: Operation continuing on %d devices.\n",
	       mdname(mddev), bdevname(rdev->bdev, b),
	       mdname(mddev), conf->raid_disks - mddev->degraded);
}

static void print_conf(conf_t *conf)
{
	int i;

	printk(KERN_DEBUG "RAID1 conf printout:\n");
	if (!conf) {
		printk(KERN_DEBUG "(!conf)\n");
		return;
	}
	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
	       conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &rdev->flags),
			       !test_bit(Faulty, &rdev->flags),
			       bdevname(rdev->bdev,b));
	}
	rcu_read_unlock();
}

static void close_sync(conf_t *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}

static int raid1_spare_active(mddev_t *mddev)
{
	int i;
	conf_t *conf = mddev->private;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
		if (rdev
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}


static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	conf_t *conf = mddev->private;
	int err = -EEXIST;
	int mirror = 0;
	mirror_info_t *p;
	int first = 0;
	int last = mddev->raid_disks - 1;

	if (mddev->recovery_disabled == conf->recovery_disabled)
		return -EBUSY;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	for (mirror = first; mirror <= last; mirror++)
		if (!(p = conf->mirrors + mirror)->rdev) {

			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			/* as we don't honour merge_bvec_fn, we must
			 * never risk violating it, so limit
			 * ->max_segments to one lying with a single
			 * page, as a one page request is never in
			 * violation.
			 */
			if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
				blk_queue_max_segments(mddev->queue, 1);
				blk_queue_segment_boundary(mddev->queue,
							   PAGE_CACHE_SIZE - 1);
			}

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
	md_integrity_add_rdev(rdev, mddev);
	print_conf(conf);
	return err;
}

static int raid1_remove_disk(mddev_t *mddev, int number)
{
	conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	mirror_info_t *p = conf->mirrors + number;

	print_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->recovery_disabled != conf->recovery_disabled &&
		    mddev->degraded < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
			goto abort;
		}
		err = md_integrity_register(mddev);
	}
abort:

	print_conf(conf);
	return err;
}

static void end_sync_read(struct bio *bio, int error)
{
	r1bio_t *r1_bio = bio->bi_private;
	int i;

	for (i=r1_bio->mddev->raid_disks; i--; )
		if (r1_bio->bios[i] == bio)
			break;
	BUG_ON(i < 0);
	update_head_pos(i, r1_bio);
	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
}

static void end_sync_write(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t *r1_bio = bio->bi_private;
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev->private;
	int i;
	int mirror=0;
	sector_t first_bad;
	int bad_sectors;

	for (i = 0; i < conf->raid_disks; i++)
		if (r1_bio->bios[i] == bio) {
			mirror = i;
			break;
		}
	if (!uptodate) {
		sector_t sync_blocks = 0;
		sector_t s = r1_bio->sector;
		long sectors_to_go = r1_bio->sectors;
		/* make sure these bits don't get cleared. */
		do {
			bitmap_end_sync(mddev->bitmap, s,
					&sync_blocks, 1);
			s += sync_blocks;
			sectors_to_go -= sync_blocks;
		} while (sectors_to_go > 0);
		md_error(mddev, conf->mirrors[mirror].rdev);
	} else if (is_badblock(conf->mirrors[mirror].rdev,
			       r1_bio->sector,
			       r1_bio->sectors,
			       &first_bad, &bad_sectors))
		set_bit(R1BIO_MadeGood, &r1_bio->state);

	update_head_pos(mirror, r1_bio);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		int s = r1_bio->sectors;
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, uptodate);
		}
	}
}

static int fix_sync_read_error(r1bio_t *r1_bio)
{
	/* Try some synchronous reads of other devices to get
	 * good data, much like with normal read errors.  Only
	 * read into the pages we already have so we don't
	 * need to re-issue the read request.
	 * We don't need to freeze the array, because being in an
	 * active sync request, there is no normal IO, and
	 * no overlapping syncs.
	 * We don't need to check is_badblock() again as we
	 * made sure that anything with a bad block in range
	 * will have bi_end_io clear.
	 */
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev->private;
	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
	sector_t sect = r1_bio->sector;
	int sectors = r1_bio->sectors;
	int idx = 0;

	while (sectors) {
		int s = sectors;
		int d = r1_bio->read_disk;
		int success = 0;
		mdk_rdev_t *rdev;
		int start;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;
		do {
			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
				/* No rcu protection needed here devices
				 * can only be removed when no resync is
				 * active, and resync is currently active
				 */
				rdev = conf->mirrors[d].rdev;
				if (sync_page_io(rdev, sect, s<<9,
						 bio->bi_io_vec[idx].bv_page,
						 READ, false)) {
					success = 1;
					break;
				}
			}
			d++;
			if (d == conf->raid_disks)
				d = 0;
		} while (!success && d != r1_bio->read_disk);

		if (!success) {
			char b[BDEVNAME_SIZE];
			/* Cannot read from anywhere, array is toast */
			md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
			printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
			       " for block %llu\n",
			       mdname(mddev),
			       bdevname(bio->bi_bdev, b),
			       (unsigned long long)r1_bio->sector);
			md_done_sync(mddev, r1_bio->sectors, 0);
			put_buf(r1_bio);
			return 0;
		}

		start = d;
		/* write it back and re-read */
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (sync_page_io(rdev, sect, s<<9,
					 bio->bi_io_vec[idx].bv_page,
					 WRITE, false) == 0) {
				r1_bio->bios[d]->bi_end_io = NULL;
				rdev_dec_pending(rdev, mddev);
				md_error(mddev, rdev);
			}
		}
		d = start;
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (sync_page_io(rdev, sect, s<<9,
					 bio->bi_io_vec[idx].bv_page,
					 READ, false) == 0)
				md_error(mddev, rdev);
			else
				atomic_add(s, &rdev->corrected_errors);
		}
		sectors -= s;
		sect += s;
		idx++;
	}
	set_bit(R1BIO_Uptodate, &r1_bio->state);
	set_bit(BIO_UPTODATE, &bio->bi_flags);
	return 1;
}
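
/*
 * Shape of the recovery above, per PAGE_SIZE chunk: (1) try each
 * mirror in turn until one read succeeds, (2) write the good data
 * back to the mirrors tried along the way, (3) re-read from them to
 * verify, failing any device that cannot be fixed.  fix_read_error()
 * below follows the same pattern for normal (non-resync) reads.
 */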

static int process_checks(r1bio_t *r1_bio)
{
	/* We have read all readable devices.  If we haven't
	 * got the block, then there is no hope left.
	 * If we have, then we want to do a comparison
	 * and skip the write if everything is the same.
	 * If any blocks failed to read, then we need to
	 * attempt an over-write
	 */
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev->private;
	int primary;
	int i;

	for (primary = 0; primary < conf->raid_disks; primary++)
		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
			r1_bio->bios[primary]->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
			break;
		}
	r1_bio->read_disk = primary;
	for (i = 0; i < conf->raid_disks; i++) {
		int j;
		int vcnt = r1_bio->sectors >> (PAGE_SHIFT - 9);
		struct bio *pbio = r1_bio->bios[primary];
		struct bio *sbio = r1_bio->bios[i];
		int size;

		if (r1_bio->bios[i]->bi_end_io != end_sync_read)
			continue;

		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
			for (j = vcnt; j-- ; ) {
				struct page *p, *s;
				p = pbio->bi_io_vec[j].bv_page;
				s = sbio->bi_io_vec[j].bv_page;
				if (memcmp(page_address(p),
					   page_address(s),
					   PAGE_SIZE))
					break;
			}
		} else
			j = 0;
		if (j >= 0)
			mddev->resync_mismatches += r1_bio->sectors;
		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
			/* No need to write to this device. */
			sbio->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
			continue;
		}
		/* fixup the bio for reuse */
		sbio->bi_vcnt = vcnt;
		sbio->bi_size = r1_bio->sectors << 9;
		sbio->bi_idx = 0;
		sbio->bi_phys_segments = 0;
		sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
		sbio->bi_flags |= 1 << BIO_UPTODATE;
		sbio->bi_next = NULL;
		sbio->bi_sector = r1_bio->sector +
			conf->mirrors[i].rdev->data_offset;
		sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		size = sbio->bi_size;
		for (j = 0; j < vcnt ; j++) {
			struct bio_vec *bi;
			bi = &sbio->bi_io_vec[j];
			bi->bv_offset = 0;
			if (size > PAGE_SIZE)
				bi->bv_len = PAGE_SIZE;
			else
				bi->bv_len = size;
			size -= PAGE_SIZE;
			memcpy(page_address(bi->bv_page),
			       page_address(pbio->bi_io_vec[j].bv_page),
			       PAGE_SIZE);
		}
	}
	return 0;
}
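
/*
 * In short: the first device that read successfully becomes the
 * reference; any mirror whose pages differ (or whose read failed) is
 * counted in resync_mismatches and, unless this is a read-only
 * "check" pass, is rewritten from the reference by
 * sync_request_write().
 */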

static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
{
	conf_t *conf = mddev->private;
	int i;
	int disks = conf->raid_disks;
	struct bio *bio, *wbio;

	bio = r1_bio->bios[r1_bio->read_disk];

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		/* ouch - failed to read all of that. */
		if (!fix_sync_read_error(r1_bio))
			return;

	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		if (process_checks(r1_bio) < 0)
			return;
	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;

		wbio->bi_rw = WRITE;
		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		md_done_sync(mddev, r1_bio->sectors, 1);
		put_buf(r1_bio);
	}
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */

static void fix_read_error(conf_t *conf, int read_disk,
			   sector_t sect, int sectors)
{
	mddev_t *mddev = conf->mddev;
	while (sectors) {
		int s = sectors;
		int d = read_disk;
		int success = 0;
		int start;
		mdk_rdev_t *rdev;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		do {
			/* Note: no rcu protection needed here
			 * as this is synchronous in the raid1d thread
			 * which is the thread that might remove
			 * a device.  If raid1d ever becomes multi-threaded....
			 */
			sector_t first_bad;
			int bad_sectors;

			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags) &&
			    is_badblock(rdev, sect, s,
					&first_bad, &bad_sectors) == 0 &&
			    sync_page_io(rdev, sect, s<<9,
					 conf->tmppage, READ, false))
				success = 1;
			else {
				d++;
				if (d == conf->raid_disks)
					d = 0;
			}
		} while (!success && d != read_disk);

		if (!success) {
			/* Cannot read from anywhere -- bye bye array */
			md_error(mddev, conf->mirrors[read_disk].rdev);
			break;
		}
		/* write it back and re-read */
		start = d;
		while (d != read_disk) {
			if (d==0)
				d = conf->raid_disks;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				if (sync_page_io(rdev, sect, s<<9,
						 conf->tmppage, WRITE, false)
				    == 0)
					/* Well, this device is dead */
					md_error(mddev, rdev);
			}
		}
		d = start;
		while (d != read_disk) {
			char b[BDEVNAME_SIZE];
			if (d==0)
				d = conf->raid_disks;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				if (sync_page_io(rdev, sect, s<<9,
						 conf->tmppage, READ, false)
				    == 0)
					/* Well, this device is dead */
					md_error(mddev, rdev);
				else {
					atomic_add(s, &rdev->corrected_errors);
					printk(KERN_INFO
					       "md/raid1:%s: read error corrected "
					       "(%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
					       (unsigned long long)(sect +
						   rdev->data_offset),
					       bdevname(rdev->bdev, b));
				}
			}
		}
		sectors -= s;
		sect += s;
	}
}
1725
1da177e4
LT
1726static void raid1d(mddev_t *mddev)
1727{
1728 r1bio_t *r1_bio;
1729 struct bio *bio;
1730 unsigned long flags;
070ec55d 1731 conf_t *conf = mddev->private;
1da177e4 1732 struct list_head *head = &conf->retry_list;
1da177e4 1733 mdk_rdev_t *rdev;
e1dfa0a2 1734 struct blk_plug plug;
1da177e4
LT
1735
1736 md_check_recovery(mddev);
e1dfa0a2
N
1737
1738 blk_start_plug(&plug);
1da177e4
LT
1739 for (;;) {
1740 char b[BDEVNAME_SIZE];
191ea9b2 1741
c3b328ac
N
1742 if (atomic_read(&mddev->plug_cnt) == 0)
1743 flush_pending_writes(conf);
191ea9b2 1744
a35e63ef
N
1745 spin_lock_irqsave(&conf->device_lock, flags);
1746 if (list_empty(head)) {
1747 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4 1748 break;
a35e63ef 1749 }
1da177e4
LT
1750 r1_bio = list_entry(head->prev, r1bio_t, retry_list);
1751 list_del(head->prev);
ddaf22ab 1752 conf->nr_queued--;
1da177e4
LT
1753 spin_unlock_irqrestore(&conf->device_lock, flags);
1754
1755 mddev = r1_bio->mddev;
070ec55d 1756 conf = mddev->private;
4367af55
N
1757 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
1758 if (test_bit(R1BIO_MadeGood, &r1_bio->state)) {
1759 int m;
1760 int s = r1_bio->sectors;
1761 for (m = 0; m < conf->raid_disks ; m++) {
1762 struct bio *bio = r1_bio->bios[m];
1763 if (bio->bi_end_io != NULL &&
1764 test_bit(BIO_UPTODATE,
1765 &bio->bi_flags)) {
1766 rdev = conf->mirrors[m].rdev;
1767 rdev_clear_badblocks(
1768 rdev,
1769 r1_bio->sector,
1770 r1_bio->sectors);
1771 }
1772 }
1773 put_buf(r1_bio);
1774 md_done_sync(mddev, s, 1);
1775 } else
1776 sync_request_write(mddev, r1_bio);
1777 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state)) {
1778 int m;
1779 for (m = 0; m < conf->raid_disks ; m++)
1780 if (r1_bio->bios[m] == IO_MADE_GOOD) {
1781 rdev = conf->mirrors[m].rdev;
1782 rdev_clear_badblocks(
1783 rdev,
1784 r1_bio->sector,
1785 r1_bio->sectors);
1786 rdev_dec_pending(rdev, mddev);
1787 }
1788 raid_end_bio_io(r1_bio);
1789 } else if (test_bit(R1BIO_ReadError, &r1_bio->state)) {
1da177e4 1790 int disk;
d2eb35ac 1791 int max_sectors;
ddaf22ab 1792
d2eb35ac 1793 clear_bit(R1BIO_ReadError, &r1_bio->state);
1794 /* We got a read error. Maybe the drive is bad, or maybe just
1795 * this block, in which case we can fix it.
1796 * We freeze all other IO, and try reading the block from
1797 * other devices. When we find one, we re-write the block
1798 * and re-read to check that this fixes the read error.
1799 * This is all done synchronously while the array is
1800 * frozen.
1801 */
1802 if (mddev->ro == 0) {
1803 freeze_array(conf);
1804 fix_read_error(conf, r1_bio->read_disk,
1805 r1_bio->sector,
1806 r1_bio->sectors);
1807 unfreeze_array(conf);
1808 } else
1809 md_error(mddev,
1810 conf->mirrors[r1_bio->read_disk].rdev);
ddaf22ab 1811
1da177e4 1812 bio = r1_bio->bios[r1_bio->read_disk];
1813 bdevname(bio->bi_bdev, b);
1814 read_more:
1815 disk = read_balance(conf, r1_bio, &max_sectors);
1816 if (disk == -1) {
9dd1e2fa 1817 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
1da177e4 1818 " read error for block %llu\n",
d2eb35ac 1819 mdname(mddev), b,
1820 (unsigned long long)r1_bio->sector);
1821 raid_end_bio_io(r1_bio);
1822 } else {
2c7d46ec 1823 const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
1824 if (bio) {
1825 r1_bio->bios[r1_bio->read_disk] =
1826 mddev->ro ? IO_BLOCKED : NULL;
1827 bio_put(bio);
1828 }
1da177e4 1829 r1_bio->read_disk = disk;
1830 bio = bio_clone_mddev(r1_bio->master_bio,
1831 GFP_NOIO, mddev);
1832 md_trim_bio(bio,
1833 r1_bio->sector - bio->bi_sector,
1834 max_sectors);
1835 r1_bio->bios[r1_bio->read_disk] = bio;
1836 rdev = conf->mirrors[disk].rdev;
1837 printk_ratelimited(
1838 KERN_ERR
1839 "md/raid1:%s: redirecting sector %llu"
1840 " to other mirror: %s\n",
1841 mdname(mddev),
1842 (unsigned long long)r1_bio->sector,
1843 bdevname(rdev->bdev, b));
1844 bio->bi_sector = r1_bio->sector + rdev->data_offset;
1845 bio->bi_bdev = rdev->bdev;
1846 bio->bi_end_io = raid1_end_read_request;
7b6d91da 1847 bio->bi_rw = READ | do_sync;
1da177e4 1848 bio->bi_private = r1_bio;
1849 if (max_sectors < r1_bio->sectors) {
1850 /* Drat - have to split this up more */
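/* Editor's note: bi_phys_segments of the master bio is reused
 * here as a count of the r1bios still outstanding against it,
 * so the master is only completed once every split has finished.
 */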
1851 struct bio *mbio = r1_bio->master_bio;
1852 int sectors_handled =
1853 r1_bio->sector + max_sectors
1854 - mbio->bi_sector;
1855 r1_bio->sectors = max_sectors;
1856 spin_lock_irq(&conf->device_lock);
1857 if (mbio->bi_phys_segments == 0)
1858 mbio->bi_phys_segments = 2;
1859 else
1860 mbio->bi_phys_segments++;
1861 spin_unlock_irq(&conf->device_lock);
1862 generic_make_request(bio);
1863 bio = NULL;
1864
1865 r1_bio = mempool_alloc(conf->r1bio_pool,
1866 GFP_NOIO);
1867
1868 r1_bio->master_bio = mbio;
1869 r1_bio->sectors = (mbio->bi_size >> 9)
1870 - sectors_handled;
1871 r1_bio->state = 0;
1872 set_bit(R1BIO_ReadError,
1873 &r1_bio->state);
1874 r1_bio->mddev = mddev;
1875 r1_bio->sector = mbio->bi_sector
1876 + sectors_handled;
1877
1878 goto read_more;
1879 } else
1880 generic_make_request(bio);
1da177e4 1881 }
1882 } else {
1883 /* just a partial read to be scheduled from separate
1884 * context
1885 */
1886 generic_make_request(r1_bio->bios[r1_bio->read_disk]);
1da177e4 1887 }
1d9d5241 1888 cond_resched();
1889 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
1890 md_check_recovery(mddev);
1da177e4 1891 }
e1dfa0a2 1892 blk_finish_plug(&plug);
1893 }
1894
1895
1896 static int init_resync(conf_t *conf)
1897 {
1898 int buffs;
1899
1900 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
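/* 2048k window / 64k blocks = 32 preallocated resync buffers */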
9e77c485 1901 BUG_ON(conf->r1buf_pool);
1902 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
1903 conf->poolinfo);
1904 if (!conf->r1buf_pool)
1905 return -ENOMEM;
1906 conf->next_resync = 0;
1907 return 0;
1908 }
1909
1910 /*
1911 * perform a "sync" on one "block"
1912 *
1913 * We need to make sure that no normal I/O request - particularly write
1914 * requests - conflict with active sync requests.
1915 *
1916 * This is achieved by tracking pending requests and a 'barrier' concept
1917 * that can be installed to exclude normal IO requests.
1918 */
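/* Editor's sketch of the pairing, assumed from the scheme above
 * rather than spelled out here: normal I/O brackets itself with
 *
 *	wait_barrier(conf);  ... submit bio ...  allow_barrier(conf);
 *
 * while resync brackets itself with
 *
 *	raise_barrier(conf);  ... sync I/O ...   lower_barrier(conf);
 *
 * raise_barrier() waits for in-flight normal I/O to drain and
 * holds off new requests; lower_barrier() lets them flow again.
 */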
1919
57afd89f 1920static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1da177e4 1921{
070ec55d 1922 conf_t *conf = mddev->private;
1923 r1bio_t *r1_bio;
1924 struct bio *bio;
1925 sector_t max_sector, nr_sectors;
3e198f78 1926 int disk = -1;
1da177e4 1927 int i;
1928 int wonly = -1;
1929 int write_targets = 0, read_targets = 0;
57dab0bd 1930 sector_t sync_blocks;
e3b9703e 1931 int still_degraded = 0;
1932 int good_sectors = RESYNC_SECTORS;
1933 int min_bad = 0; /* number of sectors that are bad in all devices */
1934
1935 if (!conf->r1buf_pool)
1936 if (init_resync(conf))
57afd89f 1937 return 0;
1da177e4 1938
58c0fed4 1939 max_sector = mddev->dev_sectors;
1da177e4 1940 if (sector_nr >= max_sector) {
1941 /* If we aborted, we need to abort the
1942 * sync on the 'current' bitmap chunk (there will
1943 * only be one in raid1 resync).
1944 * We can find the current address in mddev->curr_resync.
1945 */
1946 if (mddev->curr_resync < max_sector) /* aborted */
1947 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
191ea9b2 1948 &sync_blocks, 1);
6a806c51 1949 else /* completed sync */
191ea9b2 1950 conf->fullsync = 0;
1951
1952 bitmap_close_sync(mddev->bitmap);
1953 close_sync(conf);
1954 return 0;
1955 }
1956
1957 if (mddev->bitmap == NULL &&
1958 mddev->recovery_cp == MaxSector &&
6394cca5 1959 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
1960 conf->fullsync == 0) {
1961 *skipped = 1;
1962 return max_sector - sector_nr;
1963 }
1964 /* Before building a request, check if we can skip these blocks.
1965 * This call to bitmap_start_sync doesn't actually record anything.
1966 */
e3b9703e 1967 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
e5de485f 1968 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1969 /* We can skip this block, and probably several more */
1970 *skipped = 1;
1971 return sync_blocks;
1972 }
1da177e4 1973 /*
1974 * If there is non-resync activity waiting for a turn,
1975 * and resync is going fast enough,
1976 * then let it through before starting on this new sync request.
1da177e4 1977 */
17999be4 1978 if (!go_faster && conf->nr_waiting)
1da177e4 1979 msleep_interruptible(1000);
17999be4 1980
b47490c9 1981 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
1c4588e9 1982 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
1983 raise_barrier(conf);
1984
1985 conf->next_resync = sector_nr;
1da177e4 1986
3e198f78 1987 rcu_read_lock();
1da177e4 1988 /*
1989 * If we get a correctable read error during resync or recovery,
1990 * we might want to read from a different device. So we
1991 * flag all drives that could conceivably be read from for READ,
1992 * and any others (which will be non-In_sync devices) for WRITE.
1993 * If a read fails, we try reading from something else for which READ
1994 * is OK.
1da177e4 1995 */
1da177e4 1996
1997 r1_bio->mddev = mddev;
1998 r1_bio->sector = sector_nr;
191ea9b2 1999 r1_bio->state = 0;
1da177e4 2000 set_bit(R1BIO_IsSync, &r1_bio->state);
2001
2002 for (i=0; i < conf->raid_disks; i++) {
3e198f78 2003 mdk_rdev_t *rdev;
2004 bio = r1_bio->bios[i];
2005
2006 /* take from bio_init */
2007 bio->bi_next = NULL;
db8d9d35 2008 bio->bi_flags &= ~(BIO_POOL_MASK-1);
1da177e4 2009 bio->bi_flags |= 1 << BIO_UPTODATE;
db8d9d35 2010 bio->bi_comp_cpu = -1;
802ba064 2011 bio->bi_rw = READ;
2012 bio->bi_vcnt = 0;
2013 bio->bi_idx = 0;
2014 bio->bi_phys_segments = 0;
2015 bio->bi_size = 0;
2016 bio->bi_end_io = NULL;
2017 bio->bi_private = NULL;
2018
2019 rdev = rcu_dereference(conf->mirrors[i].rdev);
2020 if (rdev == NULL ||
06f60385 2021 test_bit(Faulty, &rdev->flags)) {
e3b9703e 2022 still_degraded = 1;
3e198f78 2023 } else if (!test_bit(In_sync, &rdev->flags)) {
2024 bio->bi_rw = WRITE;
2025 bio->bi_end_io = end_sync_write;
2026 write_targets ++;
2027 } else {
2028 /* may need to read from here */
2029 sector_t first_bad = MaxSector;
2030 int bad_sectors;
2031
2032 if (is_badblock(rdev, sector_nr, good_sectors,
2033 &first_bad, &bad_sectors)) {
2034 if (first_bad > sector_nr)
2035 good_sectors = first_bad - sector_nr;
2036 else {
2037 bad_sectors -= (sector_nr - first_bad);
2038 if (min_bad == 0 ||
2039 min_bad > bad_sectors)
2040 min_bad = bad_sectors;
2041 }
2042 }
2043 if (sector_nr < first_bad) {
2044 if (test_bit(WriteMostly, &rdev->flags)) {
2045 if (wonly < 0)
2046 wonly = i;
2047 } else {
2048 if (disk < 0)
2049 disk = i;
2050 }
2051 bio->bi_rw = READ;
2052 bio->bi_end_io = end_sync_read;
2053 read_targets++;
3e198f78 2054 }
3e198f78 2055 }
2056 if (bio->bi_end_io) {
2057 atomic_inc(&rdev->nr_pending);
2058 bio->bi_sector = sector_nr + rdev->data_offset;
2059 bio->bi_bdev = rdev->bdev;
2060 bio->bi_private = r1_bio;
2061 }
1da177e4 2062 }
2063 rcu_read_unlock();
2064 if (disk < 0)
2065 disk = wonly;
2066 r1_bio->read_disk = disk;
191ea9b2 2067
2068 if (read_targets == 0 && min_bad > 0) {
2069 /* These sectors are bad on all InSync devices, so we
2070 * need to mark them bad on all write targets
2071 */
2072 int ok = 1;
2073 for (i = 0 ; i < conf->raid_disks ; i++)
2074 if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2075 mdk_rdev_t *rdev =
2076 rcu_dereference(conf->mirrors[i].rdev);
2077 ok = rdev_set_badblocks(rdev, sector_nr,
2078 min_bad, 0
2079 ) && ok;
2080 }
2081 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2082 *skipped = 1;
2083 put_buf(r1_bio);
2084
2085 if (!ok) {
2086 /* Cannot record the badblocks, so need to
2087 * abort the resync.
2088 * If there are multiple read targets, could just
2089 * fail the really bad ones ???
2090 */
2091 conf->recovery_disabled = mddev->recovery_disabled;
2092 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2093 return 0;
2094 } else
2095 return min_bad;
2096
2097 }
2098 if (min_bad > 0 && min_bad < good_sectors) {
2099 /* only resync enough to reach the next bad->good
2100 * transition */
2101 good_sectors = min_bad;
2102 }
2103
2104 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2105 /* extra read targets are also write targets */
2106 write_targets += read_targets-1;
2107
2108 if (write_targets == 0 || read_targets == 0) {
2109 /* There is nowhere to write, so all non-sync
2110 * drives must be failed - so we are finished
2111 */
2112 sector_t rv = max_sector - sector_nr;
2113 *skipped = 1;
1da177e4 2114 put_buf(r1_bio);
2115 return rv;
2116 }
2117
2118 if (max_sector > mddev->resync_max)
2119 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2120 if (max_sector > sector_nr + good_sectors)
2121 max_sector = sector_nr + good_sectors;
1da177e4 2122 nr_sectors = 0;
289e99e8 2123 sync_blocks = 0;
2124 do {
2125 struct page *page;
2126 int len = PAGE_SIZE;
2127 if (sector_nr + (len>>9) > max_sector)
2128 len = (max_sector - sector_nr) << 9;
2129 if (len == 0)
2130 break;
2131 if (sync_blocks == 0) {
2132 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2133 &sync_blocks, still_degraded) &&
2134 !conf->fullsync &&
2135 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6a806c51 2136 break;
9e77c485 2137 BUG_ON(sync_blocks < (PAGE_SIZE>>9));
7571ae88 2138 if ((len >> 9) > sync_blocks)
6a806c51 2139 len = sync_blocks<<9;
ab7a30c7 2140 }
191ea9b2 2141
2142 for (i=0 ; i < conf->raid_disks; i++) {
2143 bio = r1_bio->bios[i];
2144 if (bio->bi_end_io) {
d11c171e 2145 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
2146 if (bio_add_page(bio, page, len, 0) == 0) {
2147 /* stop here */
d11c171e 2148 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
2149 while (i > 0) {
2150 i--;
2151 bio = r1_bio->bios[i];
2152 if (bio->bi_end_io==NULL)
2153 continue;
2154 /* remove last page from this bio */
2155 bio->bi_vcnt--;
2156 bio->bi_size -= len;
2157 bio->bi_flags &= ~(1<< BIO_SEG_VALID);
2158 }
2159 goto bio_full;
2160 }
2161 }
2162 }
2163 nr_sectors += len>>9;
2164 sector_nr += len>>9;
191ea9b2 2165 sync_blocks -= (len>>9);
2166 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
2167 bio_full:
2168 r1_bio->sectors = nr_sectors;
2169
2170 /* For a user-requested sync, we read all readable devices and do a
2171 * compare
2172 */
2173 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2174 atomic_set(&r1_bio->remaining, read_targets);
2175 for (i=0; i<conf->raid_disks; i++) {
2176 bio = r1_bio->bios[i];
2177 if (bio->bi_end_io == end_sync_read) {
ddac7c7e 2178 md_sync_acct(bio->bi_bdev, nr_sectors);
2179 generic_make_request(bio);
2180 }
2181 }
2182 } else {
2183 atomic_set(&r1_bio->remaining, 1);
2184 bio = r1_bio->bios[r1_bio->read_disk];
ddac7c7e 2185 md_sync_acct(bio->bi_bdev, nr_sectors);
d11c171e 2186 generic_make_request(bio);
1da177e4 2187
d11c171e 2188 }
2189 return nr_sectors;
2190 }
2191
2192 static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks)
2193 {
2194 if (sectors)
2195 return sectors;
2196
2197 return mddev->dev_sectors;
2198 }
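/* Editor's note: with no striping, a raid1 array is only ever as
 * large as a single member device, however many mirrors it has.
 */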
2199
709ae487 2200static conf_t *setup_conf(mddev_t *mddev)
2201{
2202 conf_t *conf;
709ae487 2203 int i;
2204 mirror_info_t *disk;
2205 mdk_rdev_t *rdev;
709ae487 2206 int err = -ENOMEM;
1da177e4 2207
9ffae0cf 2208 conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
1da177e4 2209 if (!conf)
709ae487 2210 goto abort;
1da177e4 2211
9ffae0cf 2212 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
2213 GFP_KERNEL);
2214 if (!conf->mirrors)
709ae487 2215 goto abort;
1da177e4 2216
2217 conf->tmppage = alloc_page(GFP_KERNEL);
2218 if (!conf->tmppage)
709ae487 2219 goto abort;
ddaf22ab 2220
709ae487 2221 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
1da177e4 2222 if (!conf->poolinfo)
709ae487 2223 goto abort;
2224 conf->poolinfo->raid_disks = mddev->raid_disks;
2225 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2226 r1bio_pool_free,
2227 conf->poolinfo);
2228 if (!conf->r1bio_pool)
2229 goto abort;
2230
ed9bfdf1 2231 conf->poolinfo->mddev = mddev;
1da177e4 2232
e7e72bf6 2233 spin_lock_init(&conf->device_lock);
159ec1fc 2234 list_for_each_entry(rdev, &mddev->disks, same_set) {
709ae487 2235 int disk_idx = rdev->raid_disk;
2236 if (disk_idx >= mddev->raid_disks
2237 || disk_idx < 0)
2238 continue;
2239 disk = conf->mirrors + disk_idx;
2240
2241 disk->rdev = rdev;
2242
2243 disk->head_position = 0;
2244 }
2245 conf->raid_disks = mddev->raid_disks;
2246 conf->mddev = mddev;
1da177e4 2247 INIT_LIST_HEAD(&conf->retry_list);
2248
2249 spin_lock_init(&conf->resync_lock);
17999be4 2250 init_waitqueue_head(&conf->wait_barrier);
1da177e4 2251
191ea9b2 2252 bio_list_init(&conf->pending_bio_list);
191ea9b2 2253
709ae487 2254 conf->last_used = -1;
2255 for (i = 0; i < conf->raid_disks; i++) {
2256
2257 disk = conf->mirrors + i;
2258
2259 if (!disk->rdev ||
2260 !test_bit(In_sync, &disk->rdev->flags)) {
1da177e4 2261 disk->head_position = 0;
2262 if (disk->rdev)
2263 conf->fullsync = 1;
2264 } else if (conf->last_used < 0)
2265 /*
2266 * The first working device is used as a
2267 * starting point to read balancing.
2268 */
2269 conf->last_used = i;
1da177e4 2270 }
2271
2272 err = -EIO;
2273 if (conf->last_used < 0) {
9dd1e2fa 2274 printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
2275 mdname(mddev));
2276 goto abort;
2277 }
2278 err = -ENOMEM;
2279 conf->thread = md_register_thread(raid1d, mddev, NULL);
2280 if (!conf->thread) {
2281 printk(KERN_ERR
9dd1e2fa 2282 "md/raid1:%s: couldn't allocate thread\n",
2283 mdname(mddev));
2284 goto abort;
11ce99e6 2285 }
1da177e4 2286
2287 return conf;
2288
2289 abort:
2290 if (conf) {
2291 if (conf->r1bio_pool)
2292 mempool_destroy(conf->r1bio_pool);
2293 kfree(conf->mirrors);
2294 safe_put_page(conf->tmppage);
2295 kfree(conf->poolinfo);
2296 kfree(conf);
2297 }
2298 return ERR_PTR(err);
2299 }
2300
2301 static int run(mddev_t *mddev)
2302 {
2303 conf_t *conf;
2304 int i;
2305 mdk_rdev_t *rdev;
2306
2307 if (mddev->level != 1) {
9dd1e2fa 2308 printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
2309 mdname(mddev), mddev->level);
2310 return -EIO;
2311 }
2312 if (mddev->reshape_position != MaxSector) {
9dd1e2fa 2313 printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
2314 mdname(mddev));
2315 return -EIO;
2316 }
1da177e4 2317 /*
2318 * copy the already verified devices into our private RAID1
2319 * bookkeeping area. [whatever we allocate in run(),
2320 * should be freed in stop()]
1da177e4 2321 */
2322 if (mddev->private == NULL)
2323 conf = setup_conf(mddev);
2324 else
2325 conf = mddev->private;
1da177e4 2326
2327 if (IS_ERR(conf))
2328 return PTR_ERR(conf);
1da177e4 2329
709ae487 2330 list_for_each_entry(rdev, &mddev->disks, same_set) {
2331 if (!mddev->gendisk)
2332 continue;
2333 disk_stack_limits(mddev->gendisk, rdev->bdev,
2334 rdev->data_offset << 9);
2335 /* as we don't honour merge_bvec_fn, we must never risk
2336 * violating it, so limit ->max_segments to 1, lying within
2337 * a single page, as a one-page request is never in violation.
2338 */
2339 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2340 blk_queue_max_segments(mddev->queue, 1);
2341 blk_queue_segment_boundary(mddev->queue,
2342 PAGE_CACHE_SIZE - 1);
2343 }
1da177e4 2344 }
191ea9b2 2345
2346 mddev->degraded = 0;
2347 for (i=0; i < conf->raid_disks; i++)
2348 if (conf->mirrors[i].rdev == NULL ||
2349 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
2350 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
2351 mddev->degraded++;
2352
2353 if (conf->raid_disks - mddev->degraded == 1)
2354 mddev->recovery_cp = MaxSector;
2355
8c6ac868 2356 if (mddev->recovery_cp != MaxSector)
9dd1e2fa 2357 printk(KERN_NOTICE "md/raid1:%s: not clean"
2358 " -- starting background reconstruction\n",
2359 mdname(mddev));
1da177e4 2360 printk(KERN_INFO
9dd1e2fa 2361 "md/raid1:%s: active with %d out of %d mirrors\n",
2362 mdname(mddev), mddev->raid_disks - mddev->degraded,
2363 mddev->raid_disks);
709ae487 2364
2365 /*
2366 * Ok, everything is just fine now
2367 */
2368 mddev->thread = conf->thread;
2369 conf->thread = NULL;
2370 mddev->private = conf;
2371
1f403624 2372 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
1da177e4 2373
2374 if (mddev->queue) {
2375 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2376 mddev->queue->backing_dev_info.congested_data = mddev;
2377 }
a91a2785 2378 return md_integrity_register(mddev);
2379 }
2380
2381 static int stop(mddev_t *mddev)
2382 {
070ec55d 2383 conf_t *conf = mddev->private;
4b6d287f 2384 struct bitmap *bitmap = mddev->bitmap;
2385
2386 /* wait for behind writes to complete */
e555190d 2387 if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
2388 printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
2389 mdname(mddev));
4b6d287f 2390 /* need to kick something here to make sure I/O goes? */
2391 wait_event(bitmap->behind_wait,
2392 atomic_read(&bitmap->behind_writes) == 0);
4b6d287f 2393 }
1da177e4 2394
2395 raise_barrier(conf);
2396 lower_barrier(conf);
2397
2398 md_unregister_thread(mddev->thread);
2399 mddev->thread = NULL;
2400 if (conf->r1bio_pool)
2401 mempool_destroy(conf->r1bio_pool);
2402 kfree(conf->mirrors);
2403 kfree(conf->poolinfo);
2404 kfree(conf);
2405 mddev->private = NULL;
2406 return 0;
2407 }
2408
2409 static int raid1_resize(mddev_t *mddev, sector_t sectors)
2410 {
2411 /* no resync is happening, and there is enough space
2412 * on all devices, so we can resize.
2413 * We need to make sure resync covers any new space.
2414 * If the array is shrinking we should possibly wait until
2415 * any io in the removed space completes, but it hardly seems
2416 * worth it.
2417 */
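/* Editor's note: user space typically reaches this via something
 * like
 *
 *	mdadm --grow /dev/md0 --size=max
 *
 * (device name illustrative); md core validates the new device
 * size and then calls ->resize.
 */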
1f403624 2418 md_set_array_sectors(mddev, raid1_size(mddev, sectors, 0));
2419 if (mddev->array_sectors > raid1_size(mddev, sectors, 0))
2420 return -EINVAL;
f233ea5c 2421 set_capacity(mddev->gendisk, mddev->array_sectors);
449aad3e 2422 revalidate_disk(mddev->gendisk);
b522adcd 2423 if (sectors > mddev->dev_sectors &&
b098636c 2424 mddev->recovery_cp > mddev->dev_sectors) {
58c0fed4 2425 mddev->recovery_cp = mddev->dev_sectors;
2426 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2427 }
b522adcd 2428 mddev->dev_sectors = sectors;
4b5c7ae8 2429 mddev->resync_max_sectors = sectors;
2430 return 0;
2431 }
2432
63c70c4f 2433static int raid1_reshape(mddev_t *mddev)
2434{
2435 /* We need to:
2436 * 1/ resize the r1bio_pool
2437 * 2/ resize conf->mirrors
2438 *
2439 * We allocate a new r1bio_pool if we can.
2440 * Then raise a device barrier and wait until all IO stops.
2441 * Then resize conf->mirrors and swap in the new r1bio pool.
2442 *
2443 * At the same time, we "pack" the devices so that all the missing
2444 * devices have the higher raid_disk numbers.
2445 */
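/* Editor's note: this path is normally driven from user space by
 * changing the mirror count, e.g.
 *
 *	mdadm --grow /dev/md0 --raid-devices=3
 *
 * (device name illustrative), which sets delta_disks and has md
 * call ->check_reshape.
 */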
2446 mempool_t *newpool, *oldpool;
2447 struct pool_info *newpoolinfo;
2448 mirror_info_t *newmirrors;
070ec55d 2449 conf_t *conf = mddev->private;
63c70c4f 2450 int cnt, raid_disks;
c04be0aa 2451 unsigned long flags;
b5470dc5 2452 int d, d2, err;
1da177e4 2453
63c70c4f 2454 /* Cannot change chunk_size, layout, or level */
664e7c41 2455 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
2456 mddev->layout != mddev->new_layout ||
2457 mddev->level != mddev->new_level) {
664e7c41 2458 mddev->new_chunk_sectors = mddev->chunk_sectors;
2459 mddev->new_layout = mddev->layout;
2460 mddev->new_level = mddev->level;
2461 return -EINVAL;
2462 }
2463
2464 err = md_allow_write(mddev);
2465 if (err)
2466 return err;
2a2275d6 2467
2468 raid_disks = mddev->raid_disks + mddev->delta_disks;
2469
2470 if (raid_disks < conf->raid_disks) {
2471 cnt=0;
2472 for (d= 0; d < conf->raid_disks; d++)
2473 if (conf->mirrors[d].rdev)
2474 cnt++;
2475 if (cnt > raid_disks)
1da177e4 2476 return -EBUSY;
6ea9c07c 2477 }
2478
2479 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
2480 if (!newpoolinfo)
2481 return -ENOMEM;
2482 newpoolinfo->mddev = mddev;
2483 newpoolinfo->raid_disks = raid_disks;
2484
2485 newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2486 r1bio_pool_free, newpoolinfo);
2487 if (!newpool) {
2488 kfree(newpoolinfo);
2489 return -ENOMEM;
2490 }
9ffae0cf 2491 newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
2492 if (!newmirrors) {
2493 kfree(newpoolinfo);
2494 mempool_destroy(newpool);
2495 return -ENOMEM;
2496 }
1da177e4 2497
17999be4 2498 raise_barrier(conf);
2499
2500 /* ok, everything is stopped */
2501 oldpool = conf->r1bio_pool;
2502 conf->r1bio_pool = newpool;
6ea9c07c 2503
2504 for (d = d2 = 0; d < conf->raid_disks; d++) {
2505 mdk_rdev_t *rdev = conf->mirrors[d].rdev;
2506 if (rdev && rdev->raid_disk != d2) {
36fad858 2507 sysfs_unlink_rdev(mddev, rdev);
a88aa786 2508 rdev->raid_disk = d2;
2509 sysfs_unlink_rdev(mddev, rdev);
2510 if (sysfs_link_rdev(mddev, rdev))
a88aa786 2511 printk(KERN_WARNING
2512 "md/raid1:%s: cannot register rd%d\n",
2513 mdname(mddev), rdev->raid_disk);
6ea9c07c 2514 }
2515 if (rdev)
2516 newmirrors[d2++].rdev = rdev;
2517 }
2518 kfree(conf->mirrors);
2519 conf->mirrors = newmirrors;
2520 kfree(conf->poolinfo);
2521 conf->poolinfo = newpoolinfo;
2522
c04be0aa 2523 spin_lock_irqsave(&conf->device_lock, flags);
1da177e4 2524 mddev->degraded += (raid_disks - conf->raid_disks);
c04be0aa 2525 spin_unlock_irqrestore(&conf->device_lock, flags);
1da177e4 2526 conf->raid_disks = mddev->raid_disks = raid_disks;
63c70c4f 2527 mddev->delta_disks = 0;
1da177e4 2528
6ea9c07c 2529 conf->last_used = 0; /* just make sure it is in-range */
17999be4 2530 lower_barrier(conf);
2531
2532 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2533 md_wakeup_thread(mddev->thread);
2534
2535 mempool_destroy(oldpool);
2536 return 0;
2537 }
2538
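/* Editor's note on the state argument, matching the switch below:
 * 1 quiesces the array (raise the barrier, blocking new I/O),
 * 0 resumes it (lower the barrier), and 2 merely wakes barrier
 * waiters so that a suspend already in progress can complete.
 */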
500af87a 2539static void raid1_quiesce(mddev_t *mddev, int state)
36fa3063 2540{
070ec55d 2541 conf_t *conf = mddev->private;
2542
2543 switch(state) {
2544 case 2: /* wake for suspend */
2545 wake_up(&conf->wait_barrier);
2546 break;
9e6603da 2547 case 1:
17999be4 2548 raise_barrier(conf);
36fa3063 2549 break;
9e6603da 2550 case 0:
17999be4 2551 lower_barrier(conf);
2552 break;
2553 }
2554 }
2555
2556 static void *raid1_takeover(mddev_t *mddev)
2557 {
2558 /* raid1 can take over:
2559 * raid5 with 2 devices, any layout or chunk size
2560 */
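/* Editor's note: a level change such as
 *
 *	mdadm --grow /dev/md0 --level=1
 *
 * on a 2-drive raid5 should end up here (device name
 * illustrative). Note that the new conf is created with
 * conf->barrier = 1, so the array stays frozen until md
 * resumes it after the takeover.
 */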
2561 if (mddev->level == 5 && mddev->raid_disks == 2) {
2562 conf_t *conf;
2563 mddev->new_level = 1;
2564 mddev->new_layout = 0;
2565 mddev->new_chunk_sectors = 0;
2566 conf = setup_conf(mddev);
2567 if (!IS_ERR(conf))
2568 conf->barrier = 1;
2569 return conf;
2570 }
2571 return ERR_PTR(-EINVAL);
2572 }
1da177e4 2573
2604b703 2574static struct mdk_personality raid1_personality =
2575{
2576 .name = "raid1",
2604b703 2577 .level = 1,
2578 .owner = THIS_MODULE,
2579 .make_request = make_request,
2580 .run = run,
2581 .stop = stop,
2582 .status = status,
2583 .error_handler = error,
2584 .hot_add_disk = raid1_add_disk,
2585 .hot_remove_disk= raid1_remove_disk,
2586 .spare_active = raid1_spare_active,
2587 .sync_request = sync_request,
2588 .resize = raid1_resize,
80c3a6ce 2589 .size = raid1_size,
63c70c4f 2590 .check_reshape = raid1_reshape,
36fa3063 2591 .quiesce = raid1_quiesce,
709ae487 2592 .takeover = raid1_takeover,
2593};
2594
2595 static int __init raid_init(void)
2596 {
2604b703 2597 return register_md_personality(&raid1_personality);
2598 }
2599
2600 static void raid_exit(void)
2601 {
2604b703 2602 unregister_md_personality(&raid1_personality);
2603 }
2604
2605 module_init(raid_init);
2606 module_exit(raid_exit);
2607 MODULE_LICENSE("GPL");
0efb9e61 2608 MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
1da177e4 2609MODULE_ALIAS("md-personality-3"); /* RAID1 */
d9d166c2 2610MODULE_ALIAS("md-raid1");
2604b703 2611MODULE_ALIAS("md-level-1");