dm raid1: use hold framework in do_failures
drivers/md/dm-raid1.c

/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
#define DM_IO_PAGES 64
#define DM_KCOPYD_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};
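
/*
 * All bio handling funnels through the kmirrord worker: the reads,
 * writes and failures lists above are drained by do_mirror().  The
 * holds list is different: bios parked there by hold_bio() stay put
 * until the next suspend, when mirror_presuspend() re-evaluates them.
 */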

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};
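
/*
 * A read record is allocated per in-flight read so that, should the
 * read fail, mirror_end_io() can restore the bio from the saved
 * dm_bio_details and requeue it against another in-sync leg.
 */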

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum values, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
		if (!atomic_read(&new->error_count)) {
			set_default_mirror(new);
			break;
		}

	if (unlikely(new == ms->mirror + ms->nr_mirrors))
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}
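
/*
 * Flush all mirror legs by issuing a zero-length barrier write to each
 * device.  A leg that reports an error is failed with
 * DM_RAID1_FLUSH_ERROR; the bit set in error_bits identifies the leg.
 */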

static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_BARRIER,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.bvec = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}
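
/*
 * Kick off recovery of one region: the source is the corresponding
 * chunk of the default mirror, the destinations are the same chunk on
 * every other leg.  The copy itself is done asynchronously by kcopyd,
 * which calls recovery_complete() once all legs have been written.
 */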

static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}
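
/*
 * default_ok() and mirror_available() decide whether a failed read can
 * be retried: either the default leg is still error-free, or the region
 * is in-sync and choose_mirror() can find another healthy leg.
 */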

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_size))
		return 0;
	return m->offset + (bio->bi_sector - m->ms->ti->begin);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * If device is suspended, complete the bio.
	 */
	if (atomic_read(&ms->suspend)) {
		if (dm_noflush_suspending(ms->ti))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_endio(bio, -EIO);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	spin_lock_irq(&ms->lock);
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}
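
/*
 * Bios parked by hold_bio() are released in mirror_presuspend(): with a
 * 'noflush' suspend they are requeued to the core (DM_ENDIO_REQUEUE) so
 * userspace can repair the mirror; otherwise they fail with -EIO.
 */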

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int uptodate = 0;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error))
		goto out;

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
		else
			uptodate = 1;

	if (unlikely(!uptodate)) {
		DMERR("All replicated volumes dead, failing I/O");
		/* None of the writes succeeded, fail the I/O. */
		ret = -EIO;
	} else if (errors_handled(ms)) {
		/*
		 * Need to raise event.  Since raising
		 * events can block, we need to do it in
		 * the main thread.
		 */
		spin_lock_irqsave(&ms->lock, flags);
		if (!ms->failures.head)
			should_wake = 1;
		bio_list_add(&ms->failures, bio);
		spin_unlock_irqrestore(&ms->lock, flags);
		if (should_wake)
			wakeup_mirrord(ms);
		return;
	}
out:
	bio_endio(bio, ret);
}
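
/*
 * Writes to in-sync regions are mirrored by a single dm_io request that
 * targets every leg at once; write_callback() then inspects the per-leg
 * error bits.
 */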

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if (unlikely(bio_empty_barrier(bio))) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}
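
/*
 * Failed writes arrive here from write_callback() (when handle_errors
 * is enabled) and, on log failure, from do_writes() above.
 */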

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (ms->log_failure)
			hold_bio(ms, bio);
		else {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
		}
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	dm_table_unplug_all(ms->ti->table);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}
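
/*
 * An illustrative (hypothetical) dmsetup table line for the constructor
 * below:
 *
 *	0 2097152 mirror core 2 1024 nosync 2 /dev/sda 0 /dev/sdb 0
 *
 * i.e. a 1 GiB mirror using a core log with 1024-sector regions and two
 * legs; the device names and sizes here are examples only.
 */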

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);
	ti->num_flush_requests = 1;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}
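
/*
 * Teardown undoes mirror_ctr() in reverse: stop the delayed-wake timer,
 * drain kmirrord and the scheduled event work, then release kcopyd, the
 * workqueue and the remaining context.
 */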

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_scheduled_work();
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}
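
/*
 * For writes, end_io only needs to drop the region's pending count.
 * For reads, a failure is retried on another leg by restoring the bio
 * from the read record saved in mirror_map() and requeueing it.
 */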

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (likely(!bio_empty_barrier(bio)))
			dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);

	/*
	 * Now that ms->suspend is set and the workqueue flushed, no more
	 * entries can be added to the ms->holds list, so process it.
	 *
	 * Bios can still arrive concurrently with or after this
	 * presuspend function, but they cannot join the hold list
	 * because ms->suspend is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *	A => Alive - No failures
 *	D => Dead - A write failure occurred leaving mirror out-of-sync
 *	F => Flush - A flush failure occurred, mirror out-of-sync
 *	S => Sync - A synchronization failure occurred, mirror out-of-sync
 *	R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 12, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");