drivers/md/dm-raid1.c

/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-list.h"
#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
#define DM_IO_PAGES 64
#define DM_KCOPYD_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

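/*
 * Some writes cannot be dispatched immediately (e.g. after a log
 * failure), so kmirrord is re-woken after a short delay: the one-shot
 * timer below fires after HZ/5 (~200ms), and timer_pending coalesces
 * concurrent delayed_wake() calls into a single wakeup.
 */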
static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

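/*
 * Hand a bio to kmirrord.  The worker is only woken when a list goes
 * from empty to non-empty; anything added to an already populated
 * list is picked up by the wakeup that is in flight.
 */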
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

/*
 * fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the DM_RAID1_*_ERROR values
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
		if (!atomic_read(&new->error_count)) {
			set_default_mirror(new);
			break;
		}

	if (unlikely(new == ms->mirror + ms->nr_mirrors))
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

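/*
 * Kick off a kcopyd copy of a single region from the default mirror
 * to every other leg; recovery_complete() above finishes the region
 * off when the copy completes.
 */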
static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
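/*
 * Pick a mirror to read from: walk backwards (wrapping around) from
 * the current default until a leg with no recorded errors is found,
 * or NULL if every leg has failed.  Note the sector argument is
 * currently unused: this version does no position-based read
 * balancing across legs.
 */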
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

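/*
 * A failed read may be retried on another leg only if the region is
 * still in-sync in the dirty log and at least one error-free mirror
 * remains to serve it.
 */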
static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	return m->offset + (bio->bi_sector - m->ms->ti->begin);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}
465
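/*
 * CLEAN and DIRTY regions both count as in-sync for reads: DIRTY only
 * means writes are in flight, and those writes go to every leg, so
 * the data already on disk is identical across legs.
 */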
static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use dm-io to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int uptodate = 0;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error))
		goto out;

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
		else
			uptodate = 1;

	if (unlikely(!uptodate)) {
		DMERR("All replicated volumes dead, failing I/O");
		/* None of the writes succeeded, fail the I/O. */
		ret = -EIO;
	} else if (errors_handled(ms)) {
		/*
		 * Need to raise event.  Since raising
		 * events can block, we need to do it in
		 * the main thread.
		 */
		spin_lock_irqsave(&ms->lock, flags);
		if (!ms->failures.head)
			should_wake = 1;
		bio_list_add(&ms->failures, bio);
		spin_unlock_irqrestore(&ms->lock, flags);
		if (should_wake)
			wakeup_mirrord(ms);
		return;
	}
out:
	bio_endio(bio, ret);
}
559
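/*
 * A write is issued as a single dm-io request targeting every leg at
 * once; the error value handed to write_callback() above is a bitset
 * with one bit per destination region, in io[] order.
 */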
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = dm_rh_get_state(ms->rh,
					dm_rh_bio_to_region(ms->rh, bio), 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (!failures->head)
		return;

	if (!ms->log_failure) {
		while ((bio = bio_list_pop(failures))) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
		}
		return;
	}

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the failures list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	if (dm_noflush_suspending(ms->ti)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		return;
	}

	if (atomic_read(&ms->suspend)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, -EIO);
		return;
	}

	spin_lock_irq(&ms->lock);
	bio_list_merge(&ms->failures, failures);
	spin_unlock_irq(&ms->lock);

	delayed_wake(ms);
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	dm_table_unplug_all(ms->ti->table);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
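/*
 * For example, a two-leg mirror with a core log and 64-sector regions
 * might be created with a table line such as the following (device
 * paths and sizes are illustrative):
 *
 *   0 2097152 mirror core 2 64 nosync 2 /dev/sdb1 0 /dev/sdc1 0 1 handle_errors
 */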
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_scheduled_work();
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}
1084
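/*
 * end_io: writes simply drop the region's pending count.  For a failed
 * read, the saved dm_bio_details are restored and the bio is requeued
 * to kmirrord so it can be retried on another leg.
 */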
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}

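/*
 * Suspend is done in two stages: presuspend stops recovery and drains
 * kmirrord so no further I/O is generated, then postsuspend gives the
 * dirty log its chance to suspend itself.
 */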
static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 1);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *    A => Alive - No failures
 *    D => Dead - A write failure occurred leaving mirror out-of-sync
 *    S => Sync - A synchronization failure occurred, mirror out-of-sync
 *    R => Read - A read failure occurred, mirror data unaffected
 * Any other recorded error state is reported as 'U' (unknown).
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static struct target_type mirror_target = {
	.name	     = "mirror",
	.version     = {1, 0, 20},
	.module	     = THIS_MODULE,
	.ctr	     = mirror_ctr,
	.dtr	     = mirror_dtr,
	.map	     = mirror_map,
	.end_io	     = mirror_end_io,
	.presuspend  = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	     = mirror_resume,
	.status	     = mirror_status,
};

static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");