/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */

/*-----------------------------------------------------------------
 * Persistent snapshots: by "persistent" we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device. The snapshot code
 * requires that we copy exception chunks to chunk-aligned areas
 * of the COW store. It makes sense, therefore, to store the
 * metadata in chunk-sized blocks.
 *
 * There is no backward or forward compatibility implemented,
 * snapshots with different disk versions than the kernel will
 * not be usable. It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on-disk structures are in little-endian format. The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
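
/*
 * A sketch of the resulting layout, at chunk granularity:
 *
 *	chunk 0			header
 *	chunk 1			metadata area 0
 *	chunks 2 .. N+1		exception data for area 0
 *	chunk N+2		metadata area 1
 *	... and so on,
 *
 * where N is the number of exceptions that fit in one metadata
 * chunk (exceptions_per_area below).
 */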

/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
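/*
 * (Stored little-endian on disk, the magic decodes to the bytes
 * 0x53 0x6e 0x41 0x70, i.e. 'S' 'n' 'A' 'p'.)
 */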
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
	__le32 magic;

	/*
	 * Is this snapshot valid? There is no way of recovering
	 * an invalid snapshot.
	 */
	__le32 valid;

	/*
	 * Simple, incrementing version. No backward
	 * compatibility.
	 */
	__le32 version;

	/* In sectors */
	__le32 chunk_size;
} __packed;
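
/*
 * The packed header occupies just 16 bytes; write_header() below
 * zeroes the rest of the header chunk.
 */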

struct disk_exception {
	__le64 old_chunk;
	__le64 new_chunk;
} __packed;

struct core_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for header. The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free. It holds the next chunk to be allocated. On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free. It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation. Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early this doesn't
	 * matter.
	 */

	chunk_t next_free;

	/*
	 * The index of next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vzalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}

static void free_area(struct pstore *ps)
{
	vfree(ps->area);
	ps->area = NULL;

	vfree(ps->zero_area);
	ps->zero_area = NULL;

	vfree(ps->header_area);
	ps->header_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK_ONSTACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);

	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}
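
/*
 * For example, with the default 16KB chunk a metadata area holds
 * 1024 exceptions (see persistent_read_metadata), so the stride
 * between areas is 1025 chunks: area 0 lives in chunk 1, area 1
 * in chunk 1026, area 2 in chunk 2051, and so on.
 */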

static void skip_metadata(struct pstore *ps)
{
	uint32_t stride = ps->exceptions_per_area + 1;
	chunk_t next_free = ps->next_free;

	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
		ps->next_free++;
}
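
/*
 * next_free points at a metadata chunk exactly when
 * next_free % stride == NUM_SNAPSHOT_HDR_CHUNKS, i.e. chunks
 * 1, 1026, 2051, ... with the default chunk size; skip_metadata()
 * steps over those.
 */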

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
	chunk_t chunk = area_location(ps, ps->current_area);

	return chunk_io(ps, ps->area, chunk, rw, 0);
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use default chunk size (or logical_block_size, if larger)
	 * if none supplied
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create();
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, READ, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides "
		       "table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
			   uint32_t index, struct core_exception *result)
{
	struct disk_exception *de = get_exception(ps, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(de->old_chunk);
	result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct core_exception *e)
{
	struct disk_exception *de = get_exception(ps, index);

	/* copy it */
	de->old_chunk = cpu_to_le64(e->old_chunk);
	de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *de = get_exception(ps, index);

	/* clear it */
	de->old_chunk = 0;
	de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is set to indicate whether the area has been
 * completely filled.
 */
static int insert_exceptions(struct pstore *ps,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct core_exception e;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, i, &e);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is, we know that we've hit the end of the
		 * exceptions. Therefore the area is not full.
		 */
		if (e.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= e.new_chunk)
			ps->next_free = e.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, e.old_chunk, e.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		r = area_io(ps, READ);
		if (r)
			return r;

		r = insert_exceptions(ps, callback, callback_context, &full);
		if (r)
			return r;
	}

	ps->current_area--;

	skip_metadata(ps);

	return 0;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * First chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
	 */
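	/*
	 * E.g. with 32-sector chunks and two metadata areas in use
	 * (ps->current_area == 1), that is (1 + 1 + 1) * 32 = 96 sectors.
	 */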
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	vfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now that we know the correct chunk_size, complete the
	 * initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
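	/* e.g. a 16KB chunk holds 16384 / 16 = 1024 disk_exception entries */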
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}

	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * The metadata is valid, but the snapshot itself has been
	 * invalidated.
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	return read_exceptions(ps, callback, callback_context);
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move on to the next free chunk, making sure to take
	 * into account the location of the metadata chunks.
	 */
	ps->next_free++;
	skip_metadata(ps);

	atomic_inc(&ps->pending_count);
	return 0;
}

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e, int valid,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	struct commit_callback *cb;

	if (!valid)
		ps->valid = 0;

	ce.old_chunk = e->old_chunk;
	ce.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &ce);

	/*
	 * Add the callback to the back of the array. This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}

static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	int nr_consecutive;
	int r;

	/*
	 * When current area is empty, move back to preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, READ);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->current_committed - 1, &ce);
	*last_old_chunk = ce.old_chunk;
	*last_new_chunk = ce.new_chunk;

	/*
	 * Find number of consecutive chunks within the current area,
	 * working backwards.
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->current_committed - 1 - nr_consecutive,
			       &ce);
		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
		    ce.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}
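
/*
 * Illustration: if the last three committed exceptions map old chunks
 * 10, 11, 12 to new chunks 20, 21, 22, the backwards scan above finds
 * them consecutive and prepare_merge reports 3, so the caller can
 * merge all three back in one go.
 */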

static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, WRITE_FLUSH_FUA);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has become merging, we set it to the value it
	 * would have held had all the exceptions been committed in order.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
			  unsigned argc, char **argv)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->context = ps;

	return 0;
}

static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
	}

	return sz;
}

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}