drivers/md/dm.c

/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
};

/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

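/*
 * Return the per-target map_info stored in a bio that dm has cloned,
 * or NULL if the bio does not belong to device-mapper.
 */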
union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct target_io *)bio->bi_private)->info;
	return NULL;
}

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2

struct mapped_device {
	struct rw_semaphore io_lock;
	struct semaphore suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;

	unsigned long flags;

	request_queue_t *queue;
	struct gendisk *disk;

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;
};

#define MIN_IOS 256
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;

static struct bio_set *dm_set;

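/*
 * One-time initialisation: create the bio set, the dm_io and target_io
 * slab caches and register the block major number.
 */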
static int __init local_init(void)
{
	int r;

	dm_set = bioset_create(16, 16, 4);
	if (!dm_set)
		return -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = kmem_cache_create("dm_io",
				      sizeof(struct dm_io), 0, 0, NULL, NULL);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
				       0, 0, NULL, NULL);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}

static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);

	bioset_free(dm_set);

	if (unregister_blkdev(_major, _name) < 0)
		DMERR("devfs_unregister_blkdev failed");

	_major = 0;

	DMINFO("cleaned up");
}

int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_interface_init,
};

void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_get(md);
	return 0;
}

static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_put(md);
	return 0;
}

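/*
 * io and tio objects come from the per-device mempools, so allocation
 * with GFP_NOIO may block but will not fail.
 */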
static inline struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static inline void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static inline struct target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static inline void free_tio(struct mapped_device *md, struct target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0;	/* deferred successfully */
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static inline void dec_pending(struct dm_io *io, int error)
{
	if (error)
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (atomic_dec_and_test(&io->md->pending))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		bio_endio(io->bio, io->bio->bi_size, io->error);
		free_io(io->md, io);
	}
}

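/*
 * Completion handler for cloned bios: let the target's optional end_io
 * hook retry or override the error before the original io is completed.
 */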
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
	int r = 0;
	struct target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (bio->bi_size)
		return 1;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0)
			error = r;

		else if (r > 0)
			/* the target wants another shot at the io */
			return 1;
	}

	free_tio(io->md, tio);
	dec_pending(io, error);
	bio_put(bio);
	return r;
}

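/*
 * Work out how many sectors, starting at 'sector', can be sent to a
 * single target without crossing its boundary or split_io granularity.
 */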
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

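/*
 * Hand a clone to the target's map function and either dispatch the
 * remapped bio, leave it with the target, or complete it with an error.
 */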
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	r = ti->type->map(ti, clone, &tio->info);
	if (r > 0)
		/* the bio has been remapped so dispatch it */
		generic_make_request(clone);

	else if (r < 0) {
		/* error the io and bail out */
		struct dm_io *io = tio->io;
		free_tio(tio->io->md, tio);
		dec_pending(io, r);
		bio_put(clone);
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	bio_free(bio, dm_set);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, dm_set);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len)
{
	struct bio *clone;

	clone = bio_clone(bio, GFP_NOIO);
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}

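/*
 * Clone as much of the remaining bio as the target at ci->sector can
 * accept, map it, and advance the clone_info state accordingly.
 */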
static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Create two copy bios to deal with io that has
		 * been split across a target.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;

		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset, max);
		__map_bio(ti, clone, tio);

		ci->sector += max;
		ci->sector_count -= max;
		ti = dm_table_find_target(ci->map, ci->sector);

		len = to_sector(bv->bv_len) - max;
		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset + to_bytes(max), len);
		tio = alloc_tio(ci->md);
		tio->io = ci->io;
		tio->ti = ti;
		memset(&tio->info, 0, sizeof(tio->info));
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx++;
	}
}

/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	atomic_inc(&md->pending);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	struct mapped_device *md = q->queuedata;

	down_read(&md->io_lock);

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;

		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

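/*
 * issue_flush_fn for the queue: ask every target in the live table to flush.
 */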
static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
			sector_t *error_sector)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	int ret = -ENXIO;

	if (map) {
		ret = dm_table_flush_all(map);
		dm_table_put(map);
	}

	return ret;
}

static void dm_unplug_all(request_queue_t *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

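/*
 * Report congestion: a suspended or table-less device is treated as fully
 * congested, otherwise the targets in the live table are consulted.
 */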
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DECLARE_MUTEX(_minor_lock);
static DEFINE_IDR(_minor_idr);

static void free_minor(unsigned int minor)
{
	down(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	up(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, unsigned int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	down(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, md, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	up(&_minor_lock);
	return r;
}

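/*
 * Allocate the lowest currently unused minor number.
 */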
static int next_free_minor(struct mapped_device *md, unsigned int *minor)
{
	int r;
	unsigned int m;

	down(&_minor_lock);

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new(&_minor_idr, md, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	up(&_minor_lock);
	return r;
}

static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	/* get a minor number for the dev */
	r = persistent ? specific_minor(md, minor) : next_free_minor(md, &minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1_free_minor;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				     mempool_free_slab, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				      mempool_free_slab, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	return md;

bad4:
	mempool_destroy(md->tio_pool);
bad3:
	mempool_destroy(md->io_pool);
bad2:
	blk_put_queue(md->queue);
bad1_free_minor:
	free_minor(minor);
bad1:
	kfree(md);
	return NULL;
}

static void free_dev(struct mapped_device *md)
{
	free_minor(md->disk->first_minor);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	del_gendisk(md->disk);
	put_disk(md->disk);
	blk_put_queue(md->queue);
	kfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	struct mapped_device *md = (struct mapped_device *) context;

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

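/*
 * Propagate the table size to the gendisk and the suspended bdev's inode.
 */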
static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	down(&md->suspended_bdev->bd_inode->i_sem);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	up(&md->suspended_bdev->bd_inode->i_sem);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	request_queue_t *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);
	__set_size(md, size);
	if (size == 0)
		return 0;

	dm_table_get(t);
	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_put(map);
}

/*
 * Constructor for a new device.
 */
static int create_aux(unsigned int minor, int persistent,
		      struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor, persistent);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

int dm_create(struct mapped_device **result)
{
	return create_aux(0, 0, result);
}

int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
{
	return create_aux(minor, 1, result);
}

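/*
 * Translate a dev_t into the corresponding mapped_device, or NULL if the
 * device does not belong to dm.  No reference is taken; see dm_get_md().
 */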
static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	down(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (!md || (dm_disk(md)->first_minor != minor))
		md = NULL;

	up(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(dev_t dev)
{
	struct mapped_device *md;
	void *mdptr = NULL;

	md = dm_find_md(dev);
	if (md)
		mdptr = md->interface_ptr;
	return mdptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

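/*
 * Drop a reference; the final holder runs the targets' pre/postsuspend
 * hooks if needed, unbinds the table and frees the device.
 */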
void dm_put(struct mapped_device *md)
{
	struct dm_table *map = dm_get_table(md);

	if (atomic_dec_and_test(&md->holders)) {
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		free_dev(md);
	}

	dm_table_put(map);
}

/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
	struct bio *n;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;
		__split_bio(md, c);
		c = n;
	}
}

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	down(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	up(&md->suspend_lock);
	return r;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, int do_lockfs)
{
	struct dm_table *map = NULL;
	DECLARE_WAITQUEUE(wait, current);
	int r = -EINVAL;

	down(&md->suspend_lock);

	if (dm_suspended(md))
		goto out;

	map = dm_get_table(md);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	md->suspended_bdev = bdget_disk(md->disk, 0);
	if (!md->suspended_bdev) {
		DMWARN("bdget failed in dm_suspend");
		r = -ENOMEM;
		goto out;
	}

	/* Flush I/O to the device. */
	if (do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	/* were we interrupted? */
	r = -EINTR;
	if (atomic_read(&md->pending)) {
		up_write(&md->io_lock);
		unlock_fs(md);
		clear_bit(DMF_BLOCK_IO, &md->flags);
		goto out;
	}
	up_write(&md->io_lock);

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

	r = 0;

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);
	up(&md->suspend_lock);
	return r;
}

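/*
 * Undo dm_suspend: resume the targets, replay any deferred bios and let
 * new io be mapped again.
 */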
int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct bio *def;
	struct dm_table *map = NULL;

	down(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	dm_table_resume_targets(map);

	down_write(&md->io_lock);
	clear_bit(DMF_BLOCK_IO, &md->flags);

	def = bio_list_get(&md->deferred);
	__flush_deferred_io(md, def);
	up_write(&md->io_lock);

	unlock_fs(md);

	bdput(md->suspended_bdev);
	md->suspended_bdev = NULL;

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	r = 0;

out:
	dm_table_put(map);
	up(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");