drivers/md/dm.c (from exynos8895/android_kernel_samsung_universal8895.git, commit 44a2fa6814ce97cbd05d3e3e34c65672c6dbbebe)
1 /*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8 #include "dm.h"
9 #include "dm-uevent.h"
10
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/moduleparam.h>
15 #include <linux/blkpg.h>
16 #include <linux/bio.h>
17 #include <linux/mempool.h>
18 #include <linux/slab.h>
19 #include <linux/idr.h>
20 #include <linux/hdreg.h>
21 #include <linux/delay.h>
22
23 #include <trace/events/block.h>
24
25 #define DM_MSG_PREFIX "core"
26
27 #ifdef CONFIG_PRINTK
28 /*
29 * ratelimit state to be used in DMXXX_LIMIT().
30 */
31 DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
32 DEFAULT_RATELIMIT_INTERVAL,
33 DEFAULT_RATELIMIT_BURST);
34 EXPORT_SYMBOL(dm_ratelimit_state);
35 #endif
36
37 /*
38 * Cookies are numeric values sent with CHANGE and REMOVE
39 * uevents while resuming, removing or renaming the device.
40 */
41 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
42 #define DM_COOKIE_LENGTH 24
43
44 static const char *_name = DM_NAME;
45
46 static unsigned int major = 0;
47 static unsigned int _major = 0;
48
49 static DEFINE_IDR(_minor_idr);
50
51 static DEFINE_SPINLOCK(_minor_lock);
52
53 static void do_deferred_remove(struct work_struct *w);
54
55 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
56
57 /*
58 * For bio-based dm.
59 * One of these is allocated per bio.
60 */
61 struct dm_io {
62 struct mapped_device *md;
63 int error;
64 atomic_t io_count;
65 struct bio *bio;
66 unsigned long start_time;
67 spinlock_t endio_lock;
68 struct dm_stats_aux stats_aux;
69 };
70
71 /*
72 * For request-based dm.
73 * One of these is allocated per request.
74 */
75 struct dm_rq_target_io {
76 struct mapped_device *md;
77 struct dm_target *ti;
78 struct request *orig, clone;
79 int error;
80 union map_info info;
81 };
82
83 /*
84 * For request-based dm - the bio clones we allocate are embedded in these
85 * structs.
86 *
87 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
88 * the bioset is created - this means the bio has to come at the end of the
89 * struct.
90 */
91 struct dm_rq_clone_bio_info {
92 struct bio *orig;
93 struct dm_rq_target_io *tio;
94 struct bio clone;
95 };
96
97 union map_info *dm_get_mapinfo(struct bio *bio)
98 {
99 if (bio && bio->bi_private)
100 return &((struct dm_target_io *)bio->bi_private)->info;
101 return NULL;
102 }
103
104 union map_info *dm_get_rq_mapinfo(struct request *rq)
105 {
106 if (rq && rq->end_io_data)
107 return &((struct dm_rq_target_io *)rq->end_io_data)->info;
108 return NULL;
109 }
110 EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
111
112 #define MINOR_ALLOCED ((void *)-1)
113
114 /*
115 * Bits for the md->flags field.
116 */
117 #define DMF_BLOCK_IO_FOR_SUSPEND 0
118 #define DMF_SUSPENDED 1
119 #define DMF_FROZEN 2
120 #define DMF_FREEING 3
121 #define DMF_DELETING 4
122 #define DMF_NOFLUSH_SUSPENDING 5
123 #define DMF_MERGE_IS_OPTIONAL 6
124 #define DMF_DEFERRED_REMOVE 7
125
126 /*
127 * A dummy definition to make RCU happy.
128 * struct dm_table should never be dereferenced in this file.
129 */
130 struct dm_table {
131 int undefined__;
132 };
133
134 /*
135 * Work processed by per-device workqueue.
136 */
137 struct mapped_device {
138 struct srcu_struct io_barrier;
139 struct mutex suspend_lock;
140 atomic_t holders;
141 atomic_t open_count;
142
143 /*
144 * The current mapping.
145 * Use dm_get_live_table{_fast} or take suspend_lock for
146 * dereference.
147 */
148 struct dm_table *map;
149
150 unsigned long flags;
151
152 struct request_queue *queue;
153 unsigned type;
154 /* Protect queue and type against concurrent access. */
155 struct mutex type_lock;
156
157 struct target_type *immutable_target_type;
158
159 struct gendisk *disk;
160 char name[16];
161
162 void *interface_ptr;
163
164 /*
165 * A list of ios that arrived while we were suspended.
166 */
167 atomic_t pending[2];
168 wait_queue_head_t wait;
169 struct work_struct work;
170 struct bio_list deferred;
171 spinlock_t deferred_lock;
172
173 /*
174 * Processing queue (flush)
175 */
176 struct workqueue_struct *wq;
177
178 /*
179 * io objects are allocated from here.
180 */
181 mempool_t *io_pool;
182
183 struct bio_set *bs;
184
185 /*
186 * Event handling.
187 */
188 atomic_t event_nr;
189 wait_queue_head_t eventq;
190 atomic_t uevent_seq;
191 struct list_head uevent_list;
192 spinlock_t uevent_lock; /* Protect access to uevent_list */
193
194 /*
195 * freeze/thaw support requires holding onto a super block
196 */
197 struct super_block *frozen_sb;
198 struct block_device *bdev;
199
200 /* forced geometry settings */
201 struct hd_geometry geometry;
202
203 /* sysfs handle */
204 struct kobject kobj;
205
206 /* zero-length flush that will be cloned and submitted to targets */
207 struct bio flush_bio;
208
209 struct dm_stats stats;
210 };
211
212 /*
213 * For mempools pre-allocation at the table loading time.
214 */
215 struct dm_md_mempools {
216 mempool_t *io_pool;
217 struct bio_set *bs;
218 };
219
220 #define RESERVED_BIO_BASED_IOS 16
221 #define RESERVED_REQUEST_BASED_IOS 256
222 #define RESERVED_MAX_IOS 1024
223 static struct kmem_cache *_io_cache;
224 static struct kmem_cache *_rq_tio_cache;
225
226 /*
227 * Bio-based DM's mempools' reserved IOs set by the user.
228 */
229 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
230
231 /*
232 * Request-based DM's mempools' reserved IOs set by the user.
233 */
234 static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
235
236 static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
237 unsigned def, unsigned max)
238 {
239 unsigned ios = ACCESS_ONCE(*reserved_ios);
240 unsigned modified_ios = 0;
241
242 if (!ios)
243 modified_ios = def;
244 else if (ios > max)
245 modified_ios = max;
246
247 if (modified_ios) {
248 (void)cmpxchg(reserved_ios, ios, modified_ios);
249 ios = modified_ios;
250 }
251
252 return ios;
253 }
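/*
 * Editorial note, not part of the original source: a worked example of the
 * clamping above, under the defaults defined in this file. If the user sets
 * reserved_bio_based_ios to 0, the default (RESERVED_BIO_BASED_IOS = 16) is
 * written back via cmpxchg() and returned; if they set it to 5000, it is
 * clamped to RESERVED_MAX_IOS (1024); any value in between is returned
 * unchanged.
 */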
254
255 unsigned dm_get_reserved_bio_based_ios(void)
256 {
257 return __dm_get_reserved_ios(&reserved_bio_based_ios,
258 RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
259 }
260 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
261
262 unsigned dm_get_reserved_rq_based_ios(void)
263 {
264 return __dm_get_reserved_ios(&reserved_rq_based_ios,
265 RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
266 }
267 EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
268
269 static int __init local_init(void)
270 {
271 int r = -ENOMEM;
272
273 /* allocate a slab for the dm_ios */
274 _io_cache = KMEM_CACHE(dm_io, 0);
275 if (!_io_cache)
276 return r;
277
278 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
279 if (!_rq_tio_cache)
280 goto out_free_io_cache;
281
282 r = dm_uevent_init();
283 if (r)
284 goto out_free_rq_tio_cache;
285
286 _major = major;
287 r = register_blkdev(_major, _name);
288 if (r < 0)
289 goto out_uevent_exit;
290
291 if (!_major)
292 _major = r;
293
294 return 0;
295
296 out_uevent_exit:
297 dm_uevent_exit();
298 out_free_rq_tio_cache:
299 kmem_cache_destroy(_rq_tio_cache);
300 out_free_io_cache:
301 kmem_cache_destroy(_io_cache);
302
303 return r;
304 }
305
306 static void local_exit(void)
307 {
308 flush_scheduled_work();
309
310 kmem_cache_destroy(_rq_tio_cache);
311 kmem_cache_destroy(_io_cache);
312 unregister_blkdev(_major, _name);
313 dm_uevent_exit();
314
315 _major = 0;
316
317 DMINFO("cleaned up");
318 }
319
320 static int (*_inits[])(void) __initdata = {
321 local_init,
322 dm_target_init,
323 dm_linear_init,
324 dm_stripe_init,
325 dm_io_init,
326 dm_kcopyd_init,
327 dm_interface_init,
328 dm_statistics_init,
329 };
330
331 static void (*_exits[])(void) = {
332 local_exit,
333 dm_target_exit,
334 dm_linear_exit,
335 dm_stripe_exit,
336 dm_io_exit,
337 dm_kcopyd_exit,
338 dm_interface_exit,
339 dm_statistics_exit,
340 };
341
342 static int __init dm_init(void)
343 {
344 const int count = ARRAY_SIZE(_inits);
345
346 int r, i;
347
348 for (i = 0; i < count; i++) {
349 r = _inits[i]();
350 if (r)
351 goto bad;
352 }
353
354 return 0;
355
356 bad:
357 while (i--)
358 _exits[i]();
359
360 return r;
361 }
362
363 static void __exit dm_exit(void)
364 {
365 int i = ARRAY_SIZE(_exits);
366
367 while (i--)
368 _exits[i]();
369
370 /*
371 * Should be empty by this point.
372 */
373 idr_destroy(&_minor_idr);
374 }
375
376 /*
377 * Block device functions
378 */
379 int dm_deleting_md(struct mapped_device *md)
380 {
381 return test_bit(DMF_DELETING, &md->flags);
382 }
383
384 static int dm_blk_open(struct block_device *bdev, fmode_t mode)
385 {
386 struct mapped_device *md;
387
388 spin_lock(&_minor_lock);
389
390 md = bdev->bd_disk->private_data;
391 if (!md)
392 goto out;
393
394 if (test_bit(DMF_FREEING, &md->flags) ||
395 dm_deleting_md(md)) {
396 md = NULL;
397 goto out;
398 }
399
400 dm_get(md);
401 atomic_inc(&md->open_count);
402
403 out:
404 spin_unlock(&_minor_lock);
405
406 return md ? 0 : -ENXIO;
407 }
408
409 static void dm_blk_close(struct gendisk *disk, fmode_t mode)
410 {
411 struct mapped_device *md = disk->private_data;
412
413 spin_lock(&_minor_lock);
414
415 if (atomic_dec_and_test(&md->open_count) &&
416 (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
417 schedule_work(&deferred_remove_work);
418
419 dm_put(md);
420
421 spin_unlock(&_minor_lock);
422 }
423
424 int dm_open_count(struct mapped_device *md)
425 {
426 return atomic_read(&md->open_count);
427 }
428
429 /*
430 * Guarantees nothing is using the device before it's deleted.
431 */
432 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
433 {
434 int r = 0;
435
436 spin_lock(&_minor_lock);
437
438 if (dm_open_count(md)) {
439 r = -EBUSY;
440 if (mark_deferred)
441 set_bit(DMF_DEFERRED_REMOVE, &md->flags);
442 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
443 r = -EEXIST;
444 else
445 set_bit(DMF_DELETING, &md->flags);
446
447 spin_unlock(&_minor_lock);
448
449 return r;
450 }
451
452 int dm_cancel_deferred_remove(struct mapped_device *md)
453 {
454 int r = 0;
455
456 spin_lock(&_minor_lock);
457
458 if (test_bit(DMF_DELETING, &md->flags))
459 r = -EBUSY;
460 else
461 clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
462
463 spin_unlock(&_minor_lock);
464
465 return r;
466 }
467
468 static void do_deferred_remove(struct work_struct *w)
469 {
470 dm_deferred_remove();
471 }
472
473 sector_t dm_get_size(struct mapped_device *md)
474 {
475 return get_capacity(md->disk);
476 }
477
478 struct dm_stats *dm_get_stats(struct mapped_device *md)
479 {
480 return &md->stats;
481 }
482
483 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
484 {
485 struct mapped_device *md = bdev->bd_disk->private_data;
486
487 return dm_get_geometry(md, geo);
488 }
489
490 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
491 unsigned int cmd, unsigned long arg)
492 {
493 struct mapped_device *md = bdev->bd_disk->private_data;
494 int srcu_idx;
495 struct dm_table *map;
496 struct dm_target *tgt;
497 int r = -ENOTTY;
498
499 retry:
500 map = dm_get_live_table(md, &srcu_idx);
501
502 if (!map || !dm_table_get_size(map))
503 goto out;
504
505 /* We only support devices that have a single target */
506 if (dm_table_get_num_targets(map) != 1)
507 goto out;
508
509 tgt = dm_table_get_target(map, 0);
510
511 if (dm_suspended_md(md)) {
512 r = -EAGAIN;
513 goto out;
514 }
515
516 if (tgt->type->ioctl)
517 r = tgt->type->ioctl(tgt, cmd, arg);
518
519 out:
520 dm_put_live_table(md, srcu_idx);
521
522 if (r == -ENOTCONN) {
523 msleep(10);
524 goto retry;
525 }
526
527 return r;
528 }
529
530 static struct dm_io *alloc_io(struct mapped_device *md)
531 {
532 return mempool_alloc(md->io_pool, GFP_NOIO);
533 }
534
535 static void free_io(struct mapped_device *md, struct dm_io *io)
536 {
537 mempool_free(io, md->io_pool);
538 }
539
540 static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
541 {
542 bio_put(&tio->clone);
543 }
544
545 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
546 gfp_t gfp_mask)
547 {
548 return mempool_alloc(md->io_pool, gfp_mask);
549 }
550
551 static void free_rq_tio(struct dm_rq_target_io *tio)
552 {
553 mempool_free(tio, tio->md->io_pool);
554 }
555
556 static int md_in_flight(struct mapped_device *md)
557 {
558 return atomic_read(&md->pending[READ]) +
559 atomic_read(&md->pending[WRITE]);
560 }
561
562 static void start_io_acct(struct dm_io *io)
563 {
564 struct mapped_device *md = io->md;
565 struct bio *bio = io->bio;
566 int cpu;
567 int rw = bio_data_dir(bio);
568
569 io->start_time = jiffies;
570
571 cpu = part_stat_lock();
572 part_round_stats(cpu, &dm_disk(md)->part0);
573 part_stat_unlock();
574 atomic_set(&dm_disk(md)->part0.in_flight[rw],
575 atomic_inc_return(&md->pending[rw]));
576
577 if (unlikely(dm_stats_used(&md->stats)))
578 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
579 bio_sectors(bio), false, 0, &io->stats_aux);
580 }
581
582 static void end_io_acct(struct dm_io *io)
583 {
584 struct mapped_device *md = io->md;
585 struct bio *bio = io->bio;
586 unsigned long duration = jiffies - io->start_time;
587 int pending, cpu;
588 int rw = bio_data_dir(bio);
589
590 cpu = part_stat_lock();
591 part_round_stats(cpu, &dm_disk(md)->part0);
592 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
593 part_stat_unlock();
594
595 if (unlikely(dm_stats_used(&md->stats)))
596 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
597 bio_sectors(bio), true, duration, &io->stats_aux);
598
599 /*
600 * After this is decremented the bio must not be touched if it is
601 * a flush.
602 */
603 pending = atomic_dec_return(&md->pending[rw]);
604 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
605 pending += atomic_read(&md->pending[rw^0x1]);
606
607 /* nudge anyone waiting on suspend queue */
608 if (!pending)
609 wake_up(&md->wait);
610 }
611
612 /*
613 * Add the bio to the list of deferred io.
614 */
615 static void queue_io(struct mapped_device *md, struct bio *bio)
616 {
617 unsigned long flags;
618
619 spin_lock_irqsave(&md->deferred_lock, flags);
620 bio_list_add(&md->deferred, bio);
621 spin_unlock_irqrestore(&md->deferred_lock, flags);
622 queue_work(md->wq, &md->work);
623 }
624
625 /*
626 * Everyone (including functions in this file) should use this
627 * function to access the md->map field, and make sure they call
628 * dm_put_live_table() when finished.
629 */
630 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
631 {
632 *srcu_idx = srcu_read_lock(&md->io_barrier);
633
634 return srcu_dereference(md->map, &md->io_barrier);
635 }
636
637 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
638 {
639 srcu_read_unlock(&md->io_barrier, srcu_idx);
640 }
641
642 void dm_sync_table(struct mapped_device *md)
643 {
644 synchronize_srcu(&md->io_barrier);
645 synchronize_rcu_expedited();
646 }
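/*
 * Editorial note, not part of the original source: a minimal sketch of the
 * expected caller pattern for the SRCU-protected live table; inspect_table()
 * stands in for whatever read-only work the caller does and is hypothetical:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map)
 *		inspect_table(map);	(may block: SRCU readers can sleep)
 *	dm_put_live_table(md, srcu_idx);
 *
 * The _fast variants below follow the same shape but use plain RCU, so the
 * caller must not block between the get and the put.
 */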
647
648 /*
649 * A fast alternative to dm_get_live_table/dm_put_live_table.
650 * The caller must not block between these two functions.
651 */
652 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
653 {
654 rcu_read_lock();
655 return rcu_dereference(md->map);
656 }
657
658 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
659 {
660 rcu_read_unlock();
661 }
662
663 /*
664 * Get the geometry associated with a dm device
665 */
666 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
667 {
668 *geo = md->geometry;
669
670 return 0;
671 }
672
673 /*
674 * Set the geometry of a device.
675 */
676 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
677 {
678 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
679
680 if (geo->start > sz) {
681 DMWARN("Start sector is beyond the geometry limits.");
682 return -EINVAL;
683 }
684
685 md->geometry = *geo;
686
687 return 0;
688 }
689
690 /*-----------------------------------------------------------------
691 * CRUD START:
692 * A more elegant soln is in the works that uses the queue
693 * merge fn, unfortunately there are a couple of changes to
694 * the block layer that I want to make for this. So in the
695 * interests of getting something for people to use I give
696 * you this clearly demarcated crap.
697 *---------------------------------------------------------------*/
698
699 static int __noflush_suspending(struct mapped_device *md)
700 {
701 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
702 }
703
704 /*
705 * Decrements the number of outstanding ios that a bio has been
706 * cloned into, completing the original io if necessary.
707 */
708 static void dec_pending(struct dm_io *io, int error)
709 {
710 unsigned long flags;
711 int io_error;
712 struct bio *bio;
713 struct mapped_device *md = io->md;
714
715 /* Push-back supersedes any I/O errors */
716 if (unlikely(error)) {
717 spin_lock_irqsave(&io->endio_lock, flags);
718 if (!(io->error > 0 && __noflush_suspending(md)))
719 io->error = error;
720 spin_unlock_irqrestore(&io->endio_lock, flags);
721 }
722
723 if (atomic_dec_and_test(&io->io_count)) {
724 if (io->error == DM_ENDIO_REQUEUE) {
725 /*
726 * Target requested pushing back the I/O.
727 */
728 spin_lock_irqsave(&md->deferred_lock, flags);
729 if (__noflush_suspending(md))
730 bio_list_add_head(&md->deferred, io->bio);
731 else
732 /* noflush suspend was interrupted. */
733 io->error = -EIO;
734 spin_unlock_irqrestore(&md->deferred_lock, flags);
735 }
736
737 io_error = io->error;
738 bio = io->bio;
739 end_io_acct(io);
740 free_io(md, io);
741
742 if (io_error == DM_ENDIO_REQUEUE)
743 return;
744
745 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
746 /*
747 * Preflush done for flush with data, reissue
748 * without REQ_FLUSH.
749 */
750 bio->bi_rw &= ~REQ_FLUSH;
751 queue_io(md, bio);
752 } else {
753 /* done with normal IO or empty flush */
754 trace_block_bio_complete(md->queue, bio, io_error);
755 bio_endio(bio, io_error);
756 }
757 }
758 }
759
760 static void clone_endio(struct bio *bio, int error)
761 {
762 int r = 0;
763 struct dm_target_io *tio = bio->bi_private;
764 struct dm_io *io = tio->io;
765 struct mapped_device *md = tio->io->md;
766 dm_endio_fn endio = tio->ti->type->end_io;
767
768 if (!bio_flagged(bio, BIO_UPTODATE) && !error)
769 error = -EIO;
770
771 if (endio) {
772 r = endio(tio->ti, bio, error);
773 if (r < 0 || r == DM_ENDIO_REQUEUE)
774 /*
775 * error and requeue request are handled
776 * in dec_pending().
777 */
778 error = r;
779 else if (r == DM_ENDIO_INCOMPLETE)
780 /* The target will handle the io */
781 return;
782 else if (r) {
783 DMWARN("unimplemented target endio return value: %d", r);
784 BUG();
785 }
786 }
787
788 free_tio(md, tio);
789 dec_pending(io, error);
790 }
791
792 /*
793 * Partial completion handling for request-based dm
794 */
795 static void end_clone_bio(struct bio *clone, int error)
796 {
797 struct dm_rq_clone_bio_info *info = clone->bi_private;
798 struct dm_rq_target_io *tio = info->tio;
799 struct bio *bio = info->orig;
800 unsigned int nr_bytes = info->orig->bi_iter.bi_size;
801
802 bio_put(clone);
803
804 if (tio->error)
805 /*
806 * An error has already been detected on the request.
807 * Once an error has occurred, just let clone->end_io() handle
808 * the remainder.
809 */
810 return;
811 else if (error) {
812 /*
813 * Don't report the error to the upper layer yet.
814 * The error handling decision is made by the target driver
815 * when the request is completed.
816 */
817 tio->error = error;
818 return;
819 }
820
821 /*
822 * I/O for the bio successfully completed.
823 * Report the data completion to the upper layer.
824 */
825
826 /*
827 * bios are processed from the head of the list.
828 * So the completing bio should always be rq->bio.
829 * If it's not, something wrong is happening.
830 */
831 if (tio->orig->bio != bio)
832 DMERR("bio completion is going in the middle of the request");
833
834 /*
835 * Update the original request.
836 * Do not use blk_end_request() here, because it may complete
837 * the original request before the clone, and break the ordering.
838 */
839 blk_update_request(tio->orig, 0, nr_bytes);
840 }
841
842 /*
843 * Don't touch any member of the md after calling this function because
844 * the md may be freed in dm_put() at the end of this function.
845 * Or do dm_get() before calling this function and dm_put() later.
846 */
847 static void rq_completed(struct mapped_device *md, int rw, int run_queue)
848 {
849 atomic_dec(&md->pending[rw]);
850
851 /* nudge anyone waiting on suspend queue */
852 if (!md_in_flight(md))
853 wake_up(&md->wait);
854
855 /*
856 * Run this off this callpath, as drivers could invoke end_io while
857 * inside their request_fn (and holding the queue lock). Calling
858 * back into ->request_fn() could deadlock attempting to grab the
859 * queue lock again.
860 */
861 if (run_queue)
862 blk_run_queue_async(md->queue);
863
864 /*
865 * dm_put() must be at the end of this function. See the comment above
866 */
867 dm_put(md);
868 }
869
870 static void free_rq_clone(struct request *clone)
871 {
872 struct dm_rq_target_io *tio = clone->end_io_data;
873
874 blk_rq_unprep_clone(clone);
875 free_rq_tio(tio);
876 }
877
878 /*
879 * Complete the clone and the original request.
880 * Must be called without queue lock.
881 */
882 static void dm_end_request(struct request *clone, int error)
883 {
884 int rw = rq_data_dir(clone);
885 struct dm_rq_target_io *tio = clone->end_io_data;
886 struct mapped_device *md = tio->md;
887 struct request *rq = tio->orig;
888
889 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
890 rq->errors = clone->errors;
891 rq->resid_len = clone->resid_len;
892
893 if (rq->sense)
894 /*
895 * We are using the sense buffer of the original
896 * request.
897 * So setting the length of the sense data is enough.
898 */
899 rq->sense_len = clone->sense_len;
900 }
901
902 free_rq_clone(clone);
903 blk_end_request_all(rq, error);
904 rq_completed(md, rw, true);
905 }
906
907 static void dm_unprep_request(struct request *rq)
908 {
909 struct request *clone = rq->special;
910
911 rq->special = NULL;
912 rq->cmd_flags &= ~REQ_DONTPREP;
913
914 free_rq_clone(clone);
915 }
916
917 /*
918 * Requeue the original request of a clone.
919 */
920 void dm_requeue_unmapped_request(struct request *clone)
921 {
922 int rw = rq_data_dir(clone);
923 struct dm_rq_target_io *tio = clone->end_io_data;
924 struct mapped_device *md = tio->md;
925 struct request *rq = tio->orig;
926 struct request_queue *q = rq->q;
927 unsigned long flags;
928
929 dm_unprep_request(rq);
930
931 spin_lock_irqsave(q->queue_lock, flags);
932 blk_requeue_request(q, rq);
933 spin_unlock_irqrestore(q->queue_lock, flags);
934
935 rq_completed(md, rw, 0);
936 }
937 EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
938
939 static void __stop_queue(struct request_queue *q)
940 {
941 blk_stop_queue(q);
942 }
943
944 static void stop_queue(struct request_queue *q)
945 {
946 unsigned long flags;
947
948 spin_lock_irqsave(q->queue_lock, flags);
949 __stop_queue(q);
950 spin_unlock_irqrestore(q->queue_lock, flags);
951 }
952
953 static void __start_queue(struct request_queue *q)
954 {
955 if (blk_queue_stopped(q))
956 blk_start_queue(q);
957 }
958
959 static void start_queue(struct request_queue *q)
960 {
961 unsigned long flags;
962
963 spin_lock_irqsave(q->queue_lock, flags);
964 __start_queue(q);
965 spin_unlock_irqrestore(q->queue_lock, flags);
966 }
967
968 static void dm_done(struct request *clone, int error, bool mapped)
969 {
970 int r = error;
971 struct dm_rq_target_io *tio = clone->end_io_data;
972 dm_request_endio_fn rq_end_io = NULL;
973
974 if (tio->ti) {
975 rq_end_io = tio->ti->type->rq_end_io;
976
977 if (mapped && rq_end_io)
978 r = rq_end_io(tio->ti, clone, error, &tio->info);
979 }
980
981 if (r <= 0)
982 /* The target wants to complete the I/O */
983 dm_end_request(clone, r);
984 else if (r == DM_ENDIO_INCOMPLETE)
985 /* The target will handle the I/O */
986 return;
987 else if (r == DM_ENDIO_REQUEUE)
988 /* The target wants to requeue the I/O */
989 dm_requeue_unmapped_request(clone);
990 else {
991 DMWARN("unimplemented target endio return value: %d", r);
992 BUG();
993 }
994 }
995
996 /*
997 * Request completion handler for request-based dm
998 */
999 static void dm_softirq_done(struct request *rq)
1000 {
1001 bool mapped = true;
1002 struct request *clone = rq->completion_data;
1003 struct dm_rq_target_io *tio = clone->end_io_data;
1004
1005 if (rq->cmd_flags & REQ_FAILED)
1006 mapped = false;
1007
1008 dm_done(clone, tio->error, mapped);
1009 }
1010
1011 /*
1012 * Complete the clone and the original request with the error status
1013 * through softirq context.
1014 */
1015 static void dm_complete_request(struct request *clone, int error)
1016 {
1017 struct dm_rq_target_io *tio = clone->end_io_data;
1018 struct request *rq = tio->orig;
1019
1020 tio->error = error;
1021 rq->completion_data = clone;
1022 blk_complete_request(rq);
1023 }
1024
1025 /*
1026 * Complete the not-mapped clone and the original request with the error status
1027 * through softirq context.
1028 * Target's rq_end_io() function isn't called.
1029 * This may be used when the target's map_rq() function fails.
1030 */
1031 void dm_kill_unmapped_request(struct request *clone, int error)
1032 {
1033 struct dm_rq_target_io *tio = clone->end_io_data;
1034 struct request *rq = tio->orig;
1035
1036 rq->cmd_flags |= REQ_FAILED;
1037 dm_complete_request(clone, error);
1038 }
1039 EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
1040
1041 /*
1042 * Called with the queue lock held
1043 */
1044 static void end_clone_request(struct request *clone, int error)
1045 {
1046 /*
1047 * This just cleans up the information of the queue in which
1048 * the clone was dispatched.
1049 * The clone is *NOT* actually freed here because it is allocated from
1050 * dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
1051 */
1052 __blk_put_request(clone->q, clone);
1053
1054 /*
1055 * Actual request completion is done in a softirq context which doesn't
1056 * hold the queue lock. Otherwise, deadlock could occur because:
1057 * - another request may be submitted by the upper level driver
1058 * of the stacking during the completion
1059 * - the submission which requires queue lock may be done
1060 * against this queue
1061 */
1062 dm_complete_request(clone, error);
1063 }
1064
1065 /*
1066 * Return maximum size of I/O possible at the supplied sector up to the current
1067 * target boundary.
1068 */
1069 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
1070 {
1071 sector_t target_offset = dm_target_offset(ti, sector);
1072
1073 return ti->len - target_offset;
1074 }
1075
1076 static sector_t max_io_len(sector_t sector, struct dm_target *ti)
1077 {
1078 sector_t len = max_io_len_target_boundary(sector, ti);
1079 sector_t offset, max_len;
1080
1081 /*
1082 * Does the target need to split even further?
1083 */
1084 if (ti->max_io_len) {
1085 offset = dm_target_offset(ti, sector);
1086 if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
1087 max_len = sector_div(offset, ti->max_io_len);
1088 else
1089 max_len = offset & (ti->max_io_len - 1);
1090 max_len = ti->max_io_len - max_len;
1091
1092 if (len > max_len)
1093 len = max_len;
1094 }
1095
1096 return len;
1097 }
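/*
 * Editorial note, not part of the original source: a worked example of the
 * boundary math in max_io_len() above. With ti->max_io_len = 8 (a power of
 * two) and an offset of 13 sectors into the target, max_len = 13 & 7 = 5,
 * so 8 - 5 = 3 sectors remain before the next max_io_len boundary and len
 * is clamped to at most 3. The sector_div() branch computes the same
 * remainder for non-power-of-two values.
 */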
1098
1099 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1100 {
1101 if (len > UINT_MAX) {
1102 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1103 (unsigned long long)len, UINT_MAX);
1104 ti->error = "Maximum size of target IO is too large";
1105 return -EINVAL;
1106 }
1107
1108 ti->max_io_len = (uint32_t) len;
1109
1110 return 0;
1111 }
1112 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
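/*
 * Editorial note, not part of the original source: a hedged sketch of how a
 * target constructor might use dm_set_target_max_io_len(); my_target_ctr()
 * and MY_CHUNK_SECTORS are hypothetical names, not defined in this tree:
 *
 *	static int my_target_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		int r = dm_set_target_max_io_len(ti, MY_CHUNK_SECTORS);
 *
 *		if (r)
 *			return r;
 *		...
 *	}
 *
 * On failure the helper has already set ti->error and returned -EINVAL.
 */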
1113
1114 static void __map_bio(struct dm_target_io *tio)
1115 {
1116 int r;
1117 sector_t sector;
1118 struct mapped_device *md;
1119 struct bio *clone = &tio->clone;
1120 struct dm_target *ti = tio->ti;
1121
1122 clone->bi_end_io = clone_endio;
1123 clone->bi_private = tio;
1124
1125 /*
1126 * Map the clone. If r == 0 we don't need to do
1127 * anything, the target has assumed ownership of
1128 * this io.
1129 */
1130 atomic_inc(&tio->io->io_count);
1131 sector = clone->bi_iter.bi_sector;
1132 r = ti->type->map(ti, clone);
1133 if (r == DM_MAPIO_REMAPPED) {
1134 /* the bio has been remapped so dispatch it */
1135
1136 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1137 tio->io->bio->bi_bdev->bd_dev, sector);
1138
1139 generic_make_request(clone);
1140 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1141 /* error the io and bail out, or requeue it if needed */
1142 md = tio->io->md;
1143 dec_pending(tio->io, r);
1144 free_tio(md, tio);
1145 } else if (r) {
1146 DMWARN("unimplemented target map return value: %d", r);
1147 BUG();
1148 }
1149 }
1150
1151 struct clone_info {
1152 struct mapped_device *md;
1153 struct dm_table *map;
1154 struct bio *bio;
1155 struct dm_io *io;
1156 sector_t sector;
1157 sector_t sector_count;
1158 };
1159
1160 static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
1161 {
1162 bio->bi_iter.bi_sector = sector;
1163 bio->bi_iter.bi_size = to_bytes(len);
1164 }
1165
1166 /*
1167 * Creates a bio that consists of range of complete bvecs.
1168 */
1169 static void clone_bio(struct dm_target_io *tio, struct bio *bio,
1170 sector_t sector, unsigned len)
1171 {
1172 struct bio *clone = &tio->clone;
1173
1174 __bio_clone_fast(clone, bio);
1175
1176 if (bio_integrity(bio))
1177 bio_integrity_clone(clone, bio, GFP_NOIO);
1178
1179 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1180 clone->bi_iter.bi_size = to_bytes(len);
1181
1182 if (bio_integrity(bio))
1183 bio_integrity_trim(clone, 0, len);
1184 }
1185
1186 static struct dm_target_io *alloc_tio(struct clone_info *ci,
1187 struct dm_target *ti, int nr_iovecs,
1188 unsigned target_bio_nr)
1189 {
1190 struct dm_target_io *tio;
1191 struct bio *clone;
1192
1193 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs);
1194 tio = container_of(clone, struct dm_target_io, clone);
1195
1196 tio->io = ci->io;
1197 tio->ti = ti;
1198 memset(&tio->info, 0, sizeof(tio->info));
1199 tio->target_bio_nr = target_bio_nr;
1200
1201 return tio;
1202 }
1203
1204 static void __clone_and_map_simple_bio(struct clone_info *ci,
1205 struct dm_target *ti,
1206 unsigned target_bio_nr, sector_t len)
1207 {
1208 struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
1209 struct bio *clone = &tio->clone;
1210
1211 /*
1212 * Discard requests require the bio's inline iovecs be initialized.
1213 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
1214 * and discard, so no need for concern about wasted bvec allocations.
1215 */
1216 __bio_clone_fast(clone, ci->bio);
1217 if (len)
1218 bio_setup_sector(clone, ci->sector, len);
1219
1220 __map_bio(tio);
1221 }
1222
1223 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1224 unsigned num_bios, sector_t len)
1225 {
1226 unsigned target_bio_nr;
1227
1228 for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
1229 __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
1230 }
1231
1232 static int __send_empty_flush(struct clone_info *ci)
1233 {
1234 unsigned target_nr = 0;
1235 struct dm_target *ti;
1236
1237 BUG_ON(bio_has_data(ci->bio));
1238 while ((ti = dm_table_get_target(ci->map, target_nr++)))
1239 __send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
1240
1241 return 0;
1242 }
1243
1244 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1245 sector_t sector, unsigned len)
1246 {
1247 struct bio *bio = ci->bio;
1248 struct dm_target_io *tio;
1249 unsigned target_bio_nr;
1250 unsigned num_target_bios = 1;
1251
1252 /*
1253 * Does the target want to receive duplicate copies of the bio?
1254 */
1255 if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
1256 num_target_bios = ti->num_write_bios(ti, bio);
1257
1258 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
1259 tio = alloc_tio(ci, ti, 0, target_bio_nr);
1260 clone_bio(tio, bio, sector, len);
1261 __map_bio(tio);
1262 }
1263 }
1264
1265 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
1266
1267 static unsigned get_num_discard_bios(struct dm_target *ti)
1268 {
1269 return ti->num_discard_bios;
1270 }
1271
1272 static unsigned get_num_write_same_bios(struct dm_target *ti)
1273 {
1274 return ti->num_write_same_bios;
1275 }
1276
1277 typedef bool (*is_split_required_fn)(struct dm_target *ti);
1278
1279 static bool is_split_required_for_discard(struct dm_target *ti)
1280 {
1281 return ti->split_discard_bios;
1282 }
1283
1284 static int __send_changing_extent_only(struct clone_info *ci,
1285 get_num_bios_fn get_num_bios,
1286 is_split_required_fn is_split_required)
1287 {
1288 struct dm_target *ti;
1289 sector_t len;
1290 unsigned num_bios;
1291
1292 do {
1293 ti = dm_table_find_target(ci->map, ci->sector);
1294 if (!dm_target_is_valid(ti))
1295 return -EIO;
1296
1297 /*
1298 * Even though the device advertised support for this type of
1299 * request, that does not mean every target supports it, and
1300 * reconfiguration might also have changed that since the
1301 * check was performed.
1302 */
1303 num_bios = get_num_bios ? get_num_bios(ti) : 0;
1304 if (!num_bios)
1305 return -EOPNOTSUPP;
1306
1307 if (is_split_required && !is_split_required(ti))
1308 len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1309 else
1310 len = min(ci->sector_count, max_io_len(ci->sector, ti));
1311
1312 __send_duplicate_bios(ci, ti, num_bios, len);
1313
1314 ci->sector += len;
1315 } while (ci->sector_count -= len);
1316
1317 return 0;
1318 }
1319
1320 static int __send_discard(struct clone_info *ci)
1321 {
1322 return __send_changing_extent_only(ci, get_num_discard_bios,
1323 is_split_required_for_discard);
1324 }
1325
1326 static int __send_write_same(struct clone_info *ci)
1327 {
1328 return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
1329 }
1330
1331 /*
1332 * Select the correct strategy for processing a non-flush bio.
1333 */
1334 static int __split_and_process_non_flush(struct clone_info *ci)
1335 {
1336 struct bio *bio = ci->bio;
1337 struct dm_target *ti;
1338 unsigned len;
1339
1340 if (unlikely(bio->bi_rw & REQ_DISCARD))
1341 return __send_discard(ci);
1342 else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
1343 return __send_write_same(ci);
1344
1345 ti = dm_table_find_target(ci->map, ci->sector);
1346 if (!dm_target_is_valid(ti))
1347 return -EIO;
1348
1349 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
1350
1351 __clone_and_map_data_bio(ci, ti, ci->sector, len);
1352
1353 ci->sector += len;
1354 ci->sector_count -= len;
1355
1356 return 0;
1357 }
1358
1359 /*
1360 * Entry point to split a bio into clones and submit them to the targets.
1361 */
1362 static void __split_and_process_bio(struct mapped_device *md,
1363 struct dm_table *map, struct bio *bio)
1364 {
1365 struct clone_info ci;
1366 int error = 0;
1367
1368 if (unlikely(!map)) {
1369 bio_io_error(bio);
1370 return;
1371 }
1372
1373 ci.map = map;
1374 ci.md = md;
1375 ci.io = alloc_io(md);
1376 ci.io->error = 0;
1377 atomic_set(&ci.io->io_count, 1);
1378 ci.io->bio = bio;
1379 ci.io->md = md;
1380 spin_lock_init(&ci.io->endio_lock);
1381 ci.sector = bio->bi_iter.bi_sector;
1382
1383 start_io_acct(ci.io);
1384
1385 if (bio->bi_rw & REQ_FLUSH) {
1386 ci.bio = &ci.md->flush_bio;
1387 ci.sector_count = 0;
1388 error = __send_empty_flush(&ci);
1389 /* dec_pending submits any data associated with flush */
1390 } else {
1391 ci.bio = bio;
1392 ci.sector_count = bio_sectors(bio);
1393 while (ci.sector_count && !error)
1394 error = __split_and_process_non_flush(&ci);
1395 }
1396
1397 /* drop the extra reference count */
1398 dec_pending(ci.io, error);
1399 }
1400 /*-----------------------------------------------------------------
1401 * CRUD END
1402 *---------------------------------------------------------------*/
1403
1404 static int dm_merge_bvec(struct request_queue *q,
1405 struct bvec_merge_data *bvm,
1406 struct bio_vec *biovec)
1407 {
1408 struct mapped_device *md = q->queuedata;
1409 struct dm_table *map = dm_get_live_table_fast(md);
1410 struct dm_target *ti;
1411 sector_t max_sectors;
1412 int max_size = 0;
1413
1414 if (unlikely(!map))
1415 goto out;
1416
1417 ti = dm_table_find_target(map, bvm->bi_sector);
1418 if (!dm_target_is_valid(ti))
1419 goto out;
1420
1421 /*
1422 * Find maximum amount of I/O that won't need splitting
1423 */
1424 max_sectors = min(max_io_len(bvm->bi_sector, ti),
1425 (sector_t) BIO_MAX_SECTORS);
1426 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1427 if (max_size < 0)
1428 max_size = 0;
1429
1430 /*
1431 * merge_bvec_fn() returns number of bytes
1432 * it can accept at this offset
1433 * max is precomputed maximal io size
1434 */
1435 if (max_size && ti->type->merge)
1436 max_size = ti->type->merge(ti, bvm, biovec, max_size);
1437 /*
1438 * If the target doesn't support merge method and some of the devices
1439 * provided their merge_bvec method (we know this by looking at
1440 * queue_max_hw_sectors), then we can't allow bios with multiple vector
1441 * entries. So always set max_size to 0, and the code below allows
1442 * just one page.
1443 */
1444 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1445
1446 max_size = 0;
1447
1448 out:
1449 dm_put_live_table_fast(md);
1450 /*
1451 * Always allow an entire first page
1452 */
1453 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1454 max_size = biovec->bv_len;
1455
1456 return max_size;
1457 }
1458
1459 /*
1460 * The request function that just remaps the bio built up by
1461 * dm_merge_bvec.
1462 */
1463 static void _dm_request(struct request_queue *q, struct bio *bio)
1464 {
1465 int rw = bio_data_dir(bio);
1466 struct mapped_device *md = q->queuedata;
1467 int cpu;
1468 int srcu_idx;
1469 struct dm_table *map;
1470
1471 map = dm_get_live_table(md, &srcu_idx);
1472
1473 cpu = part_stat_lock();
1474 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
1475 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1476 part_stat_unlock();
1477
1478 /* if we're suspended, we have to queue this io for later */
1479 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1480 dm_put_live_table(md, srcu_idx);
1481
1482 if (bio_rw(bio) != READA)
1483 queue_io(md, bio);
1484 else
1485 bio_io_error(bio);
1486 return;
1487 }
1488
1489 __split_and_process_bio(md, map, bio);
1490 dm_put_live_table(md, srcu_idx);
1491 return;
1492 }
1493
1494 int dm_request_based(struct mapped_device *md)
1495 {
1496 return blk_queue_stackable(md->queue);
1497 }
1498
1499 static void dm_request(struct request_queue *q, struct bio *bio)
1500 {
1501 struct mapped_device *md = q->queuedata;
1502
1503 if (dm_request_based(md))
1504 blk_queue_bio(q, bio);
1505 else
1506 _dm_request(q, bio);
1507 }
1508
1509 void dm_dispatch_request(struct request *rq)
1510 {
1511 int r;
1512
1513 if (blk_queue_io_stat(rq->q))
1514 rq->cmd_flags |= REQ_IO_STAT;
1515
1516 rq->start_time = jiffies;
1517 r = blk_insert_cloned_request(rq->q, rq);
1518 if (r)
1519 dm_complete_request(rq, r);
1520 }
1521 EXPORT_SYMBOL_GPL(dm_dispatch_request);
1522
1523 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1524 void *data)
1525 {
1526 struct dm_rq_target_io *tio = data;
1527 struct dm_rq_clone_bio_info *info =
1528 container_of(bio, struct dm_rq_clone_bio_info, clone);
1529
1530 info->orig = bio_orig;
1531 info->tio = tio;
1532 bio->bi_end_io = end_clone_bio;
1533 bio->bi_private = info;
1534
1535 return 0;
1536 }
1537
1538 static int setup_clone(struct request *clone, struct request *rq,
1539 struct dm_rq_target_io *tio)
1540 {
1541 int r;
1542
1543 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1544 dm_rq_bio_constructor, tio);
1545 if (r)
1546 return r;
1547
1548 clone->cmd = rq->cmd;
1549 clone->cmd_len = rq->cmd_len;
1550 clone->sense = rq->sense;
1551 clone->buffer = rq->buffer;
1552 clone->end_io = end_clone_request;
1553 clone->end_io_data = tio;
1554
1555 return 0;
1556 }
1557
1558 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1559 gfp_t gfp_mask)
1560 {
1561 struct request *clone;
1562 struct dm_rq_target_io *tio;
1563
1564 tio = alloc_rq_tio(md, gfp_mask);
1565 if (!tio)
1566 return NULL;
1567
1568 tio->md = md;
1569 tio->ti = NULL;
1570 tio->orig = rq;
1571 tio->error = 0;
1572 memset(&tio->info, 0, sizeof(tio->info));
1573
1574 clone = &tio->clone;
1575 if (setup_clone(clone, rq, tio)) {
1576 /* -ENOMEM */
1577 free_rq_tio(tio);
1578 return NULL;
1579 }
1580
1581 return clone;
1582 }
1583
1584 /*
1585 * Called with the queue lock held.
1586 */
1587 static int dm_prep_fn(struct request_queue *q, struct request *rq)
1588 {
1589 struct mapped_device *md = q->queuedata;
1590 struct request *clone;
1591
1592 if (unlikely(rq->special)) {
1593 DMWARN("Already has something in rq->special.");
1594 return BLKPREP_KILL;
1595 }
1596
1597 clone = clone_rq(rq, md, GFP_ATOMIC);
1598 if (!clone)
1599 return BLKPREP_DEFER;
1600
1601 rq->special = clone;
1602 rq->cmd_flags |= REQ_DONTPREP;
1603
1604 return BLKPREP_OK;
1605 }
1606
1607 /*
1608 * Returns:
1609 * 0 : the request has been processed (not requeued)
1610 * !0 : the request has been requeued
1611 */
1612 static int map_request(struct dm_target *ti, struct request *clone,
1613 struct mapped_device *md)
1614 {
1615 int r, requeued = 0;
1616 struct dm_rq_target_io *tio = clone->end_io_data;
1617
1618 tio->ti = ti;
1619 r = ti->type->map_rq(ti, clone, &tio->info);
1620 switch (r) {
1621 case DM_MAPIO_SUBMITTED:
1622 /* The target has taken the I/O to submit by itself later */
1623 break;
1624 case DM_MAPIO_REMAPPED:
1625 /* The target has remapped the I/O so dispatch it */
1626 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1627 blk_rq_pos(tio->orig));
1628 dm_dispatch_request(clone);
1629 break;
1630 case DM_MAPIO_REQUEUE:
1631 /* The target wants to requeue the I/O */
1632 dm_requeue_unmapped_request(clone);
1633 requeued = 1;
1634 break;
1635 default:
1636 if (r > 0) {
1637 DMWARN("unimplemented target map return value: %d", r);
1638 BUG();
1639 }
1640
1641 /* The target wants to complete the I/O */
1642 dm_kill_unmapped_request(clone, r);
1643 break;
1644 }
1645
1646 return requeued;
1647 }
1648
1649 static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
1650 {
1651 struct request *clone;
1652
1653 blk_start_request(orig);
1654 clone = orig->special;
1655 atomic_inc(&md->pending[rq_data_dir(clone)]);
1656
1657 /*
1658 * Hold the md reference here for the in-flight I/O.
1659 * We can't rely on the reference count held by the device opener,
1660 * because the device may be closed during the request completion
1661 * when all bios are completed.
1662 * See the comment in rq_completed() too.
1663 */
1664 dm_get(md);
1665
1666 return clone;
1667 }
1668
1669 /*
1670 * q->request_fn for request-based dm.
1671 * Called with the queue lock held.
1672 */
1673 static void dm_request_fn(struct request_queue *q)
1674 {
1675 struct mapped_device *md = q->queuedata;
1676 int srcu_idx;
1677 struct dm_table *map = dm_get_live_table(md, &srcu_idx);
1678 struct dm_target *ti;
1679 struct request *rq, *clone;
1680 sector_t pos;
1681
1682 /*
1683 * For suspend, check blk_queue_stopped() and increment
1684 * ->pending within a single queue_lock not to increment the
1685 * number of in-flight I/Os after the queue is stopped in
1686 * dm_suspend().
1687 */
1688 while (!blk_queue_stopped(q)) {
1689 rq = blk_peek_request(q);
1690 if (!rq)
1691 goto delay_and_out;
1692
1693 /* always use block 0 to find the target for flushes for now */
1694 pos = 0;
1695 if (!(rq->cmd_flags & REQ_FLUSH))
1696 pos = blk_rq_pos(rq);
1697
1698 ti = dm_table_find_target(map, pos);
1699 if (!dm_target_is_valid(ti)) {
1700 /*
1701 * Must perform the setup that dm_done() requires
1702 * before calling dm_kill_unmapped_request
1703 */
1704 DMERR_LIMIT("request attempted access beyond the end of device");
1705 clone = dm_start_request(md, rq);
1706 dm_kill_unmapped_request(clone, -EIO);
1707 continue;
1708 }
1709
1710 if (ti->type->busy && ti->type->busy(ti))
1711 goto delay_and_out;
1712
1713 clone = dm_start_request(md, rq);
1714
1715 spin_unlock(q->queue_lock);
1716 if (map_request(ti, clone, md))
1717 goto requeued;
1718
1719 BUG_ON(!irqs_disabled());
1720 spin_lock(q->queue_lock);
1721 }
1722
1723 goto out;
1724
1725 requeued:
1726 BUG_ON(!irqs_disabled());
1727 spin_lock(q->queue_lock);
1728
1729 delay_and_out:
1730 blk_delay_queue(q, HZ / 10);
1731 out:
1732 dm_put_live_table(md, srcu_idx);
1733 }
1734
1735 int dm_underlying_device_busy(struct request_queue *q)
1736 {
1737 return blk_lld_busy(q);
1738 }
1739 EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1740
1741 static int dm_lld_busy(struct request_queue *q)
1742 {
1743 int r;
1744 struct mapped_device *md = q->queuedata;
1745 struct dm_table *map = dm_get_live_table_fast(md);
1746
1747 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1748 r = 1;
1749 else
1750 r = dm_table_any_busy_target(map);
1751
1752 dm_put_live_table_fast(md);
1753
1754 return r;
1755 }
1756
1757 static int dm_any_congested(void *congested_data, int bdi_bits)
1758 {
1759 int r = bdi_bits;
1760 struct mapped_device *md = congested_data;
1761 struct dm_table *map;
1762
1763 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1764 map = dm_get_live_table_fast(md);
1765 if (map) {
1766 /*
1767 * Request-based dm cares only about its own queue when
1768 * queried about the congestion status of the request_queue
1769 */
1770 if (dm_request_based(md))
1771 r = md->queue->backing_dev_info.state &
1772 bdi_bits;
1773 else
1774 r = dm_table_any_congested(map, bdi_bits);
1775 }
1776 dm_put_live_table_fast(md);
1777 }
1778
1779 return r;
1780 }
1781
1782 /*-----------------------------------------------------------------
1783 * An IDR is used to keep track of allocated minor numbers.
1784 *---------------------------------------------------------------*/
1785 static void free_minor(int minor)
1786 {
1787 spin_lock(&_minor_lock);
1788 idr_remove(&_minor_idr, minor);
1789 spin_unlock(&_minor_lock);
1790 }
1791
1792 /*
1793 * See if the device with a specific minor # is free.
1794 */
1795 static int specific_minor(int minor)
1796 {
1797 int r;
1798
1799 if (minor >= (1 << MINORBITS))
1800 return -EINVAL;
1801
1802 idr_preload(GFP_KERNEL);
1803 spin_lock(&_minor_lock);
1804
1805 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1806
1807 spin_unlock(&_minor_lock);
1808 idr_preload_end();
1809 if (r < 0)
1810 return r == -ENOSPC ? -EBUSY : r;
1811 return 0;
1812 }
1813
1814 static int next_free_minor(int *minor)
1815 {
1816 int r;
1817
1818 idr_preload(GFP_KERNEL);
1819 spin_lock(&_minor_lock);
1820
1821 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1822
1823 spin_unlock(&_minor_lock);
1824 idr_preload_end();
1825 if (r < 0)
1826 return r;
1827 *minor = r;
1828 return 0;
1829 }
1830
1831 static const struct block_device_operations dm_blk_dops;
1832
1833 static void dm_wq_work(struct work_struct *work);
1834
1835 static void dm_init_md_queue(struct mapped_device *md)
1836 {
1837 /*
1838 * Request-based dm devices cannot be stacked on top of bio-based dm
1839 * devices. The type of this dm device has not been decided yet.
1840 * The type is decided at the first table loading time.
1841 * To prevent problematic device stacking, clear the queue flag
1842 * for request stacking support until then.
1843 *
1844 * This queue is new, so no concurrency on the queue_flags.
1845 */
1846 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1847
1848 md->queue->queuedata = md;
1849 md->queue->backing_dev_info.congested_fn = dm_any_congested;
1850 md->queue->backing_dev_info.congested_data = md;
1851 blk_queue_make_request(md->queue, dm_request);
1852 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1853 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1854 }
1855
1856 /*
1857 * Allocate and initialise a blank device with a given minor.
1858 */
1859 static struct mapped_device *alloc_dev(int minor)
1860 {
1861 int r;
1862 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
1863 void *old_md;
1864
1865 if (!md) {
1866 DMWARN("unable to allocate device, out of memory.");
1867 return NULL;
1868 }
1869
1870 if (!try_module_get(THIS_MODULE))
1871 goto bad_module_get;
1872
1873 /* get a minor number for the dev */
1874 if (minor == DM_ANY_MINOR)
1875 r = next_free_minor(&minor);
1876 else
1877 r = specific_minor(minor);
1878 if (r < 0)
1879 goto bad_minor;
1880
1881 r = init_srcu_struct(&md->io_barrier);
1882 if (r < 0)
1883 goto bad_io_barrier;
1884
1885 md->type = DM_TYPE_NONE;
1886 mutex_init(&md->suspend_lock);
1887 mutex_init(&md->type_lock);
1888 spin_lock_init(&md->deferred_lock);
1889 atomic_set(&md->holders, 1);
1890 atomic_set(&md->open_count, 0);
1891 atomic_set(&md->event_nr, 0);
1892 atomic_set(&md->uevent_seq, 0);
1893 INIT_LIST_HEAD(&md->uevent_list);
1894 spin_lock_init(&md->uevent_lock);
1895
1896 md->queue = blk_alloc_queue(GFP_KERNEL);
1897 if (!md->queue)
1898 goto bad_queue;
1899
1900 dm_init_md_queue(md);
1901
1902 md->disk = alloc_disk(1);
1903 if (!md->disk)
1904 goto bad_disk;
1905
1906 atomic_set(&md->pending[0], 0);
1907 atomic_set(&md->pending[1], 0);
1908 init_waitqueue_head(&md->wait);
1909 INIT_WORK(&md->work, dm_wq_work);
1910 init_waitqueue_head(&md->eventq);
1911
1912 md->disk->major = _major;
1913 md->disk->first_minor = minor;
1914 md->disk->fops = &dm_blk_dops;
1915 md->disk->queue = md->queue;
1916 md->disk->private_data = md;
1917 sprintf(md->disk->disk_name, "dm-%d", minor);
1918 add_disk(md->disk);
1919 format_dev_t(md->name, MKDEV(_major, minor));
1920
1921 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
1922 if (!md->wq)
1923 goto bad_thread;
1924
1925 md->bdev = bdget_disk(md->disk, 0);
1926 if (!md->bdev)
1927 goto bad_bdev;
1928
1929 bio_init(&md->flush_bio);
1930 md->flush_bio.bi_bdev = md->bdev;
1931 md->flush_bio.bi_rw = WRITE_FLUSH;
1932
1933 dm_stats_init(&md->stats);
1934
1935 /* Populate the mapping, nobody knows we exist yet */
1936 spin_lock(&_minor_lock);
1937 old_md = idr_replace(&_minor_idr, md, minor);
1938 spin_unlock(&_minor_lock);
1939
1940 BUG_ON(old_md != MINOR_ALLOCED);
1941
1942 return md;
1943
1944 bad_bdev:
1945 destroy_workqueue(md->wq);
1946 bad_thread:
1947 del_gendisk(md->disk);
1948 put_disk(md->disk);
1949 bad_disk:
1950 blk_cleanup_queue(md->queue);
1951 bad_queue:
1952 cleanup_srcu_struct(&md->io_barrier);
1953 bad_io_barrier:
1954 free_minor(minor);
1955 bad_minor:
1956 module_put(THIS_MODULE);
1957 bad_module_get:
1958 kfree(md);
1959 return NULL;
1960 }
1961
1962 static void unlock_fs(struct mapped_device *md);
1963
1964 static void free_dev(struct mapped_device *md)
1965 {
1966 int minor = MINOR(disk_devt(md->disk));
1967
1968 unlock_fs(md);
1969 bdput(md->bdev);
1970 destroy_workqueue(md->wq);
1971 if (md->io_pool)
1972 mempool_destroy(md->io_pool);
1973 if (md->bs)
1974 bioset_free(md->bs);
1975 blk_integrity_unregister(md->disk);
1976 del_gendisk(md->disk);
1977 cleanup_srcu_struct(&md->io_barrier);
1978 free_minor(minor);
1979
1980 spin_lock(&_minor_lock);
1981 md->disk->private_data = NULL;
1982 spin_unlock(&_minor_lock);
1983
1984 put_disk(md->disk);
1985 blk_cleanup_queue(md->queue);
1986 dm_stats_cleanup(&md->stats);
1987 module_put(THIS_MODULE);
1988 kfree(md);
1989 }
1990
1991 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1992 {
1993 struct dm_md_mempools *p = dm_table_get_md_mempools(t);
1994
1995 if (md->io_pool && md->bs) {
1996 /* The md already has necessary mempools. */
1997 if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
1998 /*
1999 * Reload the bioset because front_pad may have changed
2000 * when a different table was loaded.
2001 */
2002 bioset_free(md->bs);
2003 md->bs = p->bs;
2004 p->bs = NULL;
2005 } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
2006 /*
2007 * There's no need to reload with request-based dm
2008 * because the size of front_pad doesn't change.
2009 * Note for the future: if you do reload the bioset,
2010 * prep-ed requests in the queue may still refer
2011 * to bios from the old bioset, so you must walk
2012 * through the queue to unprep.
2013 */
2014 }
2015 goto out;
2016 }
2017
2018 BUG_ON(!p || md->io_pool || md->bs);
2019
2020 md->io_pool = p->io_pool;
2021 p->io_pool = NULL;
2022 md->bs = p->bs;
2023 p->bs = NULL;
2024
2025 out:
2026 /* mempool bind completed, the table no longer needs any mempools */
2027 dm_table_free_md_mempools(t);
2028 }
2029
2030 /*
2031 * Bind a table to the device.
2032 */
2033 static void event_callback(void *context)
2034 {
2035 unsigned long flags;
2036 LIST_HEAD(uevents);
2037 struct mapped_device *md = (struct mapped_device *) context;
2038
2039 spin_lock_irqsave(&md->uevent_lock, flags);
2040 list_splice_init(&md->uevent_list, &uevents);
2041 spin_unlock_irqrestore(&md->uevent_lock, flags);
2042
2043 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2044
2045 atomic_inc(&md->event_nr);
2046 wake_up(&md->eventq);
2047 }
2048
2049 /*
2050 * Protected by md->suspend_lock obtained by dm_swap_table().
2051 */
2052 static void __set_size(struct mapped_device *md, sector_t size)
2053 {
2054 set_capacity(md->disk, size);
2055
2056 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2057 }
2058
2059 /*
2060 * Return 1 if the queue has a compulsory merge_bvec_fn function.
2061 *
2062 * If this function returns 0, then the device is either a non-dm
2063 * device without a merge_bvec_fn, or it is a dm device that is
2064 * able to split any bios it receives that are too big.
2065 */
2066 int dm_queue_merge_is_compulsory(struct request_queue *q)
2067 {
2068 struct mapped_device *dev_md;
2069
2070 if (!q->merge_bvec_fn)
2071 return 0;
2072
2073 if (q->make_request_fn == dm_request) {
2074 dev_md = q->queuedata;
2075 if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
2076 return 0;
2077 }
2078
2079 return 1;
2080 }
2081
2082 static int dm_device_merge_is_compulsory(struct dm_target *ti,
2083 struct dm_dev *dev, sector_t start,
2084 sector_t len, void *data)
2085 {
2086 struct block_device *bdev = dev->bdev;
2087 struct request_queue *q = bdev_get_queue(bdev);
2088
2089 return dm_queue_merge_is_compulsory(q);
2090 }
2091
2092 /*
2093 * Return 1 if it is acceptable to ignore merge_bvec_fn based
2094 * on the properties of the underlying devices.
2095 */
2096 static int dm_table_merge_is_optional(struct dm_table *table)
2097 {
2098 unsigned i = 0;
2099 struct dm_target *ti;
2100
2101 while (i < dm_table_get_num_targets(table)) {
2102 ti = dm_table_get_target(table, i++);
2103
2104 if (ti->type->iterate_devices &&
2105 ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
2106 return 0;
2107 }
2108
2109 return 1;
2110 }
2111
2112 /*
2113 * Returns old map, which caller must destroy.
2114 */
2115 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2116 struct queue_limits *limits)
2117 {
2118 struct dm_table *old_map;
2119 struct request_queue *q = md->queue;
2120 sector_t size;
2121 int merge_is_optional;
2122
2123 size = dm_table_get_size(t);
2124
2125 /*
2126 * Wipe any geometry if the size of the table changed.
2127 */
2128 if (size != dm_get_size(md))
2129 memset(&md->geometry, 0, sizeof(md->geometry));
2130
2131 __set_size(md, size);
2132
2133 dm_table_event_callback(t, event_callback, md);
2134
2135 /*
2136 * The queue hasn't been stopped yet if the old table type wasn't
2137 * request-based during suspension, so stop it here to prevent
2138 * I/O mapping before resume.
2139 * This must be done before setting the queue restrictions,
2140 * because request-based dm may be run just after the setting.
2141 */
2142 if (dm_table_request_based(t) && !blk_queue_stopped(q))
2143 stop_queue(q);
2144
2145 __bind_mempools(md, t);
2146
2147 merge_is_optional = dm_table_merge_is_optional(t);
2148
2149 old_map = md->map;
2150 rcu_assign_pointer(md->map, t);
2151 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2152
2153 dm_table_set_restrictions(t, q, limits);
2154 if (merge_is_optional)
2155 set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2156 else
2157 clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2158 dm_sync_table(md);
2159
2160 return old_map;
2161 }
2162
2163 /*
2164 * Returns unbound table for the caller to free.
2165 */
2166 static struct dm_table *__unbind(struct mapped_device *md)
2167 {
2168 struct dm_table *map = md->map;
2169
2170 if (!map)
2171 return NULL;
2172
2173 dm_table_event_callback(map, NULL, NULL);
2174 rcu_assign_pointer(md->map, NULL);
2175 dm_sync_table(md);
2176
2177 return map;
2178 }
2179
2180 /*
2181 * Constructor for a new device.
2182 */
2183 int dm_create(int minor, struct mapped_device **result)
2184 {
2185 struct mapped_device *md;
2186
2187 md = alloc_dev(minor);
2188 if (!md)
2189 return -ENXIO;
2190
2191 dm_sysfs_init(md);
2192
2193 *result = md;
2194 return 0;
2195 }
2196
2197 /*
2198 * Functions to manage md->type.
2199 * All are required to hold md->type_lock.
2200 */
2201 void dm_lock_md_type(struct mapped_device *md)
2202 {
2203 mutex_lock(&md->type_lock);
2204 }
2205
2206 void dm_unlock_md_type(struct mapped_device *md)
2207 {
2208 mutex_unlock(&md->type_lock);
2209 }
2210
2211 void dm_set_md_type(struct mapped_device *md, unsigned type)
2212 {
2213 BUG_ON(!mutex_is_locked(&md->type_lock));
2214 md->type = type;
2215 }
2216
2217 unsigned dm_get_md_type(struct mapped_device *md)
2218 {
2219 BUG_ON(!mutex_is_locked(&md->type_lock));
2220 return md->type;
2221 }
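/*
 * Illustrative sketch (assumption about callers, not taken from this
 * file): the type accessors above are meant to be bracketed by the
 * type_lock helpers, e.g.
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) != type)
 *		dm_set_md_type(md, type);
 *	dm_unlock_md_type(md);
 */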
2222
2223 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2224 {
2225 return md->immutable_target_type;
2226 }
2227
2228 /*
2229 * The queue_limits are only valid as long as you have a reference
2230 * count on 'md'.
2231 */
2232 struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2233 {
2234 BUG_ON(!atomic_read(&md->holders));
2235 return &md->queue->limits;
2236 }
2237 EXPORT_SYMBOL_GPL(dm_get_queue_limits);
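/*
 * Illustrative sketch (not part of the driver): per the comment above,
 * the returned queue_limits are only safe to use while the caller keeps
 * the device referenced, e.g.
 *
 *	dm_get(md);
 *	limits = dm_get_queue_limits(md);
 *	... use *limits ...
 *	dm_put(md);
 */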
2238
2239 /*
2240 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2241 */
2242 static int dm_init_request_based_queue(struct mapped_device *md)
2243 {
2244 struct request_queue *q = NULL;
2245
2246 if (md->queue->elevator)
2247 return 1;
2248
2249 /* Fully initialize the queue */
2250 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2251 if (!q)
2252 return 0;
2253
2254 md->queue = q;
2255 dm_init_md_queue(md);
2256 blk_queue_softirq_done(md->queue, dm_softirq_done);
2257 blk_queue_prep_rq(md->queue, dm_prep_fn);
2258 blk_queue_lld_busy(md->queue, dm_lld_busy);
2259
2260 elv_register_queue(md->queue);
2261
2262 return 1;
2263 }
2264
2265 /*
2266 * Setup the DM device's queue based on md's type
2267 */
2268 int dm_setup_md_queue(struct mapped_device *md)
2269 {
2270 if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
2271 !dm_init_request_based_queue(md)) {
2272 DMWARN("Cannot initialize queue for request-based mapped device");
2273 return -EINVAL;
2274 }
2275
2276 return 0;
2277 }
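/*
 * Note: only DM_TYPE_REQUEST_BASED devices need the elevator/request_fn
 * setup performed by dm_init_request_based_queue(); for any other type
 * dm_setup_md_queue() simply returns 0 and the queue is left as it was
 * allocated.
 */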
2278
2279 static struct mapped_device *dm_find_md(dev_t dev)
2280 {
2281 struct mapped_device *md;
2282 unsigned minor = MINOR(dev);
2283
2284 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2285 return NULL;
2286
2287 spin_lock(&_minor_lock);
2288
2289 md = idr_find(&_minor_idr, minor);
2290 if (md && (md == MINOR_ALLOCED ||
2291 (MINOR(disk_devt(dm_disk(md))) != minor) ||
2292 dm_deleting_md(md) ||
2293 test_bit(DMF_FREEING, &md->flags))) {
2294 md = NULL;
2295 goto out;
2296 }
2297
2298 out:
2299 spin_unlock(&_minor_lock);
2300
2301 return md;
2302 }
2303
2304 struct mapped_device *dm_get_md(dev_t dev)
2305 {
2306 struct mapped_device *md = dm_find_md(dev);
2307
2308 if (md)
2309 dm_get(md);
2310
2311 return md;
2312 }
2313 EXPORT_SYMBOL_GPL(dm_get_md);
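/*
 * Note: dm_get_md() returns the mapped_device with an extra reference
 * taken via dm_get(); the caller must drop it with dm_put() when done.
 */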
2314
2315 void *dm_get_mdptr(struct mapped_device *md)
2316 {
2317 return md->interface_ptr;
2318 }
2319
2320 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2321 {
2322 md->interface_ptr = ptr;
2323 }
2324
2325 void dm_get(struct mapped_device *md)
2326 {
2327 atomic_inc(&md->holders);
2328 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2329 }
2330
2331 const char *dm_device_name(struct mapped_device *md)
2332 {
2333 return md->name;
2334 }
2335 EXPORT_SYMBOL_GPL(dm_device_name);
2336
2337 static void __dm_destroy(struct mapped_device *md, bool wait)
2338 {
2339 struct dm_table *map;
2340 int srcu_idx;
2341
2342 might_sleep();
2343
2344 spin_lock(&_minor_lock);
2345 map = dm_get_live_table(md, &srcu_idx);
2346 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2347 set_bit(DMF_FREEING, &md->flags);
2348 spin_unlock(&_minor_lock);
2349
2350 if (!dm_suspended_md(md)) {
2351 dm_table_presuspend_targets(map);
2352 dm_table_postsuspend_targets(map);
2353 }
2354
2355 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2356 dm_put_live_table(md, srcu_idx);
2357
2358 /*
2359 * Rare, but there may still be I/O requests completing, for example.
2360 * Wait for all references to disappear.
2361 * No one should increment the reference count of the mapped_device
2362 * after its state becomes DMF_FREEING.
2363 */
2364 if (wait)
2365 while (atomic_read(&md->holders))
2366 msleep(1);
2367 else if (atomic_read(&md->holders))
2368 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2369 dm_device_name(md), atomic_read(&md->holders));
2370
2371 dm_sysfs_exit(md);
2372 dm_table_destroy(__unbind(md));
2373 free_dev(md);
2374 }
2375
2376 void dm_destroy(struct mapped_device *md)
2377 {
2378 __dm_destroy(md, true);
2379 }
2380
2381 void dm_destroy_immediate(struct mapped_device *md)
2382 {
2383 __dm_destroy(md, false);
2384 }
2385
2386 void dm_put(struct mapped_device *md)
2387 {
2388 atomic_dec(&md->holders);
2389 }
2390 EXPORT_SYMBOL_GPL(dm_put);
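/*
 * md->holders ties the lifetime of the mapped_device to its users:
 * dm_get()/dm_put() adjust the count, and __dm_destroy() with wait=true
 * sleeps in msleep(1) until it drops to zero before freeing the device.
 */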
2391
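/*
 * Open-coded wait_event() so the caller can pick the task state: sleep
 * in io_schedule() until md_in_flight() reports no pending I/O, and
 * return -EINTR if an interruptible wait is broken by a signal.
 */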
2392 static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2393 {
2394 int r = 0;
2395 DECLARE_WAITQUEUE(wait, current);
2396
2397 add_wait_queue(&md->wait, &wait);
2398
2399 while (1) {
2400 set_current_state(interruptible);
2401
2402 if (!md_in_flight(md))
2403 break;
2404
2405 if (interruptible == TASK_INTERRUPTIBLE &&
2406 signal_pending(current)) {
2407 r = -EINTR;
2408 break;
2409 }
2410
2411 io_schedule();
2412 }
2413 set_current_state(TASK_RUNNING);
2414
2415 remove_wait_queue(&md->wait, &wait);
2416
2417 return r;
2418 }
2419
2420 /*
2421 * Process the deferred bios
2422 */
2423 static void dm_wq_work(struct work_struct *work)
2424 {
2425 struct mapped_device *md = container_of(work, struct mapped_device,
2426 work);
2427 struct bio *c;
2428 int srcu_idx;
2429 struct dm_table *map;
2430
2431 map = dm_get_live_table(md, &srcu_idx);
2432
2433 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2434 spin_lock_irq(&md->deferred_lock);
2435 c = bio_list_pop(&md->deferred);
2436 spin_unlock_irq(&md->deferred_lock);
2437
2438 if (!c)
2439 break;
2440
2441 if (dm_request_based(md))
2442 generic_make_request(c);
2443 else
2444 __split_and_process_bio(md, map, c);
2445 }
2446
2447 dm_put_live_table(md, srcu_idx);
2448 }
2449
2450 static void dm_queue_flush(struct mapped_device *md)
2451 {
2452 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2453 smp_mb__after_clear_bit();
2454 queue_work(md->wq, &md->work);
2455 }
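/*
 * dm_queue_flush() is the counterpart of setting DMF_BLOCK_IO_FOR_SUSPEND:
 * clearing the bit re-enables dm_wq_work() above, and queueing md->work
 * makes the worker replay whatever bios were parked on md->deferred
 * while I/O was blocked.
 */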
2456
2457 /*
2458 * Swap in a new table, returning the old one for the caller to destroy.
2459 */
2460 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2461 {
2462 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2463 struct queue_limits limits;
2464 int r;
2465
2466 mutex_lock(&md->suspend_lock);
2467
2468 /* device must be suspended */
2469 if (!dm_suspended_md(md))
2470 goto out;
2471
2472 /*
2473 * If the new table has no data devices, retain the existing limits.
2474 * This helps multipath with queue_if_no_path: if all paths disappear,
2475 * new I/O is still queued based on these limits, and then some paths
2476 * reappear.
2477 */
2478 if (dm_table_has_no_data_devices(table)) {
2479 live_map = dm_get_live_table_fast(md);
2480 if (live_map)
2481 limits = md->queue->limits;
2482 dm_put_live_table_fast(md);
2483 }
2484
2485 if (!live_map) {
2486 r = dm_calculate_queue_limits(table, &limits);
2487 if (r) {
2488 map = ERR_PTR(r);
2489 goto out;
2490 }
2491 }
2492
2493 map = __bind(md, table, &limits);
2494
2495 out:
2496 mutex_unlock(&md->suspend_lock);
2497 return map;
2498 }
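/*
 * Illustrative sketch (not part of the driver): dm_swap_table() only
 * succeeds on a suspended device and hands the previous table back, so
 * the expected sequence is roughly
 *
 *	dm_suspend(md, flags);
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR(old_map) && old_map)
 *		dm_table_destroy(old_map);
 *	dm_resume(md);
 */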
2499
2500 /*
2501 * Functions to lock and unlock any filesystem running on the
2502 * device.
2503 */
2504 static int lock_fs(struct mapped_device *md)
2505 {
2506 int r;
2507
2508 WARN_ON(md->frozen_sb);
2509
2510 md->frozen_sb = freeze_bdev(md->bdev);
2511 if (IS_ERR(md->frozen_sb)) {
2512 r = PTR_ERR(md->frozen_sb);
2513 md->frozen_sb = NULL;
2514 return r;
2515 }
2516
2517 set_bit(DMF_FROZEN, &md->flags);
2518
2519 return 0;
2520 }
2521
2522 static void unlock_fs(struct mapped_device *md)
2523 {
2524 if (!test_bit(DMF_FROZEN, &md->flags))
2525 return;
2526
2527 thaw_bdev(md->bdev, md->frozen_sb);
2528 md->frozen_sb = NULL;
2529 clear_bit(DMF_FROZEN, &md->flags);
2530 }
2531
2532 /*
2533 * We need to be able to change a mapping table under a mounted
2534 * filesystem. For example we might want to move some data in
2535 * the background. Before the table can be swapped with
2536 * dm_bind_table, dm_suspend must be called to flush any in-flight
2537 * bios and ensure that any further I/O gets deferred.
2538 */
2539 /*
2540 * Suspend mechanism in request-based dm.
2541 *
2542 * 1. Flush all I/Os by lock_fs() if needed.
2543 * 2. Stop dispatching any I/O by stopping the request_queue.
2544 * 3. Wait for all in-flight I/Os to be completed or requeued.
2545 *
2546 * To abort suspend, start the request_queue.
2547 */
2548 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2549 {
2550 struct dm_table *map = NULL;
2551 int r = 0;
2552 int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
2553 int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
2554
2555 mutex_lock(&md->suspend_lock);
2556
2557 if (dm_suspended_md(md)) {
2558 r = -EINVAL;
2559 goto out_unlock;
2560 }
2561
2562 map = md->map;
2563
2564 /*
2565 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2566 * This flag is cleared before dm_suspend returns.
2567 */
2568 if (noflush)
2569 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2570
2571 /* This does not get reverted if there's an error later. */
2572 dm_table_presuspend_targets(map);
2573
2574 /*
2575 * Flush I/O to the device.
2576 * Any I/O submitted after lock_fs() may not be flushed.
2577 * noflush takes precedence over do_lockfs.
2578 * (lock_fs() flushes I/Os and waits for them to complete.)
2579 */
2580 if (!noflush && do_lockfs) {
2581 r = lock_fs(md);
2582 if (r)
2583 goto out_unlock;
2584 }
2585
2586 /*
2587 * Here we must make sure that no processes are submitting requests
2588 * to target drivers i.e. no one may be executing
2589 * __split_and_process_bio. This is called from dm_request and
2590 * dm_wq_work.
2591 *
2592 * To get all processes out of __split_and_process_bio in dm_request,
2593 * we wait on md->io_barrier (synchronize_srcu below). To prevent any
2594 * process from reentering __split_and_process_bio from dm_request and
2595 * to quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
2596 * and call flush_workqueue(md->wq).
2597 */
2598 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2599 synchronize_srcu(&md->io_barrier);
2600
2601 /*
2602 * Stop md->queue before flushing md->wq in case request-based
2603 * dm defers requests to md->wq from md->queue.
2604 */
2605 if (dm_request_based(md))
2606 stop_queue(md->queue);
2607
2608 flush_workqueue(md->wq);
2609
2610 /*
2611 * At this point no more requests are entering target request routines.
2612 * We call dm_wait_for_completion to wait for all existing requests
2613 * to finish.
2614 */
2615 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
2616
2617 if (noflush)
2618 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2619 synchronize_srcu(&md->io_barrier);
2620
2621 /* were we interrupted? */
2622 if (r < 0) {
2623 dm_queue_flush(md);
2624
2625 if (dm_request_based(md))
2626 start_queue(md->queue);
2627
2628 unlock_fs(md);
2629 goto out_unlock; /* pushback list is already flushed, so skip flush */
2630 }
2631
2632 /*
2633 * If dm_wait_for_completion returned 0, the device is completely
2634 * quiescent now. There is no request-processing activity. All new
2635 * requests are being added to the md->deferred list.
2636 */
2637
2638 set_bit(DMF_SUSPENDED, &md->flags);
2639
2640 dm_table_postsuspend_targets(map);
2641
2642 out_unlock:
2643 mutex_unlock(&md->suspend_lock);
2644 return r;
2645 }
2646
2647 int dm_resume(struct mapped_device *md)
2648 {
2649 int r = -EINVAL;
2650 struct dm_table *map = NULL;
2651
2652 mutex_lock(&md->suspend_lock);
2653 if (!dm_suspended_md(md))
2654 goto out;
2655
2656 map = md->map;
2657 if (!map || !dm_table_get_size(map))
2658 goto out;
2659
2660 r = dm_table_resume_targets(map);
2661 if (r)
2662 goto out;
2663
2664 dm_queue_flush(md);
2665
2666 /*
2667 * Flushing deferred I/Os must be done after targets are resumed
2668 * so that mapping of targets can work correctly.
2669 * Request-based dm is queueing the deferred I/Os in its request_queue.
2670 */
2671 if (dm_request_based(md))
2672 start_queue(md->queue);
2673
2674 unlock_fs(md);
2675
2676 clear_bit(DMF_SUSPENDED, &md->flags);
2677
2678 r = 0;
2679 out:
2680 mutex_unlock(&md->suspend_lock);
2681
2682 return r;
2683 }
2684
2685 /*
2686 * Internal suspend/resume works like userspace-driven suspend. It waits
2687 * until all bios finish and prevents issuing new bios to the target drivers.
2688 * It may be used only from the kernel.
2689 *
2690 * Internal suspend holds md->suspend_lock, which prevents interaction with
2691 * userspace-driven suspend.
2692 */
2693
2694 void dm_internal_suspend(struct mapped_device *md)
2695 {
2696 mutex_lock(&md->suspend_lock);
2697 if (dm_suspended_md(md))
2698 return;
2699
2700 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2701 synchronize_srcu(&md->io_barrier);
2702 flush_workqueue(md->wq);
2703 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2704 }
2705
2706 void dm_internal_resume(struct mapped_device *md)
2707 {
2708 if (dm_suspended_md(md))
2709 goto done;
2710
2711 dm_queue_flush(md);
2712
2713 done:
2714 mutex_unlock(&md->suspend_lock);
2715 }
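/*
 * Note the asymmetric locking above: dm_internal_suspend() returns with
 * md->suspend_lock still held, and dm_internal_resume() is what releases
 * it, so the two must always be used as a pair.
 */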
2716
2717 /*-----------------------------------------------------------------
2718 * Event notification.
2719 *---------------------------------------------------------------*/
2720 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2721 unsigned cookie)
2722 {
2723 char udev_cookie[DM_COOKIE_LENGTH];
2724 char *envp[] = { udev_cookie, NULL };
2725
2726 if (!cookie)
2727 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2728 else {
2729 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2730 DM_COOKIE_ENV_VAR_NAME, cookie);
2731 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2732 action, envp);
2733 }
2734 }
2735
2736 uint32_t dm_next_uevent_seq(struct mapped_device *md)
2737 {
2738 return atomic_add_return(1, &md->uevent_seq);
2739 }
2740
2741 uint32_t dm_get_event_nr(struct mapped_device *md)
2742 {
2743 return atomic_read(&md->event_nr);
2744 }
2745
2746 int dm_wait_event(struct mapped_device *md, int event_nr)
2747 {
2748 return wait_event_interruptible(md->eventq,
2749 (event_nr != atomic_read(&md->event_nr)));
2750 }
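/*
 * Illustrative sketch (not part of the driver): event consumers sample
 * the counter and then sleep until it changes, e.g.
 *
 *	event_nr = dm_get_event_nr(md);
 *	... hand event_nr back to the caller ...
 *	if (dm_wait_event(md, event_nr))
 *		return -ERESTARTSYS;	(interrupted by a signal)
 *
 * event_callback() above bumps md->event_nr and wakes md->eventq each
 * time a table event fires.
 */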
2751
2752 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2753 {
2754 unsigned long flags;
2755
2756 spin_lock_irqsave(&md->uevent_lock, flags);
2757 list_add(elist, &md->uevent_list);
2758 spin_unlock_irqrestore(&md->uevent_lock, flags);
2759 }
2760
2761 /*
2762 * The gendisk is only valid as long as you have a reference
2763 * count on 'md'.
2764 */
2765 struct gendisk *dm_disk(struct mapped_device *md)
2766 {
2767 return md->disk;
2768 }
2769
2770 struct kobject *dm_kobject(struct mapped_device *md)
2771 {
2772 return &md->kobj;
2773 }
2774
2775 /*
2776 * struct mapped_device should not be exported outside of dm.c,
2777 * so use this check to verify that kobj is part of the md structure.
2778 */
2779 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2780 {
2781 struct mapped_device *md;
2782
2783 md = container_of(kobj, struct mapped_device, kobj);
2784 if (&md->kobj != kobj)
2785 return NULL;
2786
2787 if (test_bit(DMF_FREEING, &md->flags) ||
2788 dm_deleting_md(md))
2789 return NULL;
2790
2791 dm_get(md);
2792 return md;
2793 }
2794
2795 int dm_suspended_md(struct mapped_device *md)
2796 {
2797 return test_bit(DMF_SUSPENDED, &md->flags);
2798 }
2799
2800 int dm_test_deferred_remove_flag(struct mapped_device *md)
2801 {
2802 return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2803 }
2804
2805 int dm_suspended(struct dm_target *ti)
2806 {
2807 return dm_suspended_md(dm_table_get_md(ti->table));
2808 }
2809 EXPORT_SYMBOL_GPL(dm_suspended);
2810
2811 int dm_noflush_suspending(struct dm_target *ti)
2812 {
2813 return __noflush_suspending(dm_table_get_md(ti->table));
2814 }
2815 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2816
2817 struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
2818 {
2819 struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
2820 struct kmem_cache *cachep;
2821 unsigned int pool_size;
2822 unsigned int front_pad;
2823
2824 if (!pools)
2825 return NULL;
2826
2827 if (type == DM_TYPE_BIO_BASED) {
2828 cachep = _io_cache;
2829 pool_size = dm_get_reserved_bio_based_ios();
2830 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2831 } else if (type == DM_TYPE_REQUEST_BASED) {
2832 cachep = _rq_tio_cache;
2833 pool_size = dm_get_reserved_rq_based_ios();
2834 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2835 /* per_bio_data_size is not used. See __bind_mempools(). */
2836 WARN_ON(per_bio_data_size != 0);
2837 } else
2838 goto out;
2839
2840 pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
2841 if (!pools->io_pool)
2842 goto out;
2843
2844 pools->bs = bioset_create(pool_size, front_pad);
2845 if (!pools->bs)
2846 goto out;
2847
2848 if (integrity && bioset_integrity_create(pools->bs, pool_size))
2849 goto out;
2850
2851 return pools;
2852
2853 out:
2854 dm_free_md_mempools(pools);
2855
2856 return NULL;
2857 }
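/*
 * Note: the pool sizes used above come from dm_get_reserved_bio_based_ios()
 * and dm_get_reserved_rq_based_ios(), which are backed by the
 * reserved_bio_based_ios and reserved_rq_based_ios module parameters
 * declared near the end of this file.
 */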
2858
2859 void dm_free_md_mempools(struct dm_md_mempools *pools)
2860 {
2861 if (!pools)
2862 return;
2863
2864 if (pools->io_pool)
2865 mempool_destroy(pools->io_pool);
2866
2867 if (pools->bs)
2868 bioset_free(pools->bs);
2869
2870 kfree(pools);
2871 }
2872
2873 static const struct block_device_operations dm_blk_dops = {
2874 .open = dm_blk_open,
2875 .release = dm_blk_close,
2876 .ioctl = dm_blk_ioctl,
2877 .getgeo = dm_blk_getgeo,
2878 .owner = THIS_MODULE
2879 };
2880
2881 EXPORT_SYMBOL(dm_get_mapinfo);
2882
2883 /*
2884 * module hooks
2885 */
2886 module_init(dm_init);
2887 module_exit(dm_exit);
2888
2889 module_param(major, uint, 0);
2890 MODULE_PARM_DESC(major, "The major number of the device mapper");
2891
2892 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
2893 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
2894
2895 module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
2896 MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
2897
2898 MODULE_DESCRIPTION(DM_NAME " driver");
2899 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2900 MODULE_LICENSE("GPL");