Linux 2.6.30-rc1: drivers/md/dm.c
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <trace/block.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);

/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

DEFINE_TRACE(block_bio_complete);

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct request *rq;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}
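
/*
 * dm_get_mapinfo() above relies on bio->bi_private pointing at the
 * struct dm_target_io of an in-flight clone: __map_bio() below sets
 * bi_private before dispatch, and clone_endio() repoints it at the
 * bio_set once the clone is being torn down.
 */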

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* sysfs handle */
	struct kobject kobj;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	atomic_dec(&md->open_count);
	dm_put(md);

	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}
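
/*
 * Note that md->pending does double duty: it feeds the in_flight
 * statistic above and is the counter dm_wait_for_completion() sleeps
 * on during suspend, hence the wake_up() once it reaches zero.
 */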

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	up_write(&md->io_lock);

	return 0; /* deferred successfully */
}
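
/*
 * queue_io() returns 0 when the bio has been deferred and 1 when the
 * device turned out not to be blocked after all, in which case
 * dm_request() retakes the lock and maps the bio itself.
 */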

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		end_io_acct(io);

		io_error = io->error;
		bio = io->bio;

		free_io(md, io);

		if (io_error != DM_ENDIO_REQUEUE) {
			trace_block_bio_complete(md->queue, bio);

			bio_endio(bio, io_error);
		}
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->split_io) {
		sector_t boundary;

		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
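
/*
 * The boundary arithmetic in max_io_len() assumes ti->split_io is a
 * power of two. E.g. with offset 3 and split_io 8: (3 + 8) & ~7 = 8,
 * giving boundary = 5, so the io is clipped at the next 8-sector
 * chunk boundary.
 */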

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev,
				  clone->bi_sector, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
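
/*
 * Recap of the map_fn convention handled above: DM_MAPIO_SUBMITTED (0)
 * means the target has taken ownership of the clone, DM_MAPIO_REMAPPED
 * means dispatch it here, and DM_MAPIO_REQUEUE or a negative errno
 * unwinds the clone via dec_pending().
 */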

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	return clone;
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}

static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}
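
/*
 * Worked example for the three cases above: a 12-sector bio starting
 * 4 sectors before the end of a target gets max = 4. If its first bvec
 * is 8 sectors, neither the single-clone nor the whole-bvec path fits,
 * so the third branch split_bvec()s 4 sectors, looks up the next
 * target and carries on from there.
 */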

/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map)) {
		bio_io_error(bio);
		return;
	}
	if (unlikely(bio_barrier(bio) && !dm_table_barrier_ok(ci.map))) {
		dm_table_put(ci.map);
		bio_endio(bio, -EOPNOTSUPP);
		return;
	}
	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}
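
/*
 * The "entire first page" fallback above guarantees forward progress:
 * a bio that is still empty (bvm->bi_size == 0) may always take one
 * page, even if the clipping or the target's merge fn said no, and dm
 * will split that page out later if it straddles a boundary.
 */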

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int r = -EIO;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) != READA)
			r = queue_io(md, bio);

		if (r <= 0)
			goto out_req;

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);
	return 0;

out_req:
	if (r < 0)
		bio_io_error(bio);

	return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		map = dm_get_table(md);
		if (map) {
			r = dm_table_any_congested(map, bdi_bits);
			dm_table_put(map);
		}
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}
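
/*
 * Both minor allocators above follow the old two-step IDR protocol:
 * idr_pre_get() preallocates nodes with GFP_KERNEL outside the lock,
 * so the idr_get_new*() calls under _minor_lock never need to
 * allocate. The MINOR_ALLOCED placeholder is swapped for the real md
 * by idr_replace() in alloc_dev() once initialisation has succeeded.
 */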

static struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->deferred_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad_io_pool;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad_tio_pool;

	md->bs = bioset_create(16, 0);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_thread:
	put_disk(md->disk);
bad_disk:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad_tio_pool:
	mempool_destroy(md->io_pool);
bad_io_pool:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	if (md->suspended_bdev) {
		unlock_fs(md);
		bdput(md->suspended_bdev);
	}
	destroy_workqueue(md->wq);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);

	if (!size) {
		dm_table_destroy(t);
		return 0;
	}

	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_destroy(map);
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		dm_sysfs_exit(md);
		dm_table_put(map);
		__unbind(md);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	dm_unplug_all(md->queue);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		smp_mb();
		if (!atomic_read(&md->pending))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}
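
/*
 * The loop above is the classic open-coded wait: set the task state,
 * re-check md->pending, then io_schedule(). The matching wake_up()
 * is issued by end_io_acct() when the last in-flight io completes.
 */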

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;

	down_write(&md->io_lock);

next_bio:
	spin_lock_irq(&md->deferred_lock);
	c = bio_list_pop(&md->deferred);
	spin_unlock_irq(&md->deferred_lock);

	if (c) {
		__split_and_process_bio(md, c);
		goto next_bio;
	}

	clear_bit(DMF_BLOCK_IO, &md->flags);

	up_write(&md->io_lock);
}

static void dm_queue_flush(struct mapped_device *md)
{
	queue_work(md->wq, &md->work);
	flush_workqueue(md->wq);
}

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	mutex_unlock(&md->suspend_lock);
	return r;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem. For example we might want to move some data in
 * the background. Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto out;
		}

		/*
		 * Flush I/O to the device. noflush supersedes do_lockfs,
		 * because lock_fs() needs to flush I/Os.
		 */
		if (do_lockfs) {
			r = lock_fs(md);
			if (r)
				goto out;
		}
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);
	up_write(&md->io_lock);

	/*
	 * Wait for the already-mapped ios to complete.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted? */
	if (r < 0) {
		dm_queue_flush(md);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
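
/*
 * Typical caller ordering (e.g. the table-load ioctl path, sketched
 * here for orientation rather than as an exact call chain):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	dm_swap_table(md, new_table);
 *	dm_resume(md);
 *
 * dm_swap_table() checks dm_suspended() itself, so the swap fails
 * with -EINVAL unless the suspend happened first.
 */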

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	dm_kobject_uevent(md);

	r = 0;

out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
	kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");