block: Abstract out bvec iterator
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] drivers/md/dm-thin.c
1 /*
2 * Copyright (C) 2011-2012 Red Hat UK.
3 *
4 * This file is released under the GPL.
5 */
6
7 #include "dm-thin-metadata.h"
8 #include "dm-bio-prison.h"
9 #include "dm.h"
10
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/dm-kcopyd.h>
14 #include <linux/list.h>
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18
19 #define DM_MSG_PREFIX "thin"
20
21 /*
22 * Tunable constants
23 */
24 #define ENDIO_HOOK_POOL_SIZE 1024
25 #define MAPPING_POOL_SIZE 1024
26 #define PRISON_CELLS 1024
27 #define COMMIT_PERIOD HZ
28
29 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
30 "A percentage of time allocated for copy on write");
31
32 /*
33 * The block size of the device holding pool data must be
34 * between 64KB and 1GB.
35 */
36 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
37 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
38
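/*
 * Illustrative userspace check (not part of the driver): with 512-byte
 * sectors (SECTOR_SHIFT == 9) the limits above work out to 128 and
 * 2097152 sectors respectively.
 */
#include <stdio.h>

int main(void)
{
	printf("min = %d sectors, max = %d sectors\n",
	       64 * 1024 >> 9, 1024 * 1024 * 1024 >> 9);
	return 0;
}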
39 /*
40 * Device id is restricted to 24 bits.
41 */
42 #define MAX_DEV_ID ((1 << 24) - 1)
43
44 /*
45 * How do we handle breaking sharing of data blocks?
46 * =================================================
47 *
48 * We use a standard copy-on-write btree to store the mappings for the
49 * devices (note I'm talking about copy-on-write of the metadata here, not
50 * the data). When you take an internal snapshot you clone the root node
51 * of the origin btree. After this there is no concept of an origin or a
52 * snapshot. They are just two device trees that happen to point to the
53 * same data blocks.
54 *
55 * When we get a write in we decide if it's to a shared data block using
56 * some timestamp magic. If it is, we have to break sharing.
57 *
58 * Let's say we write to a shared block in what was the origin. The
59 * steps are:
60 *
61 * i) plug io further to this physical block. (see bio_prison code).
62 *
63 * ii) quiesce any read io to that shared data block. Obviously
64 * including all devices that share this block. (see dm_deferred_set code)
65 *
66 * iii) copy the data block to a newly allocated block. This step can be
67 * missed out if the io covers the block. (schedule_copy).
68 *
69 * iv) insert the new mapping into the origin's btree
70 * (process_prepared_mapping). This act of inserting breaks some
71 * sharing of btree nodes between the two devices. Breaking sharing only
72 * affects the btree of that specific device. Btrees for the other
73 * devices that share the block never change. The btree for the origin
74 * device as it was after the last commit is untouched, ie. we're using
75 * persistent data structures in the functional programming sense.
76 *
77 * v) unplug io to this physical block, including the io that triggered
78 * the breaking of sharing.
79 *
80 * Steps (ii) and (iii) occur in parallel.
81 *
82 * The metadata _doesn't_ need to be committed before the io continues. We
83 * get away with this because the io is always written to a _new_ block.
84 * If there's a crash, then:
85 *
86 * - The origin mapping will point to the old origin block (the shared
87 * one). This will contain the data as it was before the io that triggered
88 * the breaking of sharing came in.
89 *
90 * - The snap mapping still points to the old block. As it would after
91 * the commit.
92 *
93 * The downside of this scheme is the timestamp magic isn't perfect, and
94 * will continue to think that the data block in the snapshot device is shared
95 * even after the write to the origin has broken sharing. I suspect data
96 * blocks will typically be shared by many different devices, so we're
97 * breaking sharing n + 1 times, rather than n, where n is the number of
98 * devices that reference this data block. At the moment I think the
99 * benefits far, far outweigh the disadvantages.
100 */
101
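/*
 * Illustrative userspace model (not the kernel data structures) of steps
 * (iii) and (iv) above: after a snapshot both devices map a virtual block
 * to the same data block; a write to a shared block copies into a newly
 * allocated block and repoints only the writer's mapping.
 */
#include <stdio.h>

#define NR_BLOCKS 4

static int origin_map[NR_BLOCKS];	/* virtual block -> data block */
static int snap_map[NR_BLOCKS];
static int next_data_block = NR_BLOCKS;

static void write_origin(int vblock)
{
	if (origin_map[vblock] == snap_map[vblock])
		origin_map[vblock] = next_data_block++;	/* break sharing */
	/* data is now written to origin_map[vblock] */
}

int main(void)
{
	int i;

	for (i = 0; i < NR_BLOCKS; i++)
		origin_map[i] = snap_map[i] = i;	/* take a snapshot */

	write_origin(2);
	printf("origin: %d, snapshot: %d\n",
	       origin_map[2], snap_map[2]);		/* 4 vs 2 */
	return 0;
}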
102 /*----------------------------------------------------------------*/
103
104 /*
105 * Key building.
106 */
107 static void build_data_key(struct dm_thin_device *td,
108 dm_block_t b, struct dm_cell_key *key)
109 {
110 key->virtual = 0;
111 key->dev = dm_thin_dev_id(td);
112 key->block = b;
113 }
114
115 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
116 struct dm_cell_key *key)
117 {
118 key->virtual = 1;
119 key->dev = dm_thin_dev_id(td);
120 key->block = b;
121 }
122
123 /*----------------------------------------------------------------*/
124
125 /*
126 * A pool device ties together a metadata device and a data device. It
127 * also provides the interface for creating and destroying internal
128 * devices.
129 */
130 struct dm_thin_new_mapping;
131
132 /*
133 * The pool runs in 3 modes, ordered by increasing degradation so that modes can be compared.
134 */
135 enum pool_mode {
136 PM_WRITE, /* metadata may be changed */
137 PM_READ_ONLY, /* metadata may not be changed */
138 PM_FAIL, /* all I/O fails */
139 };
140
141 struct pool_features {
142 enum pool_mode mode;
143
144 bool zero_new_blocks:1;
145 bool discard_enabled:1;
146 bool discard_passdown:1;
147 };
148
149 struct thin_c;
150 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
151 typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
152
153 struct pool {
154 struct list_head list;
155 struct dm_target *ti; /* Only set if a pool target is bound */
156
157 struct mapped_device *pool_md;
158 struct block_device *md_dev;
159 struct dm_pool_metadata *pmd;
160
161 dm_block_t low_water_blocks;
162 uint32_t sectors_per_block;
163 int sectors_per_block_shift;
164
165 struct pool_features pf;
166 unsigned low_water_triggered:1; /* A dm event has been sent */
167 unsigned no_free_space:1; /* A -ENOSPC warning has been issued */
168
169 struct dm_bio_prison *prison;
170 struct dm_kcopyd_client *copier;
171
172 struct workqueue_struct *wq;
173 struct work_struct worker;
174 struct delayed_work waker;
175
176 unsigned long last_commit_jiffies;
177 unsigned ref_count;
178
179 spinlock_t lock;
180 struct bio_list deferred_bios;
181 struct bio_list deferred_flush_bios;
182 struct list_head prepared_mappings;
183 struct list_head prepared_discards;
184
185 struct bio_list retry_on_resume_list;
186
187 struct dm_deferred_set *shared_read_ds;
188 struct dm_deferred_set *all_io_ds;
189
190 struct dm_thin_new_mapping *next_mapping;
191 mempool_t *mapping_pool;
192
193 process_bio_fn process_bio;
194 process_bio_fn process_discard;
195
196 process_mapping_fn process_prepared_mapping;
197 process_mapping_fn process_prepared_discard;
198 };
199
200 static enum pool_mode get_pool_mode(struct pool *pool);
201 static void set_pool_mode(struct pool *pool, enum pool_mode mode);
202
203 /*
204 * Target context for a pool.
205 */
206 struct pool_c {
207 struct dm_target *ti;
208 struct pool *pool;
209 struct dm_dev *data_dev;
210 struct dm_dev *metadata_dev;
211 struct dm_target_callbacks callbacks;
212
213 dm_block_t low_water_blocks;
214 struct pool_features requested_pf; /* Features requested during table load */
215 struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */
216 };
217
218 /*
219 * Target context for a thin.
220 */
221 struct thin_c {
222 struct dm_dev *pool_dev;
223 struct dm_dev *origin_dev;
224 dm_thin_id dev_id;
225
226 struct pool *pool;
227 struct dm_thin_device *td;
228 };
229
230 /*----------------------------------------------------------------*/
231
232 /*
233 * wake_worker() is used when new work is queued and when pool_resume is
234 * ready to continue deferred IO processing.
235 */
236 static void wake_worker(struct pool *pool)
237 {
238 queue_work(pool->wq, &pool->worker);
239 }
240
241 /*----------------------------------------------------------------*/
242
243 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
244 struct dm_bio_prison_cell **cell_result)
245 {
246 int r;
247 struct dm_bio_prison_cell *cell_prealloc;
248
249 /*
250 * Allocate a cell from the prison's mempool.
251 * This might block but it can't fail.
252 */
253 cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
254
255 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
256 if (r)
257 /*
258 * We reused an old cell; we can get rid of
259 * the new one.
260 */
261 dm_bio_prison_free_cell(pool->prison, cell_prealloc);
262
263 return r;
264 }
265
266 static void cell_release(struct pool *pool,
267 struct dm_bio_prison_cell *cell,
268 struct bio_list *bios)
269 {
270 dm_cell_release(pool->prison, cell, bios);
271 dm_bio_prison_free_cell(pool->prison, cell);
272 }
273
274 static void cell_release_no_holder(struct pool *pool,
275 struct dm_bio_prison_cell *cell,
276 struct bio_list *bios)
277 {
278 dm_cell_release_no_holder(pool->prison, cell, bios);
279 dm_bio_prison_free_cell(pool->prison, cell);
280 }
281
282 static void cell_defer_no_holder_no_free(struct thin_c *tc,
283 struct dm_bio_prison_cell *cell)
284 {
285 struct pool *pool = tc->pool;
286 unsigned long flags;
287
288 spin_lock_irqsave(&pool->lock, flags);
289 dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
290 spin_unlock_irqrestore(&pool->lock, flags);
291
292 wake_worker(pool);
293 }
294
295 static void cell_error(struct pool *pool,
296 struct dm_bio_prison_cell *cell)
297 {
298 dm_cell_error(pool->prison, cell);
299 dm_bio_prison_free_cell(pool->prison, cell);
300 }
301
302 /*----------------------------------------------------------------*/
303
304 /*
305 * A global list of pools that uses a struct mapped_device as a key.
306 */
307 static struct dm_thin_pool_table {
308 struct mutex mutex;
309 struct list_head pools;
310 } dm_thin_pool_table;
311
312 static void pool_table_init(void)
313 {
314 mutex_init(&dm_thin_pool_table.mutex);
315 INIT_LIST_HEAD(&dm_thin_pool_table.pools);
316 }
317
318 static void __pool_table_insert(struct pool *pool)
319 {
320 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
321 list_add(&pool->list, &dm_thin_pool_table.pools);
322 }
323
324 static void __pool_table_remove(struct pool *pool)
325 {
326 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
327 list_del(&pool->list);
328 }
329
330 static struct pool *__pool_table_lookup(struct mapped_device *md)
331 {
332 struct pool *pool = NULL, *tmp;
333
334 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
335
336 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
337 if (tmp->pool_md == md) {
338 pool = tmp;
339 break;
340 }
341 }
342
343 return pool;
344 }
345
346 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
347 {
348 struct pool *pool = NULL, *tmp;
349
350 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
351
352 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
353 if (tmp->md_dev == md_dev) {
354 pool = tmp;
355 break;
356 }
357 }
358
359 return pool;
360 }
361
362 /*----------------------------------------------------------------*/
363
364 struct dm_thin_endio_hook {
365 struct thin_c *tc;
366 struct dm_deferred_entry *shared_read_entry;
367 struct dm_deferred_entry *all_io_entry;
368 struct dm_thin_new_mapping *overwrite_mapping;
369 };
370
371 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
372 {
373 struct bio *bio;
374 struct bio_list bios;
375
376 bio_list_init(&bios);
377 bio_list_merge(&bios, master);
378 bio_list_init(master);
379
380 while ((bio = bio_list_pop(&bios))) {
381 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
382
383 if (h->tc == tc)
384 bio_endio(bio, DM_ENDIO_REQUEUE);
385 else
386 bio_list_add(master, bio);
387 }
388 }
389
390 static void requeue_io(struct thin_c *tc)
391 {
392 struct pool *pool = tc->pool;
393 unsigned long flags;
394
395 spin_lock_irqsave(&pool->lock, flags);
396 __requeue_bio_list(tc, &pool->deferred_bios);
397 __requeue_bio_list(tc, &pool->retry_on_resume_list);
398 spin_unlock_irqrestore(&pool->lock, flags);
399 }
400
401 /*
402 * This section of code contains the logic for processing a thin device's IO.
403 * Much of the code depends on pool object resources (lists, workqueues, etc)
404 * but most is exclusively called from the thin target rather than the thin-pool
405 * target.
406 */
407
408 static bool block_size_is_power_of_two(struct pool *pool)
409 {
410 return pool->sectors_per_block_shift >= 0;
411 }
412
413 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
414 {
415 struct pool *pool = tc->pool;
416 sector_t block_nr = bio->bi_iter.bi_sector;
417
418 if (block_size_is_power_of_two(pool))
419 block_nr >>= pool->sectors_per_block_shift;
420 else
421 (void) sector_div(block_nr, pool->sectors_per_block);
422
423 return block_nr;
424 }
425
426 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
427 {
428 struct pool *pool = tc->pool;
429 sector_t bi_sector = bio->bi_iter.bi_sector;
430
431 bio->bi_bdev = tc->pool_dev->bdev;
432 if (block_size_is_power_of_two(pool))
433 bio->bi_iter.bi_sector =
434 (block << pool->sectors_per_block_shift) |
435 (bi_sector & (pool->sectors_per_block - 1));
436 else
437 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
438 sector_div(bi_sector, pool->sectors_per_block);
439 }
440
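/*
 * Minimal userspace sketch (assumed values) of the arithmetic used by
 * get_bio_block() and remap() above: power-of-two block sizes use a shift
 * and mask, anything else falls back to division (sector_div in-kernel).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t bi_sector = 100000;		/* example bio start sector */
	uint32_t sectors_per_block = 128;	/* 64 KiB blocks, power of two */
	int shift = 7;				/* __ffs(128) */

	uint64_t block  = bi_sector >> shift;			/* 781 */
	uint64_t offset = bi_sector & (sectors_per_block - 1);	/* 32 */

	/* equivalent result via plain division for non-power-of-two sizes */
	printf("block=%llu offset=%llu (div: %llu rem %llu)\n",
	       (unsigned long long)block, (unsigned long long)offset,
	       (unsigned long long)(bi_sector / sectors_per_block),
	       (unsigned long long)(bi_sector % sectors_per_block));
	return 0;
}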
441 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
442 {
443 bio->bi_bdev = tc->origin_dev->bdev;
444 }
445
446 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
447 {
448 return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
449 dm_thin_changed_this_transaction(tc->td);
450 }
451
452 static void inc_all_io_entry(struct pool *pool, struct bio *bio)
453 {
454 struct dm_thin_endio_hook *h;
455
456 if (bio->bi_rw & REQ_DISCARD)
457 return;
458
459 h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
460 h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
461 }
462
463 static void issue(struct thin_c *tc, struct bio *bio)
464 {
465 struct pool *pool = tc->pool;
466 unsigned long flags;
467
468 if (!bio_triggers_commit(tc, bio)) {
469 generic_make_request(bio);
470 return;
471 }
472
473 /*
474 * Complete bio with an error if earlier I/O caused changes to
475 * the metadata that can't be committed, e.g. due to I/O errors
476 * on the metadata device.
477 */
478 if (dm_thin_aborted_changes(tc->td)) {
479 bio_io_error(bio);
480 return;
481 }
482
483 /*
484 * Batch together any bios that trigger commits and then issue a
485 * single commit for them in process_deferred_bios().
486 */
487 spin_lock_irqsave(&pool->lock, flags);
488 bio_list_add(&pool->deferred_flush_bios, bio);
489 spin_unlock_irqrestore(&pool->lock, flags);
490 }
491
492 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
493 {
494 remap_to_origin(tc, bio);
495 issue(tc, bio);
496 }
497
498 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
499 dm_block_t block)
500 {
501 remap(tc, bio, block);
502 issue(tc, bio);
503 }
504
505 /*----------------------------------------------------------------*/
506
507 /*
508 * Bio endio functions.
509 */
510 struct dm_thin_new_mapping {
511 struct list_head list;
512
513 unsigned quiesced:1;
514 unsigned prepared:1;
515 unsigned pass_discard:1;
516
517 struct thin_c *tc;
518 dm_block_t virt_block;
519 dm_block_t data_block;
520 struct dm_bio_prison_cell *cell, *cell2;
521 int err;
522
523 /*
524 * If the bio covers the whole area of a block then we can avoid
525 * zeroing or copying. Instead this bio is hooked. The bio will
526 * still be in the cell, so care has to be taken to avoid issuing
527 * the bio twice.
528 */
529 struct bio *bio;
530 bio_end_io_t *saved_bi_end_io;
531 };
532
533 static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
534 {
535 struct pool *pool = m->tc->pool;
536
537 if (m->quiesced && m->prepared) {
538 list_add(&m->list, &pool->prepared_mappings);
539 wake_worker(pool);
540 }
541 }
542
543 static void copy_complete(int read_err, unsigned long write_err, void *context)
544 {
545 unsigned long flags;
546 struct dm_thin_new_mapping *m = context;
547 struct pool *pool = m->tc->pool;
548
549 m->err = read_err || write_err ? -EIO : 0;
550
551 spin_lock_irqsave(&pool->lock, flags);
552 m->prepared = 1;
553 __maybe_add_mapping(m);
554 spin_unlock_irqrestore(&pool->lock, flags);
555 }
556
557 static void overwrite_endio(struct bio *bio, int err)
558 {
559 unsigned long flags;
560 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
561 struct dm_thin_new_mapping *m = h->overwrite_mapping;
562 struct pool *pool = m->tc->pool;
563
564 m->err = err;
565
566 spin_lock_irqsave(&pool->lock, flags);
567 m->prepared = 1;
568 __maybe_add_mapping(m);
569 spin_unlock_irqrestore(&pool->lock, flags);
570 }
571
572 /*----------------------------------------------------------------*/
573
574 /*
575 * Workqueue.
576 */
577
578 /*
579 * Prepared mapping jobs.
580 */
581
582 /*
583 * This sends the bios in the cell back to the deferred_bios list.
584 */
585 static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
586 {
587 struct pool *pool = tc->pool;
588 unsigned long flags;
589
590 spin_lock_irqsave(&pool->lock, flags);
591 cell_release(pool, cell, &pool->deferred_bios);
592 spin_unlock_irqrestore(&pool->lock, flags);
593
594 wake_worker(pool);
595 }
596
597 /*
598 * Same as cell_defer above, except it omits the original holder of the cell.
599 */
600 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
601 {
602 struct pool *pool = tc->pool;
603 unsigned long flags;
604
605 spin_lock_irqsave(&pool->lock, flags);
606 cell_release_no_holder(pool, cell, &pool->deferred_bios);
607 spin_unlock_irqrestore(&pool->lock, flags);
608
609 wake_worker(pool);
610 }
611
612 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
613 {
614 if (m->bio)
615 m->bio->bi_end_io = m->saved_bi_end_io;
616 cell_error(m->tc->pool, m->cell);
617 list_del(&m->list);
618 mempool_free(m, m->tc->pool->mapping_pool);
619 }
620
621 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
622 {
623 struct thin_c *tc = m->tc;
624 struct pool *pool = tc->pool;
625 struct bio *bio;
626 int r;
627
628 bio = m->bio;
629 if (bio)
630 bio->bi_end_io = m->saved_bi_end_io;
631
632 if (m->err) {
633 cell_error(pool, m->cell);
634 goto out;
635 }
636
637 /*
638 * Commit the prepared block into the mapping btree.
639 * Any I/O for this block arriving after this point will get
640 * remapped to it directly.
641 */
642 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
643 if (r) {
644 DMERR_LIMIT("dm_thin_insert_block() failed");
645 cell_error(pool, m->cell);
646 goto out;
647 }
648
649 /*
650 * Release any bios held while the block was being provisioned.
651 * If we are processing a write bio that completely covers the block,
652 * we already processed it so can ignore it now when processing
653 * the bios in the cell.
654 */
655 if (bio) {
656 cell_defer_no_holder(tc, m->cell);
657 bio_endio(bio, 0);
658 } else
659 cell_defer(tc, m->cell);
660
661 out:
662 list_del(&m->list);
663 mempool_free(m, pool->mapping_pool);
664 }
665
666 static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
667 {
668 struct thin_c *tc = m->tc;
669
670 bio_io_error(m->bio);
671 cell_defer_no_holder(tc, m->cell);
672 cell_defer_no_holder(tc, m->cell2);
673 mempool_free(m, tc->pool->mapping_pool);
674 }
675
676 static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
677 {
678 struct thin_c *tc = m->tc;
679
680 inc_all_io_entry(tc->pool, m->bio);
681 cell_defer_no_holder(tc, m->cell);
682 cell_defer_no_holder(tc, m->cell2);
683
684 if (m->pass_discard)
685 remap_and_issue(tc, m->bio, m->data_block);
686 else
687 bio_endio(m->bio, 0);
688
689 mempool_free(m, tc->pool->mapping_pool);
690 }
691
692 static void process_prepared_discard(struct dm_thin_new_mapping *m)
693 {
694 int r;
695 struct thin_c *tc = m->tc;
696
697 r = dm_thin_remove_block(tc->td, m->virt_block);
698 if (r)
699 DMERR_LIMIT("dm_thin_remove_block() failed");
700
701 process_prepared_discard_passdown(m);
702 }
703
704 static void process_prepared(struct pool *pool, struct list_head *head,
705 process_mapping_fn *fn)
706 {
707 unsigned long flags;
708 struct list_head maps;
709 struct dm_thin_new_mapping *m, *tmp;
710
711 INIT_LIST_HEAD(&maps);
712 spin_lock_irqsave(&pool->lock, flags);
713 list_splice_init(head, &maps);
714 spin_unlock_irqrestore(&pool->lock, flags);
715
716 list_for_each_entry_safe(m, tmp, &maps, list)
717 (*fn)(m);
718 }
719
720 /*
721 * Deferred bio jobs.
722 */
723 static int io_overlaps_block(struct pool *pool, struct bio *bio)
724 {
725 return bio->bi_iter.bi_size ==
726 (pool->sectors_per_block << SECTOR_SHIFT);
727 }
728
729 static int io_overwrites_block(struct pool *pool, struct bio *bio)
730 {
731 return (bio_data_dir(bio) == WRITE) &&
732 io_overlaps_block(pool, bio);
733 }
734
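/*
 * Quick numeric check (assumed 64 KiB blocks) of io_overlaps_block() above:
 * a bio covers a whole pool block only when its byte length equals
 * sectors_per_block << SECTOR_SHIFT.
 */
#include <stdio.h>

int main(void)
{
	unsigned sectors_per_block = 128;
	unsigned block_bytes = sectors_per_block << 9;

	printf("full-block write overlaps: %d\n", 65536 == block_bytes);	/* 1 */
	printf("4 KiB write overlaps:      %d\n", 4096 == block_bytes);		/* 0 */
	return 0;
}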
735 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
736 bio_end_io_t *fn)
737 {
738 *save = bio->bi_end_io;
739 bio->bi_end_io = fn;
740 }
741
742 static int ensure_next_mapping(struct pool *pool)
743 {
744 if (pool->next_mapping)
745 return 0;
746
747 pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
748
749 return pool->next_mapping ? 0 : -ENOMEM;
750 }
751
752 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
753 {
754 struct dm_thin_new_mapping *r = pool->next_mapping;
755
756 BUG_ON(!pool->next_mapping);
757
758 pool->next_mapping = NULL;
759
760 return r;
761 }
762
763 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
764 struct dm_dev *origin, dm_block_t data_origin,
765 dm_block_t data_dest,
766 struct dm_bio_prison_cell *cell, struct bio *bio)
767 {
768 int r;
769 struct pool *pool = tc->pool;
770 struct dm_thin_new_mapping *m = get_next_mapping(pool);
771
772 INIT_LIST_HEAD(&m->list);
773 m->quiesced = 0;
774 m->prepared = 0;
775 m->tc = tc;
776 m->virt_block = virt_block;
777 m->data_block = data_dest;
778 m->cell = cell;
779 m->err = 0;
780 m->bio = NULL;
781
782 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
783 m->quiesced = 1;
784
785 /*
786 * IO to pool_dev remaps to the pool target's data_dev.
787 *
788 * If the whole block of data is being overwritten, we can issue the
789 * bio immediately. Otherwise we use kcopyd to clone the data first.
790 */
791 if (io_overwrites_block(pool, bio)) {
792 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
793
794 h->overwrite_mapping = m;
795 m->bio = bio;
796 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
797 inc_all_io_entry(pool, bio);
798 remap_and_issue(tc, bio, data_dest);
799 } else {
800 struct dm_io_region from, to;
801
802 from.bdev = origin->bdev;
803 from.sector = data_origin * pool->sectors_per_block;
804 from.count = pool->sectors_per_block;
805
806 to.bdev = tc->pool_dev->bdev;
807 to.sector = data_dest * pool->sectors_per_block;
808 to.count = pool->sectors_per_block;
809
810 r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
811 0, copy_complete, m);
812 if (r < 0) {
813 mempool_free(m, pool->mapping_pool);
814 DMERR_LIMIT("dm_kcopyd_copy() failed");
815 cell_error(pool, cell);
816 }
817 }
818 }
819
820 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
821 dm_block_t data_origin, dm_block_t data_dest,
822 struct dm_bio_prison_cell *cell, struct bio *bio)
823 {
824 schedule_copy(tc, virt_block, tc->pool_dev,
825 data_origin, data_dest, cell, bio);
826 }
827
828 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
829 dm_block_t data_dest,
830 struct dm_bio_prison_cell *cell, struct bio *bio)
831 {
832 schedule_copy(tc, virt_block, tc->origin_dev,
833 virt_block, data_dest, cell, bio);
834 }
835
836 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
837 dm_block_t data_block, struct dm_bio_prison_cell *cell,
838 struct bio *bio)
839 {
840 struct pool *pool = tc->pool;
841 struct dm_thin_new_mapping *m = get_next_mapping(pool);
842
843 INIT_LIST_HEAD(&m->list);
844 m->quiesced = 1;
845 m->prepared = 0;
846 m->tc = tc;
847 m->virt_block = virt_block;
848 m->data_block = data_block;
849 m->cell = cell;
850 m->err = 0;
851 m->bio = NULL;
852
853 /*
854 * If the whole block of data is being overwritten or we are not
855 * zeroing pre-existing data, we can issue the bio immediately.
856 * Otherwise we use kcopyd to zero the data first.
857 */
858 if (!pool->pf.zero_new_blocks)
859 process_prepared_mapping(m);
860
861 else if (io_overwrites_block(pool, bio)) {
862 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
863
864 h->overwrite_mapping = m;
865 m->bio = bio;
866 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
867 inc_all_io_entry(pool, bio);
868 remap_and_issue(tc, bio, data_block);
869 } else {
870 int r;
871 struct dm_io_region to;
872
873 to.bdev = tc->pool_dev->bdev;
874 to.sector = data_block * pool->sectors_per_block;
875 to.count = pool->sectors_per_block;
876
877 r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
878 if (r < 0) {
879 mempool_free(m, pool->mapping_pool);
880 DMERR_LIMIT("dm_kcopyd_zero() failed");
881 cell_error(pool, cell);
882 }
883 }
884 }
885
886 static int commit(struct pool *pool)
887 {
888 int r;
889
890 r = dm_pool_commit_metadata(pool->pmd);
891 if (r)
892 DMERR_LIMIT("%s: commit failed: error = %d",
893 dm_device_name(pool->pool_md), r);
894
895 return r;
896 }
897
898 /*
899 * A non-zero return indicates read_only or fail_io mode.
900 * Many callers don't care about the return value.
901 */
902 static int commit_or_fallback(struct pool *pool)
903 {
904 int r;
905
906 if (get_pool_mode(pool) != PM_WRITE)
907 return -EINVAL;
908
909 r = commit(pool);
910 if (r)
911 set_pool_mode(pool, PM_READ_ONLY);
912
913 return r;
914 }
915
916 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
917 {
918 int r;
919 dm_block_t free_blocks;
920 unsigned long flags;
921 struct pool *pool = tc->pool;
922
923 /*
924 * Once no_free_space is set we must not allow allocation to succeed.
925 * Otherwise it is difficult to explain, debug, test and support.
926 */
927 if (pool->no_free_space)
928 return -ENOSPC;
929
930 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
931 if (r)
932 return r;
933
934 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
935 DMWARN("%s: reached low water mark for data device: sending event.",
936 dm_device_name(pool->pool_md));
937 spin_lock_irqsave(&pool->lock, flags);
938 pool->low_water_triggered = 1;
939 spin_unlock_irqrestore(&pool->lock, flags);
940 dm_table_event(pool->ti->table);
941 }
942
943 if (!free_blocks) {
944 /*
945 * Try to commit to see if that will free up some
946 * more space.
947 */
948 (void) commit_or_fallback(pool);
949
950 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
951 if (r)
952 return r;
953
954 /*
955 * If we still have no space we set a flag to avoid
956 * doing all this checking and return -ENOSPC. This
957 * flag serves as a latch that disallows allocations from
958 * this pool until the admin takes action (e.g. resize or
959 * table reload).
960 */
961 if (!free_blocks) {
962 DMWARN("%s: no free space available.",
963 dm_device_name(pool->pool_md));
964 spin_lock_irqsave(&pool->lock, flags);
965 pool->no_free_space = 1;
966 spin_unlock_irqrestore(&pool->lock, flags);
967 return -ENOSPC;
968 }
969 }
970
971 r = dm_pool_alloc_data_block(pool->pmd, result);
972 if (r)
973 return r;
974
975 return 0;
976 }
977
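/*
 * Userspace sketch (invented helper names) of the no_free_space latch used
 * by alloc_data_block() above: once the pool sees zero free blocks even
 * after a commit, later allocations fail fast with -ENOSPC until the admin
 * resizes the pool and the latch is cleared.
 */
#include <stdio.h>
#include <errno.h>

static int no_free_space;	/* the latch */
static int free_blocks = 1;

static int alloc_block(void)
{
	if (no_free_space)
		return -ENOSPC;		/* skip the expensive checks */
	if (!free_blocks) {
		no_free_space = 1;	/* latch until admin intervention */
		return -ENOSPC;
	}
	free_blocks--;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", alloc_block(), alloc_block(), alloc_block());	/* 0 -28 -28 */
	return 0;
}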
978 /*
979 * If we have run out of space, queue bios until the device is
980 * resumed, presumably after having been reloaded with more space.
981 */
982 static void retry_on_resume(struct bio *bio)
983 {
984 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
985 struct thin_c *tc = h->tc;
986 struct pool *pool = tc->pool;
987 unsigned long flags;
988
989 spin_lock_irqsave(&pool->lock, flags);
990 bio_list_add(&pool->retry_on_resume_list, bio);
991 spin_unlock_irqrestore(&pool->lock, flags);
992 }
993
994 static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell)
995 {
996 struct bio *bio;
997 struct bio_list bios;
998
999 bio_list_init(&bios);
1000 cell_release(pool, cell, &bios);
1001
1002 while ((bio = bio_list_pop(&bios)))
1003 retry_on_resume(bio);
1004 }
1005
1006 static void process_discard(struct thin_c *tc, struct bio *bio)
1007 {
1008 int r;
1009 unsigned long flags;
1010 struct pool *pool = tc->pool;
1011 struct dm_bio_prison_cell *cell, *cell2;
1012 struct dm_cell_key key, key2;
1013 dm_block_t block = get_bio_block(tc, bio);
1014 struct dm_thin_lookup_result lookup_result;
1015 struct dm_thin_new_mapping *m;
1016
1017 build_virtual_key(tc->td, block, &key);
1018 if (bio_detain(tc->pool, &key, bio, &cell))
1019 return;
1020
1021 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1022 switch (r) {
1023 case 0:
1024 /*
1025 * Check nobody is fiddling with this pool block. This can
1026 * happen if someone's in the process of breaking sharing
1027 * on this block.
1028 */
1029 build_data_key(tc->td, lookup_result.block, &key2);
1030 if (bio_detain(tc->pool, &key2, bio, &cell2)) {
1031 cell_defer_no_holder(tc, cell);
1032 break;
1033 }
1034
1035 if (io_overlaps_block(pool, bio)) {
1036 /*
1037 * IO may still be going to the destination block. We must
1038 * quiesce before we can do the removal.
1039 */
1040 m = get_next_mapping(pool);
1041 m->tc = tc;
1042 m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
1043 m->virt_block = block;
1044 m->data_block = lookup_result.block;
1045 m->cell = cell;
1046 m->cell2 = cell2;
1047 m->err = 0;
1048 m->bio = bio;
1049
1050 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
1051 spin_lock_irqsave(&pool->lock, flags);
1052 list_add(&m->list, &pool->prepared_discards);
1053 spin_unlock_irqrestore(&pool->lock, flags);
1054 wake_worker(pool);
1055 }
1056 } else {
1057 inc_all_io_entry(pool, bio);
1058 cell_defer_no_holder(tc, cell);
1059 cell_defer_no_holder(tc, cell2);
1060
1061 /*
1062 * The DM core makes sure that the discard doesn't span
1063 * a block boundary. So we submit the discard of a
1064 * partial block appropriately.
1065 */
1066 if ((!lookup_result.shared) && pool->pf.discard_passdown)
1067 remap_and_issue(tc, bio, lookup_result.block);
1068 else
1069 bio_endio(bio, 0);
1070 }
1071 break;
1072
1073 case -ENODATA:
1074 /*
1075 * It isn't provisioned, just forget it.
1076 */
1077 cell_defer_no_holder(tc, cell);
1078 bio_endio(bio, 0);
1079 break;
1080
1081 default:
1082 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1083 __func__, r);
1084 cell_defer_no_holder(tc, cell);
1085 bio_io_error(bio);
1086 break;
1087 }
1088 }
1089
1090 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1091 struct dm_cell_key *key,
1092 struct dm_thin_lookup_result *lookup_result,
1093 struct dm_bio_prison_cell *cell)
1094 {
1095 int r;
1096 dm_block_t data_block;
1097 struct pool *pool = tc->pool;
1098
1099 r = alloc_data_block(tc, &data_block);
1100 switch (r) {
1101 case 0:
1102 schedule_internal_copy(tc, block, lookup_result->block,
1103 data_block, cell, bio);
1104 break;
1105
1106 case -ENOSPC:
1107 no_space(pool, cell);
1108 break;
1109
1110 default:
1111 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1112 __func__, r);
1113 set_pool_mode(pool, PM_READ_ONLY);
1114 cell_error(pool, cell);
1115 break;
1116 }
1117 }
1118
1119 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1120 dm_block_t block,
1121 struct dm_thin_lookup_result *lookup_result)
1122 {
1123 struct dm_bio_prison_cell *cell;
1124 struct pool *pool = tc->pool;
1125 struct dm_cell_key key;
1126
1127 /*
1128 * If cell is already occupied, then sharing is already in the process
1129 * of being broken so we have nothing further to do here.
1130 */
1131 build_data_key(tc->td, lookup_result->block, &key);
1132 if (bio_detain(pool, &key, bio, &cell))
1133 return;
1134
1135 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
1136 break_sharing(tc, bio, block, &key, lookup_result, cell);
1137 else {
1138 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1139
1140 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1141 inc_all_io_entry(pool, bio);
1142 cell_defer_no_holder(tc, cell);
1143
1144 remap_and_issue(tc, bio, lookup_result->block);
1145 }
1146 }
1147
1148 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1149 struct dm_bio_prison_cell *cell)
1150 {
1151 int r;
1152 dm_block_t data_block;
1153 struct pool *pool = tc->pool;
1154
1155 /*
1156 * Remap empty bios (flushes) immediately, without provisioning.
1157 */
1158 if (!bio->bi_iter.bi_size) {
1159 inc_all_io_entry(pool, bio);
1160 cell_defer_no_holder(tc, cell);
1161
1162 remap_and_issue(tc, bio, 0);
1163 return;
1164 }
1165
1166 /*
1167 * Fill read bios with zeroes and complete them immediately.
1168 */
1169 if (bio_data_dir(bio) == READ) {
1170 zero_fill_bio(bio);
1171 cell_defer_no_holder(tc, cell);
1172 bio_endio(bio, 0);
1173 return;
1174 }
1175
1176 r = alloc_data_block(tc, &data_block);
1177 switch (r) {
1178 case 0:
1179 if (tc->origin_dev)
1180 schedule_external_copy(tc, block, data_block, cell, bio);
1181 else
1182 schedule_zero(tc, block, data_block, cell, bio);
1183 break;
1184
1185 case -ENOSPC:
1186 no_space(pool, cell);
1187 break;
1188
1189 default:
1190 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1191 __func__, r);
1192 set_pool_mode(pool, PM_READ_ONLY);
1193 cell_error(pool, cell);
1194 break;
1195 }
1196 }
1197
1198 static void process_bio(struct thin_c *tc, struct bio *bio)
1199 {
1200 int r;
1201 struct pool *pool = tc->pool;
1202 dm_block_t block = get_bio_block(tc, bio);
1203 struct dm_bio_prison_cell *cell;
1204 struct dm_cell_key key;
1205 struct dm_thin_lookup_result lookup_result;
1206
1207 /*
1208 * If cell is already occupied, then the block is already
1209 * being provisioned so we have nothing further to do here.
1210 */
1211 build_virtual_key(tc->td, block, &key);
1212 if (bio_detain(pool, &key, bio, &cell))
1213 return;
1214
1215 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1216 switch (r) {
1217 case 0:
1218 if (lookup_result.shared) {
1219 process_shared_bio(tc, bio, block, &lookup_result);
1220 cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
1221 } else {
1222 inc_all_io_entry(pool, bio);
1223 cell_defer_no_holder(tc, cell);
1224
1225 remap_and_issue(tc, bio, lookup_result.block);
1226 }
1227 break;
1228
1229 case -ENODATA:
1230 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1231 inc_all_io_entry(pool, bio);
1232 cell_defer_no_holder(tc, cell);
1233
1234 remap_to_origin_and_issue(tc, bio);
1235 } else
1236 provision_block(tc, bio, block, cell);
1237 break;
1238
1239 default:
1240 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1241 __func__, r);
1242 cell_defer_no_holder(tc, cell);
1243 bio_io_error(bio);
1244 break;
1245 }
1246 }
1247
1248 static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1249 {
1250 int r;
1251 int rw = bio_data_dir(bio);
1252 dm_block_t block = get_bio_block(tc, bio);
1253 struct dm_thin_lookup_result lookup_result;
1254
1255 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1256 switch (r) {
1257 case 0:
1258 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
1259 bio_io_error(bio);
1260 else {
1261 inc_all_io_entry(tc->pool, bio);
1262 remap_and_issue(tc, bio, lookup_result.block);
1263 }
1264 break;
1265
1266 case -ENODATA:
1267 if (rw != READ) {
1268 bio_io_error(bio);
1269 break;
1270 }
1271
1272 if (tc->origin_dev) {
1273 inc_all_io_entry(tc->pool, bio);
1274 remap_to_origin_and_issue(tc, bio);
1275 break;
1276 }
1277
1278 zero_fill_bio(bio);
1279 bio_endio(bio, 0);
1280 break;
1281
1282 default:
1283 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1284 __func__, r);
1285 bio_io_error(bio);
1286 break;
1287 }
1288 }
1289
1290 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1291 {
1292 bio_io_error(bio);
1293 }
1294
1295 /*
1296 * FIXME: should we also commit due to size of transaction, measured in
1297 * metadata blocks?
1298 */
1299 static int need_commit_due_to_time(struct pool *pool)
1300 {
1301 return jiffies < pool->last_commit_jiffies ||
1302 jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
1303 }
1304
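/*
 * Userspace sketch of the check above with an assumed HZ of 250: a commit
 * is wanted once COMMIT_PERIOD jiffies have passed, and the first clause
 * also forces a commit if the jiffies counter has wrapped past
 * last_commit_jiffies.
 */
#include <stdio.h>

#define COMMIT_PERIOD 250UL

static int need_commit(unsigned long now, unsigned long last)
{
	return now < last || now > last + COMMIT_PERIOD;
}

int main(void)
{
	printf("%d\n", need_commit(1000, 900));			/* 0: within the period */
	printf("%d\n", need_commit(1300, 900));			/* 1: period expired */
	printf("%d\n", need_commit(10, (unsigned long)-5));	/* 1: counter wrapped */
	return 0;
}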
1305 static void process_deferred_bios(struct pool *pool)
1306 {
1307 unsigned long flags;
1308 struct bio *bio;
1309 struct bio_list bios;
1310
1311 bio_list_init(&bios);
1312
1313 spin_lock_irqsave(&pool->lock, flags);
1314 bio_list_merge(&bios, &pool->deferred_bios);
1315 bio_list_init(&pool->deferred_bios);
1316 spin_unlock_irqrestore(&pool->lock, flags);
1317
1318 while ((bio = bio_list_pop(&bios))) {
1319 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1320 struct thin_c *tc = h->tc;
1321
1322 /*
1323 * If we've got no free new_mapping structs, and processing
1324 * this bio might require one, we pause until there are some
1325 * prepared mappings to process.
1326 */
1327 if (ensure_next_mapping(pool)) {
1328 spin_lock_irqsave(&pool->lock, flags);
1329 bio_list_merge(&pool->deferred_bios, &bios);
1330 spin_unlock_irqrestore(&pool->lock, flags);
1331
1332 break;
1333 }
1334
1335 if (bio->bi_rw & REQ_DISCARD)
1336 pool->process_discard(tc, bio);
1337 else
1338 pool->process_bio(tc, bio);
1339 }
1340
1341 /*
1342 * If there are any deferred flush bios, we must commit
1343 * the metadata before issuing them.
1344 */
1345 bio_list_init(&bios);
1346 spin_lock_irqsave(&pool->lock, flags);
1347 bio_list_merge(&bios, &pool->deferred_flush_bios);
1348 bio_list_init(&pool->deferred_flush_bios);
1349 spin_unlock_irqrestore(&pool->lock, flags);
1350
1351 if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
1352 return;
1353
1354 if (commit_or_fallback(pool)) {
1355 while ((bio = bio_list_pop(&bios)))
1356 bio_io_error(bio);
1357 return;
1358 }
1359 pool->last_commit_jiffies = jiffies;
1360
1361 while ((bio = bio_list_pop(&bios)))
1362 generic_make_request(bio);
1363 }
1364
1365 static void do_worker(struct work_struct *ws)
1366 {
1367 struct pool *pool = container_of(ws, struct pool, worker);
1368
1369 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
1370 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
1371 process_deferred_bios(pool);
1372 }
1373
1374 /*
1375 * We want to commit periodically so that not too much
1376 * unwritten data builds up.
1377 */
1378 static void do_waker(struct work_struct *ws)
1379 {
1380 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
1381 wake_worker(pool);
1382 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1383 }
1384
1385 /*----------------------------------------------------------------*/
1386
1387 static enum pool_mode get_pool_mode(struct pool *pool)
1388 {
1389 return pool->pf.mode;
1390 }
1391
1392 static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1393 {
1394 int r;
1395
1396 pool->pf.mode = mode;
1397
1398 switch (mode) {
1399 case PM_FAIL:
1400 DMERR("%s: switching pool to failure mode",
1401 dm_device_name(pool->pool_md));
1402 pool->process_bio = process_bio_fail;
1403 pool->process_discard = process_bio_fail;
1404 pool->process_prepared_mapping = process_prepared_mapping_fail;
1405 pool->process_prepared_discard = process_prepared_discard_fail;
1406 break;
1407
1408 case PM_READ_ONLY:
1409 DMERR("%s: switching pool to read-only mode",
1410 dm_device_name(pool->pool_md));
1411 r = dm_pool_abort_metadata(pool->pmd);
1412 if (r) {
1413 DMERR("%s: aborting transaction failed",
1414 dm_device_name(pool->pool_md));
1415 set_pool_mode(pool, PM_FAIL);
1416 } else {
1417 dm_pool_metadata_read_only(pool->pmd);
1418 pool->process_bio = process_bio_read_only;
1419 pool->process_discard = process_discard;
1420 pool->process_prepared_mapping = process_prepared_mapping_fail;
1421 pool->process_prepared_discard = process_prepared_discard_passdown;
1422 }
1423 break;
1424
1425 case PM_WRITE:
1426 pool->process_bio = process_bio;
1427 pool->process_discard = process_discard;
1428 pool->process_prepared_mapping = process_prepared_mapping;
1429 pool->process_prepared_discard = process_prepared_discard;
1430 break;
1431 }
1432 }
1433
1434 /*----------------------------------------------------------------*/
1435
1436 /*
1437 * Mapping functions.
1438 */
1439
1440 /*
1441 * Called only while mapping a thin bio to hand it over to the workqueue.
1442 */
1443 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1444 {
1445 unsigned long flags;
1446 struct pool *pool = tc->pool;
1447
1448 spin_lock_irqsave(&pool->lock, flags);
1449 bio_list_add(&pool->deferred_bios, bio);
1450 spin_unlock_irqrestore(&pool->lock, flags);
1451
1452 wake_worker(pool);
1453 }
1454
1455 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
1456 {
1457 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1458
1459 h->tc = tc;
1460 h->shared_read_entry = NULL;
1461 h->all_io_entry = NULL;
1462 h->overwrite_mapping = NULL;
1463 }
1464
1465 /*
1466 * Non-blocking function called from the thin target's map function.
1467 */
1468 static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1469 {
1470 int r;
1471 struct thin_c *tc = ti->private;
1472 dm_block_t block = get_bio_block(tc, bio);
1473 struct dm_thin_device *td = tc->td;
1474 struct dm_thin_lookup_result result;
1475 struct dm_bio_prison_cell cell1, cell2;
1476 struct dm_bio_prison_cell *cell_result;
1477 struct dm_cell_key key;
1478
1479 thin_hook_bio(tc, bio);
1480
1481 if (get_pool_mode(tc->pool) == PM_FAIL) {
1482 bio_io_error(bio);
1483 return DM_MAPIO_SUBMITTED;
1484 }
1485
1486 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
1487 thin_defer_bio(tc, bio);
1488 return DM_MAPIO_SUBMITTED;
1489 }
1490
1491 r = dm_thin_find_block(td, block, 0, &result);
1492
1493 /*
1494 * Note that we defer readahead too.
1495 */
1496 switch (r) {
1497 case 0:
1498 if (unlikely(result.shared)) {
1499 /*
1500 * We have a race condition here between the
1501 * result.shared value returned by the lookup and
1502 * snapshot creation, which may cause new
1503 * sharing.
1504 *
1505 * To avoid this always quiesce the origin before
1506 * taking the snap. You want to do this anyway to
1507 * ensure a consistent application view
1508 * (i.e. lockfs).
1509 *
1510 * More distant ancestors are irrelevant. The
1511 * shared flag will be set in their case.
1512 */
1513 thin_defer_bio(tc, bio);
1514 return DM_MAPIO_SUBMITTED;
1515 }
1516
1517 build_virtual_key(tc->td, block, &key);
1518 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
1519 return DM_MAPIO_SUBMITTED;
1520
1521 build_data_key(tc->td, result.block, &key);
1522 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
1523 cell_defer_no_holder_no_free(tc, &cell1);
1524 return DM_MAPIO_SUBMITTED;
1525 }
1526
1527 inc_all_io_entry(tc->pool, bio);
1528 cell_defer_no_holder_no_free(tc, &cell2);
1529 cell_defer_no_holder_no_free(tc, &cell1);
1530
1531 remap(tc, bio, result.block);
1532 return DM_MAPIO_REMAPPED;
1533
1534 case -ENODATA:
1535 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
1536 /*
1537 * This block isn't provisioned, and we have no way
1538 * of doing so. Just error it.
1539 */
1540 bio_io_error(bio);
1541 return DM_MAPIO_SUBMITTED;
1542 }
1543 /* fall through */
1544
1545 case -EWOULDBLOCK:
1546 /*
1547 * In future, the failed dm_thin_find_block above could
1548 * provide the hint to load the metadata into cache.
1549 */
1550 thin_defer_bio(tc, bio);
1551 return DM_MAPIO_SUBMITTED;
1552
1553 default:
1554 /*
1555 * Must always call bio_io_error on failure.
1556 * dm_thin_find_block can fail with -EINVAL if the
1557 * pool is switched to fail-io mode.
1558 */
1559 bio_io_error(bio);
1560 return DM_MAPIO_SUBMITTED;
1561 }
1562 }
1563
1564 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1565 {
1566 int r;
1567 unsigned long flags;
1568 struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1569
1570 spin_lock_irqsave(&pt->pool->lock, flags);
1571 r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1572 spin_unlock_irqrestore(&pt->pool->lock, flags);
1573
1574 if (!r) {
1575 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1576 r = bdi_congested(&q->backing_dev_info, bdi_bits);
1577 }
1578
1579 return r;
1580 }
1581
1582 static void __requeue_bios(struct pool *pool)
1583 {
1584 bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1585 bio_list_init(&pool->retry_on_resume_list);
1586 }
1587
1588 /*----------------------------------------------------------------
1589 * Binding of control targets to a pool object
1590 *--------------------------------------------------------------*/
1591 static bool data_dev_supports_discard(struct pool_c *pt)
1592 {
1593 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1594
1595 return q && blk_queue_discard(q);
1596 }
1597
1598 static bool is_factor(sector_t block_size, uint32_t n)
1599 {
1600 return !sector_div(block_size, n);
1601 }
1602
1603 /*
1604 * If discard_passdown was enabled verify that the data device
1605 * supports discards. Disable discard_passdown if not.
1606 */
1607 static void disable_passdown_if_not_supported(struct pool_c *pt)
1608 {
1609 struct pool *pool = pt->pool;
1610 struct block_device *data_bdev = pt->data_dev->bdev;
1611 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
1612 sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
1613 const char *reason = NULL;
1614 char buf[BDEVNAME_SIZE];
1615
1616 if (!pt->adjusted_pf.discard_passdown)
1617 return;
1618
1619 if (!data_dev_supports_discard(pt))
1620 reason = "discard unsupported";
1621
1622 else if (data_limits->max_discard_sectors < pool->sectors_per_block)
1623 reason = "max discard sectors smaller than a block";
1624
1625 else if (data_limits->discard_granularity > block_size)
1626 reason = "discard granularity larger than a block";
1627
1628 else if (!is_factor(block_size, data_limits->discard_granularity))
1629 reason = "discard granularity not a factor of block size";
1630
1631 if (reason) {
1632 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
1633 pt->adjusted_pf.discard_passdown = false;
1634 }
1635 }
1636
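/*
 * Small numeric sketch (assumed device limits) of the granularity checks in
 * disable_passdown_if_not_supported() above: passdown stays enabled only if
 * the data device's discard granularity is no larger than the pool block
 * size and divides it evenly.
 */
#include <stdio.h>

int main(void)
{
	unsigned long block_size_bytes = 128UL << 9;	/* 128-sector (64 KiB) pool blocks */
	unsigned long granularity = 4096;		/* reported by the data device */

	int ok = granularity <= block_size_bytes &&
		 block_size_bytes % granularity == 0;

	printf("discard passdown %s\n", ok ? "allowed" : "disabled");	/* allowed */
	return 0;
}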
1637 static int bind_control_target(struct pool *pool, struct dm_target *ti)
1638 {
1639 struct pool_c *pt = ti->private;
1640
1641 /*
1642 * We want to make sure that degraded pools are never upgraded.
1643 */
1644 enum pool_mode old_mode = pool->pf.mode;
1645 enum pool_mode new_mode = pt->adjusted_pf.mode;
1646
1647 if (old_mode > new_mode)
1648 new_mode = old_mode;
1649
1650 pool->ti = ti;
1651 pool->low_water_blocks = pt->low_water_blocks;
1652 pool->pf = pt->adjusted_pf;
1653
1654 set_pool_mode(pool, new_mode);
1655
1656 return 0;
1657 }
1658
1659 static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1660 {
1661 if (pool->ti == ti)
1662 pool->ti = NULL;
1663 }
1664
1665 /*----------------------------------------------------------------
1666 * Pool creation
1667 *--------------------------------------------------------------*/
1668 /* Initialize pool features. */
1669 static void pool_features_init(struct pool_features *pf)
1670 {
1671 pf->mode = PM_WRITE;
1672 pf->zero_new_blocks = true;
1673 pf->discard_enabled = true;
1674 pf->discard_passdown = true;
1675 }
1676
1677 static void __pool_destroy(struct pool *pool)
1678 {
1679 __pool_table_remove(pool);
1680
1681 if (dm_pool_metadata_close(pool->pmd) < 0)
1682 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1683
1684 dm_bio_prison_destroy(pool->prison);
1685 dm_kcopyd_client_destroy(pool->copier);
1686
1687 if (pool->wq)
1688 destroy_workqueue(pool->wq);
1689
1690 if (pool->next_mapping)
1691 mempool_free(pool->next_mapping, pool->mapping_pool);
1692 mempool_destroy(pool->mapping_pool);
1693 dm_deferred_set_destroy(pool->shared_read_ds);
1694 dm_deferred_set_destroy(pool->all_io_ds);
1695 kfree(pool);
1696 }
1697
1698 static struct kmem_cache *_new_mapping_cache;
1699
1700 static struct pool *pool_create(struct mapped_device *pool_md,
1701 struct block_device *metadata_dev,
1702 unsigned long block_size,
1703 int read_only, char **error)
1704 {
1705 int r;
1706 void *err_p;
1707 struct pool *pool;
1708 struct dm_pool_metadata *pmd;
1709 bool format_device = read_only ? false : true;
1710
1711 pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
1712 if (IS_ERR(pmd)) {
1713 *error = "Error creating metadata object";
1714 return (struct pool *)pmd;
1715 }
1716
1717 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1718 if (!pool) {
1719 *error = "Error allocating memory for pool";
1720 err_p = ERR_PTR(-ENOMEM);
1721 goto bad_pool;
1722 }
1723
1724 pool->pmd = pmd;
1725 pool->sectors_per_block = block_size;
1726 if (block_size & (block_size - 1))
1727 pool->sectors_per_block_shift = -1;
1728 else
1729 pool->sectors_per_block_shift = __ffs(block_size);
1730 pool->low_water_blocks = 0;
1731 pool_features_init(&pool->pf);
1732 pool->prison = dm_bio_prison_create(PRISON_CELLS);
1733 if (!pool->prison) {
1734 *error = "Error creating pool's bio prison";
1735 err_p = ERR_PTR(-ENOMEM);
1736 goto bad_prison;
1737 }
1738
1739 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1740 if (IS_ERR(pool->copier)) {
1741 r = PTR_ERR(pool->copier);
1742 *error = "Error creating pool's kcopyd client";
1743 err_p = ERR_PTR(r);
1744 goto bad_kcopyd_client;
1745 }
1746
1747 /*
1748 * Create singlethreaded workqueue that will service all devices
1749 * that use this metadata.
1750 */
1751 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1752 if (!pool->wq) {
1753 *error = "Error creating pool's workqueue";
1754 err_p = ERR_PTR(-ENOMEM);
1755 goto bad_wq;
1756 }
1757
1758 INIT_WORK(&pool->worker, do_worker);
1759 INIT_DELAYED_WORK(&pool->waker, do_waker);
1760 spin_lock_init(&pool->lock);
1761 bio_list_init(&pool->deferred_bios);
1762 bio_list_init(&pool->deferred_flush_bios);
1763 INIT_LIST_HEAD(&pool->prepared_mappings);
1764 INIT_LIST_HEAD(&pool->prepared_discards);
1765 pool->low_water_triggered = 0;
1766 pool->no_free_space = 0;
1767 bio_list_init(&pool->retry_on_resume_list);
1768
1769 pool->shared_read_ds = dm_deferred_set_create();
1770 if (!pool->shared_read_ds) {
1771 *error = "Error creating pool's shared read deferred set";
1772 err_p = ERR_PTR(-ENOMEM);
1773 goto bad_shared_read_ds;
1774 }
1775
1776 pool->all_io_ds = dm_deferred_set_create();
1777 if (!pool->all_io_ds) {
1778 *error = "Error creating pool's all io deferred set";
1779 err_p = ERR_PTR(-ENOMEM);
1780 goto bad_all_io_ds;
1781 }
1782
1783 pool->next_mapping = NULL;
1784 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
1785 _new_mapping_cache);
1786 if (!pool->mapping_pool) {
1787 *error = "Error creating pool's mapping mempool";
1788 err_p = ERR_PTR(-ENOMEM);
1789 goto bad_mapping_pool;
1790 }
1791
1792 pool->ref_count = 1;
1793 pool->last_commit_jiffies = jiffies;
1794 pool->pool_md = pool_md;
1795 pool->md_dev = metadata_dev;
1796 __pool_table_insert(pool);
1797
1798 return pool;
1799
1800 bad_mapping_pool:
1801 dm_deferred_set_destroy(pool->all_io_ds);
1802 bad_all_io_ds:
1803 dm_deferred_set_destroy(pool->shared_read_ds);
1804 bad_shared_read_ds:
1805 destroy_workqueue(pool->wq);
1806 bad_wq:
1807 dm_kcopyd_client_destroy(pool->copier);
1808 bad_kcopyd_client:
1809 dm_bio_prison_destroy(pool->prison);
1810 bad_prison:
1811 kfree(pool);
1812 bad_pool:
1813 if (dm_pool_metadata_close(pmd))
1814 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1815
1816 return err_p;
1817 }
1818
1819 static void __pool_inc(struct pool *pool)
1820 {
1821 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1822 pool->ref_count++;
1823 }
1824
1825 static void __pool_dec(struct pool *pool)
1826 {
1827 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1828 BUG_ON(!pool->ref_count);
1829 if (!--pool->ref_count)
1830 __pool_destroy(pool);
1831 }
1832
1833 static struct pool *__pool_find(struct mapped_device *pool_md,
1834 struct block_device *metadata_dev,
1835 unsigned long block_size, int read_only,
1836 char **error, int *created)
1837 {
1838 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
1839
1840 if (pool) {
1841 if (pool->pool_md != pool_md) {
1842 *error = "metadata device already in use by a pool";
1843 return ERR_PTR(-EBUSY);
1844 }
1845 __pool_inc(pool);
1846
1847 } else {
1848 pool = __pool_table_lookup(pool_md);
1849 if (pool) {
1850 if (pool->md_dev != metadata_dev) {
1851 *error = "different pool cannot replace a pool";
1852 return ERR_PTR(-EINVAL);
1853 }
1854 __pool_inc(pool);
1855
1856 } else {
1857 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
1858 *created = 1;
1859 }
1860 }
1861
1862 return pool;
1863 }
1864
1865 /*----------------------------------------------------------------
1866 * Pool target methods
1867 *--------------------------------------------------------------*/
1868 static void pool_dtr(struct dm_target *ti)
1869 {
1870 struct pool_c *pt = ti->private;
1871
1872 mutex_lock(&dm_thin_pool_table.mutex);
1873
1874 unbind_control_target(pt->pool, ti);
1875 __pool_dec(pt->pool);
1876 dm_put_device(ti, pt->metadata_dev);
1877 dm_put_device(ti, pt->data_dev);
1878 kfree(pt);
1879
1880 mutex_unlock(&dm_thin_pool_table.mutex);
1881 }
1882
1883 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
1884 struct dm_target *ti)
1885 {
1886 int r;
1887 unsigned argc;
1888 const char *arg_name;
1889
1890 static struct dm_arg _args[] = {
1891 {0, 3, "Invalid number of pool feature arguments"},
1892 };
1893
1894 /*
1895 * No feature arguments supplied.
1896 */
1897 if (!as->argc)
1898 return 0;
1899
1900 r = dm_read_arg_group(_args, as, &argc, &ti->error);
1901 if (r)
1902 return -EINVAL;
1903
1904 while (argc && !r) {
1905 arg_name = dm_shift_arg(as);
1906 argc--;
1907
1908 if (!strcasecmp(arg_name, "skip_block_zeroing"))
1909 pf->zero_new_blocks = false;
1910
1911 else if (!strcasecmp(arg_name, "ignore_discard"))
1912 pf->discard_enabled = false;
1913
1914 else if (!strcasecmp(arg_name, "no_discard_passdown"))
1915 pf->discard_passdown = false;
1916
1917 else if (!strcasecmp(arg_name, "read_only"))
1918 pf->mode = PM_READ_ONLY;
1919
1920 else {
1921 ti->error = "Unrecognised pool feature requested";
1922 r = -EINVAL;
1923 break;
1924 }
1925 }
1926
1927 return r;
1928 }
1929
1930 static void metadata_low_callback(void *context)
1931 {
1932 struct pool *pool = context;
1933
1934 DMWARN("%s: reached low water mark for metadata device: sending event.",
1935 dm_device_name(pool->pool_md));
1936
1937 dm_table_event(pool->ti->table);
1938 }
1939
1940 static sector_t get_metadata_dev_size(struct block_device *bdev)
1941 {
1942 sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
1943 char buffer[BDEVNAME_SIZE];
1944
1945 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
1946 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1947 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
1948 metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
1949 }
1950
1951 return metadata_dev_size;
1952 }
1953
1954 static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
1955 {
1956 sector_t metadata_dev_size = get_metadata_dev_size(bdev);
1957
1958 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
1959
1960 return metadata_dev_size;
1961 }
1962
1963 /*
1964 * When a metadata threshold is crossed a dm event is triggered, and
1965 * userland should respond by growing the metadata device. We could let
1966 * userland set the threshold, like we do with the data threshold, but I'm
1967 * not sure they know enough to do this well.
1968 */
1969 static dm_block_t calc_metadata_threshold(struct pool_c *pt)
1970 {
1971 /*
1972 * 4M is ample for all ops with the possible exception of thin
1973 * device deletion which is harmless if it fails (just retry the
1974 * delete after you've grown the device).
1975 */
1976 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
1977 return min((dm_block_t)1024ULL /* 4M */, quarter);
1978 }
1979
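/*
 * Worked example (assumed 64 MiB metadata device) of calc_metadata_threshold()
 * above: metadata blocks are 4 KiB, so the 1024-block cap is 4 MiB and the
 * threshold is the smaller of that cap and a quarter of the device.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t metadata_blocks = (64ULL << 20) / 4096;	/* 16384 blocks */
	uint64_t quarter = metadata_blocks / 4;			/* 4096 blocks */
	uint64_t threshold = quarter < 1024 ? quarter : 1024;

	printf("threshold = %llu blocks (%llu MiB)\n",
	       (unsigned long long)threshold,
	       (unsigned long long)(threshold * 4096 >> 20));	/* 1024 blocks, 4 MiB */
	return 0;
}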
1980 /*
1981 * thin-pool <metadata dev> <data dev>
1982 * <data block size (sectors)>
1983 * <low water mark (blocks)>
1984 * [<#feature args> [<arg>]*]
1985 *
1986 * Optional feature arguments are:
1987 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
1988 * ignore_discard: disable discard
1989 * no_discard_passdown: don't pass discards down to the data device
1990 */
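/*
 * For example (device names and sizes are illustrative), a pool built on
 * a 10GB data device with 64KB (128-sector) blocks and a low water mark
 * of 32768 blocks:
 *
 *   dmsetup create pool \
 *	--table "0 20971520 thin-pool /dev/sdb1 /dev/sdb2 128 32768"
 */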
static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r, pool_created = 0;
	struct pool_c *pt;
	struct pool *pool;
	struct pool_features pf;
	struct dm_arg_set as;
	struct dm_dev *data_dev;
	unsigned long block_size;
	dm_block_t low_water_blocks;
	struct dm_dev *metadata_dev;
	fmode_t metadata_mode;

	/*
	 * FIXME Remove validation from scope of lock.
	 */
	mutex_lock(&dm_thin_pool_table.mutex);

	if (argc < 4) {
		ti->error = "Invalid argument count";
		r = -EINVAL;
		goto out_unlock;
	}

	as.argc = argc;
	as.argv = argv;

	/*
	 * Set default pool features.
	 */
	pool_features_init(&pf);

	dm_consume_args(&as, 4);
	r = parse_pool_features(&as, &pf, ti);
	if (r)
		goto out_unlock;

	metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
	r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
	if (r) {
		ti->error = "Error opening metadata block device";
		goto out_unlock;
	}

	/*
	 * Run for the side-effect of possibly issuing a warning if the
	 * device is too big.
	 */
	(void) get_metadata_dev_size(metadata_dev->bdev);

	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
	if (r) {
		ti->error = "Error getting data device";
		goto out_metadata;
	}

	if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
		ti->error = "Invalid block size";
		r = -EINVAL;
		goto out;
	}

	if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
		ti->error = "Invalid low water mark";
		r = -EINVAL;
		goto out;
	}

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt) {
		r = -ENOMEM;
		goto out;
	}

	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
			   block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
	if (IS_ERR(pool)) {
		r = PTR_ERR(pool);
		goto out_free_pt;
	}

	/*
	 * 'pool_created' reflects whether this is the first table load.
	 * Top level discard support is not allowed to be changed after
	 * initial load.  This would require a pool reload to trigger thin
	 * device changes.
	 */
	if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
		ti->error = "Discard support cannot be disabled once enabled";
		r = -EINVAL;
		goto out_flags_changed;
	}

	pt->pool = pool;
	pt->ti = ti;
	pt->metadata_dev = metadata_dev;
	pt->data_dev = data_dev;
	pt->low_water_blocks = low_water_blocks;
	pt->adjusted_pf = pt->requested_pf = pf;
	ti->num_flush_bios = 1;

	/*
	 * Only need to enable discards if the pool should pass
	 * them down to the data device.  The thin device's discard
	 * processing will cause mappings to be removed from the btree.
	 */
	ti->discard_zeroes_data_unsupported = true;
	if (pf.discard_enabled && pf.discard_passdown) {
		ti->num_discard_bios = 1;

		/*
		 * Setting 'discards_supported' circumvents the normal
		 * stacking of discard limits (this keeps the pool and
		 * thin devices' discard limits consistent).
		 */
		ti->discards_supported = true;
	}
	ti->private = pt;

	r = dm_pool_register_metadata_threshold(pt->pool->pmd,
						calc_metadata_threshold(pt),
						metadata_low_callback,
						pool);
	if (r) {
		ti->error = "Error registering metadata threshold";
		goto out_flags_changed;	/* must drop the pool reference taken above */
	}

	pt->callbacks.congested_fn = pool_is_congested;
	dm_table_add_target_callbacks(ti->table, &pt->callbacks);

	mutex_unlock(&dm_thin_pool_table.mutex);

	return 0;

out_flags_changed:
	__pool_dec(pool);
out_free_pt:
	kfree(pt);
out:
	dm_put_device(ti, data_dev);
out_metadata:
	dm_put_device(ti, metadata_dev);
out_unlock:
	mutex_unlock(&dm_thin_pool_table.mutex);

	return r;
}

static int pool_map(struct dm_target *ti, struct bio *bio)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	unsigned long flags;

	/*
	 * As this is a singleton target, ti->begin is always zero.
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio->bi_bdev = pt->data_dev->bdev;
	r = DM_MAPIO_REMAPPED;
	spin_unlock_irqrestore(&pool->lock, flags);

	return r;
}

static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	sector_t data_size = ti->len;
	dm_block_t sb_data_size;

	*need_commit = false;

	(void) sector_div(data_size, pool->sectors_per_block);

	r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
	if (r) {
		DMERR("%s: failed to retrieve data device size",
		      dm_device_name(pool->pool_md));
		return r;
	}

	if (data_size < sb_data_size) {
		DMERR("%s: pool target (%llu blocks) too small: expected %llu",
		      dm_device_name(pool->pool_md),
		      (unsigned long long)data_size, sb_data_size);
		return -EINVAL;

	} else if (data_size > sb_data_size) {
		r = dm_pool_resize_data_dev(pool->pmd, data_size);
		if (r) {
			DMERR("%s: failed to resize data device",
			      dm_device_name(pool->pool_md));
			set_pool_mode(pool, PM_READ_ONLY);
			return r;
		}

		*need_commit = true;
	}

	return 0;
}

static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
{
	int r;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	dm_block_t metadata_dev_size, sb_metadata_dev_size;

	*need_commit = false;

	metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);

	r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
	if (r) {
		DMERR("%s: failed to retrieve metadata device size",
		      dm_device_name(pool->pool_md));
		return r;
	}

	if (metadata_dev_size < sb_metadata_dev_size) {
		DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
		      dm_device_name(pool->pool_md),
		      metadata_dev_size, sb_metadata_dev_size);
		return -EINVAL;

	} else if (metadata_dev_size > sb_metadata_dev_size) {
		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
		if (r) {
			DMERR("%s: failed to resize metadata device",
			      dm_device_name(pool->pool_md));
			return r;
		}

		*need_commit = true;
	}

	return 0;
}
2236
/*
 * Retrieves the number of blocks of the data device from
 * the superblock and compares it to the actual device size,
 * thus resizing the data device in case it has grown.  The
 * metadata device is checked and resized the same way.
 *
 * This both copes with opening preallocated data devices in the ctr
 * being followed by a resume
 * -and-
 * calling the resume method individually after userspace has
 * grown the data device in reaction to a table event.
 */
static int pool_preresume(struct dm_target *ti)
{
	int r;
	bool need_commit1, need_commit2;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	/*
	 * Take control of the pool object.
	 */
	r = bind_control_target(pool, ti);
	if (r)
		return r;

	r = maybe_resize_data_dev(ti, &need_commit1);
	if (r)
		return r;

	r = maybe_resize_metadata_dev(ti, &need_commit2);
	if (r)
		return r;

	if (need_commit1 || need_commit2)
		(void) commit_or_fallback(pool);

	return 0;
}

static void pool_resume(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	pool->low_water_triggered = 0;
	pool->no_free_space = 0;
	__requeue_bios(pool);
	spin_unlock_irqrestore(&pool->lock, flags);

	do_waker(&pool->waker.work);
}

static void pool_postsuspend(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	cancel_delayed_work(&pool->waker);
	flush_workqueue(pool->wq);
	(void) commit_or_fallback(pool);
}

static int check_arg_count(unsigned argc, unsigned args_required)
{
	if (argc != args_required) {
		DMWARN("Message received with %u arguments instead of %u.",
		       argc, args_required);
		return -EINVAL;
	}

	return 0;
}

static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
{
	if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
	    *dev_id <= MAX_DEV_ID)
		return 0;

	if (warning)
		DMWARN("Message received with invalid device id: %s", arg);

	return -EINVAL;
}
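
/*
 * Since MAX_DEV_ID is (1 << 24) - 1, ids 0 through 16777215 are accepted
 * here; a message such as "create_thin 16777216" (illustrative) parses
 * but is rejected.
 */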
2323
static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	int r;

	r = check_arg_count(argc, 2);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = dm_pool_create_thin(pool->pmd, dev_id);
	if (r) {
		DMWARN("Creation of new thinly-provisioned device with id %s failed.",
		       argv[1]);
		return r;
	}

	return 0;
}

static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	dm_thin_id origin_dev_id;
	int r;

	r = check_arg_count(argc, 3);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = read_dev_id(argv[2], &origin_dev_id, 1);
	if (r)
		return r;

	r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
	if (r) {
		DMWARN("Creation of new snapshot %s of device %s failed.",
		       argv[1], argv[2]);
		return r;
	}

	return 0;
}

static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id dev_id;
	int r;

	r = check_arg_count(argc, 2);
	if (r)
		return r;

	r = read_dev_id(argv[1], &dev_id, 1);
	if (r)
		return r;

	r = dm_pool_delete_thin_device(pool->pmd, dev_id);
	if (r)
		DMWARN("Deletion of thin device %s failed.", argv[1]);

	return r;
}

static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
{
	dm_thin_id old_id, new_id;
	int r;

	r = check_arg_count(argc, 3);
	if (r)
		return r;

	if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
		DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
		return -EINVAL;
	}

	if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
		DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
		return -EINVAL;
	}

	r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
	if (r) {
		DMWARN("Failed to change transaction id from %s to %s.",
		       argv[1], argv[2]);
		return r;
	}

	return 0;
}

static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	int r;

	r = check_arg_count(argc, 1);
	if (r)
		return r;

	(void) commit_or_fallback(pool);

	r = dm_pool_reserve_metadata_snap(pool->pmd);
	if (r)
		DMWARN("reserve_metadata_snap message failed.");

	return r;
}

static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
{
	int r;

	r = check_arg_count(argc, 1);
	if (r)
		return r;

	r = dm_pool_release_metadata_snap(pool->pmd);
	if (r)
		DMWARN("release_metadata_snap message failed.");

	return r;
}
2455
/*
 * Messages supported:
 *   create_thin	<dev_id>
 *   create_snap	<dev_id> <origin_id>
 *   delete		<dev_id>
 *   set_transaction_id <current_trans_id> <new_trans_id>
 *   reserve_metadata_snap
 *   release_metadata_snap
 */
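/*
 * For example (pool name and ids are illustrative):
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 *   dmsetup message /dev/mapper/pool 0 "delete 1"
 */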
static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	if (!strcasecmp(argv[0], "create_thin"))
		r = process_create_thin_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "create_snap"))
		r = process_create_snap_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "delete"))
		r = process_delete_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "set_transaction_id"))
		r = process_set_transaction_id_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
		r = process_reserve_metadata_snap_mesg(argc, argv, pool);

	else if (!strcasecmp(argv[0], "release_metadata_snap"))
		r = process_release_metadata_snap_mesg(argc, argv, pool);

	else
		DMWARN("Unrecognised thin pool target message received: %s", argv[0]);

	if (!r)
		(void) commit_or_fallback(pool);

	return r;
}

static void emit_flags(struct pool_features *pf, char *result,
		       unsigned sz, unsigned maxlen)
{
	unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
			 !pf->discard_passdown + (pf->mode == PM_READ_ONLY);
	DMEMIT("%u ", count);

	if (!pf->zero_new_blocks)
		DMEMIT("skip_block_zeroing ");

	if (!pf->discard_enabled)
		DMEMIT("ignore_discard ");

	if (!pf->discard_passdown)
		DMEMIT("no_discard_passdown ");

	if (pf->mode == PM_READ_ONLY)
		DMEMIT("read_only ");
}
2518
/*
 * Status line is:
 *    <transaction id> <used metadata blocks>/<total metadata blocks>
 *    <used data blocks>/<total data blocks> <held metadata root>
 */
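/*
 * An illustrative STATUSTYPE_INFO line (all numbers hypothetical):
 *
 *   0 141/78644 420/2097152 - rw discard_passdown
 */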
static void pool_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	int r;
	unsigned sz = 0;
	uint64_t transaction_id;
	dm_block_t nr_free_blocks_data;
	dm_block_t nr_free_blocks_metadata;
	dm_block_t nr_blocks_data;
	dm_block_t nr_blocks_metadata;
	dm_block_t held_root;
	char buf[BDEVNAME_SIZE];
	char buf2[BDEVNAME_SIZE];
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_pool_mode(pool) == PM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit_or_fallback(pool);

		r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
		if (r) {
			DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
		if (r) {
			DMERR("%s: dm_pool_get_free_block_count returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
		if (r) {
			DMERR("%s: dm_pool_get_data_dev_size returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
		if (r) {
			DMERR("%s: dm_pool_get_metadata_snap returned %d",
			      dm_device_name(pool->pool_md), r);
			goto err;
		}

		DMEMIT("%llu %llu/%llu %llu/%llu ",
		       (unsigned long long)transaction_id,
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
		       (unsigned long long)nr_blocks_data);

		if (held_root)
			DMEMIT("%llu ", held_root);
		else
			DMEMIT("- ");

		if (pool->pf.mode == PM_READ_ONLY)
			DMEMIT("ro ");
		else
			DMEMIT("rw ");

		if (!pool->pf.discard_enabled)
			DMEMIT("ignore_discard");
		else if (pool->pf.discard_passdown)
			DMEMIT("discard_passdown");
		else
			DMEMIT("no_discard_passdown");

		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %s %lu %llu ",
		       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
		       (unsigned long)pool->sectors_per_block,
		       (unsigned long long)pt->low_water_blocks);
		emit_flags(&pt->requested_pf, result, sz, maxlen);
		break;
	}
	return;

err:
	DMEMIT("Error");
}

static int pool_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct pool_c *pt = ti->private;

	return fn(ti, pt->data_dev, 0, ti->len, data);
}

static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		      struct bio_vec *biovec, int max_size)
{
	struct pool_c *pt = ti->private;
	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = pt->data_dev->bdev;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
{
	struct pool *pool = pt->pool;
	struct queue_limits *data_limits;

	limits->max_discard_sectors = pool->sectors_per_block;

	/*
	 * discard_granularity is just a hint, and not enforced.
	 */
	if (pt->adjusted_pf.discard_passdown) {
		data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
		limits->discard_granularity = data_limits->discard_granularity;
	} else
		limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
}

static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with the
	 * pool's blocksize (io_opt is a factor) do not override them.
	 */
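	/*
	 * Illustrative numbers: with 512KB pool blocks (sectors_per_block
	 * == 1024), a stacked io_opt of 1MB (2048 sectors) divides evenly
	 * and is kept, whereas 768KB (1536 sectors) does not and is
	 * overridden below.
	 */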
	if (io_opt_sectors < pool->sectors_per_block ||
	    do_div(io_opt_sectors, pool->sectors_per_block)) {
		blk_limits_io_min(limits, 0);
		blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
	}

	/*
	 * pt->adjusted_pf is a staging area for the actual features to use.
	 * They get transferred to the live pool in bind_control_target()
	 * called from pool_preresume().
	 */
	if (!pt->adjusted_pf.discard_enabled) {
		/*
		 * Must explicitly disallow stacking discard limits otherwise the
		 * block layer will stack them if pool's data device has support.
		 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
		 * user to see that, so make sure to set all discard limits to 0.
		 */
		limits->discard_granularity = 0;
		return;
	}

	disable_passdown_if_not_supported(pt);

	set_discard_limits(pt, limits);
}

static struct target_type pool_target = {
	.name = "thin-pool",
	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
		    DM_TARGET_IMMUTABLE,
	.version = {1, 9, 0},
	.module = THIS_MODULE,
	.ctr = pool_ctr,
	.dtr = pool_dtr,
	.map = pool_map,
	.postsuspend = pool_postsuspend,
	.preresume = pool_preresume,
	.resume = pool_resume,
	.message = pool_message,
	.status = pool_status,
	.merge = pool_merge,
	.iterate_devices = pool_iterate_devices,
	.io_hints = pool_io_hints,
};

/*----------------------------------------------------------------
 * Thin target methods
 *--------------------------------------------------------------*/
static void thin_dtr(struct dm_target *ti)
{
	struct thin_c *tc = ti->private;

	mutex_lock(&dm_thin_pool_table.mutex);

	__pool_dec(tc->pool);
	dm_pool_close_thin_device(tc->td);
	dm_put_device(ti, tc->pool_dev);
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
	kfree(tc);

	mutex_unlock(&dm_thin_pool_table.mutex);
}

/*
 * Thin target parameters:
 *
 * <pool_dev> <dev_id> [origin_dev]
 *
 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
 * dev_id: the internal device identifier
 * origin_dev: a device external to the pool that should act as the origin
 *
 * If the pool device has discards disabled, they get disabled for the thin
 * device as well.
 */
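/*
 * For example (names and length are illustrative), a 1GB thin volume
 * backed by the pool device with internal id 0:
 *
 *   dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0"
 */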
static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;
	struct thin_c *tc;
	struct dm_dev *pool_dev, *origin_dev;
	struct mapped_device *pool_md;

	mutex_lock(&dm_thin_pool_table.mutex);

	if (argc != 2 && argc != 3) {
		ti->error = "Invalid argument count";
		r = -EINVAL;
		goto out_unlock;
	}

	tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc) {
		ti->error = "Out of memory";
		r = -ENOMEM;
		goto out_unlock;
	}

	if (argc == 3) {
		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
		if (r) {
			ti->error = "Error opening origin device";
			goto bad_origin_dev;
		}
		tc->origin_dev = origin_dev;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
	if (r) {
		ti->error = "Error opening pool device";
		goto bad_pool_dev;
	}
	tc->pool_dev = pool_dev;

	if (read_dev_id(argv[1], &tc->dev_id, 0)) {
		ti->error = "Invalid device id";
		r = -EINVAL;
		goto bad_common;
	}

	pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
	if (!pool_md) {
		ti->error = "Couldn't get pool mapped device";
		r = -EINVAL;
		goto bad_common;
	}

	tc->pool = __pool_table_lookup(pool_md);
	if (!tc->pool) {
		ti->error = "Couldn't find pool object";
		r = -EINVAL;
		goto bad_pool_lookup;
	}
	__pool_inc(tc->pool);

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		ti->error = "Couldn't open thin device, Pool is in fail mode";
		r = -EINVAL;	/* don't return success from a failed ctr */
		goto bad_thin_open;
	}

	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
	if (r) {
		ti->error = "Couldn't open thin internal device";
		goto bad_thin_open;
	}

	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
	if (r)
		goto bad_target_max_io_len;

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);

	/* In case the pool supports discards, pass them on. */
	ti->discard_zeroes_data_unsupported = true;
	if (tc->pool->pf.discard_enabled) {
		ti->discards_supported = true;
		ti->num_discard_bios = 1;
		/* Discard bios must be split on a block boundary */
		ti->split_discard_bios = true;
	}

	dm_put(pool_md);

	mutex_unlock(&dm_thin_pool_table.mutex);

	return 0;

bad_target_max_io_len:
	dm_pool_close_thin_device(tc->td);
bad_thin_open:
	__pool_dec(tc->pool);
bad_pool_lookup:
	dm_put(pool_md);
bad_common:
	dm_put_device(ti, tc->pool_dev);
bad_pool_dev:
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
bad_origin_dev:
	kfree(tc);
out_unlock:
	mutex_unlock(&dm_thin_pool_table.mutex);

	return r;
}
2869
static int thin_map(struct dm_target *ti, struct bio *bio)
{
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	return thin_bio_map(ti, bio);
}

static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct list_head work;
	struct dm_thin_new_mapping *m, *tmp;
	struct pool *pool = h->tc->pool;

	if (h->shared_read_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->shared_read_entry, &work);

		spin_lock_irqsave(&pool->lock, flags);
		list_for_each_entry_safe(m, tmp, &work, list) {
			list_del(&m->list);
			m->quiesced = 1;
			__maybe_add_mapping(m);
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	if (h->all_io_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->all_io_entry, &work);
		if (!list_empty(&work)) {
			spin_lock_irqsave(&pool->lock, flags);
			list_for_each_entry_safe(m, tmp, &work, list)
				list_add(&m->list, &pool->prepared_discards);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_worker(pool);
		}
	}

	return 0;
}

static void thin_postsuspend(struct dm_target *ti)
{
	if (dm_noflush_suspending(ti))
		requeue_io((struct thin_c *)ti->private);
}

/*
 * <nr mapped sectors> <highest mapped sector>
 */
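/*
 * For instance (values are hypothetical): a fully-mapped 1GB thin device
 * on a pool with 128-sector blocks has 16384 mapped blocks and reports
 * "2097152 2097151".
 */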
static void thin_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
{
	int r;
	ssize_t sz = 0;
	dm_block_t mapped, highest;
	char buf[BDEVNAME_SIZE];
	struct thin_c *tc = ti->private;

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		DMEMIT("Fail");
		return;
	}

	if (!tc->td)
		DMEMIT("-");
	else {
		switch (type) {
		case STATUSTYPE_INFO:
			r = dm_thin_get_mapped_count(tc->td, &mapped);
			if (r) {
				DMERR("dm_thin_get_mapped_count returned %d", r);
				goto err;
			}

			r = dm_thin_get_highest_mapped_block(tc->td, &highest);
			if (r < 0) {
				DMERR("dm_thin_get_highest_mapped_block returned %d", r);
				goto err;
			}

			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
			if (r)
				DMEMIT("%llu", ((highest + 1) *
						tc->pool->sectors_per_block) - 1);
			else
				DMEMIT("-");
			break;

		case STATUSTYPE_TABLE:
			DMEMIT("%s %lu",
			       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
			       (unsigned long) tc->dev_id);
			if (tc->origin_dev)
				DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
			break;
		}
	}

	return;

err:
	DMEMIT("Error");
}

static int thin_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	sector_t blocks;
	struct thin_c *tc = ti->private;
	struct pool *pool = tc->pool;

	/*
	 * We can't call dm_pool_get_data_dev_size() since that blocks.  So
	 * we follow a more convoluted path through to the pool's target.
	 */
	if (!pool->ti)
		return 0;	/* nothing is bound */

	blocks = pool->ti->len;
	(void) sector_div(blocks, pool->sectors_per_block);
	if (blocks)
		return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);

	return 0;
}

static struct target_type thin_target = {
	.name = "thin",
	.version = {1, 9, 0},
	.module = THIS_MODULE,
	.ctr = thin_ctr,
	.dtr = thin_dtr,
	.map = thin_map,
	.end_io = thin_endio,
	.postsuspend = thin_postsuspend,
	.status = thin_status,
	.iterate_devices = thin_iterate_devices,
};

/*----------------------------------------------------------------*/

static int __init dm_thin_init(void)
{
	int r;

	pool_table_init();

	r = dm_register_target(&thin_target);
	if (r)
		return r;

	r = dm_register_target(&pool_target);
	if (r)
		goto bad_pool_target;

	r = -ENOMEM;

	_new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
	if (!_new_mapping_cache)
		goto bad_new_mapping_cache;

	return 0;

bad_new_mapping_cache:
	dm_unregister_target(&pool_target);
bad_pool_target:
	dm_unregister_target(&thin_target);

	return r;
}

static void dm_thin_exit(void)
{
	dm_unregister_target(&thin_target);
	dm_unregister_target(&pool_target);

	kmem_cache_destroy(_new_mapping_cache);
}

module_init(dm_thin_init);
module_exit(dm_thin_exit);

MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");