struct list_head *tmp, *next;
list_for_each_safe(tmp, next, devices) {
- struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+ struct dm_dev_internal *dd =
+ list_entry(tmp, struct dm_dev_internal, list);
kfree(dd);
}
}
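
For orientation before the hunks that follow: this patch splits the old
struct dm_dev into a published part and a core-private wrapper. A minimal
sketch, with the field layout inferred from the accesses in this diff
rather than quoted from the headers:

/*
 * Published part: the fields a target is allowed to see and touch.
 * Layout inferred from the accesses in this patch.
 */
struct dm_dev {
	struct block_device *bdev;
	int mode;
	char name[16];
};

/*
 * Core-private wrapper: list linkage and the reference count stay
 * hidden inside the dm core.
 */
struct dm_dev_internal {
	struct list_head list;	/* linked into the table's device list */
	atomic_t count;		/* shared-use reference count */
	struct dm_dev dm_dev;	/* embedded published part */
};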
/*
* See if we've already got a device in the list.
*/
-static struct dm_dev *find_device(struct list_head *l, dev_t dev)
+static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
list_for_each_entry (dd, l, list)
- if (dd->bdev->bd_dev == dev)
+ if (dd->dm_dev.bdev->bd_dev == dev)
return dd;
	return NULL;
}
/*
* Open a device so we can use it as a map destination.
*/
-static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
+static int open_dev(struct dm_dev_internal *d, dev_t dev,
+ struct mapped_device *md)
{
static char *_claim_ptr = "I belong to device-mapper";
struct block_device *bdev;
int r;
- BUG_ON(d->bdev);
+ BUG_ON(d->dm_dev.bdev);
- bdev = open_by_devnum(dev, d->mode);
+ bdev = open_by_devnum(dev, d->dm_dev.mode);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
if (r)
blkdev_put(bdev);
else
- d->bdev = bdev;
+ d->dm_dev.bdev = bdev;
return r;
}
/*
* Close a device that we've been using.
*/
-static void close_dev(struct dm_dev *d, struct mapped_device *md)
+static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
- if (!d->bdev)
+ if (!d->dm_dev.bdev)
return;
- bd_release_from_disk(d->bdev, dm_disk(md));
- blkdev_put(d->bdev);
- d->bdev = NULL;
+ bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
+ blkdev_put(d->dm_dev.bdev);
+ d->dm_dev.bdev = NULL;
}
/*
 * If possible, this checks that an area of a destination device is valid.
*/
-static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
+static int check_device_area(struct dm_dev_internal *dd, sector_t start,
+ sector_t len)
{
- sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+ sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
if (!dev_size)
		return 1;

	return ((start < dev_size) && (len <= (dev_size - start)));
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
* device.
*/
-static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
+static int upgrade_mode(struct dm_dev_internal *dd, int new_mode,
+ struct mapped_device *md)
{
int r;
- struct dm_dev dd_copy;
- dev_t dev = dd->bdev->bd_dev;
+ struct dm_dev_internal dd_copy;
+ dev_t dev = dd->dm_dev.bdev->bd_dev;
dd_copy = *dd;
- dd->mode |= new_mode;
- dd->bdev = NULL;
+ dd->dm_dev.mode |= new_mode;
+ dd->dm_dev.bdev = NULL;
r = open_dev(dd, dev, md);
if (!r)
		close_dev(&dd_copy, md);
	else
		*dd = dd_copy;

	return r;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      int mode, struct dm_dev **result)
{
int r;
dev_t uninitialized_var(dev);
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
unsigned int major, minor;
BUG_ON(!t);
if (!dd)
return -ENOMEM;
- dd->mode = mode;
- dd->bdev = NULL;
+ dd->dm_dev.mode = mode;
+ dd->dm_dev.bdev = NULL;
if ((r = open_dev(dd, dev, t->md))) {
kfree(dd);
return r;
}
- format_dev_t(dd->name, dev);
+ format_dev_t(dd->dm_dev.name, dev);
atomic_set(&dd->count, 0);
list_add(&dd->list, &t->devices);
- } else if (dd->mode != (mode | dd->mode)) {
+ } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
			return r;
	}

	atomic_inc(&dd->count);

if (!check_device_area(dd, start, len)) {
DMWARN("device %s too small for target", path);
- dm_put_device(ti, dd);
+ dm_put_device(ti, &dd->dm_dev);
return -EINVAL;
}
- *result = dd;
+ *result = &dd->dm_dev;
return 0;
}
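
After this hunk, code outside the dm core only ever handles struct dm_dev
pointers. A hypothetical target constructor (illustrative names, not part
of this patch) would use the published interface like this:

/*
 * Hypothetical target constructor: dm_get_device() fills in a
 * struct dm_dev *; the dm_dev_internal wrapper never escapes the core.
 */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_dev *dev;
	int r;

	if (argc < 1) {
		ti->error = "missing device path";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "device lookup failed";
		return r;
	}

	ti->private = dev;	/* paired with dm_put_device() in the dtr */
	return 0;
}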
/*
 * Decrement a device's use count and remove it if necessary.
*/
-void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
+void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
+ struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
+ dm_dev);
+
if (atomic_dec_and_test(&dd->count)) {
close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}
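
The container_of() above works only because every struct dm_dev the core
hands out is embedded in a dm_dev_internal. A hypothetical helper (not in
the patch) makes that recovery explicit:

/*
 * Hypothetical helper: map a published dm_dev back to its enclosing
 * wrapper. Safe only because the dm core never publishes a struct
 * dm_dev that is not embedded in a dm_dev_internal.
 */
static inline struct dm_dev_internal *to_dm_dev_internal(struct dm_dev *d)
{
	return container_of(d, struct dm_dev_internal, dm_dev);
}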
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
int r = 0;
list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
r |= bdi_congested(&q->backing_dev_info, bdi_bits);
	}

	return r;
}
void dm_table_unplug_all(struct dm_table *t)
{
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
blk_unplug(q);
	}
}