 	dm_set_mdptr(hc->md, NULL);
 	mutex_unlock(&dm_hash_cells_mutex);
 
-	table = dm_get_table(hc->md);
+	table = dm_get_live_table(hc->md);
 	if (table) {
 		dm_table_event(table);
 		dm_table_put(table);
 	/*
 	 * Wake up any dm event waiters.
 	 */
-	table = dm_get_table(hc->md);
+	table = dm_get_live_table(hc->md);
 	if (table) {
 		dm_table_event(table);
 		dm_table_put(table);
 	param->event_nr = dm_get_event_nr(md);
 
-	table = dm_get_table(md);
+	table = dm_get_live_table(md);
 	if (table) {
 		param->flags |= DM_ACTIVE_PRESENT_FLAG;
 		param->target_count = dm_table_get_num_targets(table);
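
The dm_table_event() calls in the hunks above pair with this event counter: userspace records event_nr and sleeps until it changes. A minimal sketch of the waiter side, assuming the dm_wait_event() helper from dm.c (it is not part of this patch):

	/*
	 * Sketch only: block until the device's event counter advances
	 * past the value userspace last saw, i.e. until some path
	 * calls dm_table_event().
	 */
	if (dm_wait_event(md, param->event_nr))
		return -ERESTARTSYS;	/* interrupted by a signal */
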
 	if (r)
 		goto out;
 
-	table = dm_get_table(md);
+	table = dm_get_live_table(md);
 	if (table) {
 		retrieve_status(table, param, param_size);
 		dm_table_put(table);
 	if (r)
 		goto out;
 
-	table = dm_get_table(md);
+	table = dm_get_live_table(md);
 	if (table) {
 		retrieve_deps(table, param, param_size);
 		dm_table_put(table);
 	if (r)
 		goto out;
 
-	table = dm_get_table(md);
+	table = dm_get_live_table(md);
 	if (table) {
 		retrieve_status(table, param, param_size);
 		dm_table_put(table);
 		goto out;
 	}
 
-	table = dm_get_table(md);
+	table = dm_get_live_table(md);
 	if (!table)
 		goto out_argv;
 			unsigned int cmd, unsigned long arg)
 {
 	struct mapped_device *md = bdev->bd_disk->private_data;
-	struct dm_table *map = dm_get_table(md);
+	struct dm_table *map = dm_get_live_table(md);
 	struct dm_target *tgt;
 	int r = -ENOTTY;
  * function to access the md->map field, and make sure they call
  * dm_table_put() when finished.
  */
-struct dm_table *dm_get_table(struct mapped_device *md)
+struct dm_table *dm_get_live_table(struct mapped_device *md)
 {
 	struct dm_table *t;
 	unsigned long flags;
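
The comment above spells out the contract every call site in this patch follows: take a counted reference on the live table, use it, then drop it with dm_table_put(). A minimal caller sketch; the helper name is hypothetical, while the dm calls are the ones visible in these hunks:

	/* Hypothetical helper showing the get/use/put pattern. */
	static unsigned example_live_target_count(struct mapped_device *md)
	{
		struct dm_table *t = dm_get_live_table(md);	/* may be NULL */
		unsigned n = 0;

		if (t) {
			n = dm_table_get_num_targets(t);
			dm_table_put(t);	/* release the reference */
		}
		return n;
	}
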
 	struct clone_info ci;
 	int error = 0;
 
-	ci.map = dm_get_table(md);
+	ci.map = dm_get_live_table(md);
 	if (unlikely(!ci.map)) {
 		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
 			bio_io_error(bio);
 			 struct bio_vec *biovec)
 {
 	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_table(md);
+	struct dm_table *map = dm_get_live_table(md);
 	struct dm_target *ti;
 	sector_t max_sectors;
 	int max_size = 0;
 static void dm_request_fn(struct request_queue *q)
 {
 	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_table(md);
+	struct dm_table *map = dm_get_live_table(md);
 	struct dm_target *ti;
 	struct request *rq, *clone;
 {
 	int r;
 	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_table(md);
+	struct dm_table *map = dm_get_live_table(md);
 
 	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
 		r = 1;
 static void dm_unplug_all(struct request_queue *q)
 {
 	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_table(md);
+	struct dm_table *map = dm_get_live_table(md);
 
 	if (map) {
 		if (dm_request_based(md))
 	struct dm_table *map;
 
 	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
-		map = dm_get_table(md);
+		map = dm_get_live_table(md);
 		if (map) {
 			/*
 			 * Request-based dm cares about only own queue for
 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
 
 	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
-		map = dm_get_table(md);
+		map = dm_get_live_table(md);
 		idr_replace(&_minor_idr, MINOR_ALLOCED,
 			    MINOR(disk_devt(dm_disk(md))));
 		set_bit(DMF_FREEING, &md->flags);
 static int dm_rq_barrier(struct mapped_device *md)
 {
 	int i, j;
-	struct dm_table *map = dm_get_table(md);
+	struct dm_table *map = dm_get_live_table(md);
 	unsigned num_targets = dm_table_get_num_targets(map);
 	struct dm_target *ti;
 	struct request *clone;
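
The declarations in this hunk hint at the loop that follows in dm_rq_barrier(); a hedged sketch of that shape, assuming dm_table_get_target() (a real dm helper, though this reconstruction is not from the patch):

	/*
	 * Sketch only: visit every target of the live table; the real
	 * barrier code also clones and dispatches flush requests.
	 */
	for (i = 0; i < num_targets; i++) {
		ti = dm_table_get_target(map, i);
		/* ... clone and queue a flush request per target ... */
	}
	dm_table_put(map);	/* pair with dm_get_live_table() above */
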
 		goto out_unlock;
 	}
 
-	map = dm_get_table(md);
+	map = dm_get_live_table(md);
 
 	/*
 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
 	if (!dm_suspended(md))
 		goto out;
 
-	map = dm_get_table(md);
+	map = dm_get_live_table(md);
 	if (!map || !dm_table_get_size(map))
 		goto out;
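
Past this size check the resume path hands the live table to the targets. A sketch of the likely continuation, assuming dm_table_resume_targets() (a real dm helper; its placement here is a reconstruction):

	r = dm_table_resume_targets(map);	/* let each target resume */
	if (r)
		goto out;	/* device stays suspended on failure */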