 	return r;
 }
 
-int dm_table_any_busy_target(struct dm_table *t)
-{
-	unsigned i;
-	struct dm_target *ti;
-
-	for (i = 0; i < t->num_targets; i++) {
-		ti = t->targets + i;
-		if (ti->type->busy && ti->type->busy(ti))
-			return 1;
-	}
-
-	return 0;
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
 	return t->md;
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ ... @@
 	dm_put_live_table(md, srcu_idx);
 }
 
-static int dm_lld_busy(struct request_queue *q)
-{
-	int r;
-	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_live_table_fast(md);
-
-	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
-		r = 1;
-	else
-		r = dm_table_any_busy_target(map);
-
-	dm_put_live_table_fast(md);
-
-	return r;
-}
-
 static int dm_any_congested(void *congested_data, int bdi_bits)
 {
 	int r = bdi_bits;
@@ ... @@
 	dm_init_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
 	blk_queue_prep_rq(md->queue, dm_prep_fn);
-	blk_queue_lld_busy(md->queue, dm_lld_busy);
 
 	/* Also initialize the request-based DM worker thread */
 	init_kthread_worker(&md->kworker);
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ ... @@
 void dm_table_postsuspend_targets(struct dm_table *t);
 int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
-int dm_table_any_busy_target(struct dm_table *t);
 unsigned dm_table_get_type(struct dm_table *t);
 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
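
For context on what the patch removes: dm_lld_busy() answered the block layer's lld_busy query by asking every target in the live table whether its ->busy hook (implemented, for example, by dm-multipath) reported congestion. Below is a minimal, self-contained userspace sketch of that iteration, not the kernel code itself: the struct definitions, toy_target_busy() and the backing_device_busy field are simplified stand-ins invented for illustration; only the loop mirrors the removed dm_table_any_busy_target().

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's dm_target / target_type / dm_table. */
struct dm_target;

struct target_type {
	/* Optional per-target congestion hook, as consulted by the removed code. */
	int (*busy)(struct dm_target *ti);
};

struct dm_target {
	struct target_type *type;
	bool backing_device_busy;	/* toy state for the demo only */
};

struct dm_table {
	unsigned num_targets;
	struct dm_target *targets;
};

/* Mirrors the removed dm_table_any_busy_target(): report 1 as soon as any
 * target whose type implements ->busy says it cannot accept more work. */
static int table_any_busy_target(struct dm_table *t)
{
	unsigned i;
	struct dm_target *ti;

	for (i = 0; i < t->num_targets; i++) {
		ti = t->targets + i;
		if (ti->type->busy && ti->type->busy(ti))
			return 1;
	}

	return 0;
}

/* Toy ->busy implementation standing in for a real target's hook. */
static int toy_target_busy(struct dm_target *ti)
{
	return ti->backing_device_busy ? 1 : 0;
}

int main(void)
{
	struct target_type toy_type = { .busy = toy_target_busy };
	struct dm_target targets[2] = {
		{ .type = &toy_type, .backing_device_busy = false },
		{ .type = &toy_type, .backing_device_busy = true },
	};
	struct dm_table table = { .num_targets = 2, .targets = targets };

	/* Prints "busy: 1" because the second target reports congestion. */
	printf("busy: %d\n", table_any_busy_target(&table));
	return 0;
}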