This prevents new requests from being queued while __dm_destroy() is in
progress.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org
 static void __dm_destroy(struct mapped_device *md, bool wait)
 {
+	struct request_queue *q = dm_get_md_queue(md);
 	struct dm_table *map;
 	int srcu_idx;

 	set_bit(DMF_FREEING, &md->flags);
 	spin_unlock(&_minor_lock);

+	spin_lock_irq(q->queue_lock);
+	queue_flag_set(QUEUE_FLAG_DYING, q);
+	spin_unlock_irq(q->queue_lock);
+
 	if (dm_request_based(md) && md->kworker_task)
 		flush_kthread_worker(&md->kworker);
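
For context (not part of the patch): queue_flag_set() must be called with
q->queue_lock held, hence the spin_lock_irq()/spin_unlock_irq() pair above.
Once QUEUE_FLAG_DYING is set, submission paths can detect the dying queue
via the existing blk_queue_dying() test and refuse new work. A minimal
illustrative sketch of such a check is below; the helper name
example_md_may_queue() is made up for illustration and is not part of the
kernel or of this patch.

#include <linux/blkdev.h>
#include <linux/errno.h>

/*
 * Illustrative only: reject new requests once __dm_destroy() has marked
 * the queue as dying. blk_queue_dying() is the block layer's existing
 * test for QUEUE_FLAG_DYING; this wrapper function is hypothetical.
 */
static int example_md_may_queue(struct request_queue *q)
{
	if (blk_queue_dying(q))
		return -ENODEV;	/* device is being destroyed */

	return 0;
}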