unsigned queue_io; /* Must we queue all I/O? */
unsigned queue_if_no_path; /* Queue I/O if last path fails? */
- unsigned suspended; /* Has dm core suspended our I/O? */
+ unsigned saved_queue_if_no_path;/* Saved state during suspension */
struct work_struct process_queued_ios;
struct bio_list queued_ios;
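For orientation, the fields this patch touches live together in struct multipath roughly as sketched below. This is only a sketch: the lock, nr_valid_paths and queue_size members are pulled in from the other hunks of this patch, the field order is approximate, and the real structure has further members that are omitted here.

struct multipath {
	spinlock_t lock;		/* Guards the flags and queue below */

	/* ... path-group state omitted ... */

	unsigned nr_valid_paths;	/* Usable paths remaining */

	unsigned queue_io;		/* Must we queue all I/O? */
	unsigned queue_if_no_path;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path;/* Saved state during suspension */

	struct work_struct process_queued_ios;
	struct bio_list queued_ios;
	unsigned queue_size;		/* Bios currently on queued_ios */

	/* ... remaining members omitted ... */
};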
m->queue_size--;
if ((pgpath && m->queue_io) ||
- (!pgpath && m->queue_if_no_path && !m->suspended)) {
+ (!pgpath && m->queue_if_no_path)) {
/* Queue for the daemon to resubmit */
bio_list_add(&m->queued_ios, bio);
m->queue_size++;
spin_lock_irqsave(&m->lock, flags);
+ m->saved_queue_if_no_path = m->queue_if_no_path;
m->queue_if_no_path = queue_if_no_path;
if (!m->queue_if_no_path)
queue_work(kmultipathd, &m->process_queued_ios);
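Pulling that hunk back into its function: after the change the helper reads roughly as below. The lines between the lock and the queue_work call are the ones shown above; the signature, the flags variable, the unlock and the return value are reconstructed from context and may differ slightly from the actual source.

static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	/* Remember the old policy so multipath_resume() can restore it. */
	m->saved_queue_if_no_path = m->queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;

	/* Queueing just got switched off: let the daemon drain queued_ios. */
	if (!m->queue_if_no_path)
		queue_work(kmultipathd, &m->process_queued_ios);

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

Saving the old value unconditionally is what lets presuspend reuse this helper instead of needing its own suspended flag.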
pgpath = m->current_pgpath;
if ((pgpath && m->queue_io) ||
- (!pgpath && m->queue_if_no_path && !m->suspended))
+ (!pgpath && m->queue_if_no_path))
must_queue = 1;
init_required = m->pg_init_required;
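The same two-part test now appears in both hunks above: once in the mapping path and once in the daemon that resubmits queued bios. Purely for illustration (the patch keeps it open-coded), the decision can be read as the hypothetical predicate below; __must_queue_io is a name invented here, not something the patch adds.

/*
 * Hypothetical helper showing the queueing test after this change:
 * queue when the chosen path group wants all I/O queued, or when no
 * path is usable but queue_if_no_path is set.  The old
 * "&& !m->suspended" clause is gone, so suspension by itself no
 * longer turns queued I/O into errors.  Caller holds m->lock.
 */
static int __must_queue_io(struct multipath *m, struct pgpath *pgpath)
{
	return (pgpath && m->queue_io) ||
	       (!pgpath && m->queue_if_no_path);
}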
spin_lock(&m->lock);
if (!m->nr_valid_paths) {
- if (!m->queue_if_no_path || m->suspended) {
+ if (!m->queue_if_no_path) {
spin_unlock(&m->lock);
return -EIO;
} else {
/*
* Suspend can't complete until all the I/O is processed so if
- * the last path failed we will now error any queued I/O.
+ * the last path fails we must error any remaining I/O.
+ * Note that if the freeze_bdev fails while suspending, the
+ * queue_if_no_path state is lost - userspace should reset it.
*/
static void multipath_presuspend(struct dm_target *ti)
{
struct multipath *m = (struct multipath *) ti->private;
- unsigned long flags;
- spin_lock_irqsave(&m->lock, flags);
- m->suspended = 1;
- if (m->queue_if_no_path)
- queue_work(kmultipathd, &m->process_queued_ios);
- spin_unlock_irqrestore(&m->lock, flags);
+ queue_if_no_path(m, 0);
}
+/*
+ * Restore the queue_if_no_path setting.
+ */
static void multipath_resume(struct dm_target *ti)
{
struct multipath *m = (struct multipath *) ti->private;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- m->suspended = 0;
+ m->queue_if_no_path = m->saved_queue_if_no_path;
spin_unlock_irqrestore(&m->lock, flags);
}
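Taken together, presuspend and resume now bracket a suspend with a save/restore of the policy flag instead of a separate suspended bit. Note the asymmetry: presuspend goes through the queue_if_no_path() helper, so the current policy is saved as a side effect and queued I/O gets flushed, while resume writes the flag back directly under the lock, since calling the helper again would clobber the saved value. The toy program below is a stand-alone model of that dance, ordinary userspace C rather than kernel code, with locking and the actual I/O flushing left out; it only shows that the queue_if_no_path setting survives a suspend/resume cycle.

#include <assert.h>

/* Minimal model of the two fields involved. */
struct toy_multipath {
	unsigned queue_if_no_path;
	unsigned saved_queue_if_no_path;
};

/* Mirrors queue_if_no_path(): save the old policy, then override it. */
static void toy_queue_if_no_path(struct toy_multipath *m, unsigned value)
{
	m->saved_queue_if_no_path = m->queue_if_no_path;
	m->queue_if_no_path = value;
}

/* Mirrors multipath_presuspend(): stop queueing so queued I/O can drain. */
static void toy_presuspend(struct toy_multipath *m)
{
	toy_queue_if_no_path(m, 0);
}

/* Mirrors multipath_resume(): put the saved policy back. */
static void toy_resume(struct toy_multipath *m)
{
	m->queue_if_no_path = m->saved_queue_if_no_path;
}

int main(void)
{
	struct toy_multipath m = { .queue_if_no_path = 1 };

	toy_presuspend(&m);
	assert(m.queue_if_no_path == 0);	/* I/O now errors instead of queueing */

	toy_resume(&m);
	assert(m.queue_if_no_path == 1);	/* the policy survived the suspend */

	return 0;
}

As the comment above multipath_presuspend warns, this save/restore is best-effort: if the suspend fails partway (e.g. freeze_bdev fails), the setting can be lost and userspace has to re-enable queue_if_no_path itself.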