{
struct request_queue *q = bdi->unplug_io_data;
- /*
- * devices don't necessarily have an ->unplug_fn defined
- */
- if (q->unplug_fn) {
- blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
- q->rq.count[READ] + q->rq.count[WRITE]);
-
- q->unplug_fn(q);
- }
+ blk_unplug(q);
}
static void blk_unplug_work(struct work_struct *work)

/* ...tail of blk_unplug_timeout(), which schedules the unplug work above: */
kblockd_schedule_work(&q->unplug_work);
}
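+/**
+ * blk_unplug - run a queue's unplug function, if one is defined
+ * @q: the queue to unplug
+ *
+ * Emits a BLK_TA_UNPLUG_IO trace event and invokes ->unplug_fn() when
+ * the device provides one; otherwise this is a no-op.
+ */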
+void blk_unplug(struct request_queue *q)
+{
+ /*
+ * devices don't necessarily have an ->unplug_fn defined
+ */
+ if (q->unplug_fn) {
+ blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+ q->rq.count[READ] + q->rq.count[WRITE]);
+
+ q->unplug_fn(q);
+ }
+}
+EXPORT_SYMBOL(blk_unplug);
+
/**
* blk_start_queue - restart a previously stopped queue
* @q: The &struct request_queue in question
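[Note: the remaining hunks convert the open-coded unplug call sites in md and dm to the new helper. Besides removing the duplicated NULL check, this is a small behavior change in the callers' favor: those sites used to invoke ->unplug_fn() directly with no tracing, so routing them through blk_unplug() makes them emit BLK_TA_UNPLUG_IO events as well. A minimal sketch of the shape of each conversion; the function names here are illustrative, not from the patch:]

	#include <linux/blkdev.h>

	/* Before: callers either NULL-checked the hook themselves or
	 * called it unconditionally, and no trace event was emitted. */
	static void unplug_old_style(struct request_queue *q)
	{
		if (q->unplug_fn)
			q->unplug_fn(q);
	}

	/* After: one NULL-safe call that also emits the trace event. */
	static void unplug_new_style(struct request_queue *q)
	{
		blk_unplug(q);
	}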
prepare_to_wait(&bitmap->overflow_wait, &__wait,
TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&bitmap->lock);
- bitmap->mddev->queue
- ->unplug_fn(bitmap->mddev->queue);
+ blk_unplug(bitmap->mddev->queue);
schedule();
finish_wait(&bitmap->overflow_wait, &__wait);
continue;
struct dm_dev *dd = list_entry(d, struct dm_dev, list);
struct request_queue *q = bdev_get_queue(dd->bdev);
- if (q->unplug_fn)
- q->unplug_fn(q);
+ blk_unplug(q);
}
}
for (i=0; i < mddev->raid_disks; i++) {
struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
- if (r_queue->unplug_fn)
- r_queue->unplug_fn(r_queue);
+ blk_unplug(r_queue);
}
}
* about not overloading the IO subsystem. (things like an
* e2fsck being done on the RAID array should execute fast)
*/
- mddev->queue->unplug_fn(mddev->queue);
+ blk_unplug(mddev->queue);
cond_resched();
currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
* this also signals 'finished resyncing' to md_stop
*/
out:
- mddev->queue->unplug_fn(mddev->queue);
+ blk_unplug(mddev->queue);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- if (r_queue->unplug_fn)
- r_queue->unplug_fn(r_queue);
+ blk_unplug(r_queue);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) {
struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
- if (r_queue->unplug_fn)
- r_queue->unplug_fn(r_queue);
+ blk_unplug(r_queue);
}
}
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- if (r_queue->unplug_fn)
- r_queue->unplug_fn(r_queue);
+ blk_unplug(r_queue);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- if (r_queue->unplug_fn)
- r_queue->unplug_fn(r_queue);
+ blk_unplug(r_queue);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- if (r_queue->unplug_fn)
- r_queue->unplug_fn(r_queue);
+ blk_unplug(r_queue);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
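[Note: the repeated hunks above apply the same one-line conversion inside the unplug loops of several md personalities; the file boundaries are not visible in this excerpt. For context, a reconstruction of the loop surrounding each hunk, using raid1-era naming (mddev_t, conf_t, mirrors[] and the Faulty test are illustrative, not part of this patch). The nr_pending increment pins the rdev because the loop drops the RCU read lock around the unplug call, and the pin keeps the rdev from being freed in the meantime:]

	static void unplug_slaves_sketch(mddev_t *mddev, conf_t *conf)
	{
		int i;

		rcu_read_lock();
		for (i = 0; i < mddev->raid_disks; i++) {
			mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);

			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *r_queue =
					bdev_get_queue(rdev->bdev);

				/* pin the rdev: we are about to leave the
				 * RCU read-side critical section */
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();

				blk_unplug(r_queue);

				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
		rcu_read_unlock();
	}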
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
extern int blk_verify_command(unsigned char *, int);
+extern void blk_unplug(struct request_queue *q);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
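[Note: the prototype above, together with the EXPORT_SYMBOL() in the block core, makes the helper available to modular users such as md and dm. A minimal hypothetical module-side caller; the function name is illustrative:]

	#include <linux/blkdev.h>

	/* Kick any plugged I/O on the queue behind a block device. */
	static void example_kick_queue(struct block_device *bdev)
	{
		struct request_queue *q = bdev_get_queue(bdev);

		blk_unplug(q);	/* safe even if no ->unplug_fn is set */
	}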