* blk_queue_ordered - does this queue support ordered writes
* @q: the request queue
* @ordered: one of QUEUE_ORDERED_*
- * @prepare_flush_fn: rq setup helper for cache flush ordered writes
*
* Description:
* For journalled file systems, doing ordered writes on a commit
* block instead of explicitly doing wait_on_buffer (which is bad
* for performance) can be a big win. Block drivers supporting this
* feature should call this function and indicate so.
*
**/
-int blk_queue_ordered(struct request_queue *q, unsigned ordered,
- prepare_flush_fn *prepare_flush_fn)
+int blk_queue_ordered(struct request_queue *q, unsigned ordered)
{
if (ordered != QUEUE_ORDERED_NONE &&
    ordered != QUEUE_ORDERED_DRAIN &&
    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
    ordered != QUEUE_ORDERED_DRAIN_FUA &&
    ordered != QUEUE_ORDERED_TAG &&
    ordered != QUEUE_ORDERED_TAG_FLUSH &&
    ordered != QUEUE_ORDERED_TAG_FUA) {
	printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
	return -EINVAL;
}

q->ordered = ordered;
q->next_ordered = ordered;
- q->prepare_flush_fn = prepare_flush_fn;
return 0;
}
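/*
 * A minimal sketch (not part of the patch) of the new calling
 * convention: the trailing prepare_flush_fn argument is simply gone.
 * "mydrv" is a hypothetical driver name used for illustration.
 */
static int mydrv_init_queue(struct request_queue *q)
{
	/* before this patch: blk_queue_ordered(q, mode, mydrv_prepare_flush); */
	return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
}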
rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
rq->rq_disk = q->bar_rq.rq_disk;
rq->end_io = end_io;
- if (q->prepare_flush_fn)
- q->prepare_flush_fn(q, rq);
elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
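/*
 * Since the flush request above is now marked with REQ_FLUSH by the
 * block layer itself, a driver that used to build its cache-flush
 * command in prepare_flush_fn can instead test that bit in its
 * request handler. A minimal sketch; mydrv_issue_flush() and
 * mydrv_issue_rw() are hypothetical driver helpers.
 */
static void mydrv_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (rq->cmd_flags & REQ_FLUSH)
			mydrv_issue_flush(rq);	/* emit cache-flush command */
		else
			mydrv_issue_rw(rq);	/* normal read/write path */
	}
}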
if (!brd->brd_queue)
goto out_free_dev;
blk_queue_make_request(brd->brd_queue, brd_make_request);
- blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
+ blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG);
blk_queue_max_hw_sectors(brd->brd_queue, 1024);
blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
lo->lo_queue->unplug_fn = loop_unplug;
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
- blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN, NULL);
+ blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN);
set_capacity(lo->lo_disk, size);
bd_set_size(bdev, size << 9);
blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
blk_queue_prep_rq(q, blk_queue_start_tag);
- blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, NULL);
+ blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
disk->queue = q;
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_logical_block_size(queue, dev->blk_size);
- blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, NULL);
+ blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH);
blk_queue_max_segments(queue, -1);
blk_queue_max_segment_size(queue, dev->bounce_size);
/* If barriers are supported, tell block layer that queue is ordered */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
- blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, NULL);
+ blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
- blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);
+ blk_queue_ordered(q, QUEUE_ORDERED_TAG);
/* If disk is read-only in the host, the guest should obey */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
int err;
err = blk_queue_ordered(info->rq,
- info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE,
- NULL);
+ info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE);
if (err)
return err;
} else
ordered = QUEUE_ORDERED_DRAIN;
- blk_queue_ordered(drive->queue, ordered, NULL);
+ blk_queue_ordered(drive->queue, ordered);
}
ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
blk_queue_softirq_done(md->queue, dm_softirq_done);
blk_queue_prep_rq(md->queue, dm_prep_fn);
blk_queue_lld_busy(md->queue, dm_lld_busy);
- blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH, NULL);
+ blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
md->disk = alloc_disk(1);
if (!md->disk)
mq->req = NULL;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
- blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
+ blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
#ifdef CONFIG_MMC_BLOCK_BOUNCE
*/
blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
- blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
+ blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN);
}
/*
else
ordered = QUEUE_ORDERED_DRAIN;
- blk_queue_ordered(sdkp->disk->queue, ordered, NULL);
+ blk_queue_ordered(sdkp->disk->queue, ordered);
set_capacity(disk, sdkp->capacity);
kfree(buffer);
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
struct bio_vec *);
-typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
unprep_rq_fn *unprep_rq_fn;
unplug_fn *unplug_fn;
merge_bvec_fn *merge_bvec_fn;
- prepare_flush_fn *prepare_flush_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
+extern int blk_queue_ordered(struct request_queue *, unsigned);
extern bool blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);