/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"
/*
 * Cache flushing for ordered writes handling
 */
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
        if (!q->ordseq)
                return 0;
        return 1 << ffz(q->ordseq);
}
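/*
 * Map a request to its stage in the ordered sequence: the internal pre-flush,
 * bar and post-flush requests have dedicated stages, other requests are
 * ordered by draining or, if they carry the next barrier color, are
 * considered past the current sequence.
 */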
unsigned blk_ordered_req_seq(struct request *rq)
{
        struct request_queue *q = rq->q;

        BUG_ON(q->ordseq == 0);

        if (rq == &q->pre_flush_rq)
                return QUEUE_ORDSEQ_PREFLUSH;
        if (rq == &q->bar_rq)
                return QUEUE_ORDSEQ_BAR;
        if (rq == &q->post_flush_rq)
                return QUEUE_ORDSEQ_POSTFLUSH;

        /*
         * !fs requests don't need to follow barrier ordering.  Always
         * put them at the front.  This fixes the following deadlock.
         *
         * http://thread.gmane.org/gmane.linux.kernel/537473
         */
        if (rq->cmd_type != REQ_TYPE_FS)
                return QUEUE_ORDSEQ_DRAIN;

        if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
            (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
                return QUEUE_ORDSEQ_DRAIN;
        else
                return QUEUE_ORDSEQ_DONE;
}
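/*
 * Record completion of one stage of the ordered sequence.  Returns true once
 * the whole sequence is done, at which point the original barrier request is
 * ended with the first error seen (if any).
 */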
bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
        struct request *rq;

        if (error && !q->orderr)
                q->orderr = error;

        BUG_ON(q->ordseq & seq);
        q->ordseq |= seq;

        if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
                return false;

        /*
         * Okay, sequence complete.
         */
        q->ordseq = 0;
        rq = q->orig_bar_rq;
        __blk_end_request_all(rq, q->orderr);
        return true;
}
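/*
 * Completion callbacks for the internal flush/barrier requests: each one
 * notifies the elevator and then marks its stage of the ordered sequence
 * complete.
 */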
static void pre_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}
static void bar_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}
static void post_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}
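/*
 * Set up either the pre-flush or the post-flush proxy request and insert it
 * at the front of the dispatch queue.
 */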
static void queue_flush(struct request_queue *q, unsigned which)
{
        struct request *rq;
        rq_end_io_fn *end_io;

        if (which == QUEUE_ORDERED_DO_PREFLUSH) {
                rq = &q->pre_flush_rq;
                end_io = pre_flush_end_io;
        } else {
                rq = &q->post_flush_rq;
                end_io = post_flush_end_io;
        }

        blk_rq_init(q, rq);
        rq->cmd_type = REQ_TYPE_FS;
        rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
        rq->rq_disk = q->orig_bar_rq->rq_disk;
        rq->end_io = end_io;

        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
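/*
 * Start a new ordered sequence for the barrier request in *rqp: queue the
 * pre-flush, bar and post-flush stages as required by q->ordered and point
 * *rqp at the request that should be dispatched first (or NULL if the queue
 * must drain first).
 */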
static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
        struct request *rq = *rqp;
        unsigned skip = 0;

        q->orderr = 0;
        q->ordered = q->next_ordered;
        q->ordseq |= QUEUE_ORDSEQ_STARTED;

        /*
         * For an empty barrier, there's no actual BAR request, which
         * in turn makes POSTFLUSH unnecessary.  Mask them off.
         */
        if (!blk_rq_sectors(rq))
                q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
                                QUEUE_ORDERED_DO_POSTFLUSH);

        /* stash away the original request */
        blk_dequeue_request(rq);
        q->orig_bar_rq = rq;
        rq = NULL;

        /*
         * Queue ordered sequence.  As we stack them at the head, we
         * need to queue in reverse order.  Note that we rely on that
         * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
         * request gets in between the ordered sequence.
         */
        if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
                queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
                rq = &q->post_flush_rq;
        } else
                skip |= QUEUE_ORDSEQ_POSTFLUSH;

        if (q->ordered & QUEUE_ORDERED_DO_BAR) {
                rq = &q->bar_rq;

                /* initialize proxy request and queue it */
                blk_rq_init(q, rq);
                if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
                        rq->cmd_flags |= REQ_WRITE;
                if (q->ordered & QUEUE_ORDERED_DO_FUA)
                        rq->cmd_flags |= REQ_FUA;
                init_request_from_bio(rq, q->orig_bar_rq->bio);
                rq->end_io = bar_end_io;

                elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
        } else
                skip |= QUEUE_ORDSEQ_BAR;

        if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
                queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
                rq = &q->pre_flush_rq;
        } else
                skip |= QUEUE_ORDSEQ_PREFLUSH;

        if (queue_in_flight(q))
                rq = NULL;
        else
                skip |= QUEUE_ORDSEQ_DRAIN;

        *rqp = rq;

        /*
         * Complete skipped sequences.  If whole sequence is complete,
         * return false to tell elevator that this request is gone.
         */
        return !blk_ordered_complete_seq(q, skip, 0);
}
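/*
 * Hook called by the elevator for each request about to be dispatched.
 * Starts an ordered sequence when a barrier request is seen and enforces
 * ordering while a sequence is in progress.  Returns false if the request
 * has been consumed; *rqp is set to NULL if nothing may be dispatched yet.
 */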
bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
        struct request *rq = *rqp;
        const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
                                (rq->cmd_flags & REQ_HARDBARRIER);

        if (!q->ordseq) {
                if (!is_barrier)
                        return true;

                if (q->next_ordered != QUEUE_ORDERED_NONE)
                        return start_ordered(q, rqp);
                else {
                        /*
                         * Queue ordering not supported.  Terminate
                         * with prejudice.
                         */
                        blk_dequeue_request(rq);
                        __blk_end_request_all(rq, -EOPNOTSUPP);
                        *rqp = NULL;
                        return false;
                }
        }

        /*
         * Ordered sequence in progress
         */

        /* Special requests are not subject to ordering rules. */
        if (rq->cmd_type != REQ_TYPE_FS &&
            rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
                return true;

        /* Ordered by draining.  Wait for turn. */
        WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
        if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
                *rqp = NULL;

        return true;
}
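/*
 * Completion handler for the empty barrier bio used by blkdev_issue_flush():
 * records -EOPNOTSUPP and IO errors in the bio flags, then wakes the waiter
 * if one was set up.
 */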
static void bio_end_empty_barrier(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }
        if (bio->bi_private)
                complete(bio->bi_private);
        bio_put(bio);
}
/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  If the WAIT flag is not passed, the caller can only verify
 *    that the request was queued for later handling, not whether it
 *    completed successfully.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                sector_t *error_sector, unsigned long flags)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q;
        struct bio *bio;
        int ret = 0;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        /*
         * some block devices may not have their queue correctly set up here
         * (e.g. loop device without a backing file) and so issuing a flush
         * here will panic. Ensure there is a request function before issuing
         * the flush.
         */
        if (!q->make_request_fn)
                return -ENXIO;

        bio = bio_alloc(gfp_mask, 0);
        bio->bi_end_io = bio_end_empty_barrier;
        bio->bi_bdev = bdev;
        if (test_bit(BLKDEV_WAIT, &flags))
                bio->bi_private = &wait;

        bio_get(bio);
        submit_bio(WRITE_BARRIER, bio);
        if (test_bit(BLKDEV_WAIT, &flags)) {
                wait_for_completion(&wait);
                /*
                 * The driver must store the error location in ->bi_sector, if
                 * it supports it. For non-stacked drivers, this should be
                 * copied from blk_rq_pos(rq).
                 */
                if (error_sector)
                        *error_sector = bio->bi_sector;
        }

        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
        else if (!bio_flagged(bio, BIO_UPTODATE))
                ret = -EIO;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
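/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * wants previously completed writes to reach stable media and is allowed to
 * sleep could call blkdev_issue_flush() directly.  The bdev variable is an
 * assumption for the example, and BLKDEV_IFL_WAIT stands for the
 * BLKDEV_IFL_* flag corresponding to the BLKDEV_WAIT bit tested above:
 *
 *	sector_t error_sector;
 *	int err;
 *
 *	err = blkdev_issue_flush(bdev, GFP_KERNEL, &error_sector,
 *				 BLKDEV_IFL_WAIT);
 *	if (err == -EOPNOTSUPP)
 *		err = 0;
 *
 * Treating -EOPNOTSUPP as success is a common policy, since it means the
 * device reports no writeback cache that needs flushing.
 */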