/*
 * block/blk-map.c
 *
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * If the alignment requirement is satisfied, map in the user pages
	 * for direct DMA; otherwise set up kernel bounce buffers.
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, ubuf, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);

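/*
 * Illustrative sketch (not part of the original file): how a typical
 * REQ_TYPE_BLOCK_PC caller is expected to pair blk_rq_map_user() with
 * blk_execute_rq() and blk_rq_unmap_user().  The helper name, the cmd[]
 * CDB buffer and the READ direction are assumptions for illustration,
 * and error handling is abbreviated.
 */
static int example_read_user_cmd(struct request_queue *q, struct gendisk *disk,
				 unsigned char *cmd, unsigned int cmd_len,
				 void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	memcpy(rq->cmd, cmd, cmd_len);
	rq->cmd_len = cmd_len;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}
	bio = rq->bio;			/* keep the original bio for unmapping */

	blk_execute_rq(q, disk, rq, 0);	/* issue and wait for completion */

	/* must be called while still in process context */
	ret = blk_rq_unmap_user(bio);
	blk_put_request(rq);
	return ret;
}
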
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

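/*
 * Illustrative sketch (not part of the original file): mapping two scattered
 * user buffers into a single request with blk_rq_map_user_iov().  The helper
 * name and the ubuf1/ubuf2 parameters are assumptions; the request is
 * otherwise set up as in the blk_rq_map_user() example above.
 */
static int example_map_two_user_buffers(struct request_queue *q,
					struct request *rq,
					void __user *ubuf1, unsigned int len1,
					void __user *ubuf2, unsigned int len2,
					gfp_t gfp_mask)
{
	struct sg_iovec iov[2];

	iov[0].iov_base = ubuf1;
	iov[0].iov_len = len1;
	iov[1].iov_base = ubuf2;
	iov[1].iov_len = len2;

	/* len must equal the sum of the iovec lengths, or -EINVAL results */
	return blk_rq_map_user_iov(q, rq, NULL, iov, 2, len1 + len2, gfp_mask);
}
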
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	int do_copy = 0;
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
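
/*
 * Illustrative sketch (not part of the original file): issuing a
 * REQ_TYPE_BLOCK_PC command with a kmalloc()ed payload via blk_rq_map_kern().
 * The helper name and the cmd[] CDB buffer are assumptions; note that kbuf
 * must not live on the stack, or the copy path is taken anyway.
 */
static int example_read_kernel_cmd(struct request_queue *q,
				   struct gendisk *disk,
				   unsigned char *cmd, unsigned int cmd_len,
				   void *kbuf, unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	memcpy(rq->cmd, cmd, cmd_len);
	rq->cmd_len = cmd_len;

	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, disk, rq, 0);	/* issue and wait */

	blk_put_request(rq);
	return ret;
}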