[SCSI] block: fix the partial mappings with struct rq_map_data
block/blk-map.c
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

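/*
 * Append @bio to @rq: start the request with it if the request is still
 * empty, otherwise chain it at the tail once the queue's back-merge
 * limits allow it.
 */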
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

                rq->data_len += bio->bi_size;
        }
        return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

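/*
 * Release a single mapped bio: unpin the user pages if they were mapped
 * directly, otherwise copy back (as needed) and free the bounce pages.
 */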
static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

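/*
 * Map one chunk of the user buffer into a single bio and append it to @rq;
 * the caller splits larger buffers into BIO_MAX_SIZE sized pieces. Returns
 * the number of bytes mapped on success, or a negative errno.
 */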
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                             struct rq_map_data *map_data, void __user *ubuf,
                             unsigned int len, int null_mapped, gfp_t gfp_mask)
{
        unsigned long uaddr;
        struct bio *bio, *orig_bio;
        int reading, ret;

        reading = rq_data_dir(rq) == READ;

        /*
         * If the alignment requirement is satisfied, map in the user pages
         * for direct DMA. Otherwise, set up kernel bounce buffers.
         */
        uaddr = (unsigned long) ubuf;
        if (blk_rq_aligned(q, ubuf, len) && !map_data)
                bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
        else
                bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (null_mapped)
                bio->bi_flags |= (1 << BIO_NULL_MAPPED);

        orig_bio = bio;
        blk_queue_bounce(q, &bio);

        /*
         * We link the bounce buffer in and could have to traverse it
         * later, so we have to get a ref to prevent it from being freed.
         */
        bio_get(bio);

        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
                return bio->bi_size;

        /* if it was bounced we must call the end io function */
        bio_endio(bio, 0);
        __blk_rq_unmap_user(orig_bio);
        bio_put(bio);
        return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request structure to fill
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @ubuf: the user buffer
 * @len: length of user data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        unsigned long bytes_read = 0;
        struct bio *bio = NULL;
        int ret, null_mapped = 0;

        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len)
                return -EINVAL;
        if (!ubuf) {
                if (!map_data || rq_data_dir(rq) != READ)
                        return -EINVAL;
                null_mapped = 1;
        }

        while (bytes_read != len) {
                unsigned long map_len, end, start;

                map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
                end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
                                                        >> PAGE_SHIFT;
                start = (unsigned long)ubuf >> PAGE_SHIFT;

                /*
                 * A bad offset could cause us to require BIO_MAX_PAGES + 1
                 * pages. If this happens we just lower the requested
                 * mapping len by a page so that we can fit.
                 */
                if (end - start > BIO_MAX_PAGES)
                        map_len -= PAGE_SIZE;

                ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
                                        null_mapped, gfp_mask);
                if (ret < 0)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
                bytes_read += ret;
                ubuf += ret;

                if (map_data)
                        map_data->offset += ret;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        rq->buffer = rq->data = NULL;
        return 0;
unmap_rq:
        blk_rq_unmap_user(bio);
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
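
/*
 * Illustrative sketch (not part of this file): how an SG_IO-style caller
 * might pair blk_rq_map_user() with blk_rq_unmap_user(). The variables
 * 'bdev', 'ubuf' and 'len' are hypothetical and error handling is trimmed
 * to the essentials. The original rq->bio is saved before blk_execute_rq()
 * because I/O completion may change rq->bio.
 *
 *      struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *      struct bio *bio;
 *      int ret;
 *
 *      rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *      ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *      if (ret) {
 *              blk_put_request(rq);
 *              return ret;
 *      }
 *      bio = rq->bio;
 *      blk_execute_rq(q, bdev->bd_disk, rq, 0);
 *      ret = blk_rq_unmap_user(bio);
 *      blk_put_request(rq);
 */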

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iov: pointer to the iovec
 * @iov_count: number of elements in the iovec
 * @len: I/O byte count
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data, struct sg_iovec *iov,
                        int iov_count, unsigned int len, gfp_t gfp_mask)
{
        struct bio *bio;
        int i, read = rq_data_dir(rq) == READ;
        int unaligned = 0;

        if (!iov || iov_count <= 0)
                return -EINVAL;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;

                if (uaddr & queue_dma_alignment(q)) {
                        unaligned = 1;
                        break;
                }
        }

        if (unaligned || (q->dma_pad_mask & len) || map_data)
                bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
                                        gfp_mask);
        else
                bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (bio->bi_size != len) {
                /*
                 * Grab an extra reference to this bio, as bio_unmap_user()
                 * expects to be able to drop it twice as it happens on the
                 * normal IO completion path.
                 */
                bio_get(bio);
                bio_endio(bio, 0);
                __blk_rq_unmap_user(bio);
                return -EINVAL;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        blk_queue_bounce(q, &bio);
        bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
        rq->buffer = rq->data = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
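
/*
 * Illustrative sketch (not part of this file): a caller with a vector of
 * user buffers, e.g. the SG_IO path, might use blk_rq_map_user_iov()
 * roughly as below. 'iov', 'iov_count', 'total_len' and 'bdev' are
 * hypothetical; the sg_iovec entries are assumed to have been copied in
 * from user space already.
 *
 *      ret = blk_rq_map_user_iov(q, rq, NULL, iov, iov_count,
 *                                total_len, GFP_KERNEL);
 *      if (ret)
 *              goto out_put_request;
 *      bio = rq->bio;
 *      blk_execute_rq(q, bdev->bd_disk, rq, 0);
 *      blk_rq_unmap_user(bio);
 */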

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        int do_copy = 0;
        struct bio *bio;

        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= (1 << BIO_RW);

        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;

        blk_rq_bio_prep(q, rq, bio);
        blk_queue_bounce(q, &rq->bio);
        rq->buffer = rq->data = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
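
/*
 * Illustrative sketch (not part of this file): a kernel-internal caller,
 * such as a SCSI command helper, might map a kernel buffer like this.
 * 'buffer' and 'bufflen' are hypothetical. No explicit unmap call is
 * needed; the bio set up here is released when the request completes.
 *
 *      struct request *rq = blk_get_request(q, WRITE, GFP_NOIO);
 *      int err;
 *
 *      rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *      err = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_NOIO);
 *      if (err) {
 *              blk_put_request(rq);
 *              return err;
 *      }
 *      blk_execute_rq(q, NULL, rq, 1);
 *      blk_put_request(rq);
 */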