block: add gfp_mask argument to blk_rq_map_user and blk_rq_map_user_iov
block/blk-map.c
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
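
/*
 * Illustrative sketch, not part of the original file: attaching a
 * kernel buffer to a request that may already carry a bio.
 * bio_map_kern() is used here to build the bio; 'buf' and 'len' come
 * from a hypothetical caller and must satisfy the queue's limits.
 */
static inline int example_append_kern_buf(struct request_queue *q,
					  struct request *rq,
					  void *buf, unsigned int len)
{
	struct bio *bio;

	bio = bio_map_kern(q, buf, len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* merges onto rq->biotail, or fails with -EINVAL if it cannot */
	return blk_rq_append_bio(q, rq, bio);
}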

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len,
			     gfp_t gfp_mask)
{
	unsigned long uaddr;
	unsigned int alignment;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	if (!(uaddr & alignment) && !(len & alignment))
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}
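
/*
 * Worked example of the alignment test above (values assumed for
 * illustration): with queue_dma_alignment(q) == 511 and dma_pad_mask == 0
 * the combined mask is 0x1ff, so a buffer at address 0x1000 with len 4096
 * has the low nine bits clear in both and is mapped for direct DMA, while
 * a buffer at 0x1004 fails the test and is copied through bounce pages
 * via bio_copy_user().
 */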

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    void __user *ubuf, unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, ubuf, map_len, gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
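
/*
 * Illustrative sketch, not part of the original file: the usual
 * map/submit/unmap pattern for an SG_IO-style pass-through path. The
 * request 'rq' is assumed to have been prepared by a hypothetical
 * caller; submission itself is elided. Note how the original rq->bio is
 * saved before I/O, as the blk_rq_unmap_user() contract below requires.
 */
static inline int example_rq_map_user(struct request_queue *q,
				      struct request *rq,
				      void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;		/* nothing was mapped */

	bio = rq->bio;	/* completion may advance rq->bio */

	/* ... submit rq, e.g. via blk_execute_rq(), and wait ... */

	return blk_rq_unmap_user(bio);
}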

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len,
			gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
	}

	if (unaligned || (q->dma_pad_mask & len))
		bio = bio_copy_user_iov(q, iov, iov_count, read, gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
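
/*
 * Illustrative sketch, not part of the original file: mapping a two-
 * element user-space scatter list through blk_rq_map_user_iov(). The
 * user pointers 'buf0'/'buf1' and the prepared request 'rq' are assumed
 * to come from a hypothetical caller; the total length passed in must
 * equal the sum of the iovec lengths or the call fails with -EINVAL.
 */
static inline int example_rq_map_user_iov(struct request_queue *q,
					  struct request *rq,
					  void __user *buf0, size_t len0,
					  void __user *buf1, size_t len1)
{
	struct sg_iovec iov[2];

	iov[0].iov_base = buf0;
	iov[0].iov_len	= len0;
	iov[1].iov_base = buf1;
	iov[1].iov_len	= len1;

	return blk_rq_map_user_iov(q, rq, iov, 2, len0 + len1, GFP_KERNEL);
}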
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr;
	unsigned int alignment;
	int reading = rq_data_dir(rq) == READ;
	int do_copy = 0;
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	kaddr = (unsigned long)kbuf;
	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	do_copy = ((kaddr & alignment) || (len & alignment) ||
		   object_is_on_stack(kbuf));

	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
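
/*
 * Illustrative sketch, not part of the original file: issuing a driver-
 * internal command with a heap buffer. Allocating with kmalloc() rather
 * than on the stack lets the buffer be mapped directly instead of taking
 * the object_is_on_stack() copy path above. Request setup and submission
 * are assumed to be handled by a hypothetical caller, and <linux/slab.h>
 * is assumed to be reachable via the existing includes.
 */
static inline int example_rq_map_kern(struct request_queue *q,
				      struct request *rq, unsigned int len)
{
	void *buf = kmalloc(len, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (ret)
		kfree(buf);	/* mapping failed, nothing holds the buffer */
	/* on success, free 'buf' only after the request completes */
	return ret;
}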