// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;

	blk_queue_bounce(rq->q, bio);

	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, *bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, *bio)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
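
/*
 * Illustrative sketch, compiled out: because blk_queue_bounce() may
 * substitute a bounce bio, callers pass the bio by reference and keep
 * their original pointer for cleanup.  On failure, *bio has been
 * restored to the caller's original bio, which the caller still owns.
 * The function name below is hypothetical, not part of this file.
 */
#if 0
static int example_append(struct request *rq, struct bio *bio)
{
	struct bio *orig = bio;	/* remember the caller-owned bio */
	int ret;

	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		/* on failure, bio == orig again; drop our reference */
		bio_put(orig);
		return ret;
	}
	return 0;
}
#endif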

/*
 * Undo a single bio mapping: unpin the user pages for a directly
 * mapped bio, or copy the data back to user space for a copied one.
 */
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

/*
 * Map one chunk of the iterator into @rq, either by pinning the user
 * pages directly or by copying them into a kernel bounce buffer.
 */
static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		__blk_rq_unmap_user(orig_bio);
		return ret;
	}
	bio_get(bio);

	return 0;
}
/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
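
/*
 * Illustrative sketch, compiled out: a typical caller builds an
 * iov_iter from a user-space iovec with import_iovec() and lets
 * blk_rq_map_user_iov() decide between direct mapping and copying.
 * The function name below is hypothetical, not part of this file.
 */
#if 0
static int example_map_iov(struct request_queue *q, struct request *rq,
			   const struct iovec __user *uvec, int nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	int ret;

	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs,
			   ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
	kfree(iov);	/* import_iovec() may have allocated a larger array */
	return ret;
}
#endif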

/*
 * Map a single contiguous user buffer into a request; a thin wrapper
 * around blk_rq_map_user_iov().
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
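
/*
 * Illustrative sketch, compiled out: drivers with a preallocated page
 * pool (the sg driver's reserved buffer is the classic case) pass an
 * rq_map_data so the copy path reuses those pages instead of
 * allocating fresh ones.  A non-NULL map_data forces the copy path.
 * The function name below is hypothetical, not part of this file.
 */
#if 0
static int example_map_prealloc(struct request_queue *q, struct request *rq,
				void __user *ubuf, unsigned long len,
				struct page **pages, int nr_pages)
{
	struct rq_map_data map_data = {
		.pages		= pages,	/* caller-owned page pool */
		.page_order	= 0,		/* order-0 pages */
		.nr_entries	= nr_pages,
		.offset		= 0,
	};

	return blk_rq_map_user(q, rq, &map_data, ubuf, len, GFP_KERNEL);
}
#endif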

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
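
/*
 * Illustrative sketch, compiled out: the full map / execute / unmap
 * cycle.  rq->bio is saved before submission because completion may
 * change it, as the comment above requires.  Assumes a SCSI-style
 * passthrough queue; command setup is elided and the function name
 * is hypothetical, not part of this file.
 */
#if 0
static int example_roundtrip(struct request_queue *q, struct gendisk *disk,
			     void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_put;

	bio = rq->bio;	/* save: completion may advance rq->bio */
	blk_execute_rq(q, disk, rq, 0);
	ret = blk_rq_unmap_user(bio);
out_put:
	blk_put_request(rq);
	return ret;
}
#endif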
208 | ||
209 | /** | |
aebf526b | 210 | * blk_rq_map_kern - map kernel data to a request, for passthrough requests |
86db1e29 JA |
211 | * @q: request queue where request should be inserted |
212 | * @rq: request to fill | |
213 | * @kbuf: the kernel buffer | |
214 | * @len: length of user data | |
215 | * @gfp_mask: memory allocation flags | |
68154e90 FT |
216 | * |
217 | * Description: | |
218 | * Data will be mapped directly if possible. Otherwise a bounce | |
e227867f | 219 | * buffer is used. Can be called multiple times to append multiple |
3a5a3927 | 220 | * buffers. |
86db1e29 JA |
221 | */ |
222 | int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, | |
223 | unsigned int len, gfp_t gfp_mask) | |
224 | { | |
68154e90 | 225 | int reading = rq_data_dir(rq) == READ; |
14417799 | 226 | unsigned long addr = (unsigned long) kbuf; |
68154e90 | 227 | int do_copy = 0; |
88da0286 | 228 | struct bio *bio, *orig_bio; |
3a5a3927 | 229 | int ret; |
86db1e29 | 230 | |
ae03bf63 | 231 | if (len > (queue_max_hw_sectors(q) << 9)) |
86db1e29 JA |
232 | return -EINVAL; |
233 | if (!len || !kbuf) | |
234 | return -EINVAL; | |
235 | ||
14417799 | 236 | do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf); |
68154e90 FT |
237 | if (do_copy) |
238 | bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); | |
239 | else | |
240 | bio = bio_map_kern(q, kbuf, len, gfp_mask); | |
241 | ||
86db1e29 JA |
242 | if (IS_ERR(bio)) |
243 | return PTR_ERR(bio); | |
244 | ||
aebf526b CH |
245 | bio->bi_opf &= ~REQ_OP_MASK; |
246 | bio->bi_opf |= req_op(rq); | |
86db1e29 | 247 | |
68154e90 | 248 | if (do_copy) |
e8064021 | 249 | rq->rq_flags |= RQF_COPY_USER; |
68154e90 | 250 | |
88da0286 JA |
251 | orig_bio = bio; |
252 | ret = blk_rq_append_bio(rq, &bio); | |
3a5a3927 JB |
253 | if (unlikely(ret)) { |
254 | /* request is too big */ | |
88da0286 | 255 | bio_put(orig_bio); |
3a5a3927 JB |
256 | return ret; |
257 | } | |
258 | ||
86db1e29 JA |
259 | return 0; |
260 | } | |
86db1e29 | 261 | EXPORT_SYMBOL(blk_rq_map_kern); |
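
/*
 * Illustrative sketch, compiled out: mapping a kernel buffer.  The
 * buffer should not live on the stack, or object_is_on_stack() above
 * forces the bio_copy_kern() path.  The buffer must stay allocated
 * until the request completes.  The function name below is
 * hypothetical, not part of this file.
 */
#if 0
static int example_map_kbuf(struct request_queue *q, struct request *rq,
			    unsigned int len)
{
	void *kbuf = kmalloc(len, GFP_KERNEL);
	int ret;

	if (!kbuf)
		return -ENOMEM;

	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (ret)
		kfree(kbuf);
	/* on success, free kbuf only after request completion */
	return ret;
}
#endif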