// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request. Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
        struct bio *orig_bio = *bio;
        struct bvec_iter iter;
        struct bio_vec bv;
        unsigned int nr_segs = 0;

        blk_queue_bounce(rq->q, bio);

        bio_for_each_bvec(bv, *bio, iter)
                nr_segs++;

        if (!rq->bio) {
                blk_rq_bio_prep(rq, *bio, nr_segs);
        } else {
                if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
                        if (orig_bio != *bio) {
                                bio_put(*bio);
                                *bio = orig_bio;
                        }
                        return -EINVAL;
                }

                rq->biotail->bi_next = *bio;
                rq->biotail = *bio;
                rq->__data_len += (*bio)->bi_iter.bi_size;
        }

        return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
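
/*
 * Editorial usage sketch (not part of blk-map.c): blk_rq_append_bio() takes
 * the bio by reference because blk_queue_bounce() may substitute a bounce
 * bio, so callers must keep using the possibly updated pointer.  On failure
 * the pointer is restored to the original bio.  The helper name below is a
 * hypothetical illustration only.
 */
static int __maybe_unused example_append_to_passthrough_rq(struct request *rq,
                struct bio *bio)
{
        int ret;

        ret = blk_rq_append_bio(rq, &bio);      /* bio may now be a bounce bio */
        if (ret)
                return ret;                     /* bio restored to the original */

        /* continue working with the (possibly replaced) bio from here on */
        return 0;
}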

static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
                struct rq_map_data *map_data, struct iov_iter *iter,
                gfp_t gfp_mask, bool copy)
{
        struct request_queue *q = rq->q;
        struct bio *bio, *orig_bio;
        int ret;

        if (copy)
                bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
        else
                bio = bio_map_user_iov(q, iter, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_opf &= ~REQ_OP_MASK;
        bio->bi_opf |= req_op(rq);

        orig_bio = bio;

        /*
         * We link the bounce buffer in and could have to traverse it
         * later so we have to get a ref to prevent it from being freed
         */
        ret = blk_rq_append_bio(rq, &bio);
        if (ret) {
                __blk_rq_unmap_user(orig_bio);
                return ret;
        }
        bio_get(bio);

        return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:          request queue where request should be inserted
 * @rq:         request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:       iovec iterator
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data,
                        const struct iov_iter *iter, gfp_t gfp_mask)
{
        bool copy = false;
        unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
        struct bio *bio = NULL;
        struct iov_iter i;
        int ret = -EINVAL;

        if (!iter_is_iovec(iter))
                goto fail;

        if (map_data)
                copy = true;
        else if (iov_iter_alignment(iter) & align)
                copy = true;
        else if (queue_virt_boundary(q))
                copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

        i = *iter;
        do {
                ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
                if (ret)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
        } while (iov_iter_count(&i));

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->rq_flags |= RQF_COPY_USER;
        return 0;

unmap_rq:
        __blk_rq_unmap_user(bio);
fail:
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
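
/*
 * Editorial usage sketch (not part of blk-map.c): building an iov_iter from a
 * user-space iovec array and handing it to blk_rq_map_user_iov().  The helper
 * name is hypothetical, error handling is abbreviated, and kfree() assumes
 * <linux/slab.h> is available in the caller's file.
 */
static int __maybe_unused example_map_user_iovec(struct request_queue *q,
                struct request *rq, const struct iovec __user *uvec,
                unsigned int nr_segs)
{
        struct iovec fast_iov[UIO_FASTIOV];
        struct iovec *iov = fast_iov;
        struct iov_iter iter;
        ssize_t bytes;
        int ret;

        bytes = import_iovec(rq_data_dir(rq), uvec, nr_segs,
                             ARRAY_SIZE(fast_iov), &iov, &iter);
        if (bytes < 0)
                return bytes;

        ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);

        /* import_iovec() left iov NULL or pointing at a kmalloc'ed array */
        kfree(iov);
        return ret;
}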

int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        struct iovec iov;
        struct iov_iter i;
        int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

        if (unlikely(ret < 0))
                return ret;

        return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:        start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
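
/*
 * Editorial usage sketch (not part of blk-map.c): a passthrough driver pairs
 * blk_rq_map_user() with blk_rq_unmap_user(), saving the original rq->bio
 * before execution because completion may change it (see the kernel-doc
 * above).  The function name and the REQ_OP_DRV_IN choice are illustrative
 * assumptions only.
 */
static int __maybe_unused example_read_into_user_buf(struct request_queue *q,
                struct gendisk *disk, void __user *ubuf, unsigned long len)
{
        struct request *rq;
        struct bio *orig_bio;
        int ret;

        rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
        if (ret)
                goto out_put;

        orig_bio = rq->bio;                     /* keep for unmapping */
        blk_execute_rq(q, disk, rq, 0);         /* synchronous submission */

        ret = blk_rq_unmap_user(orig_bio);      /* copy back / release pages */
out_put:
        blk_put_request(rq);
        return ret;
}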

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:          request queue where request should be inserted
 * @rq:         request to fill
 * @kbuf:       the kernel buffer
 * @len:        length of kernel data
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
        struct bio *bio, *orig_bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_opf &= ~REQ_OP_MASK;
        bio->bi_opf |= req_op(rq);

        if (do_copy)
                rq->rq_flags |= RQF_COPY_USER;

        orig_bio = bio;
        ret = blk_rq_append_bio(rq, &bio);
        if (unlikely(ret)) {
                /* request is too big */
                bio_put(orig_bio);
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
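
/*
 * Editorial usage sketch (not part of blk-map.c): mapping a kernel buffer
 * with blk_rq_map_kern() before executing a passthrough request.  No unmap
 * call is needed afterwards; the bio is released when the request completes.
 * The function name and the REQ_OP_DRV_OUT choice are illustrative
 * assumptions only.
 */
static int __maybe_unused example_map_kernel_buf(struct request_queue *q,
                struct gendisk *disk, void *kbuf, unsigned int len)
{
        struct request *rq;
        int ret;

        rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* blk_rq_map_kern() bounces if kbuf is misaligned or on the stack */
        ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
        if (!ret)
                blk_execute_rq(q, disk, rq, 0);

        blk_put_request(rq);
        return ret;
}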