/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	if (!rq->bio) {
		rq->cmd_flags &= REQ_OP_MASK;
		rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
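
/*
 * Usage sketch (illustrative, not an exported helper): a driver that has
 * already built a bio over a kernel buffer with bio_map_kern() could attach
 * it to a passthrough request via blk_rq_append_bio().  The function name
 * and error handling below are hypothetical; only bio_map_kern() and
 * blk_rq_append_bio() are assumed interfaces.
 */
static int example_append_kernel_bio(struct request *rq, void *buf,
				     unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int ret;

	/* Build a bio over a (non-stack, suitably aligned) kernel buffer. */
	bio = bio_map_kern(rq->q, buf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* Fails if the bio cannot be merged within the driver limits. */
	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		bio_put(bio);
	return ret;
}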

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
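
/*
 * Usage sketch (illustrative): build an iov_iter from a user-space iovec
 * array with import_iovec() and map it with blk_rq_map_user_iov(), in the
 * style of the sg/bsg passthrough drivers.  The wrapper name is hypothetical,
 * and <linux/slab.h> is assumed for kfree(); import_iovec() and
 * blk_rq_map_user_iov() are the only assumed interfaces.
 */
static int example_map_user_iovec(struct request_queue *q, struct request *rq,
				  const struct iovec __user *uvec, int nr_segs,
				  gfp_t gfp_mask)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	int ret;

	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs,
			   ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, gfp_mask);
	kfree(iov);	/* import_iovec() may have switched to a heap array */
	return ret;
}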

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
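
/*
 * Usage sketch (illustrative): passing a struct rq_map_data makes the mapping
 * take the copy path and back the request with caller-supplied pages instead
 * of pinning the user pages.  The wrapper below is hypothetical; the
 * rq_map_data fields and blk_rq_map_user() are the only assumed interfaces,
 * and page allocation/lifetime handling is elided.
 */
static int example_map_user_prealloc(struct request_queue *q,
				     struct request *rq, void __user *ubuf,
				     unsigned long len, struct page **pages,
				     int nr_pages, gfp_t gfp_mask)
{
	struct rq_map_data map_data = {
		.pages		= pages,	/* caller-allocated pages */
		.nr_entries	= nr_pages,	/* number of entries in @pages */
		.page_order	= 0,		/* order-0 (single) pages */
		.offset		= 0,		/* start copying at offset 0 */
	};

	/* Data is copied to/from the supplied pages rather than mapped. */
	return blk_rq_map_user(q, rq, &map_data, ubuf, len, gfp_mask);
}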

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
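
/*
 * Usage sketch (illustrative): the mapping/unmapping pairing described above.
 * rq->bio must be saved immediately after mapping because request completion
 * may change it.  Request allocation, submission and completion handling are
 * elided; the wrapper name is hypothetical.
 */
static int example_map_execute_unmap(struct request_queue *q,
				     struct request *rq, void __user *ubuf,
				     unsigned long len, gfp_t gfp_mask)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, gfp_mask);
	if (ret)
		return ret;
	bio = rq->bio;	/* remember the original mapped bio chain */

	/* ... submit rq and wait for it to complete here ... */

	/* Always hand the saved bio back, even if the command failed. */
	return blk_rq_unmap_user(bio);
}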

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of the data in @kbuf
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
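
/*
 * Usage sketch (illustrative): mapping a kernel buffer into a passthrough
 * request with blk_rq_map_kern().  The buffer is expected to come from the
 * heap; on-stack or misaligned buffers still work but silently take the
 * slower bio_copy_kern() path.  The wrapper name and buffer-lifetime
 * convention are hypothetical.
 */
static int example_map_kernel_buf(struct request_queue *q, struct request *rq,
				  void *buf, unsigned int len, gfp_t gfp_mask)
{
	/*
	 * The data direction is taken from the request (rq_data_dir()), so
	 * it must have been set when the request was allocated.  @buf must
	 * stay valid until the request completes; the caller frees it
	 * afterwards.
	 */
	return blk_rq_map_kern(q, rq, buf, len, gfp_mask);
}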