/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

/**
 * blk_rq_append_bio - append a bio to an existing request
 * @q: request queue the request belongs to
 * @rq: request to extend
 * @bio: bio to link at the tail of @rq
 *
 * Description:
 *    Preps an empty @rq with @bio; otherwise links @bio at rq->biotail,
 *    provided ll_back_merge_fn() allows the back merge.
 */
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
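
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * mapping a driver-supplied kernel buffer with bio_map_kern() and linking
 * the resulting bio into a request with blk_rq_append_bio().
 */
static int __maybe_unused example_append(struct request_queue *q,
					 struct request *rq,
					 void *buf, unsigned int len)
{
	struct bio *bio;

	bio = bio_map_kern(q, buf, len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* preps an empty request, or back-merges onto rq->biotail */
	return blk_rq_append_bio(q, rq, bio);
}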

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request structure to fill
 * @ubuf: the user buffer
 * @len: length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    void __user *ubuf, unsigned long len)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	return ret;
}

EXPORT_SYMBOL(blk_rq_map_user);
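
/*
 * Illustrative sketch of the call sequence this API expects (hypothetical
 * helper, patterned on sg_io()-style callers; CDB setup omitted and the
 * data direction assumed to be a read): map, execute, then unmap while
 * still in process context.
 */
static int __maybe_unused example_pc_io(struct request_queue *q,
					struct gendisk *disk,
					void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_user(q, rq, ubuf, len);
	if (ret)
		goto out;

	/* save the original bio; completion may change rq->bio */
	bio = rq->bio;
	blk_execute_rq(q, disk, rq, 0);
	ret = blk_rq_unmap_user(bio);
out:
	blk_put_request(rq);
	return ret;
}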

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @iov: pointer to the iovec
 * @iov_count: number of elements in the iovec
 * @len: I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len)
{
	struct bio *bio;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	/* we don't allow misaligned data like bio_map_user() does. If the
	 * user is using sg, they're expected to know the alignment constraints
	 * and respect them accordingly */
	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq) == READ);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}

EXPORT_SYMBOL(blk_rq_map_user_iov);
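
/*
 * Illustrative sketch (hypothetical helper, assumes <scsi/sg.h> for the
 * sg_iovec definition): describing a scattered user buffer before handing
 * it to blk_rq_map_user_iov().  Both segments must already satisfy the
 * queue's alignment constraints.
 */
static int __maybe_unused example_map_iov(struct request_queue *q,
					  struct request *rq,
					  void __user *buf1, size_t len1,
					  void __user *buf2, size_t len2)
{
	struct sg_iovec iov[2];

	iov[0].iov_base = buf1;
	iov[0].iov_len = len1;
	iov[1].iov_base = buf2;
	iov[1].iov_len = len2;

	/* len must match the sum of the iovec lengths exactly */
	return blk_rq_map_user_iov(q, rq, iov, 2, len1 + len2);
}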

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the io completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}

EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	bio = bio_map_kern(q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}

EXPORT_SYMBOL(blk_rq_map_kern);
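
/*
 * Illustrative sketch (hypothetical helper, assumes <linux/slab.h> for
 * kmalloc()): a driver issuing a REQ_BLOCK_PC request backed by a kernel
 * buffer.  CDB setup and the result of blk_execute_rq() are omitted.
 */
static int __maybe_unused example_kern_io(struct request_queue *q,
					  struct gendisk *disk,
					  unsigned int len)
{
	struct request *rq;
	void *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq) {
		kfree(buf);
		return -ENOMEM;
	}
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	kfree(buf);
	return ret;
}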