block/blk-map.c

/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

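/*
 * Append @bio to @rq: start the request's bio list if it is empty,
 * otherwise chain the bio onto the tail once ll_back_merge_fn()
 * accepts the merge, growing the request's data length accordingly.
 */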
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

                rq->__data_len += bio->bi_iter.bi_size;
        }
        return 0;
}

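/*
 * Undo the mapping for one bio: unpin the user pages for a directly
 * mapped bio, or copy the bounce buffer back to user space as needed
 * and free it for a copied one.
 */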
static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data,
                        const struct iov_iter *iter, gfp_t gfp_mask)
{
        struct bio *bio;
        int unaligned = 0;
        struct iov_iter i;
        struct iovec iov;

        if (!iter || !iter->count)
                return -EINVAL;

        iov_for_each(iov, i, *iter) {
                unsigned long uaddr = (unsigned long) iov.iov_base;

                if (!iov.iov_len)
                        return -EINVAL;

                /*
                 * Keep going so we check length of all segments
                 */
                if (uaddr & queue_dma_alignment(q))
                        unaligned = 1;
        }

        if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
                bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
        else
                bio = bio_map_user_iov(q, iter, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (bio->bi_iter.bi_size != iter->count) {
                /*
                 * Grab an extra reference to this bio, as bio_unmap_user()
                 * expects to be able to drop it twice as it happens on the
                 * normal IO completion path
                 */
                bio_get(bio);
                bio_endio(bio, 0);
                __blk_rq_unmap_user(bio);
                return -EINVAL;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        blk_queue_bounce(q, &bio);
        bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
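
/*
 * Usage sketch (illustrative only, not part of the kernel source): a
 * caller with a multi-segment user vector builds an iov_iter and hands
 * it to blk_rq_map_user_iov().  The vector contents, segment count and
 * error handling are assumptions made for the example.
 */
static int __maybe_unused example_map_user_iov(struct request_queue *q,
                                               struct request *rq,
                                               const struct iovec *iov,
                                               int nr_segs, size_t len)
{
        struct iov_iter i;

        /* direction follows the request; len must cover all segments */
        iov_iter_init(&i, rq_data_dir(rq), iov, nr_segs, len);
        return blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
}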

int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        struct iovec iov;
        struct iov_iter i;

        iov.iov_base = ubuf;
        iov.iov_len = len;
        iov_iter_init(&i, rq_data_dir(rq), &iov, 1, len);

        return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
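
/*
 * Usage sketch (illustrative only, not part of the kernel source): the
 * map/execute/unmap cycle for a REQ_TYPE_BLOCK_PC request.  Command
 * setup is omitted and the surrounding driver context is assumed.
 */
static int __maybe_unused example_passthrough(struct request_queue *q,
                                              void __user *ubuf,
                                              unsigned long len)
{
        struct request *rq;
        struct bio *bio;
        int ret;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        blk_rq_set_block_pc(rq);
        /* ... fill in rq->cmd / rq->cmd_len for the device here ... */

        ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
        if (ret)
                goto out;

        /* save the original bio: completion may change rq->bio */
        bio = rq->bio;
        blk_execute_rq(q, NULL, rq, 0);

        /* unmap with the saved bio, still in process context */
        ret = blk_rq_unmap_user(bio);
out:
        blk_put_request(rq);
        return ret;
}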

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
        struct bio *bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (!reading)
                bio->bi_rw |= REQ_WRITE;

        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;

        ret = blk_rq_append_bio(q, rq, bio);
        if (unlikely(ret)) {
                /* request is too big */
                bio_put(bio);
                return ret;
        }

        blk_queue_bounce(q, &rq->bio);
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
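
/*
 * Usage sketch (illustrative only, not part of the kernel source):
 * mapping a heap buffer for a driver-internal command.  A stack buffer
 * would be forced through bio_copy_kern(), so heap allocation keeps
 * the zero-copy path available.  Needs <linux/slab.h> for kmalloc();
 * the request setup around it is assumed.
 */
static int __maybe_unused example_map_kern(struct request_queue *q,
                                           struct request *rq,
                                           unsigned int len)
{
        void *buf;
        int ret;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
        if (ret)
                kfree(buf);
        /* on success, free the buffer only after the request completes */
        return ret;
}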