/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 * Note: The mapped bio may need to be bounced through blk_queue_bounce()
 * before being submitted to the device, as pages mapped may be out of
 * reach. It's the caller's responsibility to make sure this happens. The
 * original bio must be passed back in to blk_rq_unmap_user() for proper
 * unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
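
/*
 * Illustrative sketch, not part of the original file: a passthrough ioctl
 * path that receives a user iovec array can build an iov_iter with
 * import_iovec() and hand it to blk_rq_map_user_iov(), much as the sg and
 * bsg drivers do.  The name example_map_user_iovec is hypothetical, command
 * setup is omitted, and kfree() would need <linux/slab.h>.
 */
static int example_map_user_iovec(struct request_queue *q, struct request *rq,
				  const struct iovec __user *uvec, int nr_segs)
{
	struct iovec *iov = NULL;
	struct iov_iter i;
	int ret;

	/* copy the user iovec array in and initialize the iterator */
	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &i);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
	kfree(iov);		/* the iovec copy is no longer needed */
	return ret;
}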

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
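
/*
 * Illustrative sketch, not part of the original file: the usual pairing of
 * blk_rq_map_user() and blk_rq_unmap_user() around request execution.  The
 * original rq->bio is saved before the request runs, as required above.
 * The name example_user_passthrough is hypothetical and the command payload
 * setup a real driver would do is omitted.
 */
static int example_user_passthrough(struct request_queue *q,
				    void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_put;

	bio = rq->bio;			/* keep the original bio for unmapping */
	blk_execute_rq(q, NULL, rq, 0);	/* run and wait for completion */
	ret = blk_rq_unmap_user(bio);
out_put:
	blk_put_request(rq);
	return ret;
}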

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
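
/*
 * Illustrative sketch, not part of the original file: issuing a passthrough
 * command backed by a kernel buffer.  blk_rq_map_kern() decides internally
 * whether to map the buffer directly or copy it through a bounce buffer.
 * The name example_kern_passthrough is hypothetical and command setup is
 * omitted.
 */
static int example_kern_passthrough(struct request_queue *q, void *kbuf,
				    unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, REQ_OP_SCSI_OUT, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, NULL, rq, 0);	/* run and wait for completion */

	blk_put_request(rq);
	return ret;
}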