/*
 * block/blk-map.c - Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	blk_queue_bounce(rq->q, &bio);

	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
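
/*
 * Illustrative sketch (hypothetical helper, not referenced elsewhere in this
 * file): a caller that has already built a bio for a passthrough request can
 * chain it in with blk_rq_append_bio().  Assumes @rq was allocated for the
 * same queue the bio targets; error handling is deliberately minimal.
 */
static int __maybe_unused blk_rq_append_bio_sketch(struct request *rq,
						   struct bio *bio)
{
	/* the bio's operation must match the request's data direction */
	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	/* fails with -EINVAL if the bio cannot be merged within the limits */
	return blk_rq_append_bio(rq, bio);
}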

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	ret = blk_rq_append_bio(rq, bio);
	bio_get(bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

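	/*
	 * Fall back to copying through a kernel bounce buffer when the caller
	 * supplied preallocated pages (map_data), when the iovec does not meet
	 * the queue's DMA padding/alignment, or when the segments would leave
	 * gaps that violate the queue's virt boundary.
	 */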
	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

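	/*
	 * Each pass maps (or copies) as much of the iterator as fits into a
	 * single bio and appends it to the request, looping until the whole
	 * iovec has been consumed.
	 */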
	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
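
/*
 * Illustrative sketch (hypothetical helper, not referenced elsewhere in this
 * file): the usual pairing of blk_rq_map_user() with blk_rq_unmap_user() in a
 * passthrough driver.  Assumes @rq was allocated for @q by the caller (e.g.
 * via blk_get_request()) and is freed by the caller afterwards, and uses the
 * blk_execute_rq() calling convention of this tree.
 */
static int __maybe_unused blk_rq_map_user_sketch(struct request_queue *q,
		struct request *rq, void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	/* remember the original bio; completion may change rq->bio */
	bio = rq->bio;

	/* issue the request and wait for it to complete */
	blk_execute_rq(q, NULL, rq, 0);

	/* unmap must happen in process context, using the original bio */
	return blk_rq_unmap_user(bio);
}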

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
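		/*
		 * A bounced bio keeps the original bio in ->bi_private; that
		 * original is the one that was user-mapped or copied, so it
		 * is the one to unmap.
		 */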
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

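	/*
	 * Buffers that do not meet the queue's alignment/padding rules, or
	 * that live on the (possibly vmalloc'ed) stack, cannot be mapped
	 * directly and are copied through a bounce buffer instead.
	 */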
	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
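
/*
 * Illustrative sketch (hypothetical helper, not referenced elsewhere in this
 * file): mapping a kernel buffer into a passthrough request, roughly the
 * pattern used by callers such as SCSI's scsi_execute().  Assumes @rq was
 * allocated for @q by the caller and is completed and freed by the caller.
 */
static int __maybe_unused blk_rq_map_kern_sketch(struct request_queue *q,
		struct request *rq, void *buf, unsigned int len)
{
	int ret;

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (ret)
		return ret;	/* too large for the queue, or mapping failed */

	/* issue the request and wait for it to complete */
	blk_execute_rq(q, NULL, rq, 0);
	return 0;
}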