/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	if (!rq->bio) {
		rq->cmd_flags &= REQ_OP_MASK;
		rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

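/*
 * Undo the mapping done for a single bio: unpin directly-mapped user pages,
 * or copy a bounce buffer back to user space and free it.
 */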
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

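/*
 * Map (or copy) as much of @iter as fits in a single bio and append that
 * bio to @rq.  On failure the bio built here is unmapped and freed again,
 * so the caller only has to unwind bios from earlier iterations.
 */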
static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

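	/*
	 * Fall back to a bounce copy when the caller supplied its own pages,
	 * when the iovec violates the queue's DMA alignment/padding mask, or
	 * when segment gaps would cross the queue's virt boundary.
	 */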
	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

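/*
 * blk_rq_map_user - single-range wrapper around blk_rq_map_user_iov():
 * builds a one-entry iov_iter for @ubuf/@len and maps it as above.
 */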
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

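/*
 * Usage sketch (illustrative only, not part of this file): a passthrough
 * ioctl path would pair the map and unmap calls roughly like this, where
 * "rq" is a previously allocated passthrough request and "ubuf"/"len"
 * describe the user buffer:
 *
 *	struct bio *orig_bio;
 *	int ret;
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	orig_bio = rq->bio;	(save it: completion may change rq->bio)
 *	... execute the request ...
 *	ret = blk_rq_unmap_user(orig_bio);
 */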
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
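
/*
 * Usage sketch (illustrative only, not part of this file): mapping a
 * driver-owned buffer into a passthrough request before issuing it,
 * where "rq", "buf" and "buflen" are hypothetical caller variables:
 *
 *	ret = blk_rq_map_kern(q, rq, buf, buflen, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	... issue rq and wait for completion ...
 *
 * Note that, unlike blk_rq_map_user(), this file provides no matching
 * unmap helper for kernel mappings.
 */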