/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	/* start the request's bio list, or back-merge onto its tail */
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
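
/*
 * Editor's sketch (not in the original file, disabled via #if 0): how a
 * caller might feed blk_rq_append_bio(). The bio is assumed to come from
 * bio_map_kern(); the "example_" name is hypothetical.
 */
#if 0
static int example_append_kern_bio(struct request_queue *q,
				   struct request *rq,
				   void *kbuf, unsigned int len)
{
	struct bio *bio = bio_map_kern(q, kbuf, len, GFP_KERNEL);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* on success the bio is linked in and rq->data_len has grown */
	return blk_rq_append_bio(q, rq, bio);
}
#endif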

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		/* directly mapped bios are unpinned; copied ones are
		 * written back to user space and freed */
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (!(uaddr & queue_dma_alignment(q)) &&
	    !(len & queue_dma_alignment(q)))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}
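
/*
 * Editor's sketch of the alignment decision above (disabled, hypothetical
 * helper name): queue_dma_alignment() returns a mask such as 511 for
 * 512-byte DMA alignment, so e.g. uaddr 0x10000 with len 8192 takes the
 * zero-copy bio_map_user() path, while uaddr 0x10003 or len 1000 falls
 * back to the bio_copy_user() bounce path.
 */
#if 0
static int example_can_map_directly(struct request_queue *q,
				    unsigned long uaddr, unsigned int len)
{
	/* both the start address and the length must honour the mask */
	return !(uaddr & queue_dma_alignment(q)) &&
	       !(len & queue_dma_alignment(q));
}
#endif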

/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    void __user *ubuf, unsigned long len)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	/*
	 * __blk_rq_map_user() copies the buffers if starting address
	 * or length isn't aligned. As the copied buffer is always
	 * page aligned, we know that there's enough room for padding.
	 * Extend the last bio and update rq->data_len accordingly.
	 *
	 * On unmap, bio_uncopy_user() will use unmodified
	 * bio_map_data pointed to by bio->bi_private.
	 */
	if (len & queue_dma_alignment(q)) {
		unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;

		rq->extra_len += pad_len;
	}

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
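
/*
 * Editor's usage sketch (disabled, hypothetical names): the map/execute/
 * unmap cycle the doc comment above implies. The bio pointer is saved
 * before execution because completion may change rq->bio. Note the pad
 * arithmetic above rounds len up to the DMA alignment: with mask 3 and
 * len 5, pad_len = (3 & ~5) + 1 = 3, padding the request out to 8 bytes.
 */
#if 0
static int example_pc_read(struct request_queue *q, struct request *rq,
			   void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, ubuf, len);
	if (ret)
		return ret;

	bio = rq->bio;			/* save before the request runs */
	ret = blk_execute_rq(q, NULL, rq, 0);
	if (blk_rq_unmap_user(bio))
		ret = -EFAULT;
	return ret;
}
#endif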

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy io, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of io, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len)
{
	struct bio *bio;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	/* we don't allow misaligned data like bio_map_user() does. If the
	 * user is using sg, they're expected to know the alignment constraints
	 * and respect them accordingly */
	bio = bio_map_user_iov(q, NULL, iov, iov_count,
			       rq_data_dir(rq) == READ);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		/* the mapped size must match the requested byte count */
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
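
/*
 * Editor's sketch of the iovec variant (disabled, hypothetical name): the
 * caller supplies an sg_iovec array whose summed length must equal @len
 * exactly, and each segment must already satisfy the queue's alignment
 * constraints, as the comment in the function notes.
 */
#if 0
static int example_map_two_segments(struct request_queue *q,
				    struct request *rq,
				    void __user *buf0, size_t len0,
				    void __user *buf1, size_t len1)
{
	struct sg_iovec iov[2];

	iov[0].iov_base = buf0;
	iov[0].iov_len	= len0;
	iov[1].iov_base = buf1;
	iov[1].iov_len	= len1;

	/* returns -EINVAL if the mapped size does not match len0 + len1 */
	return blk_rq_map_user_iov(q, rq, iov, 2, len0 + len1);
}
#endif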

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the io completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		/* bounced bios keep the original bio in bi_private */
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
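
/*
 * Editor's illustration of the contract above (disabled, hypothetical
 * name): the bio passed in must be the rq->bio captured right after
 * mapping, since io completion may have advanced rq->bio itself.
 */
#if 0
static int example_unmap(struct bio *saved_bio)
{
	/* saved_bio == rq->bio as observed after blk_rq_map_user() */
	return blk_rq_unmap_user(saved_bio);
}
#endif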

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	bio = bio_map_kern(q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
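
/*
 * Editor's end-to-end sketch (disabled, hypothetical name): issuing a
 * kernel-buffer packet command, roughly what scsi_execute() does in this
 * kernel era. The blk_get_request()/blk_execute_rq() signatures used here
 * are the contemporaneous ones and are an assumption of this sketch.
 */
#if 0
static int example_kern_command(struct request_queue *q,
				void *kbuf, unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, __GFP_WAIT);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (!ret)
		ret = blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return ret;
}
#endif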