block/blk-map.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
        bool is_our_pages : 1;
        bool is_null_mapped : 1;
        struct iov_iter iter;
        struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
                                               gfp_t gfp_mask)
{
        struct bio_map_data *bmd;

        if (data->nr_segs > UIO_MAXIOV)
                return NULL;

        bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
        if (!bmd)
                return NULL;
        memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
        bmd->iter = *data;
        bmd->iter.iov = bmd->iov;
        return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                ssize_t ret;

                ret = copy_page_from_iter(bvec->bv_page,
                                          bvec->bv_offset,
                                          bvec->bv_len,
                                          iter);

                if (!iov_iter_count(iter))
                        break;

                if (ret < bvec->bv_len)
                        return -EFAULT;
        }

        return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                ssize_t ret;

                ret = copy_page_to_iter(bvec->bv_page,
                                        bvec->bv_offset,
                                        bvec->bv_len,
                                        &iter);

                if (!iov_iter_count(&iter))
                        break;

                if (ret < bvec->bv_len)
                        return -EFAULT;
        }

        return 0;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
        struct bio_map_data *bmd = bio->bi_private;
        int ret = 0;

        if (!bmd->is_null_mapped) {
                /*
                 * if we're in a workqueue, the request is orphaned, so
                 * don't copy into a random user address space, just free
                 * and return -EINTR so user space doesn't expect any data.
                 */
                if (!current->mm)
                        ret = -EINTR;
                else if (bio_data_dir(bio) == READ)
                        ret = bio_copy_to_iter(bio, bmd->iter);
                if (bmd->is_our_pages)
                        bio_free_pages(bio);
        }
        kfree(bmd);
        return ret;
}

static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
                struct iov_iter *iter, gfp_t gfp_mask)
{
        struct bio_map_data *bmd;
        struct page *page;
        struct bio *bio;
        int i = 0, ret;
        int nr_pages;
        unsigned int len = iter->count;
        unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

        bmd = bio_alloc_map_data(iter, gfp_mask);
        if (!bmd)
                return -ENOMEM;

        /*
         * We need to do a deep copy of the iov_iter including the iovecs.
         * The caller provided iov might point to an on-stack or otherwise
         * shortlived one.
         */
        bmd->is_our_pages = !map_data;
        bmd->is_null_mapped = (map_data && map_data->null_mapped);

        nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

        ret = -ENOMEM;
        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                goto out_bmd;
        bio->bi_opf |= req_op(rq);

        if (map_data) {
                nr_pages = 1 << map_data->page_order;
                i = map_data->offset / PAGE_SIZE;
        }
        while (len) {
                unsigned int bytes = PAGE_SIZE;

                bytes -= offset;

                if (bytes > len)
                        bytes = len;

                if (map_data) {
                        if (i == map_data->nr_entries * nr_pages) {
                                ret = -ENOMEM;
                                goto cleanup;
                        }

                        page = map_data->pages[i / nr_pages];
                        page += (i % nr_pages);

                        i++;
                } else {
                        page = alloc_page(GFP_NOIO | gfp_mask);
                        if (!page) {
                                ret = -ENOMEM;
                                goto cleanup;
                        }
                }

                if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
                        if (!map_data)
                                __free_page(page);
                        break;
                }

                len -= bytes;
                offset = 0;
        }

        if (map_data)
                map_data->offset += bio->bi_iter.bi_size;

        /*
         * success
         */
        if ((iov_iter_rw(iter) == WRITE &&
             (!map_data || !map_data->null_mapped)) ||
            (map_data && map_data->from_user)) {
                ret = bio_copy_from_iter(bio, iter);
                if (ret)
                        goto cleanup;
        } else {
                if (bmd->is_our_pages)
                        zero_fill_bio(bio);
                iov_iter_advance(iter, bio->bi_iter.bi_size);
        }

        bio->bi_private = bmd;

        ret = blk_rq_append_bio(rq, bio);
        if (ret)
                goto cleanup;
        return 0;
cleanup:
        if (!map_data)
                bio_free_pages(bio);
        bio_put(bio);
out_bmd:
        kfree(bmd);
        return ret;
}

static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
                gfp_t gfp_mask)
{
        unsigned int max_sectors = queue_max_hw_sectors(rq->q);
        struct bio *bio;
        int ret;
        int j;

        if (!iov_iter_count(iter))
                return -EINVAL;

        bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_VECS));
        if (!bio)
                return -ENOMEM;
        bio->bi_opf |= req_op(rq);

        while (iov_iter_count(iter)) {
                struct page **pages;
                ssize_t bytes;
                size_t offs, added = 0;
                int npages;

                bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
                if (unlikely(bytes <= 0)) {
                        ret = bytes ? bytes : -EFAULT;
                        goto out_unmap;
                }

                npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

                if (unlikely(offs & queue_dma_alignment(rq->q))) {
                        ret = -EINVAL;
                        j = 0;
                } else {
                        for (j = 0; j < npages; j++) {
                                struct page *page = pages[j];
                                unsigned int n = PAGE_SIZE - offs;
                                bool same_page = false;

                                if (n > bytes)
                                        n = bytes;

                                if (!bio_add_hw_page(rq->q, bio, page, n, offs,
                                                     max_sectors, &same_page)) {
                                        if (same_page)
                                                put_page(page);
                                        break;
                                }

                                added += n;
                                bytes -= n;
                                offs = 0;
                        }
                        iov_iter_advance(iter, added);
                }
                /*
                 * release the pages we didn't map into the bio, if any
                 */
                while (j < npages)
                        put_page(pages[j++]);
                kvfree(pages);
                /* couldn't stuff something into bio? */
                if (bytes)
                        break;
        }

        ret = blk_rq_append_bio(rq, bio);
        if (ret)
                goto out_unmap;
        return 0;

out_unmap:
        bio_release_pages(bio, false);
        bio_put(bio);
        return ret;
}

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
        if (bio->bi_private && !op_is_write(bio_op(bio))) {
                unsigned long i, len = 0;

                for (i = 0; i < bio->bi_vcnt; i++)
                        len += bio->bi_io_vec[i].bv_len;
                invalidate_kernel_vmap_range(bio->bi_private, len);
        }
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
        bio_invalidate_vmalloc_pages(bio);
        bio_put(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
                unsigned int len, gfp_t gfp_mask)
{
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;
        const int nr_pages = end - start;
        bool is_vmalloc = is_vmalloc_addr(data);
        struct page *page;
        int offset, i;
        struct bio *bio;

        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        if (is_vmalloc) {
                flush_kernel_vmap_range(data, len);
                bio->bi_private = data;
        }

        offset = offset_in_page(kaddr);
        for (i = 0; i < nr_pages; i++) {
                unsigned int bytes = PAGE_SIZE - offset;

                if (len <= 0)
                        break;

                if (bytes > len)
                        bytes = len;

                if (!is_vmalloc)
                        page = virt_to_page(data);
                else
                        page = vmalloc_to_page(data);
                if (bio_add_pc_page(q, bio, page, bytes,
                                    offset) < bytes) {
                        /* we don't support partial mappings */
                        bio_put(bio);
                        return ERR_PTR(-EINVAL);
                }

                data += bytes;
                len -= bytes;
                offset = 0;
        }

        bio->bi_end_io = bio_map_kern_endio;
        return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
        bio_free_pages(bio);
        bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
        char *p = bio->bi_private;
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                memcpy_from_bvec(p, bvec);
                p += bvec->bv_len;
        }

        bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
                unsigned int len, gfp_t gfp_mask, int reading)
{
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;
        struct bio *bio;
        void *p = data;
        int nr_pages = 0;

        /*
         * Overflow, abort
         */
        if (end < start)
                return ERR_PTR(-EINVAL);

        nr_pages = end - start;
        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        while (len) {
                struct page *page;
                unsigned int bytes = PAGE_SIZE;

                if (bytes > len)
                        bytes = len;

                page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
                if (!page)
                        goto cleanup;

                if (!reading)
                        memcpy(page_address(page), p, bytes);

                if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
                        break;

                len -= bytes;
                p += bytes;
        }

        if (reading) {
                bio->bi_end_io = bio_copy_kern_endio_read;
                bio->bi_private = data;
        } else {
                bio->bi_end_io = bio_copy_kern_endio;
        }

        return bio;

cleanup:
        bio_free_pages(bio);
        bio_put(bio);
        return ERR_PTR(-ENOMEM);
}

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
        struct bvec_iter iter;
        struct bio_vec bv;
        unsigned int nr_segs = 0;

        bio_for_each_bvec(bv, bio, iter)
                nr_segs++;

        if (!rq->bio) {
                blk_rq_bio_prep(rq, bio, nr_segs);
        } else {
                if (!ll_back_merge_fn(rq, bio, nr_segs))
                        return -EINVAL;
                rq->biotail->bi_next = bio;
                rq->biotail = bio;
                rq->__data_len += (bio)->bi_iter.bi_size;
                bio_crypt_free_ctx(bio);
        }

        return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data,
                        const struct iov_iter *iter, gfp_t gfp_mask)
{
        bool copy = false;
        unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
        struct bio *bio = NULL;
        struct iov_iter i;
        int ret = -EINVAL;

        if (!iter_is_iovec(iter))
                goto fail;

        if (map_data)
                copy = true;
        else if (blk_queue_may_bounce(q))
                copy = true;
        else if (iov_iter_alignment(iter) & align)
                copy = true;
        else if (queue_virt_boundary(q))
                copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

        i = *iter;
        do {
                if (copy)
                        ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
                else
                        ret = bio_map_user_iov(rq, &i, gfp_mask);
                if (ret)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
        } while (iov_iter_count(&i));

        return 0;

unmap_rq:
        blk_rq_unmap_user(bio);
fail:
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        struct iovec iov;
        struct iov_iter i;
        int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

        if (unlikely(ret < 0))
                return ret;

        return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *next_bio;
        int ret = 0, ret2;

        while (bio) {
                if (bio->bi_private) {
                        ret2 = bio_uncopy_user(bio);
                        if (ret2 && !ret)
                                ret = ret2;
                } else {
                        bio_release_pages(bio, bio_data_dir(bio) == READ);
                }

                next_bio = bio;
                bio = bio->bi_next;
                bio_put(next_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of user data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        struct bio *bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
            blk_queue_may_bounce(q))
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_opf &= ~REQ_OP_MASK;
        bio->bi_opf |= req_op(rq);

        ret = blk_rq_append_bio(rq, bio);
        if (unlikely(ret)) {
                /* request is too big */
                bio_put(bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);