/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>

#include <trace/events/block.h>
/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4
/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV
/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);
/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;
static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}
static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}
unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[idx].nr_vecs;
}
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}
struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_array define!
	 */
	switch (nr) {
	case 1: *idx = 0; break;
	case 2 ... 4: *idx = 1; break;
	case 5 ... 16: *idx = 2; break;
	case 17 ... 64: *idx = 3; break;
	case 65 ... 128: *idx = 4; break;
	case 129 ... BIO_MAX_PAGES: *idx = 5; break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * 1-vec entry pool is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}
static void __bio_free(struct bio *bio)
{
	bio_disassociate_task(bio);

	if (bio_integrity(bio))
		bio_integrity_free(bio);
}
static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	__bio_free(bio);

	if (bs) {
		bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);
/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	__bio_free(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);
static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_error)
		parent->bi_error = bio->bi_error;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}
/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io	= bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
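
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical bio_chain() caller splits work across a parent and a child bio
 * so that the parent's completion handler only runs after both have
 * finished. The function name below is hypothetical; bio_chain() and
 * generic_make_request() are the real APIs documented in this file.
 */
static void example_submit_chained(struct bio *parent, struct bio *child)
{
	/* child must not have bi_private or bi_end_io set before chaining */
	bio_chain(child, parent);
	generic_make_request(child);
	/* parent->bi_end_io (set by its owner) runs only after both complete */
	generic_make_request(parent);
}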
static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}
static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
			return NULL;
		/*
		 * generic_make_request() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set() while running
		 * underneath generic_make_request(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
		 * bios we would be blocking to the rescuer workqueue before
		 * we retry with the original gfp_flags.
		 */

		if (current->bio_list &&
		    (!bio_list_empty(&current->bio_list[0]) ||
		     !bio_list_empty(&current->bio_list[1])))
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio, NULL, 0);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
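
/*
 * Illustrative sketch (editor's addition): allocating a bio from a private
 * bio_set with room for one biovec and filling in the usual fields before
 * submission. "example_bio_set", example_submit_read() and the sector value
 * are hypothetical; bio_alloc_bioset(), bio_add_page() and submit_bio() are
 * the real APIs used as documented above.
 */
static struct bio_set *example_bio_set;	/* assumed created with bioset_create() */

static int example_submit_read(struct block_device *bdev, struct page *page)
{
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, 1, example_bio_set);
	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = 0;
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);
	return 0;
}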
void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);
/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);
inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);
/**
 * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
 * 	@bio: destination bio
 * 	@bio_src: bio to clone
 *
 *	Clone a &bio. Caller will own the returned bio, but not
 *	the actual data it points to. Reference count of returned
 *	bio will be one.
 *
 * 	Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);
/**
 *	bio_clone_fast - clone a bio that shares the original bio's biovec
 *	@bio: bio to clone
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *
 *	Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);
		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);
static struct bio *__bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
				      struct bio_set *bs, int offset,
				      int size)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;
	struct bvec_iter iter_src = bio_src->bi_iter;

	/* for supporting partial clone */
	if (offset || size != bio_src->bi_iter.bi_size) {
		bio_advance_iter(bio_src, &iter_src, offset);
		iter_src.bi_size = size;
	}

	/*
	 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
	 * bio_src->bi_io_vec to bio->bi_io_vec.
	 *
	 * We can't do that anymore, because:
	 *
	 *  - The point of cloning the biovec is to produce a bio with a biovec
	 *    the caller can modify: bi_idx and bi_bvec_done should be 0.
	 *
	 *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
	 *    we tried to clone the whole thing bio_alloc_bioset() would fail.
	 *    But the clone should succeed as long as the number of biovecs we
	 *    actually need to allocate is fewer than BIO_MAX_PAGES.
	 *
	 *  - Lastly, bi_vcnt should not be looked at or relied upon by code
	 *    that does not own the bio - reason being drivers don't use it for
	 *    iterating over the biovec anymore, so expecting it to be kept up
	 *    to date (i.e. for clones that share the parent biovec) is just
	 *    asking for trouble and would force extra work on
	 *    __bio_clone_fast() anyways.
	 */

	bio = bio_alloc_bioset(gfp_mask, __bio_segments(bio_src,
			       &iter_src), bs);
	if (!bio)
		return NULL;
	bio->bi_bdev		= bio_src->bi_bdev;
	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		break;
	case REQ_OP_WRITE_SAME:
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		break;
	default:
		__bio_for_each_segment(bv, bio_src, iter, iter_src)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	if (bio_integrity(bio_src)) {
		int ret;

		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
		if (ret < 0) {
			bio_put(bio);
			return NULL;
		}
	}

	bio_clone_blkcg_association(bio, bio_src);

	return bio;
}
/**
 *	bio_clone_bioset - clone a bio
 *	@bio_src: bio to clone
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *
 *	Clone bio. Caller will own the returned bio, but not the actual data it
 *	points to. Reference count of returned bio will be one.
 */
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	return __bio_clone_bioset(bio_src, gfp_mask, bs, 0,
				  bio_src->bi_iter.bi_size);
}
EXPORT_SYMBOL(bio_clone_bioset);
/**
 *	bio_clone_bioset_partial - clone a partial bio
 *	@bio_src: bio to clone
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *	@offset: cloned starting from the offset
 *	@size: size for the cloned bio
 *
 *	Clone bio. Caller will own the returned bio, but not the actual data it
 *	points to. Reference count of returned bio will be one.
 */
struct bio *bio_clone_bioset_partial(struct bio *bio_src, gfp_t gfp_mask,
				     struct bio_set *bs, int offset,
				     int size)
{
	return __bio_clone_bioset(bio_src, gfp_mask, bs, offset, size);
}
EXPORT_SYMBOL(bio_clone_bioset_partial);
/**
 *	bio_add_pc_page	-	attempt to add page to bio
 *	@q: the target queue
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block device
 *	limitations. The target block device must allow bio's up to PAGE_SIZE,
 *	so it is always possible to add a single page to an empty bio.
 *
 *	This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
		    *page, unsigned int len, unsigned int offset)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			prev->bv_len += len;
			bio->bi_iter.bi_size += len;
			goto done;
		}

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(q, prev, offset))
			return 0;
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_phys_segments++;
	bio->bi_iter.bi_size += len;

	/*
	 * Perform a recount if the number of segments is greater
	 * than queue_max_segments(q).
	 */

	while (bio->bi_phys_segments > queue_max_segments(q)) {

		if (retried_segments)
			goto failed;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
		bio_clear_flag(bio, BIO_SEG_VALID);

 done:
	return len;

 failed:
	bvec->bv_page = NULL;
	bvec->bv_len = 0;
	bvec->bv_offset = 0;
	bio->bi_vcnt--;
	bio->bi_iter.bi_size -= len;
	blk_recount_segments(q, bio);
	return 0;
}
EXPORT_SYMBOL(bio_add_pc_page);
/**
 *	bio_add_page	-	attempt to add page to bio
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This will only fail
 *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	struct bio_vec *bv;

	/*
	 * cloned bio must not modify vec list
	 */
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == bv->bv_page &&
		    offset == bv->bv_offset + bv->bv_len) {
			bv->bv_len += len;
			goto done;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	bv = &bio->bi_io_vec[bio->bi_vcnt];
	bv->bv_page = page;
	bv->bv_len = len;
	bv->bv_offset = offset;

	bio->bi_vcnt++;
done:
	bio->bi_iter.bi_size += len;
	return len;
}
EXPORT_SYMBOL(bio_add_page);
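
/*
 * Illustrative sketch (editor's addition): bio_add_page() returns the number
 * of bytes actually added, so a caller filling a bio from an array of pages
 * should stop as soon as the bio is full. The names below are hypothetical;
 * only bio_add_page() is the real API.
 */
static unsigned int example_fill_bio(struct bio *bio, struct page **pages,
				     unsigned int nr_pages)
{
	unsigned int i, added = 0;

	for (i = 0; i < nr_pages; i++) {
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
			break;		/* bio is full (or cloned) */
		added++;
	}
	return added;
}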
/**
 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins as many pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	size_t offset, diff;
	ssize_t size;

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;
	nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;

	/*
	 * Deep magic below:  We need to walk the pinned pages backwards
	 * because we are abusing the space allocated for the bio_vecs
	 * for the page array.  Because the bio_vecs are larger than the
	 * page pointers by definition this will always work.  But it also
	 * means we can't use bio_add_page, so any changes to its semantics
	 * need to be reflected here as well.
	 */
	bio->bi_iter.bi_size += size;
	bio->bi_vcnt += nr_pages;

	diff = (nr_pages * PAGE_SIZE - offset) - size;
	while (nr_pages--) {
		bv[nr_pages].bv_page = pages[nr_pages];
		bv[nr_pages].bv_len = PAGE_SIZE;
		bv[nr_pages].bv_offset = 0;
	}

	bv[0].bv_offset += offset;
	bv[0].bv_len -= offset;
	if (diff)
		bv[bio->bi_vcnt - 1].bv_len -= diff;

	iov_iter_advance(iter, size);
	return 0;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
struct submit_bio_ret {
	struct completion event;
	int error;
};

static void submit_bio_wait_endio(struct bio *bio)
{
	struct submit_bio_ret *ret = bio->bi_private;

	ret->error = bio->bi_error;
	complete(&ret->event);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 */
int submit_bio_wait(struct bio *bio)
{
	struct submit_bio_ret ret;

	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	wait_for_completion_io(&ret.event);

	return ret.error;
}
EXPORT_SYMBOL(submit_bio_wait);
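
/*
 * Illustrative sketch (editor's addition): synchronous I/O with
 * submit_bio_wait(). The caller sleeps until the bio completes and gets the
 * completion error back as the return value, so this must not be used from
 * atomic context. The function and variable names are hypothetical.
 */
static int example_sync_write(struct block_device *bdev, struct page *page,
			      sector_t sector)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);	/* sleeps until completion */
	bio_put(bio);
	return ret;
}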
/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);
/**
 * bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages
 * are freed.
 */
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL(bio_alloc_pages);
/**
 * bio_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
 * @src and @dst as linked lists of bios.
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter, dst_iter;
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	src_iter = src->bi_iter;
	dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		src_bv = bio_iter_iovec(src, src_iter);
		dst_bv = bio_iter_iovec(dst, dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		bio_advance_iter(src, &src_iter, bytes);
		bio_advance_iter(dst, &dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data);
struct bio_map_data {
	int is_our_pages;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
					       gfp_t gfp_mask)
{
	if (iov_count > UIO_MAXIOV)
		return NULL;

	return kmalloc(sizeof(struct bio_map_data) +
		       sizeof(struct iovec) * iov_count, gfp_mask);
}
/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  &iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}
/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}
void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);
/**
 *	bio_uncopy_user	-	finish previously mapped bio
 *	@bio: bio being terminated
 *
 *	Free pages allocated from bio_copy_user_iov() and write back data
 *	to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}
/**
 *	bio_copy_user_iov	-	copy user data to bio
 *	@q:		destination block queue
 *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
 *	@iter:		iovec iterator
 *	@gfp_mask:	memory allocation flags
 *
 *	Prepares and returns a bio for indirect user io, bouncing data
 *	to/from kernel pages as necessary. Must be paired with
 *	call bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      const struct iov_iter *iter,
			      gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	for (i = 0; i < iter->nr_segs; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long) iter->iov[i].iov_base;
		end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
			>> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
	}

	if (offset)
		nr_pages++;

	bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = map_data ? 0 : 1;
	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
	iov_iter_init(&bmd->iter, iter->type, bmd->iov,
		      iter->nr_segs, iter->count);

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
			break;

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	/*
	 * success
	 */
	if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, *iter);
		if (ret)
			goto cleanup;
	}

	bio->bi_private = bmd;
	return bio;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ERR_PTR(ret);
}
/**
 *	bio_map_user_iov - map user iovec into bio
 *	@q:		the struct request_queue for the bio
 *	@iter:		iovec iterator
 *	@gfp_mask:	memory allocation flags
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q,
			     const struct iov_iter *iter,
			     gfp_t gfp_mask)
{
	int j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;
	struct iov_iter i;
	struct iovec iov;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least logical block size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
	if (!pages)
		goto out;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		ret = get_user_pages_fast(uaddr, local_nr_pages,
				(iter->type & WRITE) != WRITE,
				&pages[cur_page]);
		if (ret < local_nr_pages) {
			ret = -EFAULT;
			goto out_unmap;
		}

		offset = offset_in_page(uaddr);
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (len <= 0)
				break;

			if (bytes > len)
				bytes = len;

			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
					    bytes)
				break;

			len -= bytes;
			offset = 0;
		}

		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			put_page(pages[j++]);
	}

	kfree(pages);

	bio_set_flag(bio, BIO_USER_MAPPED);

	/*
	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);
	return bio;

 out_unmap:
	for (j = 0; j < nr_pages; j++) {
		if (!pages[j])
			break;
		put_page(pages[j]);
	}
 out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}
static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		put_page(bvec->bv_page);
	}

	bio_put(bio);
}

/**
 *	bio_unmap_user	-	unmap a bio
 *	@bio:		the bio being unmapped
 *
 *	Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
 *	process context.
 *
 *	bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}
EXPORT_SYMBOL(bio_unmap_user);
static void bio_map_kern_endio(struct bio *bio)
{
	bio_put(bio);
}

/**
 *	bio_map_kern	-	map kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to map
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio allocation
 *
 *	Map the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
EXPORT_SYMBOL(bio_map_kern);
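
/*
 * Illustrative sketch (editor's addition): wrapping a kernel buffer in a bio
 * with bio_map_kern(). Most in-tree users reach this through
 * blk_rq_map_kern(); the direct call below is only meant to show the
 * error-pointer convention. The function name is hypothetical.
 */
static struct bio *example_map_buffer(struct request_queue *q, void *buf,
				      unsigned int len)
{
	struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);

	if (IS_ERR(bio))
		return NULL;	/* mapping failed, e.g. partial mapping */
	return bio;
}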
static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}
/**
 *	bio_copy_kern	-	copy kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to copy
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio and page allocation
 *	@reading: data direction is READ
 *
 *	copy the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}
/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache pages.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */
/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}
static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page)
			put_page(page);
	}
}
/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;
/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}
*bio
)
1734 struct bio_vec
*bvec
;
1735 int nr_clean_pages
= 0;
1738 bio_for_each_segment_all(bvec
, bio
, i
) {
1739 struct page
*page
= bvec
->bv_page
;
1741 if (PageDirty(page
) || PageCompound(page
)) {
1743 bvec
->bv_page
= NULL
;
1749 if (nr_clean_pages
) {
1750 unsigned long flags
;
1752 spin_lock_irqsave(&bio_dirty_lock
, flags
);
1753 bio
->bi_private
= bio_dirty_list
;
1754 bio_dirty_list
= bio
;
1755 spin_unlock_irqrestore(&bio_dirty_lock
, flags
);
1756 schedule_work(&bio_dirty_work
);
void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part)
{
	int cpu = part_stat_lock();

	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], sectors);
	part_inc_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);

void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time)
{
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();

	part_stat_add(cpu, part, ticks[rw], duration);
	part_round_stats(cpu, part);
	part_dec_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_end_io_acct);
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bi, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif
static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}
/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack.  Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	blk_throtl_bio_endio(bio);
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);
/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
 * @bio is not freed before the split.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split = NULL;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	split = bio_clone_fast(bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split, 0, sectors);

	bio_advance(bio, split->bi_iter.bi_size);

	return split;
}
EXPORT_SYMBOL(bio_split);
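
/*
 * Illustrative sketch (editor's addition): how a stacking driver typically
 * uses bio_split() to peel off the first part of an oversized bio, chain it
 * to the remainder and requeue the remainder. The function name and the
 * max_sectors parameter are hypothetical; bio_split(), bio_chain() and
 * generic_make_request() are the real APIs.
 */
static void example_split_and_requeue(struct bio *bio, int max_sectors,
				      struct bio_set *bs)
{
	if (bio_sectors(bio) > max_sectors) {
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);

		bio_chain(split, bio);		/* complete bio only after split */
		generic_make_request(bio);	/* requeue the remainder */
		bio = split;
	}
	generic_make_request(bio);
}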
/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 */
void bio_trim(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 */

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_clear_flag(bio, BIO_SEG_VALID);

	bio_advance(bio, offset << 9);

	bio->bi_iter.bi_size = size;
}
EXPORT_SYMBOL_GPL(bio_trim);
/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
mempool_t *biovec_create_pool(int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

	return mempool_create_slab_pool(pool_entries, bp->slab);
}
void bioset_free(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);

	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	if (bs->bvec_pool)
		mempool_destroy(bs->bvec_pool);

	bioset_integrity_free(bs);
	bio_put_slab(bs);

	kfree(bs);
}
EXPORT_SYMBOL(bioset_free);
static struct bio_set *__bioset_create(unsigned int pool_size,
				       unsigned int front_pad,
				       bool create_bvec_pool)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	struct bio_set *bs;

	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
	if (!bs)
		return NULL;

	bs->front_pad = front_pad;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab) {
		kfree(bs);
		return NULL;
	}

	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
	if (!bs->bio_pool)
		goto bad;

	if (create_bvec_pool) {
		bs->bvec_pool = biovec_create_pool(pool_size);
		if (!bs->bvec_pool)
			goto bad;
	}

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
		goto bad;

	return bs;
bad:
	bioset_free(bs);
	return NULL;
}
/**
 * bioset_create  - Create a bio_set
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 */
struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, true);
}
EXPORT_SYMBOL(bioset_create);
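
/*
 * Illustrative sketch (editor's addition): using @front_pad to embed a bio at
 * the end of a driver-private structure, so a single mempool allocation
 * covers both. "struct example_io" and the names below are hypothetical;
 * bioset_create(), bio_alloc_bioset() and container_of() are real APIs.
 */
struct example_io {
	void		*private_data;
	struct bio	bio;	/* must be the last member */
};

/* assumed: example_bs = bioset_create(BIO_POOL_SIZE, offsetof(struct example_io, bio)); */
static struct bio_set *example_bs;

static struct example_io *example_alloc_io(void)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, example_bs);

	if (!bio)
		return NULL;
	return container_of(bio, struct example_io, bio);
}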
/**
 * bioset_create_nobvec  - Create a bio_set without bio_vec mempool
 * @pool_size:	Number of bio to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Same functionality as bioset_create() except that mempool is not
 *    created for bio_vecs. Saving some memory for bio_clone_fast() users.
 */
struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, false);
}
EXPORT_SYMBOL(bioset_create_nobvec);
#ifdef CONFIG_BLK_CGROUP

/**
 * bio_associate_blkcg - associate a bio with the specified blkcg
 * @bio: target bio
 * @blkcg_css: css of the blkcg to associate
 *
 * Associate @bio with the blkcg specified by @blkcg_css.  Block layer will
 * treat @bio as if it were issued by a task which belongs to the blkcg.
 *
 * This function takes an extra reference of @blkcg_css which will be put
 * when @bio is released.  The caller must own @bio and is responsible for
 * synchronizing calls to this function.
 */
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
{
	if (unlikely(bio->bi_css))
		return -EBUSY;
	css_get(blkcg_css);
	bio->bi_css = blkcg_css;
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_blkcg);
/**
 * bio_associate_current - associate a bio with %current
 * @bio: target bio
 *
 * Associate @bio with %current if it hasn't been associated yet.  Block
 * layer will treat @bio as if it were issued by %current no matter which
 * task actually issues it.
 *
 * This function takes an extra reference of @task's io_context and blkcg
 * which will be put when @bio is released.  The caller must own @bio,
 * ensure %current->io_context exists, and is responsible for synchronizing
 * calls to this function.
 */
int bio_associate_current(struct bio *bio)
{
	struct io_context *ioc;

	if (bio->bi_css)
		return -EBUSY;

	ioc = current->io_context;
	if (!ioc)
		return -ENOENT;

	get_io_context_active(ioc);
	bio->bi_ioc = ioc;
	bio->bi_css = task_get_css(current, io_cgrp_id);
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_current);
/**
 * bio_disassociate_task - undo bio_associate_current()
 * @bio: target bio
 */
void bio_disassociate_task(struct bio *bio)
{
	if (bio->bi_ioc) {
		put_io_context(bio->bi_ioc);
		bio->bi_ioc = NULL;
	}
	if (bio->bi_css) {
		css_put(bio->bi_css);
		bio->bi_css = NULL;
	}
}
/**
 * bio_clone_blkcg_association - clone blkcg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
{
	if (src->bi_css)
		WARN_ON(bio_associate_blkcg(dst, src->bi_css));
}

#endif /* CONFIG_BLK_CGROUP */
static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BVEC_POOL_NR; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}
static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);