/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include <trace/events/block.h>
/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

static mempool_t *bio_split_pool __read_mostly;
/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);
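/*
 * Example (illustrative sketch): a typical fs_bio_set user allocates one bio,
 * fills it and submits it before allocating the next, so the shared mempool
 * never has to back more than one allocation per caller.  "bdev", "sector",
 * "page" and "my_end_io" are hypothetical caller state.
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, fs_bio_set);
 *
 *	if (bio) {
 *		bio->bi_bdev   = bdev;
 *		bio->bi_sector = sector;
 *		bio->bi_end_io = my_end_io;
 *		bio_add_page(bio, page, PAGE_SIZE, 0);
 *		submit_bio(READ, bio);
 *	}
 */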
/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
};

static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;
static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
		else if (bslab->slab_size == sz) {

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;

	entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);

	printk(KERN_INFO "bio: create slab <%s> at %d\n", bslab->name, entry);

	bslab->slab_size = sz;

	mutex_unlock(&bio_slab_lock);
}
static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)

	kmem_cache_destroy(bslab->slab);

	mutex_unlock(&bio_slab_lock);
}
unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);

	if (idx == BIOVEC_MAX_IDX)
		mempool_free(bv, pool);

		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
}
struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	/*
	 * see comment near bvec_array define!
	 */
	case 129 ... BIO_MAX_PAGES:

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * 1-vec entry pool is mempool backed.
	 */
	if (*idx == BIOVEC_MAX_IDX) {
		bvl = mempool_alloc(pool, gfp_mask);

		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_WAIT
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
			*idx = BIOVEC_MAX_IDX;
}
static void __bio_free(struct bio *bio)
{
	bio_disassociate_task(bio);

	if (bio_integrity(bio))
		bio_integrity_free(bio);
}

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;

	if (bio_flagged(bio, BIO_OWNS_VEC))
		bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));

	/*
	 * If we have front padding, adjust the bio pointer before freeing
	 */
	mempool_free(p, bs->bio_pool);

	/* Bio was allocated by bio_kmalloc() */
}
void bio_init(struct bio *bio)
{
	memset(bio, 0, sizeof(*bio));
	bio->bi_flags = 1 << BIO_UPTODATE;
	atomic_set(&bio->bi_cnt, 1);
}
EXPORT_SYMBOL(bio_init);
/**
 * bio_reset - reinitialize a bio
 *
 * After calling bio_reset(), @bio will be in the same state as a freshly
 * allocated bio returned by bio_alloc_bioset() - the only fields that are
 * preserved are the ones that are initialized by bio_alloc_bioset(). See
 * comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags | (1 << BIO_UPTODATE);
}
EXPORT_SYMBOL(bio_reset);
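/*
 * Example (illustrative sketch): a driver recycling one long-lived bio
 * between submissions resets it instead of freeing and reallocating.
 * "my_bio", "bdev", "next_sector", "page" and "my_end_io" are hypothetical.
 *
 *	bio_reset(my_bio);
 *	my_bio->bi_bdev   = bdev;
 *	my_bio->bi_sector = next_sector;
 *	my_bio->bi_end_io = my_end_io;
 *	bio_add_page(my_bio, page, PAGE_SIZE, 0);
 *	submit_bio(WRITE, my_bio);
 */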
static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);

	spin_lock(&bs->rescue_lock);
	bio = bio_list_pop(&bs->rescue_list);
	spin_unlock(&bs->rescue_lock);

	generic_make_request(bio);
}
static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;

	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(current->bio_list)))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);

	*current->bio_list = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
 *   able to allocate a bio. This is due to the mempool guarantees. To make this
 *   work, callers must never allocate more than 1 bio at a time from this pool.
 *   Callers that need to allocate more than 1 bio must always submit the
 *   previously allocated bio for IO before attempting to allocate a new one.
 *   Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned inline_vecs;
	unsigned long idx = BIO_POOL_NONE;
	struct bio_vec *bvl = NULL;

	if (nr_iovecs > UIO_MAXIOV)

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
		inline_vecs = nr_iovecs;

		/*
		 * generic_make_request() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set() while running
		 * underneath generic_make_request(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_WAIT; if that fails, we punt those bios we
		 * would be blocking to the rescuer workqueue before we retry
		 * with the original gfp_flags.
		 */

		if (current->bio_list && !bio_list_empty(current->bio_list))
			gfp_mask &= ~__GFP_WAIT;

		p = mempool_alloc(bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;

	if (nr_iovecs > inline_vecs) {
		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		}

		bio->bi_flags |= 1 << BIO_OWNS_VEC;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_flags |= idx << BIO_POOL_OFFSET;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;

	mempool_free(p, bs->bio_pool);
}
EXPORT_SYMBOL(bio_alloc_bioset);
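/*
 * Example (illustrative sketch): a caller with a private bio_set obeying the
 * "submit before allocating the next" rule documented above.  "my_bio_set",
 * "bdev", "sector", "page" and "my_end_io" are hypothetical.
 *
 *	while (sectors_left) {
 *		struct bio *bio;
 *
 *		bio = bio_alloc_bioset(GFP_NOIO, 1, my_bio_set);
 *		bio->bi_bdev   = bdev;
 *		bio->bi_sector = sector;
 *		bio->bi_end_io = my_end_io;
 *		bio_add_page(bio, page, PAGE_SIZE, 0);
 *
 *		submit_bio(WRITE, bio);		(submit before the next alloc)
 *
 *		sector       += PAGE_SIZE >> 9;
 *		sectors_left -= PAGE_SIZE >> 9;
 *	}
 */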
void zero_fill_bio(struct bio *bio)
{
	bio_for_each_segment(bv, bio, i) {
		char *data = bvec_kmap_irq(bv, &flags);
		memset(data, 0, bv->bv_len);
		flush_dcache_page(bv->bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);
/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Put a reference to a &struct bio, either one you have gotten with
 * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
 */
void bio_put(struct bio *bio)
{
	BIO_BUG_ON(!atomic_read(&bio->bi_cnt));

	if (atomic_dec_and_test(&bio->bi_cnt))
}
EXPORT_SYMBOL(bio_put);
inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);
/**
 * 	__bio_clone	-	clone a bio
 * 	@bio: destination bio
 * 	@bio_src: bio to clone
 *
 *	Clone a &bio. Caller will own the returned bio, but not
 *	the actual data it points to. Reference count of returned
 *	bio will be one.
 */
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
		bio_src->bi_max_vecs * sizeof(struct bio_vec));

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_sector = bio_src->bi_sector;
	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_flags |= 1 << BIO_CLONED;
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_vcnt = bio_src->bi_vcnt;
	bio->bi_size = bio_src->bi_size;
	bio->bi_idx = bio_src->bi_idx;
}
EXPORT_SYMBOL(__bio_clone);
/**
 *	bio_clone_bioset -	clone a bio
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *
 * 	Like __bio_clone, only also allocates the returned bio
 */
struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bs);

	if (bio_integrity(bio)) {
		ret = bio_integrity_clone(b, bio, gfp_mask);
	}
}
EXPORT_SYMBOL(bio_clone_bioset);
/**
 *	bio_get_nr_vecs		- return approx number of vecs
 *
 *	Return the approximate number of pages we can send to this target.
 *	There's no guarantee that you will be able to fit this number of pages
 *	into a bio, it does not account for dynamic restrictions that vary
 *	on load.
 */
int bio_get_nr_vecs(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	nr_pages = min_t(unsigned,
		     queue_max_segments(q),
		     queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);

	return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
}
EXPORT_SYMBOL(bio_get_nr_vecs);
static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
			  *page, unsigned int len, unsigned int offset,
			  unsigned short max_sectors)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))

	if (((bio->bi_size + len) >> 9) > max_sectors)

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			unsigned int prev_bv_len = prev->bv_len;

			if (q->merge_bvec_fn) {
				struct bvec_merge_data bvm = {
					/* prev_bvec is already charged in
					   bi_size, discharge it in order to
					   simulate merging updated prev_bvec
					   as new bvec. */
					.bi_bdev = bio->bi_bdev,
					.bi_sector = bio->bi_sector,
					.bi_size = bio->bi_size - prev_bv_len,

				if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {

	if (bio->bi_vcnt >= bio->bi_max_vecs)

	/*
	 * we might lose a segment or two here, but rather that than
	 * make this too complex.
	 */
	while (bio->bi_phys_segments >= queue_max_segments(q)) {

		if (retried_segments)

		retried_segments = 1;
		blk_recount_segments(q, bio);

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_offset = offset;

	/*
	 * if queue has other restrictions (eg varying max sector size
	 * depending on offset), it can specify a merge_bvec_fn in the
	 * queue to get further control
	 */
	if (q->merge_bvec_fn) {
		struct bvec_merge_data bvm = {
			.bi_bdev = bio->bi_bdev,
			.bi_sector = bio->bi_sector,
			.bi_size = bio->bi_size,

		/*
		 * merge_bvec_fn() returns number of bytes it can accept
		 * at this offset
		 */
		if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
			bvec->bv_page = NULL;

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
		bio->bi_flags &= ~(1 << BIO_SEG_VALID);

	bio->bi_phys_segments++;
}
/**
 *	bio_add_pc_page	-	attempt to add page to bio
 *	@q: the target queue
 *	@bio: destination bio
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block device
 *	limitations. The target block device must allow bio's up to PAGE_SIZE,
 *	so it is always possible to add a single page to an empty bio.
 *
 *	This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
		    unsigned int len, unsigned int offset)
{
	return __bio_add_page(q, bio, page, len, offset,
			      queue_max_hw_sectors(q));
}
EXPORT_SYMBOL(bio_add_pc_page);
/**
 *	bio_add_page	-	attempt to add page to bio
 *	@bio: destination bio
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block device
 *	limitations. The target block device must allow bio's up to PAGE_SIZE,
 *	so it is always possible to add a single page to an empty bio.
 */
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
		 unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
}
EXPORT_SYMBOL(bio_add_page);
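/*
 * Example (illustrative sketch): building a bio page by page and checking the
 * number of bytes actually accepted, since bio_add_page() can add less than
 * requested once the bio or queue limits are reached.  "pages" and "nr" are
 * hypothetical caller state.
 *
 *	for (i = 0; i < nr; i++) {
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE) {
 *			(submit this bio and continue with a fresh one)
 *			break;
 *		}
 *	}
 */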
struct submit_bio_ret {
	struct completion event;
};

static void submit_bio_wait_endio(struct bio *bio, int error)
{
	struct submit_bio_ret *ret = bio->bi_private;

	complete(&ret->event);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 */
int submit_bio_wait(int rw, struct bio *bio)
{
	struct submit_bio_ret ret;

	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = submit_bio_wait_endio;
	wait_for_completion(&ret.event);
}
EXPORT_SYMBOL(submit_bio_wait);
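/*
 * Example (illustrative sketch): synchronously reading one page from "bdev"
 * at "sector".  All identifiers other than the exported helpers are
 * hypothetical.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *	int err;
 *
 *	bio->bi_bdev   = bdev;
 *	bio->bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	err = submit_bio_wait(READ, bio);
 *	bio_put(bio);
 */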
/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio->bi_sector += bytes >> 9;
	bio->bi_size -= bytes;

	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)

		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
			WARN_ONCE(1, "bio idx %d >= vcnt %d\n",
				  bio->bi_idx, bio->bi_vcnt);

		if (bytes >= bio_iovec(bio)->bv_len) {
			bytes -= bio_iovec(bio)->bv_len;

			bio_iovec(bio)->bv_len -= bytes;
			bio_iovec(bio)->bv_offset += bytes;
}
EXPORT_SYMBOL(bio_advance);
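/*
 * Example (illustrative sketch): a driver that has completed "done" bytes of
 * a bio and wants @bio to describe only the remaining portion before handing
 * it back to the block layer.  "done" is hypothetical.
 *
 *	bio_advance(bio, done);
 *	generic_make_request(bio);	(resubmit the uncompleted tail)
 */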
/**
 * bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages
 * are freed.
 */
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);

			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
	}
}
EXPORT_SYMBOL(bio_alloc_pages);
/**
 * bio_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
 * @src and @dst as linked lists of bios.
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bio_vec *src_bv, *dst_bv;
	unsigned src_offset, dst_offset, bytes;

	src_bv = bio_iovec(src);
	dst_bv = bio_iovec(dst);

	src_offset = src_bv->bv_offset;
	dst_offset = dst_bv->bv_offset;

		if (src_offset == src_bv->bv_offset + src_bv->bv_len) {
			if (src_bv == bio_iovec_idx(src, src->bi_vcnt)) {

			src_bv = bio_iovec(src);

			src_offset = src_bv->bv_offset;

		if (dst_offset == dst_bv->bv_offset + dst_bv->bv_len) {
			if (dst_bv == bio_iovec_idx(dst, dst->bi_vcnt)) {

			dst_bv = bio_iovec(dst);

			dst_offset = dst_bv->bv_offset;

		bytes = min(dst_bv->bv_offset + dst_bv->bv_len - dst_offset,
			    src_bv->bv_offset + src_bv->bv_len - src_offset);

		src_p = kmap_atomic(src_bv->bv_page);
		dst_p = kmap_atomic(dst_bv->bv_page);

		memcpy(dst_p + dst_bv->bv_offset,
		       src_p + src_bv->bv_offset,

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);
}
EXPORT_SYMBOL(bio_copy_data);
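/*
 * Example (illustrative sketch): copying the payload of a completed bounce
 * bio back into the original bio it shadowed.  "orig" and "bounce" are
 * hypothetical bios describing the same amount of data.
 *
 *	bio_copy_data(orig, bounce);	(dst = orig, src = bounce)
 *	bio_put(bounce);
 *	bio_endio(orig, 0);
 */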
struct bio_map_data {
	struct bio_vec *iovecs;
	struct sg_iovec *sgvecs;
};

static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
			     struct sg_iovec *iov, int iov_count,
			     int is_our_pages)
{
	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
	memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
	bmd->nr_sgvecs = iov_count;
	bmd->is_our_pages = is_our_pages;
	bio->bi_private = bmd;
}

static void bio_free_map_data(struct bio_map_data *bmd)

static struct bio_map_data *bio_alloc_map_data(int nr_segs,
					       unsigned int iov_count,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (iov_count > UIO_MAXIOV)

	bmd = kmalloc(sizeof(*bmd), gfp_mask);

	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);

	bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
}
static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
			  struct sg_iovec *iov, int iov_count,
			  int to_user, int from_user, int do_free_page)
{
	struct bio_vec *bvec;
	unsigned int iov_off = 0;

	bio_for_each_segment_all(bvec, bio, i) {
		char *bv_addr = page_address(bvec->bv_page);
		unsigned int bv_len = iovecs[i].bv_len;

		while (bv_len && iov_idx < iov_count) {
			char __user *iov_addr;

			bytes = min_t(unsigned int,
				      iov[iov_idx].iov_len - iov_off, bv_len);
			iov_addr = iov[iov_idx].iov_base + iov_off;

				ret = copy_to_user(iov_addr, bv_addr,

				ret = copy_from_user(bv_addr, iov_addr,

			if (iov[iov_idx].iov_len == iov_off) {

			__free_page(bvec->bv_page);
	}
}
/**
 *	bio_uncopy_user	-	finish previously mapped bio
 *	@bio: bio being terminated
 *
 *	Free pages allocated from bio_copy_user() and write back data
 *	to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;

	if (!bio_flagged(bio, BIO_NULL_MAPPED))
		ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
				     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
				     0, bmd->is_our_pages);
	bio_free_map_data(bmd);
}
EXPORT_SYMBOL(bio_uncopy_user);
/**
 *	bio_copy_user_iov	-	copy user data to bio
 *	@q:		destination block queue
 *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
 *	@iov_count:	number of elements in the iovec
 *	@write_to_vm:	bool indicating writing to pages or not
 *	@gfp_mask:	memory allocation flags
 *
 *	Prepares and returns a bio for indirect user io, bouncing data
 *	to/from kernel pages as necessary. Must be paired with a
 *	call to bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      struct sg_iovec *iov, int iov_count,
			      int write_to_vm, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct bio_vec *bvec;
	unsigned int len = 0;
	unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr;
		unsigned long start;

		uaddr = (unsigned long)iov[i].iov_base;
		end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		len += iov[i].iov_len;
	}

	bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
		return ERR_PTR(-ENOMEM);

	bio = bio_kmalloc(gfp_mask, nr_pages);

		bio->bi_rw |= REQ_WRITE;

		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;

		unsigned int bytes = PAGE_SIZE;

			if (i == map_data->nr_entries * nr_pages) {

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			page = alloc_page(q->bounce_gfp | gfp_mask);

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)

	if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
	}

	bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);

	bio_free_map_data(bmd);
	return ERR_PTR(ret);
}
/**
 *	bio_copy_user	-	copy user data to bio
 *	@q: destination block queue
 *	@map_data: pointer to the rq_map_data holding pages (if necessary)
 *	@uaddr: start of user address
 *	@len: length in bytes
 *	@write_to_vm: bool indicating writing to pages or not
 *	@gfp_mask: memory allocation flags
 *
 *	Prepares and returns a bio for indirect user io, bouncing data
 *	to/from kernel pages as necessary. Must be paired with a
 *	call to bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
			  unsigned long uaddr, unsigned int len,
			  int write_to_vm, gfp_t gfp_mask)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)uaddr;

	return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
}
EXPORT_SYMBOL(bio_copy_user);
static struct bio *__bio_map_user_iov(struct request_queue *q,
				      struct block_device *bdev,
				      struct sg_iovec *iov, int iov_count,
				      int write_to_vm, gfp_t gfp_mask)
{
	struct page **pages;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long len = iov[i].iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least hardsector size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, nr_pages);
		return ERR_PTR(-ENOMEM);

	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long len = iov[i].iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		ret = get_user_pages_fast(uaddr, local_nr_pages,
					  write_to_vm, &pages[cur_page]);
		if (ret < local_nr_pages) {

		offset = uaddr & ~PAGE_MASK;
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
			    bytes)

		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			page_cache_release(pages[j++]);
	}

	/*
	 * set data direction, and check if mapped pages need bouncing
	 */
		bio->bi_rw |= REQ_WRITE;

	bio->bi_bdev = bdev;
	bio->bi_flags |= (1 << BIO_USER_MAPPED);

	for (i = 0; i < nr_pages; i++) {
		page_cache_release(pages[i]);
	}

	return ERR_PTR(ret);
}
/**
 *	bio_map_user	-	map user address into bio
 *	@q: the struct request_queue for the bio
 *	@bdev: destination block device
 *	@uaddr: start of user address
 *	@len: length in bytes
 *	@write_to_vm: bool indicating writing to pages or not
 *	@gfp_mask: memory allocation flags
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
			 unsigned long uaddr, unsigned int len, int write_to_vm,
			 gfp_t gfp_mask)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)uaddr;

	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
}
EXPORT_SYMBOL(bio_map_user);
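/*
 * Example (illustrative sketch): mapping a user buffer for a passthrough
 * style request and unmapping it after completion.  "q", "bdev", "ubuf" and
 * "count" are hypothetical caller state.
 *
 *	struct bio *bio;
 *
 *	bio = bio_map_user(q, bdev, (unsigned long)ubuf, count, 1, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *
 *	(issue the request carrying this bio and wait for it)
 *
 *	bio_unmap_user(bio);
 */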
/**
 *	bio_map_user_iov - map user sg_iovec table into bio
 *	@q: the struct request_queue for the bio
 *	@bdev: destination block device
 *	@iov_count: number of elements in the iovec
 *	@write_to_vm: bool indicating writing to pages or not
 *	@gfp_mask: memory allocation flags
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
			     struct sg_iovec *iov, int iov_count,
			     int write_to_vm, gfp_t gfp_mask)
{
	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
				 gfp_mask);

	/*
	 * subtle -- if __bio_map_user() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
}
static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;

	/*
	 * make sure we dirty pages we wrote to
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		page_cache_release(bvec->bv_page);
	}
}

/**
 *	bio_unmap_user	-	unmap a bio
 *	@bio:		the bio being unmapped
 *
 *	Unmap a bio previously mapped by bio_map_user(). Must be called with
 *	a process context.
 *
 *	bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
}
EXPORT_SYMBOL(bio_unmap_user);
static void bio_map_kern_endio(struct bio *bio, int err)

static struct bio *__bio_map_kern(struct request_queue *q, void *data,
				  unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;

	bio = bio_kmalloc(gfp_mask, nr_pages);
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes)

	bio->bi_end_io = bio_map_kern_endio;
}

/**
 *	bio_map_kern	-	map kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to map
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio allocation
 *
 *	Map the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	bio = __bio_map_kern(q, data, len, gfp_mask);

	if (bio->bi_size == len)

	/*
	 * Don't support partial mappings.
	 */
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL(bio_map_kern);
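/*
 * Example (illustrative sketch): wrapping a kmalloc'ed (page-backed, not
 * vmalloc'ed) kernel buffer in a bio.  "q", "buf" and "buf_len" are
 * hypothetical, and the resulting bio is handed to whatever request will
 * carry it.
 *
 *	struct bio *bio = bio_map_kern(q, buf, buf_len, GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 */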
static void bio_copy_kern_endio(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	const int read = bio_data_dir(bio) == READ;
	struct bio_map_data *bmd = bio->bi_private;

	char *p = bmd->sgvecs[0].iov_base;

	bio_for_each_segment_all(bvec, bio, i) {
		char *addr = page_address(bvec->bv_page);
		int len = bmd->iovecs[i].bv_len;

			memcpy(p, addr, len);

		__free_page(bvec->bv_page);
	}

	bio_free_map_data(bmd);
}
/**
 *	bio_copy_kern	-	copy kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to copy
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio and page allocation
 *	@reading: data direction is READ
 *
 *	copy the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	struct bio_vec *bvec;

	bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);

	bio_for_each_segment_all(bvec, bio, i) {
		char *addr = page_address(bvec->bv_page);

		memcpy(addr, p, bvec->bv_len);
	}

	bio->bi_end_io = bio_copy_kern_endio;
}
EXPORT_SYMBOL(bio_copy_kern);
/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */
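/*
 * Example (illustrative sketch) of the pattern described above for a
 * direct-IO read; "dio_bio" is hypothetical.
 *
 *	bio_set_pages_dirty(dio_bio);		(before submission)
 *	submit_bio(READ, dio_bio);
 *
 *	and later, from the interrupt-context completion handler:
 *
 *	bio_check_pages_dirty(dio_bio);		(re-dirties or defers, then puts)
 */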
/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}
static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
	}
}
/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one page_cache_release() against each page and will
 * run one bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;
/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
}
void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int nr_clean_pages = 0;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			page_cache_release(page);
			bvec->bv_page = NULL;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	}
}
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	struct bio_vec *bvec;

	bio_for_each_segment(bvec, bi, i)
		flush_dcache_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif
/**
 * bio_endio - end I/O on a bio
 * @error:	error, if any
 *
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the
 *   preferred way to end I/O on a bio, it takes care of clearing
 *   BIO_UPTODATE on error. @error is 0 on success, and one of the
 *   established -Exxxx (-EIO, for instance) error values in case
 *   something went wrong. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io
 *   function.
 **/
void bio_endio(struct bio *bio, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))

	trace_block_bio_complete(bio, error);

	if (bio->bi_end_io)
		bio->bi_end_io(bio, error);
}
EXPORT_SYMBOL(bio_endio);
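/*
 * Example (illustrative sketch): a completion handler a driver might install
 * in bio->bi_end_io before submission; bio_endio() will invoke it once the
 * whole bio has completed.  "struct my_request" is hypothetical.
 *
 *	static void my_end_io(struct bio *bio, int error)
 *	{
 *		struct my_request *rq = bio->bi_private;
 *
 *		rq->status = error;
 *		complete(&rq->done);
 *		bio_put(bio);
 *	}
 */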
void bio_pair_release(struct bio_pair *bp)
{
	if (atomic_dec_and_test(&bp->cnt)) {
		struct bio *master = bp->bio1.bi_private;

		bio_endio(master, bp->error);
		mempool_free(bp, bp->bio2.bi_private);
	}
}
EXPORT_SYMBOL(bio_pair_release);

static void bio_pair_end_1(struct bio *bi, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);

	bio_pair_release(bp);
}

static void bio_pair_end_2(struct bio *bi, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);

	bio_pair_release(bp);
}
/*
 * split a bio - only worry about a bio with a single page in its iovec
 */
struct bio_pair *bio_split(struct bio *bi, int first_sectors)
{
	struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);

	trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
			  bi->bi_sector + first_sectors);

	BUG_ON(bio_segments(bi) > 1);
	atomic_set(&bp->cnt, 3);

	bp->bio2.bi_sector += first_sectors;
	bp->bio2.bi_size -= first_sectors << 9;
	bp->bio1.bi_size = first_sectors << 9;

	if (bi->bi_vcnt != 0) {
		bp->bv1 = *bio_iovec(bi);
		bp->bv2 = *bio_iovec(bi);

		if (bio_is_rw(bi)) {
			bp->bv2.bv_offset += first_sectors << 9;
			bp->bv2.bv_len -= first_sectors << 9;
			bp->bv1.bv_len = first_sectors << 9;
		}

		bp->bio1.bi_io_vec = &bp->bv1;
		bp->bio2.bi_io_vec = &bp->bv2;

		bp->bio1.bi_max_vecs = 1;
		bp->bio2.bi_max_vecs = 1;
	}

	bp->bio1.bi_end_io = bio_pair_end_1;
	bp->bio2.bi_end_io = bio_pair_end_2;

	bp->bio1.bi_private = bi;
	bp->bio2.bi_private = bio_split_pool;

	if (bio_integrity(bi))
		bio_integrity_split(bi, bp, first_sectors);
}
EXPORT_SYMBOL(bio_split);
/**
 *      bio_sector_offset - Find hardware sector offset in bio
 *      @bio:           bio to inspect
 *      @index:         bio_vec index
 *      @offset:        offset in bv_page
 *
 *      Return the number of hardware sectors between beginning of bio
 *      and an end point indicated by a bio_vec index and an offset
 *      within that vector's page.
 */
sector_t bio_sector_offset(struct bio *bio, unsigned short index,
			   unsigned int offset)
{
	unsigned int sector_sz;

	sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);

	if (index >= bio->bi_idx)
		index = bio->bi_vcnt - 1;

	bio_for_each_segment_all(bv, bio, i) {
		if (offset > bv->bv_offset)
			sectors += (offset - bv->bv_offset) / sector_sz;

		sectors += bv->bv_len / sector_sz;
	}
}
EXPORT_SYMBOL(bio_sector_offset);
/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;

	return mempool_create_slab_pool(pool_entries, bp->slab);
}
void bioset_free(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);

	mempool_destroy(bs->bio_pool);

	mempool_destroy(bs->bvec_pool);

	bioset_integrity_free(bs);
}
EXPORT_SYMBOL(bioset_free);
/**
 * bioset_create  - Create a bio_set
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 *
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 */
struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);

	bs = kzalloc(sizeof(*bs), GFP_KERNEL);

	bs->front_pad = front_pad;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab) {

	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);

	bs->bvec_pool = biovec_create_pool(bs, pool_size);

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
}
EXPORT_SYMBOL(bioset_create);
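/*
 * Example (illustrative sketch): embedding the bio at the end of a driver's
 * per-I/O structure using front_pad, as recommended above.  "struct my_io"
 * and "my_bio_set" are hypothetical.
 *
 *	struct my_io {
 *		void		*private_data;
 *		struct bio	bio;		(must be the last member)
 *	};
 *
 *	my_bio_set = bioset_create(BIO_POOL_SIZE, offsetof(struct my_io, bio));
 *
 *	and after bio_alloc_bioset(..., my_bio_set):
 *
 *	struct my_io *io = container_of(bio, struct my_io, bio);
 */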
#ifdef CONFIG_BLK_CGROUP
/**
 * bio_associate_current - associate a bio with %current
 *
 * Associate @bio with %current if it hasn't been associated yet.  Block
 * layer will treat @bio as if it were issued by %current no matter which
 * task actually issues it.
 *
 * This function takes an extra reference of @task's io_context and blkcg
 * which will be put when @bio is released.  The caller must own @bio,
 * ensure %current->io_context exists, and is responsible for synchronizing
 * calls to this function.
 */
int bio_associate_current(struct bio *bio)
{
	struct io_context *ioc;
	struct cgroup_subsys_state *css;

	ioc = current->io_context;

	/* acquire active ref on @ioc and associate */
	get_io_context_active(ioc);

	/* associate blkcg if exists */
	css = task_subsys_state(current, blkio_subsys_id);
	if (css && css_tryget(css))
}

/**
 * bio_disassociate_task - undo bio_associate_current()
 */
void bio_disassociate_task(struct bio *bio)
{
	put_io_context(bio->bi_ioc);

	css_put(bio->bi_css);
}
#endif /* CONFIG_BLK_CGROUP */
static void __init biovec_init_slabs(void)
{
	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}
static int __init init_bio(void)
{
	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
						     sizeof(struct bio_pair));
	if (!bio_split_pool)
		panic("bio: can't create split pool\n");

	return 0;
}
subsys_initcall(init_bio);