1 /*
2 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
16 *
17 */
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/bio.h>
21 #include <linux/blkdev.h>
22 #include <linux/uio.h>
23 #include <linux/iocontext.h>
24 #include <linux/slab.h>
25 #include <linux/init.h>
26 #include <linux/kernel.h>
27 #include <linux/export.h>
28 #include <linux/mempool.h>
29 #include <linux/workqueue.h>
30 #include <linux/cgroup.h>
31 #include <scsi/sg.h> /* for struct sg_iovec */
32
33 #include <trace/events/block.h>
34
35 /*
36 * Test patch to inline a certain number of bi_io_vec's inside the bio
37 * itself, to shrink a bio data allocation from two mempool calls to one
38 */
39 #define BIO_INLINE_VECS 4
40
41 /*
42 * if you change this list, also change bvec_alloc or things will
43 * break badly! cannot be bigger than what you can fit into an
44 * unsigned short
45 */
46 #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
47 static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
48 BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
49 };
50 #undef BV
51
52 /*
53 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
54 * IO code that does not need private memory pools.
55 */
56 struct bio_set *fs_bio_set;
57 EXPORT_SYMBOL(fs_bio_set);
58
59 /*
60 * Our slab pool management
61 */
62 struct bio_slab {
63 struct kmem_cache *slab;
64 unsigned int slab_ref;
65 unsigned int slab_size;
66 char name[8];
67 };
68 static DEFINE_MUTEX(bio_slab_lock);
69 static struct bio_slab *bio_slabs;
70 static unsigned int bio_slab_nr, bio_slab_max;
71
72 static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
73 {
74 unsigned int sz = sizeof(struct bio) + extra_size;
75 struct kmem_cache *slab = NULL;
76 struct bio_slab *bslab, *new_bio_slabs;
77 unsigned int new_bio_slab_max;
78 unsigned int i, entry = -1;
79
80 mutex_lock(&bio_slab_lock);
81
82 i = 0;
83 while (i < bio_slab_nr) {
84 bslab = &bio_slabs[i];
85
86 if (!bslab->slab && entry == -1)
87 entry = i;
88 else if (bslab->slab_size == sz) {
89 slab = bslab->slab;
90 bslab->slab_ref++;
91 break;
92 }
93 i++;
94 }
95
96 if (slab)
97 goto out_unlock;
98
99 if (bio_slab_nr == bio_slab_max && entry == -1) {
100 new_bio_slab_max = bio_slab_max << 1;
101 new_bio_slabs = krealloc(bio_slabs,
102 new_bio_slab_max * sizeof(struct bio_slab),
103 GFP_KERNEL);
104 if (!new_bio_slabs)
105 goto out_unlock;
106 bio_slab_max = new_bio_slab_max;
107 bio_slabs = new_bio_slabs;
108 }
109 if (entry == -1)
110 entry = bio_slab_nr++;
111
112 bslab = &bio_slabs[entry];
113
114 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
115 slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
116 if (!slab)
117 goto out_unlock;
118
119 bslab->slab = slab;
120 bslab->slab_ref = 1;
121 bslab->slab_size = sz;
122 out_unlock:
123 mutex_unlock(&bio_slab_lock);
124 return slab;
125 }
126
127 static void bio_put_slab(struct bio_set *bs)
128 {
129 struct bio_slab *bslab = NULL;
130 unsigned int i;
131
132 mutex_lock(&bio_slab_lock);
133
134 for (i = 0; i < bio_slab_nr; i++) {
135 if (bs->bio_slab == bio_slabs[i].slab) {
136 bslab = &bio_slabs[i];
137 break;
138 }
139 }
140
141 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
142 goto out;
143
144 WARN_ON(!bslab->slab_ref);
145
146 if (--bslab->slab_ref)
147 goto out;
148
149 kmem_cache_destroy(bslab->slab);
150 bslab->slab = NULL;
151
152 out:
153 mutex_unlock(&bio_slab_lock);
154 }
155
156 unsigned int bvec_nr_vecs(unsigned short idx)
157 {
158 return bvec_slabs[idx].nr_vecs;
159 }
160
161 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
162 {
163 BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
164
165 if (idx == BIOVEC_MAX_IDX)
166 mempool_free(bv, pool);
167 else {
168 struct biovec_slab *bvs = bvec_slabs + idx;
169
170 kmem_cache_free(bvs->slab, bv);
171 }
172 }
173
174 struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
175 mempool_t *pool)
176 {
177 struct bio_vec *bvl;
178
179 /*
180 * see comment near bvec_array define!
181 */
182 switch (nr) {
183 case 1:
184 *idx = 0;
185 break;
186 case 2 ... 4:
187 *idx = 1;
188 break;
189 case 5 ... 16:
190 *idx = 2;
191 break;
192 case 17 ... 64:
193 *idx = 3;
194 break;
195 case 65 ... 128:
196 *idx = 4;
197 break;
198 case 129 ... BIO_MAX_PAGES:
199 *idx = 5;
200 break;
201 default:
202 return NULL;
203 }
204
205 /*
206 * idx now points to the pool we want to allocate from. only the
207 * 1-vec entry pool is mempool backed.
208 */
209 if (*idx == BIOVEC_MAX_IDX) {
210 fallback:
211 bvl = mempool_alloc(pool, gfp_mask);
212 } else {
213 struct biovec_slab *bvs = bvec_slabs + *idx;
214 gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
215
216 /*
217 * Make this allocation restricted and don't dump info on
218 * allocation failures, since we'll fallback to the mempool
219 * in case of failure.
220 */
221 __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
222
223 /*
224 * Try a slab allocation. If this fails and __GFP_WAIT
225 * is set, retry with the 1-entry mempool
226 */
227 bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
228 if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
229 *idx = BIOVEC_MAX_IDX;
230 goto fallback;
231 }
232 }
233
234 return bvl;
235 }
236
237 static void __bio_free(struct bio *bio)
238 {
239 bio_disassociate_task(bio);
240
241 if (bio_integrity(bio))
242 bio_integrity_free(bio);
243 }
244
245 static void bio_free(struct bio *bio)
246 {
247 struct bio_set *bs = bio->bi_pool;
248 void *p;
249
250 __bio_free(bio);
251
252 if (bs) {
253 if (bio_flagged(bio, BIO_OWNS_VEC))
254 bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));
255
256 /*
257 * If we have front padding, adjust the bio pointer before freeing
258 */
259 p = bio;
260 p -= bs->front_pad;
261
262 mempool_free(p, bs->bio_pool);
263 } else {
264 /* Bio was allocated by bio_kmalloc() */
265 kfree(bio);
266 }
267 }
268
269 void bio_init(struct bio *bio)
270 {
271 memset(bio, 0, sizeof(*bio));
272 bio->bi_flags = 1 << BIO_UPTODATE;
273 atomic_set(&bio->bi_remaining, 1);
274 atomic_set(&bio->bi_cnt, 1);
275 }
276 EXPORT_SYMBOL(bio_init);
277
278 /**
279 * bio_reset - reinitialize a bio
280 * @bio: bio to reset
281 *
282 * Description:
283 * After calling bio_reset(), @bio will be in the same state as a freshly
284 * allocated bio returned by bio_alloc_bioset() - the only fields that are
285 * preserved are the ones that are initialized by bio_alloc_bioset(). See
286 * comment in struct bio.
287 */
288 void bio_reset(struct bio *bio)
289 {
290 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
291
292 __bio_free(bio);
293
294 memset(bio, 0, BIO_RESET_BYTES);
295 bio->bi_flags = flags|(1 << BIO_UPTODATE);
296 atomic_set(&bio->bi_remaining, 1);
297 }
298 EXPORT_SYMBOL(bio_reset);
299
300 static void bio_chain_endio(struct bio *bio, int error)
301 {
302 bio_endio(bio->bi_private, error);
303 bio_put(bio);
304 }
305
306 /**
307 * bio_chain - chain bio completions
308 * @bio: the target bio
309 * @parent: the @bio's parent bio
310 *
311 * The caller won't have a bi_end_io called when @bio completes - instead,
312 * @parent's bi_end_io won't be called until both @parent and @bio have
313 * completed; the chained bio will also be freed when it completes.
314 *
315 * The caller must not set bi_private or bi_end_io in @bio.
316 */
317 void bio_chain(struct bio *bio, struct bio *parent)
318 {
319 BUG_ON(bio->bi_private || bio->bi_end_io);
320
321 bio->bi_private = parent;
322 bio->bi_end_io = bio_chain_endio;
323 atomic_inc(&parent->bi_remaining);
324 }
325 EXPORT_SYMBOL(bio_chain);
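
/*
 * Illustrative sketch (not part of this file): tying a helper bio to the
 * one the caller gave us, so the caller's bi_end_io only runs once both
 * have completed. "extra" might be e.g. a pre-zeroing bio; all names are
 * hypothetical.
 *
 *	struct bio *extra = bio_alloc(GFP_NOIO, 0);
 *
 *	(set up extra's device, sector and payload)
 *	bio_chain(extra, bio);
 *	generic_make_request(extra);
 *	generic_make_request(bio);
 */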
326
327 static void bio_alloc_rescue(struct work_struct *work)
328 {
329 struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
330 struct bio *bio;
331
332 while (1) {
333 spin_lock(&bs->rescue_lock);
334 bio = bio_list_pop(&bs->rescue_list);
335 spin_unlock(&bs->rescue_lock);
336
337 if (!bio)
338 break;
339
340 generic_make_request(bio);
341 }
342 }
343
344 static void punt_bios_to_rescuer(struct bio_set *bs)
345 {
346 struct bio_list punt, nopunt;
347 struct bio *bio;
348
349 /*
350 * In order to guarantee forward progress we must punt only bios that
351 * were allocated from this bio_set; otherwise, if there was a bio on
352 * there for a stacking driver higher up in the stack, processing it
353 * could require allocating bios from this bio_set, and doing that from
354 * our own rescuer would be bad.
355 *
356 * Since bio lists are singly linked, pop them all instead of trying to
357 * remove from the middle of the list:
358 */
359
360 bio_list_init(&punt);
361 bio_list_init(&nopunt);
362
363 while ((bio = bio_list_pop(current->bio_list)))
364 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
365
366 *current->bio_list = nopunt;
367
368 spin_lock(&bs->rescue_lock);
369 bio_list_merge(&bs->rescue_list, &punt);
370 spin_unlock(&bs->rescue_lock);
371
372 queue_work(bs->rescue_workqueue, &bs->rescue_work);
373 }
374
375 /**
376 * bio_alloc_bioset - allocate a bio for I/O
377 * @gfp_mask: the GFP_ mask given to the slab allocator
378 * @nr_iovecs: number of iovecs to pre-allocate
379 * @bs: the bio_set to allocate from.
380 *
381 * Description:
382 * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
383 * backed by the @bs's mempool.
384 *
385 * When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
386 * able to allocate a bio. This is due to the mempool guarantees. To make this
387 * work, callers must never allocate more than 1 bio at a time from this pool.
388 * Callers that need to allocate more than 1 bio must always submit the
389 * previously allocated bio for IO before attempting to allocate a new one.
390 * Failure to do so can cause deadlocks under memory pressure.
391 *
392 * Note that when running under generic_make_request() (i.e. any block
393 * driver), bios are not submitted until after you return - see the code in
394 * generic_make_request() that converts recursion into iteration, to prevent
395 * stack overflows.
396 *
397 * This would normally mean allocating multiple bios under
398 * generic_make_request() would be susceptible to deadlocks, but we have
399 * deadlock avoidance code that resubmits any blocked bios from a rescuer
400 * thread.
401 *
402 * However, we do not guarantee forward progress for allocations from other
403 * mempools. Doing multiple allocations from the same mempool under
404 * generic_make_request() should be avoided - instead, use bio_set's front_pad
405 * for per bio allocations.
406 *
407 * RETURNS:
408 * Pointer to new bio on success, NULL on failure.
409 */
410 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
411 {
412 gfp_t saved_gfp = gfp_mask;
413 unsigned front_pad;
414 unsigned inline_vecs;
415 unsigned long idx = BIO_POOL_NONE;
416 struct bio_vec *bvl = NULL;
417 struct bio *bio;
418 void *p;
419
420 if (!bs) {
421 if (nr_iovecs > UIO_MAXIOV)
422 return NULL;
423
424 p = kmalloc(sizeof(struct bio) +
425 nr_iovecs * sizeof(struct bio_vec),
426 gfp_mask);
427 front_pad = 0;
428 inline_vecs = nr_iovecs;
429 } else {
430 /*
431 * generic_make_request() converts recursion to iteration; this
432 * means if we're running beneath it, any bios we allocate and
433 * submit will not be submitted (and thus freed) until after we
434 * return.
435 *
436 * This exposes us to a potential deadlock if we allocate
437 * multiple bios from the same bio_set while running
438 * underneath generic_make_request(). If we were to allocate
439 * multiple bios (say a stacking block driver that was splitting
440 * bios), we would deadlock if we exhausted the mempool's
441 * reserve.
442 *
443 * We solve this, and guarantee forward progress, with a rescuer
444 * workqueue per bio_set. If we go to allocate and there are
445 * bios on current->bio_list, we first try the allocation
446 * without __GFP_WAIT; if that fails, we punt those bios we
447 * would be blocking to the rescuer workqueue before we retry
448 * with the original gfp_flags.
449 */
450
451 if (current->bio_list && !bio_list_empty(current->bio_list))
452 gfp_mask &= ~__GFP_WAIT;
453
454 p = mempool_alloc(bs->bio_pool, gfp_mask);
455 if (!p && gfp_mask != saved_gfp) {
456 punt_bios_to_rescuer(bs);
457 gfp_mask = saved_gfp;
458 p = mempool_alloc(bs->bio_pool, gfp_mask);
459 }
460
461 front_pad = bs->front_pad;
462 inline_vecs = BIO_INLINE_VECS;
463 }
464
465 if (unlikely(!p))
466 return NULL;
467
468 bio = p + front_pad;
469 bio_init(bio);
470
471 if (nr_iovecs > inline_vecs) {
472 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
473 if (!bvl && gfp_mask != saved_gfp) {
474 punt_bios_to_rescuer(bs);
475 gfp_mask = saved_gfp;
476 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
477 }
478
479 if (unlikely(!bvl))
480 goto err_free;
481
482 bio->bi_flags |= 1 << BIO_OWNS_VEC;
483 } else if (nr_iovecs) {
484 bvl = bio->bi_inline_vecs;
485 }
486
487 bio->bi_pool = bs;
488 bio->bi_flags |= idx << BIO_POOL_OFFSET;
489 bio->bi_max_vecs = nr_iovecs;
490 bio->bi_io_vec = bvl;
491 return bio;
492
493 err_free:
494 mempool_free(p, bs->bio_pool);
495 return NULL;
496 }
497 EXPORT_SYMBOL(bio_alloc_bioset);
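
/*
 * Illustrative sketch (not part of this file): a typical allocation from
 * the shared fs_bio_set. With __GFP_WAIT in the mask, the mempool
 * guarantees described above mean this will not fail, provided the
 * caller never holds more than one unsubmitted bio from the pool.
 * "bdev" and "sector" are hypothetical.
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 3, fs_bio_set);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 */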
498
499 void zero_fill_bio(struct bio *bio)
500 {
501 unsigned long flags;
502 struct bio_vec bv;
503 struct bvec_iter iter;
504
505 bio_for_each_segment(bv, bio, iter) {
506 char *data = bvec_kmap_irq(&bv, &flags);
507 memset(data, 0, bv.bv_len);
508 flush_dcache_page(bv.bv_page);
509 bvec_kunmap_irq(data, &flags);
510 }
511 }
512 EXPORT_SYMBOL(zero_fill_bio);
513
514 /**
515 * bio_put - release a reference to a bio
516 * @bio: bio to release reference to
517 *
518 * Description:
519 * Put a reference to a &struct bio, either one you have gotten with
520 * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
521 **/
522 void bio_put(struct bio *bio)
523 {
524 BIO_BUG_ON(!atomic_read(&bio->bi_cnt));
525
526 /*
527 * last put frees it
528 */
529 if (atomic_dec_and_test(&bio->bi_cnt))
530 bio_free(bio);
531 }
532 EXPORT_SYMBOL(bio_put);
533
534 inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
535 {
536 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
537 blk_recount_segments(q, bio);
538
539 return bio->bi_phys_segments;
540 }
541 EXPORT_SYMBOL(bio_phys_segments);
542
543 /**
544 * __bio_clone_fast - clone a bio that shares the original bio's biovec
545 * @bio: destination bio
546 * @bio_src: bio to clone
547 *
548 * Clone a &bio. The caller will own the resulting bio, but not
549 * the actual data it points to. The reference count of the
550 * resulting bio will be one.
551 *
552 * Caller must ensure that @bio_src is not freed before @bio.
553 */
554 void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
555 {
556 BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);
557
558 /*
559 * most users will be overriding ->bi_bdev with a new target,
560 * so we don't set nor calculate new physical/hw segment counts here
561 */
562 bio->bi_bdev = bio_src->bi_bdev;
563 bio->bi_flags |= 1 << BIO_CLONED;
564 bio->bi_rw = bio_src->bi_rw;
565 bio->bi_iter = bio_src->bi_iter;
566 bio->bi_io_vec = bio_src->bi_io_vec;
567 }
568 EXPORT_SYMBOL(__bio_clone_fast);
569
570 /**
571 * bio_clone_fast - clone a bio that shares the original bio's biovec
572 * @bio: bio to clone
573 * @gfp_mask: allocation priority
574 * @bs: bio_set to allocate from
575 *
576 * Like __bio_clone_fast, only also allocates the returned bio
577 */
578 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
579 {
580 struct bio *b;
581
582 b = bio_alloc_bioset(gfp_mask, 0, bs);
583 if (!b)
584 return NULL;
585
586 __bio_clone_fast(b, bio);
587
588 if (bio_integrity(bio)) {
589 int ret;
590
591 ret = bio_integrity_clone(b, bio, gfp_mask);
592
593 if (ret < 0) {
594 bio_put(b);
595 return NULL;
596 }
597 }
598
599 return b;
600 }
601 EXPORT_SYMBOL(bio_clone_fast);
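
/*
 * Illustrative sketch (not part of this file): a stacking driver cloning
 * an incoming bio before redirecting it at a lower device. "my_bio_set",
 * "lower_bdev" and "my_endio" are hypothetical.
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, my_bio_set);
 *
 *	clone->bi_bdev = lower_bdev;
 *	clone->bi_end_io = my_endio;
 *	clone->bi_private = bio;
 *	generic_make_request(clone);
 */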
602
603 /**
604 * bio_clone_bioset - clone a bio
605 * @bio_src: bio to clone
606 * @gfp_mask: allocation priority
607 * @bs: bio_set to allocate from
608 *
609 * Clone bio. Caller will own the returned bio, but not the actual data it
610 * points to. Reference count of returned bio will be one.
611 */
612 struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
613 struct bio_set *bs)
614 {
615 struct bvec_iter iter;
616 struct bio_vec bv;
617 struct bio *bio;
618
619 /*
620 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
621 * bio_src->bi_io_vec to bio->bi_io_vec.
622 *
623 * We can't do that anymore, because:
624 *
625 * - The point of cloning the biovec is to produce a bio with a biovec
626 * the caller can modify: bi_idx and bi_bvec_done should be 0.
627 *
628 * - The original bio could've had more than BIO_MAX_PAGES biovecs; if
629 * we tried to clone the whole thing bio_alloc_bioset() would fail.
630 * But the clone should succeed as long as the number of biovecs we
631 * actually need to allocate is fewer than BIO_MAX_PAGES.
632 *
633 * - Lastly, bi_vcnt should not be looked at or relied upon by code
634 * that does not own the bio - reason being drivers don't use it for
635 * iterating over the biovec anymore, so expecting it to be kept up
636 * to date (i.e. for clones that share the parent biovec) is just
637 * asking for trouble and would force extra work on
638 * __bio_clone_fast() anyways.
639 */
640
641 bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
642 if (!bio)
643 return NULL;
644
645 bio->bi_bdev = bio_src->bi_bdev;
646 bio->bi_rw = bio_src->bi_rw;
647 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
648 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
649
650 if (bio->bi_rw & REQ_DISCARD)
651 goto integrity_clone;
652
653 if (bio->bi_rw & REQ_WRITE_SAME) {
654 bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
655 goto integrity_clone;
656 }
657
658 bio_for_each_segment(bv, bio_src, iter)
659 bio->bi_io_vec[bio->bi_vcnt++] = bv;
660
661 integrity_clone:
662 if (bio_integrity(bio_src)) {
663 int ret;
664
665 ret = bio_integrity_clone(bio, bio_src, gfp_mask);
666 if (ret < 0) {
667 bio_put(bio);
668 return NULL;
669 }
670 }
671
672 return bio;
673 }
674 EXPORT_SYMBOL(bio_clone_bioset);
675
676 /**
677 * bio_get_nr_vecs - return approx number of vecs
678 * @bdev: I/O target
679 *
680 * Return the approximate number of pages we can send to this target.
681 * There's no guarantee that you will be able to fit this number of pages
682 * into a bio; it does not account for dynamic restrictions that vary
683 * on offset.
684 */
685 int bio_get_nr_vecs(struct block_device *bdev)
686 {
687 struct request_queue *q = bdev_get_queue(bdev);
688 int nr_pages;
689
690 nr_pages = min_t(unsigned,
691 queue_max_segments(q),
692 queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
693
694 return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
695
696 }
697 EXPORT_SYMBOL(bio_get_nr_vecs);
698
699 static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
700 *page, unsigned int len, unsigned int offset,
701 unsigned int max_sectors)
702 {
703 int retried_segments = 0;
704 struct bio_vec *bvec;
705
706 /*
707 * cloned bio must not modify vec list
708 */
709 if (unlikely(bio_flagged(bio, BIO_CLONED)))
710 return 0;
711
712 if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
713 return 0;
714
715 /*
716 * For filesystems with a blocksize smaller than the pagesize
717 * we will often be called with the same page as last time and
718 * a consecutive offset. Optimize this special case.
719 */
720 if (bio->bi_vcnt > 0) {
721 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
722
723 if (page == prev->bv_page &&
724 offset == prev->bv_offset + prev->bv_len) {
725 unsigned int prev_bv_len = prev->bv_len;
726 prev->bv_len += len;
727
728 if (q->merge_bvec_fn) {
729 struct bvec_merge_data bvm = {
730 /* prev_bvec is already charged in
731 bi_size, discharge it in order to
732 simulate merging updated prev_bvec
733 as new bvec. */
734 .bi_bdev = bio->bi_bdev,
735 .bi_sector = bio->bi_iter.bi_sector,
736 .bi_size = bio->bi_iter.bi_size -
737 prev_bv_len,
738 .bi_rw = bio->bi_rw,
739 };
740
741 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
742 prev->bv_len -= len;
743 return 0;
744 }
745 }
746
747 goto done;
748 }
749
750 /*
751 * If the queue doesn't support SG gaps and adding this
752 * offset would create a gap, disallow it.
753 */
754 if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
755 bvec_gap_to_prev(prev, offset))
756 return 0;
757 }
758
759 if (bio->bi_vcnt >= bio->bi_max_vecs)
760 return 0;
761
762 /*
763 * we might lose a segment or two here, but better that than
764 * making this too complex.
765 */
766
767 while (bio->bi_phys_segments >= queue_max_segments(q)) {
768
769 if (retried_segments)
770 return 0;
771
772 retried_segments = 1;
773 blk_recount_segments(q, bio);
774 }
775
776 /*
777 * setup the new entry, we might clear it again later if we
778 * cannot add the page
779 */
780 bvec = &bio->bi_io_vec[bio->bi_vcnt];
781 bvec->bv_page = page;
782 bvec->bv_len = len;
783 bvec->bv_offset = offset;
784
785 /*
786 * if queue has other restrictions (eg varying max sector size
787 * depending on offset), it can specify a merge_bvec_fn in the
788 * queue to get further control
789 */
790 if (q->merge_bvec_fn) {
791 struct bvec_merge_data bvm = {
792 .bi_bdev = bio->bi_bdev,
793 .bi_sector = bio->bi_iter.bi_sector,
794 .bi_size = bio->bi_iter.bi_size,
795 .bi_rw = bio->bi_rw,
796 };
797
798 /*
799 * merge_bvec_fn() returns number of bytes it can accept
800 * at this offset
801 */
802 if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
803 bvec->bv_page = NULL;
804 bvec->bv_len = 0;
805 bvec->bv_offset = 0;
806 return 0;
807 }
808 }
809
810 /* If we may be able to merge these biovecs, force a recount */
811 if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
812 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
813
814 bio->bi_vcnt++;
815 bio->bi_phys_segments++;
816 done:
817 bio->bi_iter.bi_size += len;
818 return len;
819 }
820
821 /**
822 * bio_add_pc_page - attempt to add page to bio
823 * @q: the target queue
824 * @bio: destination bio
825 * @page: page to add
826 * @len: vec entry length
827 * @offset: vec entry offset
828 *
829 * Attempt to add a page to the bio_vec maplist. This can fail for a
830 * number of reasons, such as the bio being full or target block device
831 * limitations. The target block device must allow bios up to PAGE_SIZE,
832 * so it is always possible to add a single page to an empty bio.
833 *
834 * This should only be used by REQ_PC bios.
835 */
836 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
837 unsigned int len, unsigned int offset)
838 {
839 return __bio_add_page(q, bio, page, len, offset,
840 queue_max_hw_sectors(q));
841 }
842 EXPORT_SYMBOL(bio_add_pc_page);
843
844 /**
845 * bio_add_page - attempt to add page to bio
846 * @bio: destination bio
847 * @page: page to add
848 * @len: vec entry length
849 * @offset: vec entry offset
850 *
851 * Attempt to add a page to the bio_vec maplist. This can fail for a
852 * number of reasons, such as the bio being full or target block device
853 * limitations. The target block device must allow bios up to PAGE_SIZE,
854 * so it is always possible to add a single page to an empty bio.
855 */
856 int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
857 unsigned int offset)
858 {
859 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
860 unsigned int max_sectors;
861
862 max_sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
863 if ((max_sectors < (len >> 9)) && !bio->bi_iter.bi_size)
864 max_sectors = len >> 9;
865
866 return __bio_add_page(q, bio, page, len, offset, max_sectors);
867 }
868 EXPORT_SYMBOL(bio_add_page);
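
/*
 * Illustrative sketch (not part of this file): filling a bio one page at
 * a time, stopping when the bio or the queue refuses more. A short
 * return value means "submit what you have and start a new bio".
 * "pages" and "nr" are hypothetical.
 *
 *	for (i = 0; i < nr; i++)
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
 *			break;
 */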
869
870 struct submit_bio_ret {
871 struct completion event;
872 int error;
873 };
874
875 static void submit_bio_wait_endio(struct bio *bio, int error)
876 {
877 struct submit_bio_ret *ret = bio->bi_private;
878
879 ret->error = error;
880 complete(&ret->event);
881 }
882
883 /**
884 * submit_bio_wait - submit a bio, and wait until it completes
885 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
886 * @bio: The &struct bio which describes the I/O
887 *
888 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
889 * bio_endio() on failure.
890 */
891 int submit_bio_wait(int rw, struct bio *bio)
892 {
893 struct submit_bio_ret ret;
894
895 rw |= REQ_SYNC;
896 init_completion(&ret.event);
897 bio->bi_private = &ret;
898 bio->bi_end_io = submit_bio_wait_endio;
899 submit_bio(rw, bio);
900 wait_for_completion(&ret.event);
901
902 return ret.error;
903 }
904 EXPORT_SYMBOL(submit_bio_wait);
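
/*
 * Illustrative sketch (not part of this file): synchronously reading one
 * page; this era of the API takes the rw flags as a separate argument.
 * "bdev", "sector" and "page" are hypothetical.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *	int err;
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	err = submit_bio_wait(READ, bio);
 *	bio_put(bio);
 */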
905
906 /**
907 * bio_advance - increment/complete a bio by some number of bytes
908 * @bio: bio to advance
909 * @bytes: number of bytes to complete
910 *
911 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
912 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
913 * be updated on the last bvec as well.
914 *
915 * @bio will then represent the remaining, uncompleted portion of the io.
916 */
917 void bio_advance(struct bio *bio, unsigned bytes)
918 {
919 if (bio_integrity(bio))
920 bio_integrity_advance(bio, bytes);
921
922 bio_advance_iter(bio, &bio->bi_iter, bytes);
923 }
924 EXPORT_SYMBOL(bio_advance);
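
/*
 * Illustrative sketch (not part of this file): consuming a bio in fixed
 * size pieces, as a driver handling partial completions might; each call
 * moves bi_sector forward and shrinks bi_size. "chunk_bytes" and
 * "transfer_one_chunk" are hypothetical.
 *
 *	while (bio->bi_iter.bi_size) {
 *		unsigned n = min(bio->bi_iter.bi_size, chunk_bytes);
 *
 *		transfer_one_chunk(bio, n);
 *		bio_advance(bio, n);
 *	}
 */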
925
926 /**
927 * bio_alloc_pages - allocates a single page for each bvec in a bio
928 * @bio: bio to allocate pages for
929 * @gfp_mask: flags for allocation
930 *
931 * Allocates pages up to @bio->bi_vcnt.
932 *
933 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
934 * freed.
935 */
936 int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
937 {
938 int i;
939 struct bio_vec *bv;
940
941 bio_for_each_segment_all(bv, bio, i) {
942 bv->bv_page = alloc_page(gfp_mask);
943 if (!bv->bv_page) {
944 while (--bv >= bio->bi_io_vec)
945 __free_page(bv->bv_page);
946 return -ENOMEM;
947 }
948 }
949
950 return 0;
951 }
952 EXPORT_SYMBOL(bio_alloc_pages);
953
954 /**
955 * bio_copy_data - copy contents of data buffers from one chain of bios to
956 * another
957 * @src: source bio list
958 * @dst: destination bio list
959 *
960 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
961 * @src and @dst as linked lists of bios.
962 *
963 * Stops when it reaches the end of either @src or @dst - that is, copies
964 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
965 */
966 void bio_copy_data(struct bio *dst, struct bio *src)
967 {
968 struct bvec_iter src_iter, dst_iter;
969 struct bio_vec src_bv, dst_bv;
970 void *src_p, *dst_p;
971 unsigned bytes;
972
973 src_iter = src->bi_iter;
974 dst_iter = dst->bi_iter;
975
976 while (1) {
977 if (!src_iter.bi_size) {
978 src = src->bi_next;
979 if (!src)
980 break;
981
982 src_iter = src->bi_iter;
983 }
984
985 if (!dst_iter.bi_size) {
986 dst = dst->bi_next;
987 if (!dst)
988 break;
989
990 dst_iter = dst->bi_iter;
991 }
992
993 src_bv = bio_iter_iovec(src, src_iter);
994 dst_bv = bio_iter_iovec(dst, dst_iter);
995
996 bytes = min(src_bv.bv_len, dst_bv.bv_len);
997
998 src_p = kmap_atomic(src_bv.bv_page);
999 dst_p = kmap_atomic(dst_bv.bv_page);
1000
1001 memcpy(dst_p + dst_bv.bv_offset,
1002 src_p + src_bv.bv_offset,
1003 bytes);
1004
1005 kunmap_atomic(dst_p);
1006 kunmap_atomic(src_p);
1007
1008 bio_advance_iter(src, &src_iter, bytes);
1009 bio_advance_iter(dst, &dst_iter, bytes);
1010 }
1011 }
1012 EXPORT_SYMBOL(bio_copy_data);
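
/*
 * Illustrative sketch (not part of this file): the bounce pattern this
 * helper enables - clone a write bio, back the clone with fresh pages,
 * copy the payload across, then submit the clone instead. "bs" is a
 * hypothetical bio_set.
 *
 *	struct bio *bounce = bio_clone_bioset(bio, GFP_NOIO, bs);
 *
 *	bio_alloc_pages(bounce, GFP_NOIO);
 *	bio_copy_data(bounce, bio);
 *	generic_make_request(bounce);
 */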
1013
1014 struct bio_map_data {
1015 int nr_sgvecs;
1016 int is_our_pages;
1017 struct sg_iovec sgvecs[];
1018 };
1019
1020 static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
1021 const struct sg_iovec *iov, int iov_count,
1022 int is_our_pages)
1023 {
1024 memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
1025 bmd->nr_sgvecs = iov_count;
1026 bmd->is_our_pages = is_our_pages;
1027 bio->bi_private = bmd;
1028 }
1029
1030 static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
1031 gfp_t gfp_mask)
1032 {
1033 if (iov_count > UIO_MAXIOV)
1034 return NULL;
1035
1036 return kmalloc(sizeof(struct bio_map_data) +
1037 sizeof(struct sg_iovec) * iov_count, gfp_mask);
1038 }
1039
1040 static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count,
1041 int to_user, int from_user, int do_free_page)
1042 {
1043 int ret = 0, i;
1044 struct bio_vec *bvec;
1045 int iov_idx = 0;
1046 unsigned int iov_off = 0;
1047
1048 bio_for_each_segment_all(bvec, bio, i) {
1049 char *bv_addr = page_address(bvec->bv_page);
1050 unsigned int bv_len = bvec->bv_len;
1051
1052 while (bv_len && iov_idx < iov_count) {
1053 unsigned int bytes;
1054 char __user *iov_addr;
1055
1056 bytes = min_t(unsigned int,
1057 iov[iov_idx].iov_len - iov_off, bv_len);
1058 iov_addr = iov[iov_idx].iov_base + iov_off;
1059
1060 if (!ret) {
1061 if (to_user)
1062 ret = copy_to_user(iov_addr, bv_addr,
1063 bytes);
1064
1065 if (from_user)
1066 ret = copy_from_user(bv_addr, iov_addr,
1067 bytes);
1068
1069 if (ret)
1070 ret = -EFAULT;
1071 }
1072
1073 bv_len -= bytes;
1074 bv_addr += bytes;
1075 iov_addr += bytes;
1076 iov_off += bytes;
1077
1078 if (iov[iov_idx].iov_len == iov_off) {
1079 iov_idx++;
1080 iov_off = 0;
1081 }
1082 }
1083
1084 if (do_free_page)
1085 __free_page(bvec->bv_page);
1086 }
1087
1088 return ret;
1089 }
1090
1091 /**
1092 * bio_uncopy_user - finish previously mapped bio
1093 * @bio: bio being terminated
1094 *
1095 * Free pages allocated from bio_copy_user() and write back data
1096 * to user space in case of a read.
1097 */
1098 int bio_uncopy_user(struct bio *bio)
1099 {
1100 struct bio_map_data *bmd = bio->bi_private;
1101 struct bio_vec *bvec;
1102 int ret = 0, i;
1103
1104 if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1105 /*
1106 * if we're in a workqueue, the request is orphaned, so
1107 * don't copy into a random user address space, just free.
1108 */
1109 if (current->mm)
1110 ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs,
1111 bio_data_dir(bio) == READ,
1112 0, bmd->is_our_pages);
1113 else if (bmd->is_our_pages)
1114 bio_for_each_segment_all(bvec, bio, i)
1115 __free_page(bvec->bv_page);
1116 }
1117 kfree(bmd);
1118 bio_put(bio);
1119 return ret;
1120 }
1121 EXPORT_SYMBOL(bio_uncopy_user);
1122
1123 /**
1124 * bio_copy_user_iov - copy user data to bio
1125 * @q: destination block queue
1126 * @map_data: pointer to the rq_map_data holding pages (if necessary)
1127 * @iov: the iovec.
1128 * @iov_count: number of elements in the iovec
1129 * @write_to_vm: bool indicating writing to pages or not
1130 * @gfp_mask: memory allocation flags
1131 *
1132 * Prepares and returns a bio for indirect user io, bouncing data
1133 * to/from kernel pages as necessary. Must be paired with a
1134 * call to bio_uncopy_user() on io completion.
1135 */
1136 struct bio *bio_copy_user_iov(struct request_queue *q,
1137 struct rq_map_data *map_data,
1138 const struct sg_iovec *iov, int iov_count,
1139 int write_to_vm, gfp_t gfp_mask)
1140 {
1141 struct bio_map_data *bmd;
1142 struct bio_vec *bvec;
1143 struct page *page;
1144 struct bio *bio;
1145 int i, ret;
1146 int nr_pages = 0;
1147 unsigned int len = 0;
1148 unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
1149
1150 for (i = 0; i < iov_count; i++) {
1151 unsigned long uaddr;
1152 unsigned long end;
1153 unsigned long start;
1154
1155 uaddr = (unsigned long)iov[i].iov_base;
1156 end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1157 start = uaddr >> PAGE_SHIFT;
1158
1159 /*
1160 * Overflow, abort
1161 */
1162 if (end < start)
1163 return ERR_PTR(-EINVAL);
1164
1165 nr_pages += end - start;
1166 len += iov[i].iov_len;
1167 }
1168
1169 if (offset)
1170 nr_pages++;
1171
1172 bmd = bio_alloc_map_data(iov_count, gfp_mask);
1173 if (!bmd)
1174 return ERR_PTR(-ENOMEM);
1175
1176 ret = -ENOMEM;
1177 bio = bio_kmalloc(gfp_mask, nr_pages);
1178 if (!bio)
1179 goto out_bmd;
1180
1181 if (!write_to_vm)
1182 bio->bi_rw |= REQ_WRITE;
1183
1184 ret = 0;
1185
1186 if (map_data) {
1187 nr_pages = 1 << map_data->page_order;
1188 i = map_data->offset / PAGE_SIZE;
1189 }
1190 while (len) {
1191 unsigned int bytes = PAGE_SIZE;
1192
1193 bytes -= offset;
1194
1195 if (bytes > len)
1196 bytes = len;
1197
1198 if (map_data) {
1199 if (i == map_data->nr_entries * nr_pages) {
1200 ret = -ENOMEM;
1201 break;
1202 }
1203
1204 page = map_data->pages[i / nr_pages];
1205 page += (i % nr_pages);
1206
1207 i++;
1208 } else {
1209 page = alloc_page(q->bounce_gfp | gfp_mask);
1210 if (!page) {
1211 ret = -ENOMEM;
1212 break;
1213 }
1214 }
1215
1216 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1217 break;
1218
1219 len -= bytes;
1220 offset = 0;
1221 }
1222
1223 if (ret)
1224 goto cleanup;
1225
1226 /*
1227 * success
1228 */
1229 if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
1230 (map_data && map_data->from_user)) {
1231 ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0);
1232 if (ret)
1233 goto cleanup;
1234 }
1235
1236 bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
1237 return bio;
1238 cleanup:
1239 if (!map_data)
1240 bio_for_each_segment_all(bvec, bio, i)
1241 __free_page(bvec->bv_page);
1242
1243 bio_put(bio);
1244 out_bmd:
1245 kfree(bmd);
1246 return ERR_PTR(ret);
1247 }
1248
1249 /**
1250 * bio_copy_user - copy user data to bio
1251 * @q: destination block queue
1252 * @map_data: pointer to the rq_map_data holding pages (if necessary)
1253 * @uaddr: start of user address
1254 * @len: length in bytes
1255 * @write_to_vm: bool indicating writing to pages or not
1256 * @gfp_mask: memory allocation flags
1257 *
1258 * Prepares and returns a bio for indirect user io, bouncing data
1259 * to/from kernel pages as necessary. Must be paired with a
1260 * call to bio_uncopy_user() on io completion.
1261 */
1262 struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
1263 unsigned long uaddr, unsigned int len,
1264 int write_to_vm, gfp_t gfp_mask)
1265 {
1266 struct sg_iovec iov;
1267
1268 iov.iov_base = (void __user *)uaddr;
1269 iov.iov_len = len;
1270
1271 return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
1272 }
1273 EXPORT_SYMBOL(bio_copy_user);
1274
1275 static struct bio *__bio_map_user_iov(struct request_queue *q,
1276 struct block_device *bdev,
1277 const struct sg_iovec *iov, int iov_count,
1278 int write_to_vm, gfp_t gfp_mask)
1279 {
1280 int i, j;
1281 int nr_pages = 0;
1282 struct page **pages;
1283 struct bio *bio;
1284 int cur_page = 0;
1285 int ret, offset;
1286
1287 for (i = 0; i < iov_count; i++) {
1288 unsigned long uaddr = (unsigned long)iov[i].iov_base;
1289 unsigned long len = iov[i].iov_len;
1290 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1291 unsigned long start = uaddr >> PAGE_SHIFT;
1292
1293 /*
1294 * Overflow, abort
1295 */
1296 if (end < start)
1297 return ERR_PTR(-EINVAL);
1298
1299 nr_pages += end - start;
1300 /*
1301 * buffer must be aligned to at least hardsector size for now
1302 */
1303 if (uaddr & queue_dma_alignment(q))
1304 return ERR_PTR(-EINVAL);
1305 }
1306
1307 if (!nr_pages)
1308 return ERR_PTR(-EINVAL);
1309
1310 bio = bio_kmalloc(gfp_mask, nr_pages);
1311 if (!bio)
1312 return ERR_PTR(-ENOMEM);
1313
1314 ret = -ENOMEM;
1315 pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
1316 if (!pages)
1317 goto out;
1318
1319 for (i = 0; i < iov_count; i++) {
1320 unsigned long uaddr = (unsigned long)iov[i].iov_base;
1321 unsigned long len = iov[i].iov_len;
1322 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1323 unsigned long start = uaddr >> PAGE_SHIFT;
1324 const int local_nr_pages = end - start;
1325 const int page_limit = cur_page + local_nr_pages;
1326
1327 ret = get_user_pages_fast(uaddr, local_nr_pages,
1328 write_to_vm, &pages[cur_page]);
1329 if (ret < local_nr_pages) {
1330 ret = -EFAULT;
1331 goto out_unmap;
1332 }
1333
1334 offset = uaddr & ~PAGE_MASK;
1335 for (j = cur_page; j < page_limit; j++) {
1336 unsigned int bytes = PAGE_SIZE - offset;
1337
1338 if (len <= 0)
1339 break;
1340
1341 if (bytes > len)
1342 bytes = len;
1343
1344 /*
1345 * sorry...
1346 */
1347 if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
1348 bytes)
1349 break;
1350
1351 len -= bytes;
1352 offset = 0;
1353 }
1354
1355 cur_page = j;
1356 /*
1357 * release the pages we didn't map into the bio, if any
1358 */
1359 while (j < page_limit)
1360 page_cache_release(pages[j++]);
1361 }
1362
1363 kfree(pages);
1364
1365 /*
1366 * set data direction, and check if mapped pages need bouncing
1367 */
1368 if (!write_to_vm)
1369 bio->bi_rw |= REQ_WRITE;
1370
1371 bio->bi_bdev = bdev;
1372 bio->bi_flags |= (1 << BIO_USER_MAPPED);
1373 return bio;
1374
1375 out_unmap:
1376 for (i = 0; i < nr_pages; i++) {
1377 if (!pages[i])
1378 break;
1379 page_cache_release(pages[i]);
1380 }
1381 out:
1382 kfree(pages);
1383 bio_put(bio);
1384 return ERR_PTR(ret);
1385 }
1386
1387 /**
1388 * bio_map_user - map user address into bio
1389 * @q: the struct request_queue for the bio
1390 * @bdev: destination block device
1391 * @uaddr: start of user address
1392 * @len: length in bytes
1393 * @write_to_vm: bool indicating writing to pages or not
1394 * @gfp_mask: memory allocation flags
1395 *
1396 * Map the user space address into a bio suitable for io to a block
1397 * device. Returns an error pointer in case of error.
1398 */
1399 struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
1400 unsigned long uaddr, unsigned int len, int write_to_vm,
1401 gfp_t gfp_mask)
1402 {
1403 struct sg_iovec iov;
1404
1405 iov.iov_base = (void __user *)uaddr;
1406 iov.iov_len = len;
1407
1408 return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
1409 }
1410 EXPORT_SYMBOL(bio_map_user);
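
/*
 * Illustrative sketch (not part of this file): zero-copy mapping of a
 * user buffer, e.g. for an ioctl-driven passthrough command. "uaddr" and
 * "len" are hypothetical; the extra reference taken by bio_map_user_iov()
 * is dropped by bio_unmap_user() afterwards.
 *
 *	struct bio *bio = bio_map_user(q, bdev, uaddr, len, 1, GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	(issue and wait for the request, then)
 *	bio_unmap_user(bio);
 */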
1411
1412 /**
1413 * bio_map_user_iov - map user sg_iovec table into bio
1414 * @q: the struct request_queue for the bio
1415 * @bdev: destination block device
1416 * @iov: the iovec.
1417 * @iov_count: number of elements in the iovec
1418 * @write_to_vm: bool indicating writing to pages or not
1419 * @gfp_mask: memory allocation flags
1420 *
1421 * Map the user space address into a bio suitable for io to a block
1422 * device. Returns an error pointer in case of error.
1423 */
1424 struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
1425 const struct sg_iovec *iov, int iov_count,
1426 int write_to_vm, gfp_t gfp_mask)
1427 {
1428 struct bio *bio;
1429
1430 bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
1431 gfp_mask);
1432 if (IS_ERR(bio))
1433 return bio;
1434
1435 /*
1436 * subtle -- if __bio_map_user() ended up bouncing a bio,
1437 * it would normally disappear when its bi_end_io is run.
1438 * however, we need it for the unmap, so grab an extra
1439 * reference to it
1440 */
1441 bio_get(bio);
1442
1443 return bio;
1444 }
1445
1446 static void __bio_unmap_user(struct bio *bio)
1447 {
1448 struct bio_vec *bvec;
1449 int i;
1450
1451 /*
1452 * make sure we dirty pages we wrote to
1453 */
1454 bio_for_each_segment_all(bvec, bio, i) {
1455 if (bio_data_dir(bio) == READ)
1456 set_page_dirty_lock(bvec->bv_page);
1457
1458 page_cache_release(bvec->bv_page);
1459 }
1460
1461 bio_put(bio);
1462 }
1463
1464 /**
1465 * bio_unmap_user - unmap a bio
1466 * @bio: the bio being unmapped
1467 *
1468 * Unmap a bio previously mapped by bio_map_user(). Must be called with
1469 * a process context.
1470 *
1471 * bio_unmap_user() may sleep.
1472 */
1473 void bio_unmap_user(struct bio *bio)
1474 {
1475 __bio_unmap_user(bio);
1476 bio_put(bio);
1477 }
1478 EXPORT_SYMBOL(bio_unmap_user);
1479
1480 static void bio_map_kern_endio(struct bio *bio, int err)
1481 {
1482 bio_put(bio);
1483 }
1484
1485 static struct bio *__bio_map_kern(struct request_queue *q, void *data,
1486 unsigned int len, gfp_t gfp_mask)
1487 {
1488 unsigned long kaddr = (unsigned long)data;
1489 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1490 unsigned long start = kaddr >> PAGE_SHIFT;
1491 const int nr_pages = end - start;
1492 int offset, i;
1493 struct bio *bio;
1494
1495 bio = bio_kmalloc(gfp_mask, nr_pages);
1496 if (!bio)
1497 return ERR_PTR(-ENOMEM);
1498
1499 offset = offset_in_page(kaddr);
1500 for (i = 0; i < nr_pages; i++) {
1501 unsigned int bytes = PAGE_SIZE - offset;
1502
1503 if (len <= 0)
1504 break;
1505
1506 if (bytes > len)
1507 bytes = len;
1508
1509 if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
1510 offset) < bytes)
1511 break;
1512
1513 data += bytes;
1514 len -= bytes;
1515 offset = 0;
1516 }
1517
1518 bio->bi_end_io = bio_map_kern_endio;
1519 return bio;
1520 }
1521
1522 /**
1523 * bio_map_kern - map kernel address into bio
1524 * @q: the struct request_queue for the bio
1525 * @data: pointer to buffer to map
1526 * @len: length in bytes
1527 * @gfp_mask: allocation flags for bio allocation
1528 *
1529 * Map the kernel address into a bio suitable for io to a block
1530 * device. Returns an error pointer in case of error.
1531 */
1532 struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1533 gfp_t gfp_mask)
1534 {
1535 struct bio *bio;
1536
1537 bio = __bio_map_kern(q, data, len, gfp_mask);
1538 if (IS_ERR(bio))
1539 return bio;
1540
1541 if (bio->bi_iter.bi_size == len)
1542 return bio;
1543
1544 /*
1545 * Don't support partial mappings.
1546 */
1547 bio_put(bio);
1548 return ERR_PTR(-EINVAL);
1549 }
1550 EXPORT_SYMBOL(bio_map_kern);
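
/*
 * Illustrative sketch (not part of this file): wrapping a kernel buffer
 * for block io; if the whole buffer cannot be mapped, the helper above
 * returns -EINVAL rather than a partial bio. "buf" and "buf_len" are
 * hypothetical.
 *
 *	struct bio *bio = bio_map_kern(q, buf, buf_len, GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 */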
1551
1552 static void bio_copy_kern_endio(struct bio *bio, int err)
1553 {
1554 struct bio_vec *bvec;
1555 const int read = bio_data_dir(bio) == READ;
1556 struct bio_map_data *bmd = bio->bi_private;
1557 int i;
1558 char *p = bmd->sgvecs[0].iov_base;
1559
1560 bio_for_each_segment_all(bvec, bio, i) {
1561 char *addr = page_address(bvec->bv_page);
1562
1563 if (read)
1564 memcpy(p, addr, bvec->bv_len);
1565
1566 __free_page(bvec->bv_page);
1567 p += bvec->bv_len;
1568 }
1569
1570 kfree(bmd);
1571 bio_put(bio);
1572 }
1573
1574 /**
1575 * bio_copy_kern - copy kernel address into bio
1576 * @q: the struct request_queue for the bio
1577 * @data: pointer to buffer to copy
1578 * @len: length in bytes
1579 * @gfp_mask: allocation flags for bio and page allocation
1580 * @reading: data direction is READ
1581 *
1582 * copy the kernel address into a bio suitable for io to a block
1583 * device. Returns an error pointer in case of error.
1584 */
1585 struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1586 gfp_t gfp_mask, int reading)
1587 {
1588 struct bio *bio;
1589 struct bio_vec *bvec;
1590 int i;
1591
1592 bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
1593 if (IS_ERR(bio))
1594 return bio;
1595
1596 if (!reading) {
1597 void *p = data;
1598
1599 bio_for_each_segment_all(bvec, bio, i) {
1600 char *addr = page_address(bvec->bv_page);
1601
1602 memcpy(addr, p, bvec->bv_len);
1603 p += bvec->bv_len;
1604 }
1605 }
1606
1607 bio->bi_end_io = bio_copy_kern_endio;
1608
1609 return bio;
1610 }
1611 EXPORT_SYMBOL(bio_copy_kern);
1612
1613 /*
1614 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1615 * for performing direct-IO in BIOs.
1616 *
1617 * The problem is that we cannot run set_page_dirty() from interrupt context
1618 * because the required locks are not interrupt-safe. So what we can do is to
1619 * mark the pages dirty _before_ performing IO. And in interrupt context,
1620 * check that the pages are still dirty. If so, fine. If not, redirty them
1621 * in process context.
1622 *
1623 * We special-case compound pages here: normally this means reads into hugetlb
1624 * pages. The logic in here doesn't really work right for compound pages
1625 * because the VM does not uniformly chase down the head page in all cases.
1626 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1627 * handle them at all. So we skip compound pages here at an early stage.
1628 *
1629 * Note that this code is very hard to test under normal circumstances because
1630 * direct-io pins the pages with get_user_pages(). This makes
1631 * is_page_cache_freeable return false, and the VM will not clean the pages.
1632 * But other code (e.g., flusher threads) could clean the pages if they are mapped
1633 * pagecache.
1634 *
1635 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1636 * deferred bio dirtying paths.
1637 */
1638
1639 /*
1640 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1641 */
1642 void bio_set_pages_dirty(struct bio *bio)
1643 {
1644 struct bio_vec *bvec;
1645 int i;
1646
1647 bio_for_each_segment_all(bvec, bio, i) {
1648 struct page *page = bvec->bv_page;
1649
1650 if (page && !PageCompound(page))
1651 set_page_dirty_lock(page);
1652 }
1653 }
1654
1655 static void bio_release_pages(struct bio *bio)
1656 {
1657 struct bio_vec *bvec;
1658 int i;
1659
1660 bio_for_each_segment_all(bvec, bio, i) {
1661 struct page *page = bvec->bv_page;
1662
1663 if (page)
1664 put_page(page);
1665 }
1666 }
1667
1668 /*
1669 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1670 * If they are, then fine. If, however, some pages are clean then they must
1671 * have been written out during the direct-IO read. So we take another ref on
1672 * the BIO and the offending pages and re-dirty the pages in process context.
1673 *
1674 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1675 * here on. It will run one page_cache_release() against each page and will
1676 * run one bio_put() against the BIO.
1677 */
1678
1679 static void bio_dirty_fn(struct work_struct *work);
1680
1681 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1682 static DEFINE_SPINLOCK(bio_dirty_lock);
1683 static struct bio *bio_dirty_list;
1684
1685 /*
1686 * This runs in process context
1687 */
1688 static void bio_dirty_fn(struct work_struct *work)
1689 {
1690 unsigned long flags;
1691 struct bio *bio;
1692
1693 spin_lock_irqsave(&bio_dirty_lock, flags);
1694 bio = bio_dirty_list;
1695 bio_dirty_list = NULL;
1696 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1697
1698 while (bio) {
1699 struct bio *next = bio->bi_private;
1700
1701 bio_set_pages_dirty(bio);
1702 bio_release_pages(bio);
1703 bio_put(bio);
1704 bio = next;
1705 }
1706 }
1707
1708 void bio_check_pages_dirty(struct bio *bio)
1709 {
1710 struct bio_vec *bvec;
1711 int nr_clean_pages = 0;
1712 int i;
1713
1714 bio_for_each_segment_all(bvec, bio, i) {
1715 struct page *page = bvec->bv_page;
1716
1717 if (PageDirty(page) || PageCompound(page)) {
1718 page_cache_release(page);
1719 bvec->bv_page = NULL;
1720 } else {
1721 nr_clean_pages++;
1722 }
1723 }
1724
1725 if (nr_clean_pages) {
1726 unsigned long flags;
1727
1728 spin_lock_irqsave(&bio_dirty_lock, flags);
1729 bio->bi_private = bio_dirty_list;
1730 bio_dirty_list = bio;
1731 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1732 schedule_work(&bio_dirty_work);
1733 } else {
1734 bio_put(bio);
1735 }
1736 }
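
/*
 * Illustrative sketch (not part of this file): how a direct-io read path
 * would use the two helpers above - dirty the pages before submission,
 * then re-check (instead of a plain bio_put()) from the completion path.
 *
 *	bio_set_pages_dirty(bio);	(before submit_bio())
 *	...
 *	bio_check_pages_dirty(bio);	(from bi_end_io)
 */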
1737
1738 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1739 void bio_flush_dcache_pages(struct bio *bi)
1740 {
1741 struct bio_vec bvec;
1742 struct bvec_iter iter;
1743
1744 bio_for_each_segment(bvec, bi, iter)
1745 flush_dcache_page(bvec.bv_page);
1746 }
1747 EXPORT_SYMBOL(bio_flush_dcache_pages);
1748 #endif
1749
1750 /**
1751 * bio_endio - end I/O on a bio
1752 * @bio: bio
1753 * @error: error, if any
1754 *
1755 * Description:
1756 * bio_endio() will end I/O on the whole bio. bio_endio() is the
1757 * preferred way to end I/O on a bio, it takes care of clearing
1758 * BIO_UPTODATE on error. @error is 0 on success, and one of the
1759 * established -Exxxx (-EIO, for instance) error values in case
1760 * something went wrong. No one should call bi_end_io() directly on a
1761 * bio unless they own it and thus know that it has an end_io
1762 * function.
1763 **/
1764 void bio_endio(struct bio *bio, int error)
1765 {
1766 while (bio) {
1767 BUG_ON(atomic_read(&bio->bi_remaining) <= 0);
1768
1769 if (error)
1770 clear_bit(BIO_UPTODATE, &bio->bi_flags);
1771 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1772 error = -EIO;
1773
1774 if (!atomic_dec_and_test(&bio->bi_remaining))
1775 return;
1776
1777 /*
1778 * Need to have a real endio function for chained bios,
1779 * otherwise various corner cases will break (like stacking
1780 * block devices that save/restore bi_end_io) - however, we want
1781 * to avoid unbounded recursion and blowing the stack. Tail call
1782 * optimization would handle this, but compiling with frame
1783 * pointers also disables gcc's sibling call optimization.
1784 */
1785 if (bio->bi_end_io == bio_chain_endio) {
1786 struct bio *parent = bio->bi_private;
1787 bio_put(bio);
1788 bio = parent;
1789 } else {
1790 if (bio->bi_end_io)
1791 bio->bi_end_io(bio, error);
1792 bio = NULL;
1793 }
1794 }
1795 }
1796 EXPORT_SYMBOL(bio_endio);
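
/*
 * Illustrative sketch (not part of this file): a driver completing a bio
 * from its interrupt path; a negative errno clears BIO_UPTODATE for the
 * submitter. "struct my_cmd" is hypothetical.
 *
 *	static void my_cmd_done(struct my_cmd *cmd)
 *	{
 *		bio_endio(cmd->bio, cmd->hw_error ? -EIO : 0);
 *	}
 */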
1797
1798 /**
1799 * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
1800 * @bio: bio
1801 * @error: error, if any
1802 *
1803 * For code that has saved and restored bi_end_io. Think hard before using
1804 * this function; you probably should have cloned the entire bio.
1805 **/
1806 void bio_endio_nodec(struct bio *bio, int error)
1807 {
1808 atomic_inc(&bio->bi_remaining);
1809 bio_endio(bio, error);
1810 }
1811 EXPORT_SYMBOL(bio_endio_nodec);
1812
1813 /**
1814 * bio_split - split a bio
1815 * @bio: bio to split
1816 * @sectors: number of sectors to split from the front of @bio
1817 * @gfp: gfp mask
1818 * @bs: bio set to allocate from
1819 *
1820 * Allocates and returns a new bio which represents @sectors from the start of
1821 * @bio, and updates @bio to represent the remaining sectors.
1822 *
1823 * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
1824 * responsibility to ensure that @bio is not freed before the split.
1825 */
1826 struct bio *bio_split(struct bio *bio, int sectors,
1827 gfp_t gfp, struct bio_set *bs)
1828 {
1829 struct bio *split = NULL;
1830
1831 BUG_ON(sectors <= 0);
1832 BUG_ON(sectors >= bio_sectors(bio));
1833
1834 split = bio_clone_fast(bio, gfp, bs);
1835 if (!split)
1836 return NULL;
1837
1838 split->bi_iter.bi_size = sectors << 9;
1839
1840 if (bio_integrity(split))
1841 bio_integrity_trim(split, 0, sectors);
1842
1843 bio_advance(bio, split->bi_iter.bi_size);
1844
1845 return split;
1846 }
1847 EXPORT_SYMBOL(bio_split);
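
/*
 * Illustrative sketch (not part of this file): carving chunk-aligned
 * pieces off a large bio; note @sectors must stay below bio_sectors().
 * "chunk_sectors" and "bs" are hypothetical.
 *
 *	while (bio_sectors(bio) > chunk_sectors) {
 *		struct bio *split = bio_split(bio, chunk_sectors, GFP_NOIO, bs);
 *
 *		bio_chain(split, bio);
 *		generic_make_request(split);
 *	}
 *	generic_make_request(bio);
 */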
1848
1849 /**
1850 * bio_trim - trim a bio
1851 * @bio: bio to trim
1852 * @offset: number of sectors to trim from the front of @bio
1853 * @size: size we want to trim @bio to, in sectors
1854 */
1855 void bio_trim(struct bio *bio, int offset, int size)
1856 {
1857 /* 'bio' is a cloned bio which we need to trim to match
1858 * the given offset and size.
1859 */
1860
1861 size <<= 9;
1862 if (offset == 0 && size == bio->bi_iter.bi_size)
1863 return;
1864
1865 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1866
1867 bio_advance(bio, offset << 9);
1868
1869 bio->bi_iter.bi_size = size;
1870 }
1871 EXPORT_SYMBOL_GPL(bio_trim);
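
/*
 * Illustrative sketch (not part of this file): trimming a clone down to
 * the sub-range one target should see, dm-style; both arguments are in
 * 512-byte sectors. "start", "nsectors" and "bs" are hypothetical.
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);
 *
 *	bio_trim(clone, start - bio->bi_iter.bi_sector, nsectors);
 */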
1872
1873 /*
1874 * create memory pools for biovec's in a bio_set.
1875 * use the global biovec slabs created for general use.
1876 */
1877 mempool_t *biovec_create_pool(int pool_entries)
1878 {
1879 struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
1880
1881 return mempool_create_slab_pool(pool_entries, bp->slab);
1882 }
1883
1884 void bioset_free(struct bio_set *bs)
1885 {
1886 if (bs->rescue_workqueue)
1887 destroy_workqueue(bs->rescue_workqueue);
1888
1889 if (bs->bio_pool)
1890 mempool_destroy(bs->bio_pool);
1891
1892 if (bs->bvec_pool)
1893 mempool_destroy(bs->bvec_pool);
1894
1895 bioset_integrity_free(bs);
1896 bio_put_slab(bs);
1897
1898 kfree(bs);
1899 }
1900 EXPORT_SYMBOL(bioset_free);
1901
1902 /**
1903 * bioset_create - Create a bio_set
1904 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1905 * @front_pad: Number of bytes to allocate in front of the returned bio
1906 *
1907 * Description:
1908 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1909 * to ask for a number of bytes to be allocated in front of the bio.
1910 * Front pad allocation is useful for embedding the bio inside
1911 * another structure, to avoid allocating extra data to go with the bio.
1912 * Note that the bio must always be embedded at the END of that structure,
1913 * or things will break badly.
1914 */
1915 struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
1916 {
1917 unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1918 struct bio_set *bs;
1919
1920 bs = kzalloc(sizeof(*bs), GFP_KERNEL);
1921 if (!bs)
1922 return NULL;
1923
1924 bs->front_pad = front_pad;
1925
1926 spin_lock_init(&bs->rescue_lock);
1927 bio_list_init(&bs->rescue_list);
1928 INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1929
1930 bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1931 if (!bs->bio_slab) {
1932 kfree(bs);
1933 return NULL;
1934 }
1935
1936 bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
1937 if (!bs->bio_pool)
1938 goto bad;
1939
1940 bs->bvec_pool = biovec_create_pool(pool_size);
1941 if (!bs->bvec_pool)
1942 goto bad;
1943
1944 bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1945 if (!bs->rescue_workqueue)
1946 goto bad;
1947
1948 return bs;
1949 bad:
1950 bioset_free(bs);
1951 return NULL;
1952 }
1953 EXPORT_SYMBOL(bioset_create);
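
/*
 * Illustrative sketch (not part of this file): using @front_pad to embed
 * per-io driver state in front of each bio and recovering it with
 * container_of(). "struct my_io" is hypothetical; as noted above, the
 * bio must be the last member.
 *
 *	struct my_io {
 *		void *ctx;
 *		struct bio bio;
 *	};
 *
 *	bs = bioset_create(64, offsetof(struct my_io, bio));
 *	bio = bio_alloc_bioset(GFP_NOIO, nr, bs);
 *	io = container_of(bio, struct my_io, bio);
 */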
1954
1955 #ifdef CONFIG_BLK_CGROUP
1956 /**
1957 * bio_associate_current - associate a bio with %current
1958 * @bio: target bio
1959 *
1960 * Associate @bio with %current if it hasn't been associated yet. Block
1961 * layer will treat @bio as if it were issued by %current no matter which
1962 * task actually issues it.
1963 *
1964 * This function takes an extra reference of @task's io_context and blkcg
1965 * which will be put when @bio is released. The caller must own @bio,
1966 * ensure %current->io_context exists, and is responsible for synchronizing
1967 * calls to this function.
1968 */
1969 int bio_associate_current(struct bio *bio)
1970 {
1971 struct io_context *ioc;
1972 struct cgroup_subsys_state *css;
1973
1974 if (bio->bi_ioc)
1975 return -EBUSY;
1976
1977 ioc = current->io_context;
1978 if (!ioc)
1979 return -ENOENT;
1980
1981 /* acquire active ref on @ioc and associate */
1982 get_io_context_active(ioc);
1983 bio->bi_ioc = ioc;
1984
1985 /* associate blkcg if exists */
1986 rcu_read_lock();
1987 css = task_css(current, blkio_cgrp_id);
1988 if (css && css_tryget_online(css))
1989 bio->bi_css = css;
1990 rcu_read_unlock();
1991
1992 return 0;
1993 }
1994
1995 /**
1996 * bio_disassociate_task - undo bio_associate_current()
1997 * @bio: target bio
1998 */
1999 void bio_disassociate_task(struct bio *bio)
2000 {
2001 if (bio->bi_ioc) {
2002 put_io_context(bio->bi_ioc);
2003 bio->bi_ioc = NULL;
2004 }
2005 if (bio->bi_css) {
2006 css_put(bio->bi_css);
2007 bio->bi_css = NULL;
2008 }
2009 }
2010
2011 #endif /* CONFIG_BLK_CGROUP */
2012
2013 static void __init biovec_init_slabs(void)
2014 {
2015 int i;
2016
2017 for (i = 0; i < BIOVEC_NR_POOLS; i++) {
2018 int size;
2019 struct biovec_slab *bvs = bvec_slabs + i;
2020
2021 if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2022 bvs->slab = NULL;
2023 continue;
2024 }
2025
2026 size = bvs->nr_vecs * sizeof(struct bio_vec);
2027 bvs->slab = kmem_cache_create(bvs->name, size, 0,
2028 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2029 }
2030 }
2031
2032 static int __init init_bio(void)
2033 {
2034 bio_slab_max = 2;
2035 bio_slab_nr = 0;
2036 bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
2037 if (!bio_slabs)
2038 panic("bio: can't allocate bios\n");
2039
2040 bio_integrity_init();
2041 biovec_init_slabs();
2042
2043 fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
2044 if (!fs_bio_set)
2045 panic("bio: can't allocate bios\n");
2046
2047 if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
2048 panic("bio: can't create integrity pool\n");
2049
2050 return 0;
2051 }
2052 subsys_initcall(init_bio);