/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H
#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#ifdef CONFIG_BLOCK
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#define BIO_BUG_ON	BUG_ON

#define BIO_MAX_PAGES		256

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)
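/*
 * Worked example (illustrative, not part of the original header):
 * bi_size counts bytes while bi_sector counts 512-byte sectors, hence
 * the ">> 9" above. A bio with bi_iter.bi_sector == 2048 and
 * bi_iter.bi_size == 4096 spans bio_sectors(bio) == 8 sectors and ends
 * at bio_end_sector(bio) == 2056.
 */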
/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}
static inline bool bio_no_advance_iter(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}
static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}
static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline bool bio_full(struct bio *bio)
{
	return bio->bi_vcnt >= bio->bi_max_vecs;
}
#define mp_bvec_for_each_segment(bv, bvl, i, iter_all)			\
	for (bv = bvec_init_iter_all(&iter_all);			\
		(iter_all.done < (bvl)->bv_len) &&			\
		(mp_bvec_next_segment((bvl), &iter_all), 1);		\
		iter_all.done += bv->bv_len, i += 1)

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i, iter_all)			\
	for (i = 0, iter_all.idx = 0; iter_all.idx < (bio)->bi_vcnt; iter_all.idx++) \
		mp_bvec_for_each_segment(bvl, &((bio)->bi_io_vec[iter_all.idx]), i, iter_all)
static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}
#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define __bio_for_each_bvec(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)				\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
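/*
 * Example (illustrative sketch, not part of the original header): walk
 * every single-page segment of a bio and total the bytes it carries.
 * The "sum_bio_bytes" helper is hypothetical.
 *
 *	static unsigned int sum_bio_bytes(struct bio *bio)
 *	{
 *		struct bio_vec bv;
 *		struct bvec_iter iter;
 *		unsigned int bytes = 0;
 *
 *		bio_for_each_segment(bv, bio, iter)
 *			bytes += bv.bv_len;
 *
 *		return bytes;
 *	}
 */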
static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}
/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 *	bio_get(bio);
 *	submit_bio(bio);
 *	if (bio->bi_flags ...)
 *		do_something
 *	bio_put(bio);
 *
 * without the bio_get(), it could potentially complete I/O before submit_bio
 * returns; the bio would then already be freed memory by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}
static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb__before_atomic();
	}
	atomic_set(&bio->__bi_cnt, count);
}
static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}
static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}
static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}
enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
};
#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */
extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
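/*
 * Example (illustrative sketch, not part of the original header): carve
 * a bio into chunks of at most "max_sectors" each, chaining the
 * remainder behind every split so completions propagate. "max_sectors"
 * and "bs" are assumed to be supplied by the caller.
 *
 *	while (1) {
 *		struct bio *split = bio_next_split(bio, max_sectors,
 *						   GFP_NOIO, bs);
 *
 *		if (split == bio) {
 *			submit_bio(split);	last (or only) chunk
 *			break;
 *		}
 *		bio_chain(split, bio);
 *		submit_bio(split);
 *	}
 */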
enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}
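/*
 * Example (illustrative sketch, not part of the original header):
 * allocate a one-segment bio, point it at a device, attach a page and
 * submit it. "bdev", "page" and "sector" are assumed to come from the
 * caller; error handling is elided.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);
 */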
static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool same_page);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);

extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

void generic_start_io_acct(struct request_queue *q, int op,
				unsigned long sectors, struct hd_struct *part);
void generic_end_io_acct(struct request_queue *q, int op,
				struct hd_struct *part,
				unsigned long start_time);
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);

struct rq_map_data;
extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *,
				     struct iov_iter *,
				     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);

static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_set_dev(bio, bdev)			\
do {						\
	if ((bio)->bi_disk != (bdev)->bd_disk)	\
		bio_clear_flag(bio, BIO_THROTTLED);\
	(bio)->bi_disk = (bdev)->bd_disk;	\
	(bio)->bi_partno = (bdev)->bd_partno;	\
	bio_associate_blkg(bio);		\
} while (0)

#define bio_copy_dev(dst, src)			\
do {						\
	(dst)->bi_disk = (src)->bi_disk;	\
	(dst)->bi_partno = (src)->bi_partno;	\
	bio_clone_blkg_association(dst, src);	\
} while (0)

#define bio_dev(bio) \
	disk_devt((bio)->bi_disk)
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void bio_associate_blkg_from_page(struct bio *bio, struct page *page);
#else
static inline void bio_associate_blkg_from_page(struct bio *bio,
						struct page *page) { }
#endif

#ifdef CONFIG_BLK_CGROUP
void bio_disassociate_blkg(struct bio *bio);
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_disassociate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */
#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif
/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
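/*
 * Example (illustrative sketch, not part of the original header): defer
 * a batch of bios and drain them in FIFO order, as a remapping driver
 * might. "handle_one_bio" is a hypothetical helper.
 *
 *	struct bio_list list = BIO_EMPTY_LIST;
 *	struct bio *bio;
 *
 *	bio_list_add(&list, bio1);
 *	bio_list_add(&list, bio2);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		handle_one_bio(bio);
 */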
/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}
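/*
 * Example (illustrative sketch, not part of the original header): a
 * driver keeping a private bio_set so its allocations cannot be
 * starved by users of the shared fs_bio_set pool.
 *
 *	static struct bio_set my_bio_set;
 *
 *	if (bioset_init(&my_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
 *		return -ENOMEM;
 *
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_pages, &my_bio_set);
 *	...
 *	bioset_exit(&my_bio_set);	(on driver teardown)
 */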
/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);
#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */
/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different from IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_HIPRI;
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}
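/*
 * Example (illustrative sketch, not part of the original header): a
 * direct I/O path marking its bio for polling before submission, then
 * spinning on the completion. "q" is the target request_queue and "qc"
 * the cookie returned by submit_bio(); blk_poll() is declared in
 * blkdev.h, not here.
 *
 *	bio_set_polled(bio, iocb);
 *	qc = submit_bio(bio);
 *	...
 *	blk_poll(q, qc, true);
 */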
#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */