/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H
#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#define BIO_BUG_ON	BUG_ON

#define BIO_MAX_PAGES		256

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)
#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))
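/*
 * Example (editor's note, not part of the original header): the block
 * layer counts 512-byte sectors, so a bio with bi_iter.bi_size == 4096
 * has bio_sectors(bio) == 8.
 */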
/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)
/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}
static inline bool bio_no_advance_iter(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}
static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}
static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}
static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, iter)				\
	(kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +	\
	 bio_iter_iovec((bio), (iter)).bv_offset)

#define __bio_kunmap_atomic(addr)	kunmap_atomic(addr)
/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, for eg virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)				\
	for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}
#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
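/*
 * Typical segment walk (an illustrative sketch, not part of the original
 * header): @bvec and @iter are caller-provided locals, and the iterator
 * copy made by bio_for_each_segment() leaves bio->bi_iter untouched.
 * process_page() stands in for a hypothetical per-segment handler.
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvec, bio, iter)
 *		process_page(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
 */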
static inline unsigned __bio_segments(struct bio *bio, struct bvec_iter *bvec)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	__bio_for_each_segment(bv, bio, iter, *bvec)
		segs++;

	return segs;
}

static inline unsigned bio_segments(struct bio *bio)
{
	return __bio_segments(bio, &bio->bi_iter);
}
/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 *	bio_get(bio);
 *	submit_bio(bio);
 *	if (bio->bi_flags ...)
 *		do_something
 *	bio_put(bio);
 *
 * without the bio_get(), the bio could complete I/O before submit_bio()
 * returns, and its memory would already be freed by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}
static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb__before_atomic();
	}
	atomic_set(&bio->__bi_cnt, count);
}
static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}
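/*
 * Example flag usage (an illustrative sketch): the helpers take BIO_*
 * bit numbers from blk_types.h, not masks.
 *
 *	bio_set_flag(bio, BIO_CHAIN);
 *	if (bio_flagged(bio, BIO_CHAIN))
 *		...;
 *	bio_clear_flag(bio, BIO_CHAIN);
 */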
static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}
static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}
enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
};
#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}
static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}
static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */
extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);
/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
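/*
 * Example splitting loop (an illustrative sketch): since bio_next_split()
 * returns @bio itself once the remainder fits, a driver can issue a large
 * bio in bounded chunks. @split_sectors, @bs and issue_chunk() are
 * hypothetical caller-side names.
 *
 *	struct bio *split;
 *
 *	do {
 *		split = bio_next_split(bio, split_sectors, GFP_NOIO, bs);
 *		issue_chunk(split);
 *	} while (split != bio);
 */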
extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
extern struct bio *bio_clone_bioset_partial(struct bio *, gfp_t,
					    struct bio_set *, int, int);

extern struct bio_set *fs_bio_set;
static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}
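/*
 * Typical allocation path (an illustrative sketch, error handling
 * elided): allocate from fs_bio_set, attach one page, and submit.
 * @bdev, @sector and @page are caller-supplied.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_set_op_attrs(bio, REQ_OP_READ, 0);
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);
 */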
extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_error = -EIO;
	bio_endio(bio);
}
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
extern struct bio *bio_map_user_iov(struct request_queue *,
				    const struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int reading);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part);
void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time);
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
extern void bio_free_pages(struct bio *bio);

extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *,
				     const struct iov_iter *,
				     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_blkcg(struct bio *bio,
			struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
static inline void bio_clone_blkcg_association(struct bio *dst,
			struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */
#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
				   unsigned long *flags)
{
	return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)
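/*
 * Example temporary mapping (an illustrative sketch): interrupts must
 * stay disabled between the kmap and kunmap, per the warning above.
 * @dst is a hypothetical destination buffer.
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memcpy(dst, buf, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */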
/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}
static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}
static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}
static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}
static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}
static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}
static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}
static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
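/*
 * Example list usage (an illustrative sketch): build a list, then drain
 * it in FIFO order. @bio1 and @bio2 are hypothetical bios the caller
 * already owns.
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	bio_list_add(&list, bio1);
 *	bio_list_add(&list, bio2);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		submit_bio(bio);
 */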
/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t *bio_pool;
	mempool_t *bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
	mempool_t *bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};
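/*
 * Example private pool (an illustrative sketch): a stacking driver
 * typically creates its own bio_set at init time and allocates bios from
 * it, so its forward progress never depends on the shared fs_bio_set.
 *
 *	struct bio_set *bs = bioset_create(BIO_POOL_SIZE, 0);
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
 */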
struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2
#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);
#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline bool bio_integrity_enabled(struct bio *bio)
{
	return false;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
	return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */
#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */