/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, iter)				\
	(kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +	\
	 bio_iter_iovec((bio), (iter)).bv_offset)

#define __bio_kunmap_atomic(addr)	kunmap_atomic(addr)

/*
 * merge helpers etc
 */

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, for eg virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)				\
	for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)

static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

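/*
 * Example (illustrative sketch, not part of this header's API): summing the
 * data bytes in a bio segment by segment. bio_for_each_segment() works on a
 * copy of bio->bi_iter, so the bio's own iterator is left untouched.
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *	unsigned bytes = 0;
 *
 *	bio_for_each_segment(bv, bio, iter)
 *		bytes += bv.bv_len;
 */
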
static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same, because they interpret bi_size
	 * differently:
	 */

	if (bio_op(bio) == REQ_OP_DISCARD)
		return 1;

	if (bio_op(bio) == REQ_OP_SECURE_ERASE)
		return 1;

	if (bio_op(bio) == REQ_OP_WRITE_SAME)
		return 1;

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio returns,
 * and the bio would already be freed memory by the time the
 * if (bio->bi_flags ...) test runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb__before_atomic();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}

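/*
 * Example (illustrative sketch, not from the kernel): carving fixed-size
 * chunks off the front of a bio, as a stacking driver might do.
 * CHUNK_SECTORS and my_submit() are hypothetical stand-ins.
 *
 *	struct bio *split;
 *
 *	do {
 *		split = bio_next_split(bio, CHUNK_SECTORS, GFP_NOIO,
 *				       fs_bio_set);
 *		if (split != bio)
 *			bio_chain(split, bio);
 *		my_submit(split);
 *	} while (split != bio);
 */
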
extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}

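/*
 * Example (illustrative sketch, not from the kernel): reading one page
 * synchronously with a bio allocated from fs_bio_set. 'bdev', 'page' and
 * 'sector' are assumed to be set up by the caller; bi_bdev/bi_error match
 * the era of this header.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_set_op_attrs(bio, REQ_OP_READ, 0);
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *
 *	if (submit_bio_wait(bio))
 *		pr_err("page read failed\n");
 *	bio_put(bio);
 */
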
extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_error = -EIO;
	bio_endio(bio);
}

struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    const struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part);
void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time);

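/*
 * Example (illustrative sketch, not from the kernel): I/O accounting in a
 * bio-based driver that bypasses the request layer. 'disk' is assumed to be
 * the driver's gendisk.
 *
 *	unsigned long start = jiffies;
 *
 *	generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio),
 *			      &disk->part0);
 *	...
 *	generic_end_io_acct(bio_data_dir(bio), &disk->part0, start);
 */
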
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
extern void bio_free_pages(struct bio *bio);

extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *,
				     const struct iov_iter *,
				     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_blkcg(struct bio *bio,
			struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
static inline void bio_clone_blkcg_association(struct bio *dst,
			struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

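/*
 * Example (illustrative sketch): zeroing the data in a single bvec, much as
 * zero_fill_bio() does. Interrupts stay disabled between the map and the
 * unmap, per the warning above.
 *
 *	unsigned long flags;
 *	char *data = bvec_kmap_irq(bvec, &flags);
 *
 *	memset(data, 0, bvec->bv_len);
 *	bvec_kunmap_irq(data, &flags);
 */
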
static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
				   unsigned long *flags)
{
	return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio. The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

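/*
 * Example (illustrative sketch, not from the kernel): draining deferred bios
 * from a workqueue, as a remapping driver might. 'deferred' and 'lock' are
 * hypothetical driver state.
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *
 *	spin_lock_irq(&lock);
 *	bio_list_merge(&list, &deferred);
 *	bio_list_init(&deferred);
 *	spin_unlock_irq(&lock);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		generic_make_request(bio);
 */
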
/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t *bio_pool;
	mempool_t *bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
	mempool_t *bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

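/*
 * Example (illustrative sketch, not from the kernel): a driver keeping a
 * private bio_set so its allocations don't contend with fs_bio_set. Error
 * handling is trimmed.
 *
 *	struct bio_set *bs = bioset_create(BIO_POOL_SIZE, 0);
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
 *
 *	...
 *	bio_put(bio);
 *	bioset_free(bs);
 */
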
struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline bool bio_integrity_enabled(struct bio *bio)
{
	return false;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
	return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */