/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)

/*
 * was unsigned short, but we might as well be ready for > 64kB I/O pages
 */
struct bio_vec {
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};

struct bio_set;
struct bio;
struct bio_integrity_payload;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	sector_t		bi_sector;	/* device address in 512 byte
						   sectors */
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	unsigned long		bi_flags;	/* status, command, etc */
	unsigned long		bi_rw;		/* bottom bits READ/WRITE,
						 * top bits priority
						 */

	unsigned short		bi_vcnt;	/* how many bio_vec's */
	unsigned short		bi_idx;		/* current index into bvl_vec */

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	unsigned int		bi_size;	/* residual I/O count */

	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */

	unsigned int		bi_comp_cpu;	/* completion CPU */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	bio_end_io_t		*bi_end_io;
	atomic_t		bi_cnt;		/* pin count */

	void			*bi_private;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity;	/* data integrity */
#endif

	bio_destructor_t	*bi_destructor;	/* destructor */
};

/*
 * bio flags
 */
#define BIO_UPTODATE	0	/* ok after I/O completion */
#define BIO_RW_BLOCK	1	/* RW_AHEAD set, and read/write would block */
#define BIO_EOF		2	/* out-of-bounds error */
#define BIO_SEG_VALID	3	/* bi_phys_segments valid */
#define BIO_CLONED	4	/* doesn't own data */
#define BIO_BOUNCED	5	/* bio is a bounce bio */
#define BIO_USER_MAPPED 6	/* contains user pages */
#define BIO_EOPNOTSUPP	7	/* not supported */
#define BIO_CPU_AFFINE	8	/* complete bio on same CPU as submitted */
#define BIO_NULL_MAPPED 9	/* contains invalid user pages */
#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))

/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS		(4)
#define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)

/*
 * bio bi_rw flags
 *
 * bit 0 -- read (not set) or write (set)
 * bit 1 -- rw-ahead when set
 * bit 2 -- barrier
 * bit 3 -- fail fast, don't want low level driver retries
 * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
 * bit 5 -- metadata request
 * bit 6 -- discard sectors
 */
#define BIO_RW		0	/* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD	1	/* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER	2
#define BIO_RW_FAILFAST	3
#define BIO_RW_SYNC	4
#define BIO_RW_META	5
#define BIO_RW_DISCARD	6

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {				\
	WARN_ON(prio >= (1 << IOPRIO_BITS));				\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);			\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)
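
/*
 * Example (illustrative sketch, not part of the original header): tag a
 * bio with a best-effort I/O priority before submission. The helpers
 * IOPRIO_PRIO_VALUE() and IOPRIO_CLASS_BE come from linux/ioprio.h,
 * included above.
 *
 *	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 0));
 */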

/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)
#define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
#define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
#define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
#define bio_rw_meta(bio)	((bio)->bi_rw & (1 << BIO_RW_META))
#define bio_discard(bio)	((bio)->bi_rw & (1 << BIO_RW_DISCARD))
#define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))

static inline unsigned int bio_cur_sectors(struct bio *bio)
{
	if (bio->bi_vcnt)
		return bio_iovec(bio)->bv_len >> 9;
	else /* dataless requests such as discard */
		return bio->bi_size >> 9;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio->bi_vcnt)
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +	\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)
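
/*
 * Example (illustrative sketch, not part of the original header): peek
 * at the first segment of a bio from a context that cannot sleep.
 * KM_USER0 is one of the classic kmap_atomic slots; pick the slot that
 * matches the calling context.
 *
 *	char *data = __bio_kmap_atomic(bio, 0, KM_USER0);
 *
 *	... read up to bio_iovec_idx(bio, 0)->bv_len bytes ...
 *	__bio_kunmap_atomic(data, KM_USER0);
 */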

/*
 * merge helpers etc
 */

#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)

/*
 * allow arch override, for eg virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))

#define bio_io_error(bio) bio_endio((bio), -EIO)

/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)				\
	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
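
/*
 * Example (illustrative sketch, not part of the original header): walk
 * the pending segments of a bio, here to total the bytes still to be
 * transferred.
 *
 *	struct bio_vec *bvec;
 *	unsigned int bytes = 0;
 *	int i;
 *
 *	bio_for_each_segment(bvec, bio, i)
 *		bytes += bvec->bv_len;
 */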

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 *	bio_get(bio);
 *	submit_bio(rw, bio);
 *	if (bio->bi_flags ...)
 *		do_something
 *	bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio returns,
 * in which case the bio would already be freed by the time the
 * bio->bi_flags test runs.
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)
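
/*
 * Example (illustrative sketch, not part of the original header): a
 * completion handler matching bio_end_io_t, dropping the submitter's
 * reference once the I/O is done. The "done" completion stashed in
 * bi_private is hypothetical; complete() comes from linux/completion.h.
 *
 *	static void my_end_io(struct bio *bio, int error)
 *	{
 *		struct completion *done = bio->bi_private;
 *
 *		complete(done);
 *		bio_put(bio);
 *	}
 */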

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */
	struct bio_vec		*bip_vec;	/* integrity data vector */

	sector_t		bip_sector;	/* virtual start sector */

	void			*bip_buf;	/* generated integrity data */
	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	int			bip_error;	/* saved I/O error */
	unsigned int		bip_size;

	unsigned short		bip_pool;	/* pool the ivec came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_idx;	/* current bip_vec index */

	struct work_struct	bip_work;	/* I/O completion */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 * in bio2.bi_private
 */
struct bio_pair {
	struct bio	bio1, bio2;
	struct bio_vec	bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload bip1, bip2;
	struct bio_vec	iv1, iv2;
#endif
	atomic_t	cnt;
	int		error;
};
extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
				  int first_sectors);
extern mempool_t *bio_split_pool;
extern void bio_pair_release(struct bio_pair *dbio);
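
/*
 * Example (illustrative sketch, not part of the original header): split
 * a single-page bio that straddles a boundary, submit both halves, then
 * drop the pair's reference. "first_sectors" and the choice of
 * generic_make_request() for submission are hypothetical details.
 *
 *	struct bio_pair *bp = bio_split(bio, bio_split_pool, first_sectors);
 *
 *	generic_make_request(&bp->bio1);
 *	generic_make_request(&bp->bio2);
 *	bio_pair_release(bp);
 */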

extern struct bio_set *bioset_create(int, int);
extern void bioset_free(struct bio_set *);

extern struct bio *bio_alloc(gfp_t, int);
extern struct bio *bio_kmalloc(gfp_t, int);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);
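
/*
 * Example (illustrative sketch, not part of the original header):
 * allocate a one-segment bio, point it at a device and sector, and
 * submit it. "bdev", "sector", "page" and "my_end_io" are hypothetical,
 * and error handling is omitted.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(WRITE, bio);
 */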

extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, gfp_t);

extern void bio_init(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
				 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *, struct sg_iovec *,
				     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
extern void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
extern unsigned int bvec_nr_vecs(unsigned short idx);

/*
 * Allow queuer to specify a completion CPU for this bio
 */
static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
{
	bio->bi_comp_cpu = cpu;
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6

struct bio_set {
	mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
#endif
	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
};
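
/*
 * Example (illustrative sketch, not part of the original header): a
 * driver keeping its own bio_set so its allocations don't contend with
 * fs_bio_set. The pool sizes and the 4-vec allocation are illustrative.
 *
 *	struct bio_set *bs = bioset_create(BIO_POOL_SIZE, 0);
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 4, bs);
 *
 *	... use and complete the bio ...
 *	bioset_free(bs);
 */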

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

extern struct bio_set *fs_bio_set;

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#ifdef CONFIG_HIGHMEM
/*
 * remember to add offset! and never ever reenable interrupts between a
 * bvec_kmap_irq and bvec_kunmap_irq!!
 *
 * This function MUST be inlined - it plays with the CPU interrupt flags.
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}

#else
#define bvec_kmap_irq(bvec, flags)	(page_address((bvec)->bv_page) + (bvec)->bv_offset)
#define bvec_kunmap_irq(buf, flags)	do { *(flags) = 0; } while (0)
#endif
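
/*
 * Example (illustrative sketch, not part of the original header): copy
 * a segment out while interrupts stay disabled between the map and the
 * unmap. "buf" is a hypothetical destination buffer.
 *
 *	unsigned long flags;
 *	char *data = bvec_kmap_irq(bvec, &flags);
 *
 *	memcpy(buf, data, bvec->bv_len);
 *	bvec_kunmap_irq(data, &flags);
 */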

static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline int bio_has_data(struct bio *bio)
{
	return bio && bio->bi_io_vec != NULL;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
#define bip_vec(bip)		bip_vec_idx(bip, 0)

#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
	     i < (bip)->bip_vcnt;					\
	     bvl++, i++)

#define bip_for_each_vec(bvl, bip, i)					\
	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)

static inline int bio_integrity(struct bio *bio)
{
	return bio->bi_integrity != NULL;
}

extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *, struct bio_set *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, struct bio_set *);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init_slab(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define bio_integrity(a)		(0)
#define bioset_integrity_create(a, b)	(0)
#define bio_integrity_prep(a)		(0)
#define bio_integrity_enabled(a)	(0)
#define bio_integrity_clone(a, b, c)	(0)
#define bioset_integrity_free(a)	do { } while (0)
#define bio_integrity_free(a, b)	do { } while (0)
#define bio_integrity_endio(a, b)	do { } while (0)
#define bio_integrity_advance(a, b)	do { } while (0)
#define bio_integrity_trim(a, b, c)	do { } while (0)
#define bio_integrity_split(a, b, c)	do { } while (0)
#define bio_integrity_set_tag(a, b, c)	do { } while (0)
#define bio_integrity_get_tag(a, b, c)	do { } while (0)
#define bio_integrity_init_slab(a)	do { } while (0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */