/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>

#ifdef CONFIG_BLOCK

/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
#include <asm/io.h>

#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY)
#define BIOVEC_VIRT_START_SIZE(x)	(bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1))
#define BIOVEC_VIRT_OVERSIZE(x)		((x) > BIO_VMERGE_MAX_SIZE)
#else
#define BIOVEC_VIRT_START_SIZE(x)	0
#define BIOVEC_VIRT_OVERSIZE(x)		0
#endif

#ifndef BIO_VMERGE_BOUNDARY
#define BIO_VMERGE_BOUNDARY	0
#endif

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)

/*
 * was unsigned short, but we might as well be ready for > 64kB I/O pages
 */
struct bio_vec {
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};

struct bio_set;
struct bio;
struct bio_integrity_payload;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers); a usage sketch follows the struct definition below
 */
struct bio {
	sector_t		bi_sector;	/* device address in 512 byte
						   sectors */
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	unsigned long		bi_flags;	/* status, command, etc */
	unsigned long		bi_rw;		/* bottom bits READ/WRITE,
						 * top bits priority
						 */

	unsigned short		bi_vcnt;	/* how many bio_vec's */
	unsigned short		bi_idx;		/* current index into bvl_vec */

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned short		bi_phys_segments;

	/* Number of segments after physical and DMA remapping
	 * hardware coalescing is performed.
	 */
	unsigned short		bi_hw_segments;

	unsigned int		bi_size;	/* residual I/O count */

	/*
	 * To keep track of the max hw size, we account for the
	 * sizes of the first and last virtually mergeable segments
	 * in this bio
	 */
	unsigned int		bi_hw_front_size;
	unsigned int		bi_hw_back_size;

	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	bio_end_io_t		*bi_end_io;
	atomic_t		bi_cnt;		/* pin count */

	void			*bi_private;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity;  /* data integrity */
#endif

	bio_destructor_t	*bi_destructor;	/* destructor */
};
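
/*
 * Example: a minimal sketch of filling in and submitting a bio from
 * process context. "bdev", "page" and "my_end_io" are hypothetical
 * caller state, not part of this header; real users must check
 * bio_add_page()'s return value as shown:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = 0;
 *	bio->bi_end_io = my_end_io;
 *	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 *		bio_put(bio);
 *		return -EIO;
 *	}
 *	submit_bio(READ, bio);
 */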

/*
 * bio flags
 */
#define BIO_UPTODATE	0	/* ok after I/O completion */
#define BIO_RW_BLOCK	1	/* RW_AHEAD set, and read/write would block */
#define BIO_EOF		2	/* out-of-bounds error */
#define BIO_SEG_VALID	3	/* nr_hw_seg valid */
#define BIO_CLONED	4	/* doesn't own data */
#define BIO_BOUNCED	5	/* bio is a bounce bio */
#define BIO_USER_MAPPED 6	/* contains user pages */
#define BIO_EOPNOTSUPP	7	/* not supported */
#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))

/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS		(4)
#define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)

/*
 * bio bi_rw flags
 *
 * bit 0 -- read (not set) or write (set)
 * bit 1 -- rw-ahead when set
 * bit 2 -- barrier
 * bit 3 -- fail fast, don't want low level driver retries
 * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
 * bit 5 -- metadata request
 * bit 6 -- discard sectors
 */
#define BIO_RW		0	/* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD	1	/* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER	2
#define BIO_RW_FAILFAST	3
#define BIO_RW_SYNC	4
#define BIO_RW_META	5
#define BIO_RW_DISCARD	6
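
/*
 * Example: a sketch of marking a bio as a synchronous barrier write
 * before submission; "bio" is assumed to come from bio_alloc(). The
 * bottom bit selects the data direction, the rest are modifiers:
 *
 *	bio->bi_rw |= (1 << BIO_RW);		(write, not read)
 *	bio->bi_rw |= (1 << BIO_RW_BARRIER);	(ordered barrier)
 *	bio->bi_rw |= (1 << BIO_RW_SYNC);	(unplug immediately)
 */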

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {				\
	WARN_ON(prio >= (1 << IOPRIO_BITS));				\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);			\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)

/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)
#define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
#define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
#define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
#define bio_rw_meta(bio)	((bio)->bi_rw & (1 << BIO_RW_META))
#define bio_discard(bio)	((bio)->bi_rw & (1 << BIO_RW_DISCARD))
#define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))

static inline unsigned int bio_cur_sectors(struct bio *bio)
{
	if (bio->bi_vcnt)
		return bio_iovec(bio)->bv_len >> 9;
	else /* dataless requests such as discard */
		return bio->bi_size >> 9;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio->bi_vcnt)
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +	\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)
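
/*
 * Example: a sketch of temporarily mapping segment "idx" of a bio for a
 * byte-wise copy. "idx", "len" and "scratch" are hypothetical caller
 * state; the map/unmap pair must balance and the caller may not sleep
 * in between:
 *
 *	char *buf = __bio_kmap_atomic(bio, idx, KM_USER0);
 *
 *	memcpy(scratch, buf, len);
 *	__bio_kunmap_atomic(buf, KM_USER0);
 */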

/*
 * merge helpers etc
 */

#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)

/*
 * allow arch override, for eg virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
#endif

#define BIOVEC_VIRT_MERGEABLE(vec1, vec2)	\
	((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))

#define bio_io_error(bio) bio_endio((bio), -EIO)

/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)				\
	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
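
/*
 * Example: a sketch of walking the pending segments of a bio, here to
 * total up the bytes still outstanding ("bvec" and "i" are ordinary
 * locals of the hypothetical caller):
 *
 *	struct bio_vec *bvec;
 *	unsigned int bytes = 0;
 *	int i;
 *
 *	bio_for_each_segment(bvec, bio, i)
 *		bytes += bvec->bv_len;
 */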

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio
 * returns, and the bio would then be freed memory by the time the
 * "if (bio->bi_flags ...)" test runs
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)
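
/*
 * Example: a concrete sketch of the pattern above, peeking at the
 * status flags after submission via the bio_flagged() accessor
 * (assumes "bio" was set up by the hypothetical caller and that the
 * completion handler drops its own reference):
 *
 *	bio_get(bio);
 *	submit_bio(READ, bio);
 *	if (!bio_flagged(bio, BIO_UPTODATE))
 *		printk(KERN_DEBUG "bio not (yet) up to date\n");
 *	bio_put(bio);
 */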

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */
	struct bio_vec		*bip_vec;	/* integrity data vector */

	sector_t		bip_sector;	/* virtual start sector */

	void			*bip_buf;	/* generated integrity data */
	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	int			bip_error;	/* saved I/O error */
	unsigned int		bip_size;

	unsigned short		bip_pool;	/* pool the ivec came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_idx;	/* current bip_vec index */

	struct work_struct	bip_work;	/* I/O completion */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 * in bio2.bi_private
 */
struct bio_pair {
	struct bio			bio1, bio2;
	struct bio_vec			bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload	bip1, bip2;
	struct bio_vec			iv1, iv2;
#endif
	atomic_t			cnt;
	int				error;
};
extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
				  int first_sectors);
extern mempool_t *bio_split_pool;
extern void bio_pair_release(struct bio_pair *dbio);
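
/*
 * Example: a sketch of splitting a single-page bio at "split_sectors"
 * and queueing both halves (error handling elided; the hypothetical
 * caller owns "bio" and draws from the global bio_split_pool):
 *
 *	struct bio_pair *bp = bio_split(bio, bio_split_pool, split_sectors);
 *
 *	generic_make_request(&bp->bio1);
 *	generic_make_request(&bp->bio2);
 *	bio_pair_release(bp);
 */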

extern struct bio_set *bioset_create(int, int);
extern void bioset_free(struct bio_set *);

extern struct bio *bio_alloc(gfp_t, int);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);

extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int bio_hw_segments(struct request_queue *, struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, gfp_t);

extern void bio_init(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int);
struct sg_iovec;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *,
				     int, int);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
extern unsigned int bvec_nr_vecs(unsigned short idx);
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6

struct bio_set {
	mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
#endif
	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

extern struct bio_set *fs_bio_set;
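
/*
 * Example: a sketch of a driver keeping its own bio_set so its
 * allocations cannot be starved by users of the shared fs_bio_set
 * (pool sizes are illustrative, cleanup ordering elided at "..."):
 *
 *	struct bio_set *bs = bioset_create(BIO_POOL_SIZE, 2);
 *
 *	if (bs) {
 *		struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
 *		...
 *		bioset_free(bs);
 *	}
 */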

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#ifdef CONFIG_HIGHMEM
/*
 * remember to add offset! and never ever reenable interrupts between a
 * bvec_kmap_irq and bvec_kunmap_irq!!
 *
 * This function MUST be inlined - it plays with the CPU interrupt flags.
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}

#else
#define bvec_kmap_irq(bvec, flags)	(page_address((bvec)->bv_page) + (bvec)->bv_offset)
#define bvec_kunmap_irq(buf, flags)	do { *(flags) = 0; } while (0)
#endif
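
/*
 * Example: a sketch of the mandatory pairing, with interrupts staying
 * disabled for the whole critical section ("bvec" comes from the
 * hypothetical caller's segment iteration):
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memset(buf, 0, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */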

static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline int bio_has_data(struct bio *bio)
{
	return bio && bio->bi_io_vec != NULL;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
#define bip_vec(bip)		bip_vec_idx(bip, 0)

#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
	     i < (bip)->bip_vcnt;					\
	     bvl++, i++)

#define bip_for_each_vec(bvl, bip, i)					\
	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)

static inline int bio_integrity(struct bio *bio)
{
	/* the surrounding #if already guarantees CONFIG_BLK_DEV_INTEGRITY */
	return bio->bi_integrity != NULL;
}

extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *, struct bio_set *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, struct bio_set *);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init_slab(void);

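/*
 * Example: a sketch of attaching integrity data before submission,
 * only when the target device supports it (assuming a nonzero return
 * from bio_integrity_prep() signals failure; "rw" is hypothetical
 * caller state):
 *
 *	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 *		bio_endio(bio, -EIO);
 *		return;
 *	}
 *	submit_bio(rw, bio);
 */
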
#else /* CONFIG_BLK_DEV_INTEGRITY */

#define bio_integrity(a)		(0)
#define bioset_integrity_create(a, b)	(0)
#define bio_integrity_prep(a)		(0)
#define bio_integrity_enabled(a)	(0)
#define bio_integrity_clone(a, b, c)	(0)
#define bioset_integrity_free(a)	do { } while (0)
#define bio_integrity_free(a, b)	do { } while (0)
#define bio_integrity_endio(a, b)	do { } while (0)
#define bio_integrity_advance(a, b)	do { } while (0)
#define bio_integrity_trim(a, b, c)	do { } while (0)
#define bio_integrity_split(a, b, c)	do { } while (0)
#define bio_integrity_set_tag(a, b, c)	do { } while (0)
#define bio_integrity_get_tag(a, b, c)	do { } while (0)
#define bio_integrity_init_slab()	do { } while (0)	/* prototype takes void */

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */