/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES	256
#define BIO_MAX_SIZE	(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS	(BIO_MAX_SIZE >> 9)

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {				\
	WARN_ON(prio >= (1 << IOPRIO_BITS));				\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);			\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)
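
/*
 * Illustrative sketch (not part of this header): stamping a priority
 * built with the IOPRIO_PRIO_VALUE() helper from linux/ioprio.h onto a
 * bio before submission; the best-effort class and level 4 are only
 * example values. Since the class is not IOPRIO_CLASS_NONE, the value
 * round-trips through bi_rw and bio_prio_valid() holds afterwards:
 *
 *	int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);
 *
 *	bio_set_prio(bio, prio);
 *	WARN_ON(!bio_prio_valid(bio) || bio_prio(bio) != prio);
 */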

/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio->bi_vcnt)
		return bio_iovec(bio)->bv_len;
	else /* dataless requests such as discard */
		return bio->bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio->bi_vcnt)
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline int bio_has_allocated_vec(struct bio *bio)
{
	return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)			\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +	\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr)
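
/*
 * Illustrative sketch: copying the current segment out of a (possibly
 * highmem) bio with the atomic kmap helpers above. "buf" is a
 * hypothetical destination buffer; the kmtype argument is ignored by
 * these wrappers, so 0 is passed:
 *
 *	char *src = __bio_kmap_atomic(bio, bio->bi_idx, 0);
 *
 *	memcpy(buf, src, bio_cur_bytes(bio));
 *	__bio_kunmap_atomic(src, 0);
 */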

/*
 * merge helpers etc
 */

#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, e.g. for virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))

#define bio_io_error(bio) bio_endio((bio), -EIO)

/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)				\
	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
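
/*
 * Illustrative sketch: counting the bytes in every pending segment of
 * a bio with the iterator above; "bvl" and "i" are just the cursor
 * variables the macro expects:
 *
 *	struct bio_vec *bvl;
 *	unsigned int bytes = 0;
 *	int i;
 *
 *	bio_for_each_segment(bvl, bio, i)
 *		bytes += bvl->bv_len;
 */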

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would already have been freed by the time the
 * if (bio->bi_flags ...) test runs
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	sector_t		bip_sector;	/* virtual start sector */

	void			*bip_buf;	/* generated integrity data */
	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned int		bip_size;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_idx;	/* current bip_vec index */

	struct work_struct	bip_work;	/* I/O completion */
	struct bio_vec		bip_vec[0];	/* embedded bvec array */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 * in bio2.bi_private
 */
struct bio_pair {
	struct bio			bio1, bio2;
	struct bio_vec			bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload	bip1, bip2;
	struct bio_vec			iv1, iv2;
#endif
	atomic_t			cnt;
	int				error;
};
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);
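
/*
 * Illustrative sketch: splitting a single-page bio that straddles a
 * boundary, in the style of a raid0-like remapper. "split_sectors" is
 * a hypothetical count of the sectors that fit below the boundary:
 *
 *	struct bio_pair *bp = bio_split(bio, split_sectors);
 *
 *	generic_make_request(&bp->bio1);
 *	generic_make_request(&bp->bio2);
 *	bio_pair_release(bp);
 */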

extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}
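
/*
 * Illustrative sketch of the usual allocate/fill/submit pattern built
 * on the helpers above. "bdev", "sector", "page", "len", "offset" and
 * the my_end_io completion are all hypothetical; READ is just an
 * example direction. Note bi_bdev must be set before bio_add_page(),
 * which returns the number of bytes added (0 on failure):
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	if (bio_add_page(bio, page, len, offset) != len)
 *		bio_put(bio);
 *	else
 *		submit_bio(READ, bio);
 */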

extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
				 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *, struct sg_iovec *,
				     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif	/* CONFIG_BLK_CGROUP */

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)

struct bio_set {
	struct kmem_cache	*bio_slab;
	unsigned int		front_pad;

	mempool_t		*bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t		*bio_integrity_pool;
#endif
	mempool_t		*bvec_pool;
};
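
/*
 * Illustrative sketch: a driver creating a private bio_set so that its
 * allocations cannot starve against fs_bio_set. The pool size and a
 * front_pad of 0 are only example values:
 *
 *	struct bio_set *bs = bioset_create(BIO_POOL_SIZE, 0);
 *
 *	if (bs) {
 *		struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
 *
 *		... use and complete the bio ...
 *		bioset_free(bs);
 *	}
 */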

struct biovec_slab {
	int			nr_vecs;
	char			*name;
	struct kmem_cache	*slab;
};

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif
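
/*
 * Illustrative sketch: zeroing one bio_vec with the irq-safe mapping
 * helpers above. Interrupts stay disabled between the map and the
 * unmap, exactly as the warning above demands:
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memset(buf, 0, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */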

static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline int bio_has_data(struct bio *bio)
{
	return bio && bio->bi_io_vec != NULL;
}

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio. The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
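
/*
 * Illustrative sketch: a remapping driver parking deferred bios on a
 * private bio_list and draining them later; "deferred" and
 * "bio_to_defer" are hypothetical:
 *
 *	struct bio_list deferred;
 *	struct bio *bio;
 *
 *	bio_list_init(&deferred);
 *	bio_list_add(&deferred, bio_to_defer);
 *
 *	while ((bio = bio_list_pop(&deferred)))
 *		generic_make_request(bio);
 */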

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
#define bip_vec(bip)		bip_vec_idx(bip, 0)

#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
	     i < (bip)->bip_vcnt;					\
	     bvl++, i++)

#define bip_for_each_vec(bvl, bip, i)					\
	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

#define bio_integrity(bio) (bio->bi_integrity != NULL)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);
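
/*
 * Illustrative sketch, modelled on the generic_make_request()-era
 * submission path: attach protection information before a bio goes
 * down the stack, and fail it if the payload cannot be prepared:
 *
 *	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 *		bio_endio(bio, -EIO);
 *		return;
 *	}
 */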

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline int bio_integrity(struct bio *bio)
{
	return 0;
}

static inline int bio_integrity_enabled(struct bio *bio)
{
	return 0;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
				       int sectors)
{
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
}

static inline void bio_integrity_init(void)
{
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */