/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 */

#ifndef _ZFS_BLKDEV_H
#define	_ZFS_BLKDEV_H

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/backing-dev.h>

#ifndef HAVE_FMODE_T
typedef unsigned __bitwise__ fmode_t;
#endif /* HAVE_FMODE_T */

#ifndef HAVE_BLK_QUEUE_FLAG_SET
static inline void
blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_flag_set(flag, q);
}
#endif

#ifndef HAVE_BLK_QUEUE_FLAG_CLEAR
static inline void
blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_flag_clear(flag, q);
}
#endif

/*
 * 4.7 - 4.x API,
 * The blk_queue_write_cache() interface has replaced the blk_queue_flush()
 * interface. However, the new interface is GPL-only, thus we implement
 * our own trivial wrapper when the GPL-only version is detected.
 *
 * 2.6.36 - 4.6 API,
 * The blk_queue_flush() interface has replaced the blk_queue_ordered()
 * interface. However, while the old interface was available to all, the
 * new one is GPL-only. Thus, if the GPL-only version is detected, we
 * implement our own trivial helper.
 *
 * 2.6.x - 2.6.35
 * Legacy blk_queue_ordered() interface.
 */
static inline void
blk_queue_set_write_cache(struct request_queue *q, bool wc, bool fua)
{
#if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
#elif defined(HAVE_BLK_QUEUE_WRITE_CACHE)
	blk_queue_write_cache(q, wc, fua);
#elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
	if (wc)
		q->flush_flags |= REQ_FLUSH;
	if (fua)
		q->flush_flags |= REQ_FUA;
#elif defined(HAVE_BLK_QUEUE_FLUSH)
	blk_queue_flush(q, (wc ? REQ_FLUSH : 0) | (fua ? REQ_FUA : 0));
#else
	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN, NULL);
#endif
}

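/*
 * Editorial example, not part of the upstream header: a zvol-style driver
 * would typically advertise a volatile write cache and FUA support on its
 * request queue through the wrapper above. The example_* name is
 * hypothetical and added only for illustration.
 */
static inline void
example_enable_write_cache(struct request_queue *q)
{
	/* Advertise both a write-back cache and FUA support. */
	blk_queue_set_write_cache(q, true, true);
}
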
/*
 * Most of the blk_* macros were removed in 2.6.36. Ostensibly this was
 * done to improve readability and allow easier grepping. However, from
 * a portability standpoint the macros are helpful. Therefore the needed
 * macros are redefined here if they are missing from the kernel.
 */
#ifndef blk_fs_request
#define	blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#endif

/*
 * 2.6.27 API change,
 * The blk_queue_stackable() queue flag was added in 2.6.27 to handle dm
 * stacking drivers. Prior to this, request stacking drivers were detected
 * by checking (q->request_fn == NULL); for earlier kernels we revert to
 * this legacy behavior.
 */
#ifndef blk_queue_stackable
#define	blk_queue_stackable(q)	((q)->request_fn == NULL)
#endif

/*
 * 2.6.34 API change,
 * The blk_queue_max_hw_sectors() function replaces blk_queue_max_sectors().
 */
#ifndef HAVE_BLK_QUEUE_MAX_HW_SECTORS
#define	blk_queue_max_hw_sectors __blk_queue_max_hw_sectors
static inline void
__blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	blk_queue_max_sectors(q, max_hw_sectors);
}
#endif

/*
 * 2.6.34 API change,
 * The blk_queue_max_segments() function consolidates
 * blk_queue_max_hw_segments() and blk_queue_max_phys_segments().
 */
#ifndef HAVE_BLK_QUEUE_MAX_SEGMENTS
#define	blk_queue_max_segments __blk_queue_max_segments
static inline void
__blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	blk_queue_max_phys_segments(q, max_segments);
	blk_queue_max_hw_segments(q, max_segments);
}
#endif

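/*
 * Editorial example, not part of the upstream header: with the wrappers
 * above a driver can configure its queue limits identically on old and new
 * kernels. The example_* name and the limit values are hypothetical and
 * purely illustrative.
 */
static inline void
example_set_queue_limits(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 1024);	/* 1024 sectors = 512 KiB */
	blk_queue_max_segments(q, 128);		/* illustrative segment cap */
}
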
static inline void
blk_queue_set_read_ahead(struct request_queue *q, unsigned long ra_pages)
{
#ifdef HAVE_BLK_QUEUE_BDI_DYNAMIC
	q->backing_dev_info->ra_pages = ra_pages;
#else
	q->backing_dev_info.ra_pages = ra_pages;
#endif
}

#ifndef HAVE_GET_DISK_AND_MODULE
static inline struct kobject *
get_disk_and_module(struct gendisk *disk)
{
	return (get_disk(disk));
}
#endif

#ifndef HAVE_GET_DISK_RO
static inline int
get_disk_ro(struct gendisk *disk)
{
	int policy = 0;

	if (disk->part[0])
		policy = disk->part[0]->policy;

	return (policy);
}
#endif /* HAVE_GET_DISK_RO */

#ifdef HAVE_BIO_BVEC_ITER
#define	BIO_BI_SECTOR(bio)	(bio)->bi_iter.bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_iter.bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_iter.bi_idx
#define	BIO_BI_SKIP(bio)	(bio)->bi_iter.bi_bvec_done
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bv), (b), (i))
typedef struct bvec_iter bvec_iterator_t;
#else
#define	BIO_BI_SECTOR(bio)	(bio)->bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_idx
#define	BIO_BI_SKIP(bio)	(0)
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bvp), (b), (i))
typedef int bvec_iterator_t;
#endif

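/*
 * Editorial example, not part of the upstream header: the BIO_BI_* macros
 * let callers read a bio's position and size without caring whether the
 * kernel stores them in bio->bi_iter or directly in the bio. The example_*
 * name is hypothetical.
 */
static inline void
example_inspect_bio(struct bio *bio)
{
	sector_t sector = BIO_BI_SECTOR(bio);	/* starting 512-byte sector */
	unsigned int size = BIO_BI_SIZE(bio);	/* total payload in bytes */
	unsigned int skip = BIO_BI_SKIP(bio);	/* bytes already completed */

	(void) sector;
	(void) size;
	(void) skip;
}
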
/*
 * Portable helper for correctly setting the FAILFAST flags. The
 * correct usage has changed 3 times from 2.6.12 to 2.6.38.
 */
static inline void
bio_set_flags_failfast(struct block_device *bdev, int *flags)
{
#ifdef CONFIG_BUG
	/*
	 * Disable FAILFAST for loopback devices because of the
	 * following incorrect BUG_ON() in loop_make_request().
	 * This support is also disabled for md devices because the
	 * test suite layers md devices on top of loopback devices.
	 * This may be removed when the loopback driver is fixed.
	 *
	 * BUG_ON(!lo || (rw != READ && rw != WRITE));
	 */
	if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
	    (MAJOR(bdev->bd_dev) == MD_MAJOR))
		return;

#ifdef BLOCK_EXT_MAJOR
	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		return;
#endif /* BLOCK_EXT_MAJOR */
#endif /* CONFIG_BUG */

#if defined(HAVE_BIO_RW_FAILFAST_DTD)
	/* BIO_RW_FAILFAST_* preferred interface from 2.6.28 - 2.6.35 */
	*flags |= (
	    (1 << BIO_RW_FAILFAST_DEV) |
	    (1 << BIO_RW_FAILFAST_TRANSPORT) |
	    (1 << BIO_RW_FAILFAST_DRIVER));
#elif defined(HAVE_REQ_FAILFAST_MASK)
	/*
	 * REQ_FAILFAST_* preferred interface from 2.6.36 - 2.6.xx,
	 * the BIO_* and REQ_* flags were unified under REQ_* flags.
	 */
	*flags |= REQ_FAILFAST_MASK;
#else
#error "Undefined block IO FAILFAST interface."
#endif
}

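/*
 * Editorial example, not part of the upstream header: a vdev open path
 * might build its bio flags like this, letting the helper above decide
 * whether FAILFAST can be applied safely for the underlying device. The
 * example_* name is hypothetical.
 */
static inline int
example_bio_flags(struct block_device *bdev)
{
	int flags = 0;

	bio_set_flags_failfast(bdev, &flags);
	return (flags);
}
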
/*
 * Maximum disk label length; it may be undefined for some kernels.
 */
#ifndef DISK_NAME_LEN
#define	DISK_NAME_LEN	32
#endif /* DISK_NAME_LEN */

#ifdef HAVE_BIO_BI_STATUS
static inline int
bi_status_to_errno(blk_status_t status)
{
	switch (status) {
	case BLK_STS_OK:
		return (0);
	case BLK_STS_NOTSUPP:
		return (EOPNOTSUPP);
	case BLK_STS_TIMEOUT:
		return (ETIMEDOUT);
	case BLK_STS_NOSPC:
		return (ENOSPC);
	case BLK_STS_TRANSPORT:
		return (ENOLINK);
	case BLK_STS_TARGET:
		return (EREMOTEIO);
	case BLK_STS_NEXUS:
		return (EBADE);
	case BLK_STS_MEDIUM:
		return (ENODATA);
	case BLK_STS_PROTECTION:
		return (EILSEQ);
	case BLK_STS_RESOURCE:
		return (ENOMEM);
	case BLK_STS_AGAIN:
		return (EAGAIN);
	case BLK_STS_IOERR:
		return (EIO);
	default:
		return (EIO);
	}
}

static inline blk_status_t
errno_to_bi_status(int error)
{
	switch (error) {
	case 0:
		return (BLK_STS_OK);
	case EOPNOTSUPP:
		return (BLK_STS_NOTSUPP);
	case ETIMEDOUT:
		return (BLK_STS_TIMEOUT);
	case ENOSPC:
		return (BLK_STS_NOSPC);
	case ENOLINK:
		return (BLK_STS_TRANSPORT);
	case EREMOTEIO:
		return (BLK_STS_TARGET);
	case EBADE:
		return (BLK_STS_NEXUS);
	case ENODATA:
		return (BLK_STS_MEDIUM);
	case EILSEQ:
		return (BLK_STS_PROTECTION);
	case ENOMEM:
		return (BLK_STS_RESOURCE);
	case EAGAIN:
		return (BLK_STS_AGAIN);
	case EIO:
		return (BLK_STS_IOERR);
	default:
		return (BLK_STS_IOERR);
	}
}
#endif /* HAVE_BIO_BI_STATUS */

/*
 * 4.3 API change
 * The bio_endio() prototype changed slightly. These are helper
 * macros to ensure the prototype and invocation are handled correctly.
 */
#ifdef HAVE_1ARG_BIO_END_IO_T
#ifdef HAVE_BIO_BI_STATUS
#define	BIO_END_IO_ERROR(bio)		bi_status_to_errno(bio->bi_status)
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
#define	BIO_END_IO(bio, error)		bio_set_bi_status(bio, error)
static inline void
bio_set_bi_status(struct bio *bio, int error)
{
	ASSERT3S(error, <=, 0);
	bio->bi_status = errno_to_bi_status(-error);
	bio_endio(bio);
}
#else
#define	BIO_END_IO_ERROR(bio)		(-(bio->bi_error))
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
#define	BIO_END_IO(bio, error)		bio_set_bi_error(bio, error)
static inline void
bio_set_bi_error(struct bio *bio, int error)
{
	ASSERT3S(error, <=, 0);
	bio->bi_error = error;
	bio_endio(bio);
}
#endif /* HAVE_BIO_BI_STATUS */

#else
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x, int z)
#define	BIO_END_IO(bio, error)		bio_endio(bio, error);
#endif /* HAVE_1ARG_BIO_END_IO_T */

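/*
 * Editorial example, not part of the upstream header: a completion callback
 * declared with BIO_END_IO_PROTO() builds against both the one and two
 * argument bio_endio() prototypes; on 1-arg kernels the error is read back
 * with BIO_END_IO_ERROR(). The example_* name is hypothetical.
 */
BIO_END_IO_PROTO(example_bio_done, bio, error)
{
#ifdef HAVE_1ARG_BIO_END_IO_T
	int rc = BIO_END_IO_ERROR(bio);		/* already a positive errno */
#else
	int rc = -(error);			/* negative errno from the kernel */
#endif

	/* Hand "rc" back to whatever context is waiting on this bio. */
	(void) rc;
	(void) bio;
}
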
/*
 * 2.6.38 - 2.6.x API,
 * blkdev_get_by_path()
 * blkdev_put()
 *
 * 2.6.28 - 2.6.37 API,
 * open_bdev_exclusive()
 * close_bdev_exclusive()
 *
 * 2.6.12 - 2.6.27 API,
 * open_bdev_excl()
 * close_bdev_excl()
 *
 * Used to exclusively open a block device from within the kernel.
 */
#if defined(HAVE_BLKDEV_GET_BY_PATH)
#define	vdev_bdev_open(path, md, hld)	blkdev_get_by_path(path, \
					    (md) | FMODE_EXCL, hld)
#define	vdev_bdev_close(bdev, md)	blkdev_put(bdev, (md) | FMODE_EXCL)
#elif defined(HAVE_OPEN_BDEV_EXCLUSIVE)
#define	vdev_bdev_open(path, md, hld)	open_bdev_exclusive(path, md, hld)
#define	vdev_bdev_close(bdev, md)	close_bdev_exclusive(bdev, md)
#else
#define	vdev_bdev_open(path, md, hld)	open_bdev_excl(path, md, hld)
#define	vdev_bdev_close(bdev, md)	close_bdev_excl(bdev)
#endif /* HAVE_BLKDEV_GET_BY_PATH | HAVE_OPEN_BDEV_EXCLUSIVE */

/*
 * 2.6.22 API change
 * The function invalidate_bdev() lost its second argument because
 * it was unused.
 */
#ifdef HAVE_1ARG_INVALIDATE_BDEV
#define	vdev_bdev_invalidate(bdev)	invalidate_bdev(bdev)
#else
#define	vdev_bdev_invalidate(bdev)	invalidate_bdev(bdev, 1)
#endif /* HAVE_1ARG_INVALIDATE_BDEV */

/*
 * 2.6.27 API change
 * The function lookup_bdev() was exported for use; prior to this it
 * existed but the symbol was not exported.
 *
 * 4.4.0-6.21 API change for Ubuntu
 * lookup_bdev() gained a second argument, FMODE_*, to check inode permissions.
 */
#ifdef HAVE_1ARG_LOOKUP_BDEV
#define	vdev_lookup_bdev(path)	lookup_bdev(path)
#else
#ifdef HAVE_2ARGS_LOOKUP_BDEV
#define	vdev_lookup_bdev(path)	lookup_bdev(path, 0)
#else
#define	vdev_lookup_bdev(path)	ERR_PTR(-ENOTSUP)
#endif /* HAVE_2ARGS_LOOKUP_BDEV */
#endif /* HAVE_1ARG_LOOKUP_BDEV */

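/*
 * Editorial example, not part of the upstream header: resolving a device
 * path to a block_device pointer. The wrapper above hides the number of
 * lookup_bdev() arguments and yields an ERR_PTR() on failure; the reference
 * it takes is dropped with bdput(). The example_* name is hypothetical.
 */
static inline int
example_path_is_device(const char *path)
{
	struct block_device *bdev = vdev_lookup_bdev(path);

	if (IS_ERR(bdev))
		return (0);

	bdput(bdev);
	return (1);
}
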
/*
 * 2.6.30 API change
 * To ensure good performance, preferentially use the physical block size
 * for proper alignment. The physical size is supposed to be the internal
 * sector size used by the device. This is often 4096 bytes for AF devices,
 * while a smaller 512-byte logical size is supported for compatibility.
 *
 * Unfortunately, many drives still misreport their physical sector size.
 * For devices which are known to lie, you may need to manually set this
 * at pool creation time with 'zpool create -o ashift=12 ...'.
 *
 * When the physical block size interface isn't available, we fall back to
 * the logical block size interface and then the older hard sector size.
 */
#ifdef HAVE_BDEV_PHYSICAL_BLOCK_SIZE
#define	vdev_bdev_block_size(bdev)	bdev_physical_block_size(bdev)
#else
#ifdef HAVE_BDEV_LOGICAL_BLOCK_SIZE
#define	vdev_bdev_block_size(bdev)	bdev_logical_block_size(bdev)
#else
#define	vdev_bdev_block_size(bdev)	bdev_hardsect_size(bdev)
#endif /* HAVE_BDEV_LOGICAL_BLOCK_SIZE */
#endif /* HAVE_BDEV_PHYSICAL_BLOCK_SIZE */

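/*
 * Editorial example, not part of the upstream header: a caller can use the
 * macro above to detect Advanced Format (4 KiB) devices regardless of which
 * block size interface the kernel provides. The example_* name is
 * hypothetical.
 */
static inline int
example_device_is_4k(struct block_device *bdev)
{
	/* Non-zero when the device reports at least a 4096-byte block size. */
	return (vdev_bdev_block_size(bdev) >= 4096);
}
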
#ifndef HAVE_BIO_SET_OP_ATTRS
/*
 * Kernels without bio_set_op_attrs use bi_rw for the bio flags.
 */
static inline void
bio_set_op_attrs(struct bio *bio, unsigned rw, unsigned flags)
{
	bio->bi_rw |= rw | flags;
}
#endif

/*
 * bio_set_flush - Set the appropriate flags in a bio to guarantee
 * data are on non-volatile media on completion.
 *
 * 2.6.X - 2.6.36 API,
 * WRITE_BARRIER - Tells the block layer to commit all previously submitted
 * writes to stable storage before this one is started and that the current
 * write is on stable storage upon completion. Also prevents reordering
 * on both sides of the current operation.
 *
 * 2.6.37 - 4.8 API,
 * Introduce WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags as a
 * replacement for WRITE_BARRIER to allow expressing richer semantics
 * to the block layer. It's up to the block layer to implement the
 * semantics correctly. Use the WRITE_FLUSH_FUA flag combination.
 *
 * 4.8 - 4.9 API,
 * REQ_FLUSH was renamed to REQ_PREFLUSH. For consistency with previous
 * ZoL releases, prefer the WRITE_FLUSH_FUA flag set if it's available.
 *
 * 4.10 API,
 * The read/write flags and their modifiers, including WRITE_FLUSH,
 * WRITE_FUA and WRITE_FLUSH_FUA were removed from fs.h in
 * torvalds/linux@70fd7614 and replaced by direct flag modification
 * of the REQ_ flags in bio->bi_opf. Use REQ_PREFLUSH.
 */
static inline void
bio_set_flush(struct bio *bio)
{
#if defined(REQ_PREFLUSH)	/* >= 4.10 */
	bio_set_op_attrs(bio, 0, REQ_PREFLUSH);
#elif defined(WRITE_FLUSH_FUA)	/* >= 2.6.37 and <= 4.9 */
	bio_set_op_attrs(bio, 0, WRITE_FLUSH_FUA);
#elif defined(WRITE_BARRIER)	/* < 2.6.37 */
	bio_set_op_attrs(bio, 0, WRITE_BARRIER);
#else
#error "Allowing the build will cause bio_set_flush requests to be ignored."
#endif
}

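/*
 * Editorial example, not part of the upstream header: cache flushes are
 * typically issued by allocating an empty bio aimed at the target device
 * and tagging it with bio_set_flush(); completion then guarantees any
 * volatile write cache has been committed to stable storage. The example_*
 * name is hypothetical.
 */
static inline void
example_tag_flush_bio(struct bio *bio)
{
	/* The bio carries no payload; only the flush semantics matter. */
	bio_set_flush(bio);
}
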
/*
 * 4.8 - 4.x API,
 * REQ_OP_FLUSH
 *
 * 4.8-rc0 - 4.8-rc1,
 * REQ_PREFLUSH
 *
 * 2.6.36 - 4.7 API,
 * REQ_FLUSH
 *
 * 2.6.x - 2.6.35 API,
 * HAVE_BIO_RW_BARRIER
 *
 * Used to determine if a cache flush has been requested. This check has
 * been left intentionally broad in order to cover both a legacy flush
 * and the new preflush behavior introduced in Linux 4.8. This is correct
 * in all cases but may have a performance impact for some kernels. It
 * has the advantage of minimizing kernel-specific changes in the zvol code.
 */
static inline boolean_t
bio_is_flush(struct bio *bio)
{
#if defined(HAVE_REQ_OP_FLUSH) && defined(HAVE_BIO_BI_OPF)
	return ((bio_op(bio) == REQ_OP_FLUSH) || (bio->bi_opf & REQ_PREFLUSH));
#elif defined(REQ_PREFLUSH) && defined(HAVE_BIO_BI_OPF)
	return (bio->bi_opf & REQ_PREFLUSH);
#elif defined(REQ_PREFLUSH) && !defined(HAVE_BIO_BI_OPF)
	return (bio->bi_rw & REQ_PREFLUSH);
#elif defined(REQ_FLUSH)
	return (bio->bi_rw & REQ_FLUSH);
#elif defined(HAVE_BIO_RW_BARRIER)
	return (bio->bi_rw & (1 << BIO_RW_BARRIER));
#else
#error "Allowing the build will cause flush requests to be ignored."
#endif
}

/*
 * 4.8 - 4.x API,
 * REQ_FUA flag moved to bio->bi_opf
 *
 * 2.6.x - 4.7 API,
 * REQ_FUA
 */
static inline boolean_t
bio_is_fua(struct bio *bio)
{
#if defined(HAVE_BIO_BI_OPF)
	return (bio->bi_opf & REQ_FUA);
#elif defined(REQ_FUA)
	return (bio->bi_rw & REQ_FUA);
#else
#error "Allowing the build will cause fua requests to be ignored."
#endif
}

/*
 * 4.8 - 4.x API,
 * REQ_OP_DISCARD
 *
 * 2.6.36 - 4.7 API,
 * REQ_DISCARD
 *
 * 2.6.28 - 2.6.35 API,
 * BIO_RW_DISCARD
 *
 * In all cases the normal I/O path is used for discards. The only
 * difference is how the kernel tags individual I/Os as discards.
 *
 * Note that 2.6.32 era kernels provide both BIO_RW_DISCARD and REQ_DISCARD,
 * where BIO_RW_DISCARD is the correct interface. Therefore, it is important
 * that the HAVE_BIO_RW_DISCARD check occur before the REQ_DISCARD check.
 */
static inline boolean_t
bio_is_discard(struct bio *bio)
{
#if defined(HAVE_REQ_OP_DISCARD)
	return (bio_op(bio) == REQ_OP_DISCARD);
#elif defined(HAVE_BIO_RW_DISCARD)
	return (bio->bi_rw & (1 << BIO_RW_DISCARD));
#elif defined(REQ_DISCARD)
	return (bio->bi_rw & REQ_DISCARD);
#else
/* potentially triggering the DMU_MAX_ACCESS assertion. */
#error "Allowing the build will cause discard requests to become writes."
#endif
}

/*
 * 4.8 - 4.x API,
 * REQ_OP_SECURE_ERASE
 *
 * 2.6.36 - 4.7 API,
 * REQ_SECURE
 *
 * 2.6.x - 2.6.35 API,
 * Unsupported by kernel
 */
static inline boolean_t
bio_is_secure_erase(struct bio *bio)
{
#if defined(HAVE_REQ_OP_SECURE_ERASE)
	return (bio_op(bio) == REQ_OP_SECURE_ERASE);
#elif defined(REQ_SECURE)
	return (bio->bi_rw & REQ_SECURE);
#else
	return (0);
#endif
}

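/*
 * Editorial example, not part of the upstream header: a zvol-style
 * make_request function can classify incoming bios with the predicates
 * above instead of testing kernel-specific flag names directly. The
 * example_* name and return codes are hypothetical.
 */
static inline int
example_classify_bio(struct bio *bio)
{
	if (bio_is_flush(bio))
		return (0);	/* commit the volatile write cache */
	if (bio_is_discard(bio) || bio_is_secure_erase(bio))
		return (1);	/* punch a hole / TRIM the range */
	if (bio_is_fua(bio))
		return (2);	/* write must be stable on completion */
	return (3);		/* ordinary read or write */
}
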
/*
 * 2.6.33 API change
 * Discard granularity and alignment restrictions may now be set. For
 * older kernels which do not support this, it is safe to skip it.
 */
#ifdef HAVE_DISCARD_GRANULARITY
static inline void
blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
{
	q->limits.discard_granularity = dg;
}
#else
#define	blk_queue_discard_granularity(x, dg)	((void)0)
#endif /* HAVE_DISCARD_GRANULARITY */

/*
 * Default Linux IO Scheduler,
 * Setting the scheduler to noop will allow the Linux IO scheduler to
 * still perform front and back merging, while leaving the request
 * ordering and prioritization to the ZFS IO scheduler.
 */
#define	VDEV_SCHEDULER			"noop"

/*
 * A common holder for vdev_bdev_open() is used to relax the exclusive open
 * semantics slightly. Internal vdev disk callers may pass VDEV_HOLDER to
 * allow them to open the device multiple times. Other kernel callers and
 * user space processes which don't pass this value will get EBUSY. This is
 * currently required for the correct operation of hot spares.
 */
#define	VDEV_HOLDER			((void *)0x2401de7)

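/*
 * Editorial example, not part of the upstream header: exclusively opening
 * and closing a block device with the wrappers above, passing VDEV_HOLDER
 * so other internal ZFS callers may still open it. The example_* name is
 * hypothetical.
 */
static inline int
example_probe_vdev(const char *path)
{
	fmode_t mode = FMODE_READ | FMODE_WRITE;
	struct block_device *bdev;

	bdev = vdev_bdev_open(path, mode, VDEV_HOLDER);
	if (IS_ERR(bdev))
		return (-PTR_ERR(bdev));	/* positive errno */

	vdev_bdev_close(bdev, mode);
	return (0);
}
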
static inline void
blk_generic_start_io_acct(struct request_queue *q, int rw,
    unsigned long sectors, struct hd_struct *part)
{
#if defined(HAVE_GENERIC_IO_ACCT_3ARG)
	generic_start_io_acct(rw, sectors, part);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
	generic_start_io_acct(q, rw, sectors, part);
#endif
}

static inline void
blk_generic_end_io_acct(struct request_queue *q, int rw,
    struct hd_struct *part, unsigned long start_time)
{
#if defined(HAVE_GENERIC_IO_ACCT_3ARG)
	generic_end_io_acct(rw, part, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
	generic_end_io_acct(q, rw, part, start_time);
#endif
}

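/*
 * Editorial example, not part of the upstream header: wrapping a bio with
 * I/O accounting; the start time is a jiffies value captured by the driver
 * and the hd_struct is its per-disk accounting slot (e.g. &disk->part0).
 * The example_* name is hypothetical.
 */
static inline void
example_account_io(struct request_queue *q, struct gendisk *disk,
    struct bio *bio, unsigned long start_time)
{
	blk_generic_start_io_acct(q, bio_data_dir(bio), bio_sectors(bio),
	    &disk->part0);
	/* ... perform the I/O ... */
	blk_generic_end_io_acct(q, bio_data_dir(bio), &disk->part0,
	    start_time);
}
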
#endif /* _ZFS_BLKDEV_H */