/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 */

#ifndef _ZFS_BLKDEV_H
#define	_ZFS_BLKDEV_H

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/backing-dev.h>

#ifndef HAVE_FMODE_T
typedef unsigned __bitwise__ fmode_t;
#endif /* HAVE_FMODE_T */

#ifndef HAVE_BLK_QUEUE_FLAG_SET
static inline void
blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_flag_set(flag, q);
}
#endif

#ifndef HAVE_BLK_QUEUE_FLAG_CLEAR
static inline void
blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_flag_clear(flag, q);
}
#endif

/*
 * 4.7 - 4.x API,
 * The blk_queue_write_cache() interface has replaced the blk_queue_flush()
 * interface. However, the new interface is GPL-only, thus we implement
 * our own trivial wrapper when the GPL-only version is detected.
 *
 * 2.6.36 - 4.6 API,
 * The blk_queue_flush() interface has replaced the blk_queue_ordered()
 * interface. However, while the old interface was available to all callers,
 * the new one is GPL-only. Thus if the GPL-only version is detected we
 * implement our own trivial helper.
 *
 * 2.6.x - 2.6.35 API,
 * Legacy blk_queue_ordered() interface.
 */
static inline void
blk_queue_set_write_cache(struct request_queue *q, bool wc, bool fua)
{
#if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
#elif defined(HAVE_BLK_QUEUE_WRITE_CACHE)
	blk_queue_write_cache(q, wc, fua);
#elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
	if (wc)
		q->flush_flags |= REQ_FLUSH;
	if (fua)
		q->flush_flags |= REQ_FUA;
#elif defined(HAVE_BLK_QUEUE_FLUSH)
	blk_queue_flush(q, (wc ? REQ_FLUSH : 0) | (fua ? REQ_FUA : 0));
#else
	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN, NULL);
#endif
}
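
/*
 * Example usage (illustrative sketch, not part of the original header):
 * a zvol-style driver advertising a volatile write cache with FUA support
 * on the queue it owns. The my_dev variable is hypothetical.
 *
 *	struct request_queue *q = my_dev->queue;
 *
 *	blk_queue_set_write_cache(q, true, true);
 */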

/*
 * Most of the blk_* macros were removed in 2.6.36. Ostensibly this was
 * done to improve readability and allow easier grepping. However, from
 * a portability standpoint the macros are helpful. Therefore the needed
 * macros are redefined here if they are missing from the kernel.
 */
#ifndef blk_fs_request
#define	blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#endif

/*
 * 2.6.27 API change,
 * The blk_queue_stackable() queue flag was added in 2.6.27 to handle dm
 * stacking drivers. Prior to this, request stacking drivers were detected
 * by checking (q->request_fn == NULL); for earlier kernels we revert to
 * this legacy behavior.
 */
#ifndef blk_queue_stackable
#define	blk_queue_stackable(q)	((q)->request_fn == NULL)
#endif

/*
 * 2.6.34 API change,
 * The blk_queue_max_hw_sectors() function replaces blk_queue_max_sectors().
 */
#ifndef HAVE_BLK_QUEUE_MAX_HW_SECTORS
#define	blk_queue_max_hw_sectors	__blk_queue_max_hw_sectors
static inline void
__blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	blk_queue_max_sectors(q, max_hw_sectors);
}
#endif

/*
 * 2.6.34 API change,
 * The blk_queue_max_segments() function consolidates
 * blk_queue_max_hw_segments() and blk_queue_max_phys_segments().
 */
#ifndef HAVE_BLK_QUEUE_MAX_SEGMENTS
#define	blk_queue_max_segments	__blk_queue_max_segments
static inline void
__blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	blk_queue_max_phys_segments(q, max_segments);
	blk_queue_max_hw_segments(q, max_segments);
}
#endif

static inline void
blk_queue_set_read_ahead(struct request_queue *q, unsigned long ra_pages)
{
#ifdef HAVE_BLK_QUEUE_BDI_DYNAMIC
	q->backing_dev_info->ra_pages = ra_pages;
#else
	q->backing_dev_info.ra_pages = ra_pages;
#endif
}

#ifndef HAVE_GET_DISK_AND_MODULE
static inline struct kobject *
get_disk_and_module(struct gendisk *disk)
{
	return (get_disk(disk));
}
#endif

#ifndef HAVE_GET_DISK_RO
static inline int
get_disk_ro(struct gendisk *disk)
{
	int policy = 0;

	if (disk->part[0])
		policy = disk->part[0]->policy;

	return (policy);
}
#endif /* HAVE_GET_DISK_RO */

#ifdef HAVE_BIO_BVEC_ITER
#define	BIO_BI_SECTOR(bio)	(bio)->bi_iter.bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_iter.bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_iter.bi_idx
#define	BIO_BI_SKIP(bio)	(bio)->bi_iter.bi_bvec_done
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bv), (b), (i))
typedef struct bvec_iter bvec_iterator_t;
#else
#define	BIO_BI_SECTOR(bio)	(bio)->bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_idx
#define	BIO_BI_SKIP(bio)	(0)
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bvp), (b), (i))
typedef int bvec_iterator_t;
#endif
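
/*
 * Example usage (illustrative sketch): computing the byte offset, length,
 * and partial-segment skip of a bio with the accessors above, independent
 * of whether the kernel provides the immutable bvec_iter or the legacy
 * bi_sector/bi_size fields.
 *
 *	uint64_t offset = BIO_BI_SECTOR(bio) << 9;
 *	uint64_t size = BIO_BI_SIZE(bio);
 *	uint64_t skip = BIO_BI_SKIP(bio);
 */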

/*
 * Portable helper for correctly setting the FAILFAST flags. The
 * correct usage has changed 3 times from 2.6.12 to 2.6.38.
 */
static inline void
bio_set_flags_failfast(struct block_device *bdev, int *flags)
{
#ifdef CONFIG_BUG
	/*
	 * Disable FAILFAST for loopback devices because of the
	 * following incorrect BUG_ON() in loop_make_request().
	 * This support is also disabled for md devices because the
	 * test suite layers md devices on top of loopback devices.
	 * This may be removed when the loopback driver is fixed.
	 *
	 * BUG_ON(!lo || (rw != READ && rw != WRITE));
	 */
	if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
	    (MAJOR(bdev->bd_dev) == MD_MAJOR))
		return;

#ifdef BLOCK_EXT_MAJOR
	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		return;
#endif /* BLOCK_EXT_MAJOR */
#endif /* CONFIG_BUG */

#if defined(HAVE_BIO_RW_FAILFAST_DTD)
	/* BIO_RW_FAILFAST_* preferred interface from 2.6.28 - 2.6.35 */
	*flags |= (
	    (1 << BIO_RW_FAILFAST_DEV) |
	    (1 << BIO_RW_FAILFAST_TRANSPORT) |
	    (1 << BIO_RW_FAILFAST_DRIVER));
#elif defined(HAVE_REQ_FAILFAST_MASK)
	/*
	 * REQ_FAILFAST_* preferred interface from 2.6.36 - 2.6.xx,
	 * the BIO_* and REQ_* flags were unified under REQ_* flags.
	 */
	*flags |= REQ_FAILFAST_MASK;
#else
#error "Undefined block IO FAILFAST interface."
#endif
}
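
/*
 * Example usage (illustrative sketch): a caller would compute the
 * FAILFAST flags once per target device and apply them to each bio
 * before submission; my_submit_bio() is a hypothetical stand-in for
 * the caller's submit path.
 *
 *	int flags = 0;
 *
 *	bio_set_flags_failfast(bdev, &flags);
 *	bio_set_op_attrs(bio, READ, flags);
 *	my_submit_bio(bio);
 */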

/*
 * Maximum disk label length; it may be undefined for some kernels.
 */
#ifndef DISK_NAME_LEN
#define	DISK_NAME_LEN	32
#endif /* DISK_NAME_LEN */

#ifdef HAVE_BIO_BI_STATUS
static inline int
bi_status_to_errno(blk_status_t status)
{
	switch (status) {
	case BLK_STS_OK:
		return (0);
	case BLK_STS_NOTSUPP:
		return (EOPNOTSUPP);
	case BLK_STS_TIMEOUT:
		return (ETIMEDOUT);
	case BLK_STS_NOSPC:
		return (ENOSPC);
	case BLK_STS_TRANSPORT:
		return (ENOLINK);
	case BLK_STS_TARGET:
		return (EREMOTEIO);
	case BLK_STS_NEXUS:
		return (EBADE);
	case BLK_STS_MEDIUM:
		return (ENODATA);
	case BLK_STS_PROTECTION:
		return (EILSEQ);
	case BLK_STS_RESOURCE:
		return (ENOMEM);
	case BLK_STS_AGAIN:
		return (EAGAIN);
	case BLK_STS_IOERR:
		return (EIO);
	default:
		return (EIO);
	}
}

static inline blk_status_t
errno_to_bi_status(int error)
{
	switch (error) {
	case 0:
		return (BLK_STS_OK);
	case EOPNOTSUPP:
		return (BLK_STS_NOTSUPP);
	case ETIMEDOUT:
		return (BLK_STS_TIMEOUT);
	case ENOSPC:
		return (BLK_STS_NOSPC);
	case ENOLINK:
		return (BLK_STS_TRANSPORT);
	case EREMOTEIO:
		return (BLK_STS_TARGET);
	case EBADE:
		return (BLK_STS_NEXUS);
	case ENODATA:
		return (BLK_STS_MEDIUM);
	case EILSEQ:
		return (BLK_STS_PROTECTION);
	case ENOMEM:
		return (BLK_STS_RESOURCE);
	case EAGAIN:
		return (BLK_STS_AGAIN);
	case EIO:
		return (BLK_STS_IOERR);
	default:
		return (BLK_STS_IOERR);
	}
}
#endif /* HAVE_BIO_BI_STATUS */

/*
 * 4.3 API change
 * The bio_endio() prototype changed slightly. These are helper
 * macros to ensure the prototype and invocation are handled correctly.
 */
#ifdef HAVE_1ARG_BIO_END_IO_T
#ifdef HAVE_BIO_BI_STATUS
#define	BIO_END_IO_ERROR(bio)	bi_status_to_errno(bio->bi_status)
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
#define	BIO_END_IO(bio, error)	bio_set_bi_status(bio, error)
static inline void
bio_set_bi_status(struct bio *bio, int error)
{
	ASSERT3S(error, <=, 0);
	bio->bi_status = errno_to_bi_status(-error);
	bio_endio(bio);
}
#else
#define	BIO_END_IO_ERROR(bio)	(-(bio->bi_error))
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
#define	BIO_END_IO(bio, error)	bio_set_bi_error(bio, error)
static inline void
bio_set_bi_error(struct bio *bio, int error)
{
	ASSERT3S(error, <=, 0);
	bio->bi_error = error;
	bio_endio(bio);
}
#endif /* HAVE_BIO_BI_STATUS */

#else
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x, int z)
#define	BIO_END_IO(bio, error)	bio_endio(bio, error)
#endif /* HAVE_1ARG_BIO_END_IO_T */
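
/*
 * Example usage (illustrative sketch): declaring and completing an I/O
 * callback portably. On 1-arg kernels the error is read back from the
 * bio via BIO_END_IO_ERROR(); on 2-arg kernels it arrives as the second
 * argument. The my_io_t type and helpers are hypothetical.
 *
 *	BIO_END_IO_PROTO(my_io_completion, bio, error)
 *	{
 *		my_io_t *io = bio->bi_private;
 *	#ifdef HAVE_1ARG_BIO_END_IO_T
 *		io->io_error = BIO_END_IO_ERROR(bio);
 *	#else
 *		io->io_error = -error;
 *	#endif
 *		bio_put(bio);
 *		my_io_done(io);
 *	}
 *
 * and, on an early error path, completing a bio with a negated errno:
 *
 *	BIO_END_IO(bio, -EIO);
 */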

/*
 * 2.6.38 - 2.6.x API,
 * blkdev_get_by_path()
 * blkdev_put()
 *
 * 2.6.28 - 2.6.37 API,
 * open_bdev_exclusive()
 * close_bdev_exclusive()
 *
 * 2.6.12 - 2.6.27 API,
 * open_bdev_excl()
 * close_bdev_excl()
 *
 * Used to exclusively open a block device from within the kernel.
 */
#if defined(HAVE_BLKDEV_GET_BY_PATH)
#define	vdev_bdev_open(path, md, hld)	blkdev_get_by_path(path, \
					    (md) | FMODE_EXCL, hld)
#define	vdev_bdev_close(bdev, md)	blkdev_put(bdev, (md) | FMODE_EXCL)
#elif defined(HAVE_OPEN_BDEV_EXCLUSIVE)
#define	vdev_bdev_open(path, md, hld)	open_bdev_exclusive(path, md, hld)
#define	vdev_bdev_close(bdev, md)	close_bdev_exclusive(bdev, md)
#else
#define	vdev_bdev_open(path, md, hld)	open_bdev_excl(path, md, hld)
#define	vdev_bdev_close(bdev, md)	close_bdev_excl(bdev)
#endif /* HAVE_BLKDEV_GET_BY_PATH | HAVE_OPEN_BDEV_EXCLUSIVE */
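
/*
 * Example usage (illustrative sketch): exclusively opening a device for
 * read/write and closing it again; the device path is only illustrative.
 * VDEV_HOLDER (defined below) is the holder token used by internal
 * callers.
 *
 *	fmode_t mode = FMODE_READ | FMODE_WRITE;
 *	struct block_device *bdev;
 *
 *	bdev = vdev_bdev_open("/dev/sda", mode, VDEV_HOLDER);
 *	if (IS_ERR(bdev))
 *		return (PTR_ERR(bdev));
 *
 *	vdev_bdev_close(bdev, mode);
 */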

/*
 * 2.6.22 API change
 * The function invalidate_bdev() lost its second argument because
 * it was unused.
 */
#ifdef HAVE_1ARG_INVALIDATE_BDEV
#define	vdev_bdev_invalidate(bdev)	invalidate_bdev(bdev)
#else
#define	vdev_bdev_invalidate(bdev)	invalidate_bdev(bdev, 1)
#endif /* HAVE_1ARG_INVALIDATE_BDEV */

/*
 * 2.6.27 API change
 * The function was exported for use; prior to this it existed but the
 * symbol was not exported.
 *
 * 4.4.0-6.21 API change for Ubuntu
 * lookup_bdev() gained a second argument, FMODE_*, to check inode permissions.
 */
#ifdef HAVE_1ARG_LOOKUP_BDEV
#define	vdev_lookup_bdev(path)	lookup_bdev(path)
#else
#ifdef HAVE_2ARGS_LOOKUP_BDEV
#define	vdev_lookup_bdev(path)	lookup_bdev(path, 0)
#else
#define	vdev_lookup_bdev(path)	ERR_PTR(-ENOTSUP)
#endif /* HAVE_2ARGS_LOOKUP_BDEV */
#endif /* HAVE_1ARG_LOOKUP_BDEV */

/*
 * 2.6.30 API change
 * To ensure good performance, preferentially use the physical block size
 * for proper alignment. The physical size is supposed to be the internal
 * sector size used by the device. This is often 4096 bytes for AF devices,
 * while a smaller 512 byte logical size is supported for compatibility.
 *
 * Unfortunately, many drives still misreport their physical sector size.
 * For devices which are known to lie you may need to manually set this
 * at pool creation time with 'zpool create -o ashift=12 ...'.
 *
 * When the physical block size interface isn't available, we fall back to
 * the logical block size interface and then the older hard sector size.
 */
#ifdef HAVE_BDEV_PHYSICAL_BLOCK_SIZE
#define	vdev_bdev_block_size(bdev)	bdev_physical_block_size(bdev)
#else
#ifdef HAVE_BDEV_LOGICAL_BLOCK_SIZE
#define	vdev_bdev_block_size(bdev)	bdev_logical_block_size(bdev)
#else
#define	vdev_bdev_block_size(bdev)	bdev_hardsect_size(bdev)
#endif /* HAVE_BDEV_LOGICAL_BLOCK_SIZE */
#endif /* HAVE_BDEV_PHYSICAL_BLOCK_SIZE */
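
/*
 * Example usage (illustrative sketch): deriving a minimum allocation
 * shift (ashift) from the reported block size, rounding up to at least
 * SPA_MINBLOCKSIZE. highbit64() is the ZFS helper returning the
 * position of the highest set bit.
 *
 *	unsigned int bsize = vdev_bdev_block_size(bdev);
 *	uint64_t ashift = highbit64(MAX(bsize, SPA_MINBLOCKSIZE)) - 1;
 */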

#ifndef HAVE_BIO_SET_OP_ATTRS
/*
 * Kernels without bio_set_op_attrs use bi_rw for the bio flags.
 */
static inline void
bio_set_op_attrs(struct bio *bio, unsigned rw, unsigned flags)
{
	bio->bi_rw |= rw | flags;
}
#endif

/*
 * bio_set_flush - Set the appropriate flags in a bio to guarantee
 * data are on non-volatile media on completion.
 *
 * 2.6.X - 2.6.36 API,
 * WRITE_BARRIER - Tells the block layer to commit all previously submitted
 * writes to stable storage before this one is started and that the current
 * write is on stable storage upon completion. Also prevents reordering
 * on both sides of the current operation.
 *
 * 2.6.37 - 4.8 API,
 * Introduce WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags as a
 * replacement for WRITE_BARRIER to allow expressing richer semantics
 * to the block layer. It's up to the block layer to implement the
 * semantics correctly. Use the WRITE_FLUSH_FUA flag combination.
 *
 * 4.8 - 4.9 API,
 * REQ_FLUSH was renamed to REQ_PREFLUSH. For consistency with previous
 * ZoL releases, prefer the WRITE_FLUSH_FUA flag set if it's available.
 *
 * 4.10 API,
 * The read/write flags and their modifiers, including WRITE_FLUSH,
 * WRITE_FUA and WRITE_FLUSH_FUA, were removed from fs.h in
 * torvalds/linux@70fd7614 and replaced by direct flag modification
 * of the REQ_ flags in bio->bi_opf. Use REQ_PREFLUSH.
 */
static inline void
bio_set_flush(struct bio *bio)
{
#if defined(REQ_PREFLUSH)	/* >= 4.10 */
	bio_set_op_attrs(bio, 0, REQ_PREFLUSH);
#elif defined(WRITE_FLUSH_FUA)	/* >= 2.6.37 and <= 4.9 */
	bio_set_op_attrs(bio, 0, WRITE_FLUSH_FUA);
#elif defined(WRITE_BARRIER)	/* < 2.6.37 */
	bio_set_op_attrs(bio, 0, WRITE_BARRIER);
#else
#error "Allowing the build will cause bio_set_flush requests to be ignored."
#endif
}
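
/*
 * Example usage (illustrative sketch): issuing an empty flush bio.
 * Device assignment (bio->bi_bdev or bio_set_dev()) and the rw argument
 * taken by submit_bio() before 4.8 are omitted for brevity; the
 * completion callback name is hypothetical.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 0);
 *
 *	if (unlikely(bio == NULL))
 *		return (ENOMEM);
 *
 *	bio->bi_end_io = my_flush_completion;
 *	bio->bi_private = priv;
 *	bio_set_flush(bio);
 *	submit_bio(bio);
 */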

/*
 * 4.8 - 4.x API,
 * REQ_OP_FLUSH
 *
 * 4.8-rc0 - 4.8-rc1,
 * REQ_PREFLUSH
 *
 * 2.6.36 - 4.7 API,
 * REQ_FLUSH
 *
 * 2.6.x - 2.6.35 API,
 * HAVE_BIO_RW_BARRIER
 *
 * Used to determine if a cache flush has been requested. This check has
 * been left intentionally broad in order to cover both a legacy flush
 * and the new preflush behavior introduced in Linux 4.8. This is correct
 * in all cases but may have a performance impact for some kernels. It
 * has the advantage of minimizing kernel specific changes in the zvol code.
 */
static inline boolean_t
bio_is_flush(struct bio *bio)
{
#if defined(HAVE_REQ_OP_FLUSH) && defined(HAVE_BIO_BI_OPF)
	return ((bio_op(bio) == REQ_OP_FLUSH) || (bio->bi_opf & REQ_PREFLUSH));
#elif defined(REQ_PREFLUSH) && defined(HAVE_BIO_BI_OPF)
	return (bio->bi_opf & REQ_PREFLUSH);
#elif defined(REQ_PREFLUSH) && !defined(HAVE_BIO_BI_OPF)
	return (bio->bi_rw & REQ_PREFLUSH);
#elif defined(REQ_FLUSH)
	return (bio->bi_rw & REQ_FLUSH);
#elif defined(HAVE_BIO_RW_BARRIER)
	return (bio->bi_rw & (1 << BIO_RW_BARRIER));
#else
#error "Allowing the build will cause flush requests to be ignored."
#endif
}

/*
 * 4.8 - 4.x API,
 * REQ_FUA flag moved to bio->bi_opf
 *
 * 2.6.x - 4.7 API,
 * REQ_FUA
 */
static inline boolean_t
bio_is_fua(struct bio *bio)
{
#if defined(HAVE_BIO_BI_OPF)
	return (bio->bi_opf & REQ_FUA);
#elif defined(REQ_FUA)
	return (bio->bi_rw & REQ_FUA);
#else
#error "Allowing the build will cause fua requests to be ignored."
#endif
}

/*
 * 4.8 - 4.x API,
 * REQ_OP_DISCARD
 *
 * 2.6.36 - 4.7 API,
 * REQ_DISCARD
 *
 * 2.6.28 - 2.6.35 API,
 * BIO_RW_DISCARD
 *
 * In all cases the normal I/O path is used for discards. The only
 * difference is how the kernel tags individual I/Os as discards.
 *
 * Note that 2.6.32 era kernels provide both BIO_RW_DISCARD and REQ_DISCARD,
 * where BIO_RW_DISCARD is the correct interface. Therefore, it is important
 * that the HAVE_BIO_RW_DISCARD check occur before the REQ_DISCARD check.
 */
static inline boolean_t
bio_is_discard(struct bio *bio)
{
#if defined(HAVE_REQ_OP_DISCARD)
	return (bio_op(bio) == REQ_OP_DISCARD);
#elif defined(HAVE_BIO_RW_DISCARD)
	return (bio->bi_rw & (1 << BIO_RW_DISCARD));
#elif defined(REQ_DISCARD)
	return (bio->bi_rw & REQ_DISCARD);
#else
/*
 * A mis-tagged discard would become a write, potentially triggering
 * the DMU_MAX_ACCESS assertion.
 */
#error "Allowing the build will cause discard requests to become writes."
#endif
}

/*
 * 4.8 - 4.x API,
 * REQ_OP_SECURE_ERASE
 *
 * 2.6.36 - 4.7 API,
 * REQ_SECURE
 *
 * 2.6.x - 2.6.35 API,
 * Unsupported by kernel
 */
static inline boolean_t
bio_is_secure_erase(struct bio *bio)
{
#if defined(HAVE_REQ_OP_SECURE_ERASE)
	return (bio_op(bio) == REQ_OP_SECURE_ERASE);
#elif defined(REQ_SECURE)
	return (bio->bi_rw & REQ_SECURE);
#else
	return (0);
#endif
}
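
/*
 * Example usage (illustrative sketch): the predicates above allow a
 * single request-processing path to classify incoming bios on any
 * supported kernel; the handler names are hypothetical.
 *
 *	if (bio_is_discard(bio) || bio_is_secure_erase(bio))
 *		error = my_discard(bio);
 *	else if (bio_is_flush(bio) && BIO_BI_SIZE(bio) == 0)
 *		error = my_flush(bio);
 *	else
 *		error = my_read_write(bio, bio_is_fua(bio));
 */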

/*
 * 2.6.33 API change
 * Discard granularity and alignment restrictions may now be set. For
 * older kernels which do not support this it is safe to skip it.
 */
#ifdef HAVE_DISCARD_GRANULARITY
static inline void
blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
{
	q->limits.discard_granularity = dg;
}
#else
#define	blk_queue_discard_granularity(x, dg)	((void)0)
#endif /* HAVE_DISCARD_GRANULARITY */

/*
 * Default Linux IO Scheduler,
 * Setting the scheduler to noop will allow the Linux IO scheduler to
 * still perform front and back merging, while leaving the request
 * ordering and prioritization to the ZFS IO scheduler.
 */
#define	VDEV_SCHEDULER	"noop"

/*
 * A common holder for vdev_bdev_open() is used to relax the exclusive open
 * semantics slightly. Internal vdev disk callers may pass VDEV_HOLDER to
 * allow them to open the device multiple times. Other kernel callers and
 * user space processes which don't pass this value will get EBUSY. This is
 * currently required for the correct operation of hot spares.
 */
#define	VDEV_HOLDER	((void *)0x2401de7)

static inline void
blk_generic_start_io_acct(struct request_queue *q, int rw,
    unsigned long sectors, struct hd_struct *part)
{
#if defined(HAVE_GENERIC_IO_ACCT_3ARG)
	generic_start_io_acct(rw, sectors, part);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
	generic_start_io_acct(q, rw, sectors, part);
#endif
}

static inline void
blk_generic_end_io_acct(struct request_queue *q, int rw,
    struct hd_struct *part, unsigned long start_time)
{
#if defined(HAVE_GENERIC_IO_ACCT_3ARG)
	generic_end_io_acct(rw, part, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
	generic_end_io_acct(q, rw, part, start_time);
#endif
}
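
/*
 * Example usage (illustrative sketch): bracketing an I/O with generic
 * accounting so the device accumulates statistics in /proc/diskstats;
 * disk, rw, and my_do_io() are hypothetical.
 *
 *	unsigned long start_time = jiffies;
 *
 *	blk_generic_start_io_acct(q, rw, bio_sectors(bio), &disk->part0);
 *	my_do_io(bio);
 *	blk_generic_end_io_acct(q, rw, &disk->part0, start_time);
 */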

#endif /* _ZFS_BLKDEV_H */