/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 */

#ifndef _ZFS_BLKDEV_H
#define	_ZFS_BLKDEV_H

#include <linux/blkdev.h>
#include <linux/elevator.h>

#ifndef HAVE_FMODE_T
typedef unsigned __bitwise__ fmode_t;
#endif /* HAVE_FMODE_T */

#ifndef HAVE_BLK_FETCH_REQUEST
static inline struct request *
blk_fetch_request(struct request_queue *q)
{
	struct request *req;

	req = elv_next_request(q);
	if (req)
		blkdev_dequeue_request(req);

	return (req);
}
#endif /* HAVE_BLK_FETCH_REQUEST */

#ifndef HAVE_BLK_REQUEUE_REQUEST
static inline void
blk_requeue_request(request_queue_t *q, struct request *req)
{
	elv_requeue_request(q, req);
}
#endif /* HAVE_BLK_REQUEUE_REQUEST */

#ifndef HAVE_BLK_END_REQUEST
static inline bool
__blk_end_request(struct request *req, int error, unsigned int nr_bytes)
{
	LIST_HEAD(list);

	/*
	 * The request has already been dequeued, but the 2.6.18 version
	 * of end_request() unconditionally dequeues the request, so we
	 * add it to a local list to prevent hitting the BUG_ON.
	 */
	list_add(&req->queuelist, &list);

	/*
	 * The old API required the driver to end each segment and not
	 * the entire request.  In our case we always need to end the
	 * entire request; partial requests are not supported.
	 */
	req->hard_cur_sectors = nr_bytes >> 9;
	end_request(req, ((error == 0) ? 1 : error));

	return (0);
}

static inline bool
blk_end_request(struct request *req, int error, unsigned int nr_bytes)
{
	struct request_queue *q = req->q;
	bool rc;

	spin_lock_irq(q->queue_lock);
	rc = __blk_end_request(req, error, nr_bytes);
	spin_unlock_irq(q->queue_lock);

	return (rc);
}
#else
#ifdef HAVE_BLK_END_REQUEST_GPL_ONLY
/*
 * Define required to avoid conflicting 2.6.29 non-static prototype for a
 * GPL-only version of the helper.  As of 2.6.31 the helper is available
 * to non-GPL modules and is not explicitly exported GPL-only.
 */
#define	__blk_end_request __blk_end_request_x
#define	blk_end_request blk_end_request_x

static inline bool
__blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
{
	/*
	 * The old API required the driver to end each segment and not
	 * the entire request.  In our case we always need to end the
	 * entire request; partial requests are not supported.
	 */
	req->hard_cur_sectors = nr_bytes >> 9;
	end_request(req, ((error == 0) ? 1 : error));

	return (0);
}

static inline bool
blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
{
	struct request_queue *q = req->q;
	bool rc;

	spin_lock_irq(q->queue_lock);
	rc = __blk_end_request_x(req, error, nr_bytes);
	spin_unlock_irq(q->queue_lock);

	return (rc);
}
#endif /* HAVE_BLK_END_REQUEST_GPL_ONLY */
#endif /* HAVE_BLK_END_REQUEST */
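
/*
 * Illustrative sketch (not part of the original header): a request-based
 * driver built on these wrappers would typically complete an already
 * dequeued request in one shot, e.g. from a work item or taskq callback
 * running without the queue lock held:
 *
 *	blk_end_request(req, error, blk_rq_bytes(req));
 *
 * Passing blk_rq_bytes(req) ends the entire request, which matches the
 * "no partial requests" assumption documented above.
 */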

/*
 * 2.6.36 API change,
 * The blk_queue_flush() interface has replaced the blk_queue_ordered()
 * interface.  However, while the old interface was available to all
 * modules, the new one is GPL-only.  Thus if the GPL-only version is
 * detected we implement our own trivial helper compatibility function.
 * The hope is that long term this function will be opened up.
 */
#if defined(HAVE_BLK_QUEUE_FLUSH) && defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
#define	blk_queue_flush __blk_queue_flush
static inline void
__blk_queue_flush(struct request_queue *q, unsigned int flags)
{
	q->flush_flags = flags & (REQ_FLUSH | REQ_FUA);
}
#endif /* HAVE_BLK_QUEUE_FLUSH && HAVE_BLK_QUEUE_FLUSH_GPL_ONLY */
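
/*
 * Illustrative sketch (not part of the original header): whichever
 * definition of blk_queue_flush() ends up in effect, a driver would
 * advertise flush/FUA support once at queue setup time, e.g.:
 *
 *	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 */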

#ifndef HAVE_BLK_RQ_POS
static inline sector_t
blk_rq_pos(struct request *req)
{
	return (req->sector);
}
#endif /* HAVE_BLK_RQ_POS */

#ifndef HAVE_BLK_RQ_SECTORS
static inline unsigned int
blk_rq_sectors(struct request *req)
{
	return (req->nr_sectors);
}
#endif /* HAVE_BLK_RQ_SECTORS */

#if !defined(HAVE_BLK_RQ_BYTES) || defined(HAVE_BLK_RQ_BYTES_GPL_ONLY)
/*
 * Define required to avoid conflicting 2.6.29 non-static prototype for a
 * GPL-only version of the helper.  As of 2.6.31 the helper is available
 * to non-GPL modules in the form of a static inline in the header.
 */
#define	blk_rq_bytes __blk_rq_bytes
static inline unsigned int
__blk_rq_bytes(struct request *req)
{
	return (blk_rq_sectors(req) << 9);
}
#endif /* !HAVE_BLK_RQ_BYTES || HAVE_BLK_RQ_BYTES_GPL_ONLY */
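
/*
 * Illustrative sketch (not part of the original header): with these
 * accessors a request handler can compute the byte offset and length
 * of a request the same way on old and new kernels:
 *
 *	uint64_t offset = blk_rq_pos(req) << 9;	(512-byte sectors to bytes)
 *	uint64_t size = blk_rq_bytes(req);
 */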

/*
 * Most of the blk_* macros were removed in 2.6.36.  Ostensibly this was
 * done to improve readability and allow easier grepping.  However, from
 * a portability standpoint the macros are helpful.  Therefore the needed
 * macros are redefined here if they are missing from the kernel.
 */
#ifndef blk_fs_request
#define	blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#endif

/*
 * 2.6.27 API change,
 * The blk_queue_stackable() queue flag was added in 2.6.27 to handle dm
 * stacking drivers.  Prior to this, request stacking drivers were detected
 * by checking (q->request_fn == NULL); for earlier kernels we revert to
 * this legacy behavior.
 */
#ifndef blk_queue_stackable
#define	blk_queue_stackable(q)	((q)->request_fn == NULL)
#endif
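
/*
 * Illustrative sketch (not part of the original header): a request
 * function might use blk_fs_request() to reject anything other than
 * normal filesystem I/O, e.g.:
 *
 *	while ((req = blk_fetch_request(q)) != NULL) {
 *		if (!blk_fs_request(req)) {
 *			__blk_end_request(req, -EIO, blk_rq_bytes(req));
 *			continue;
 *		}
 *		(dispatch the request for asynchronous handling)
 *	}
 */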

/*
 * 2.6.34 API change,
 * The blk_queue_max_hw_sectors() function replaces blk_queue_max_sectors().
 */
#ifndef HAVE_BLK_QUEUE_MAX_HW_SECTORS
#define	blk_queue_max_hw_sectors __blk_queue_max_hw_sectors
static inline void
__blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	blk_queue_max_sectors(q, max_hw_sectors);
}
#endif

/*
 * 2.6.34 API change,
 * The blk_queue_max_segments() function consolidates
 * blk_queue_max_hw_segments() and blk_queue_max_phys_segments().
 */
#ifndef HAVE_BLK_QUEUE_MAX_SEGMENTS
#define	blk_queue_max_segments __blk_queue_max_segments
static inline void
__blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	blk_queue_max_phys_segments(q, max_segments);
	blk_queue_max_hw_segments(q, max_segments);
}
#endif

/*
 * 2.6.30 API change,
 * The blk_queue_physical_block_size() function was introduced to
 * indicate the smallest I/O the device can write without incurring
 * a read-modify-write penalty.  For older kernels this is a no-op.
 */
#ifndef HAVE_BLK_QUEUE_PHYSICAL_BLOCK_SIZE
#define	blk_queue_physical_block_size(q, x)	((void)(0))
#endif

/*
 * 2.6.30 API change,
 * The blk_queue_io_opt() function was added to indicate the optimal
 * I/O size for the device.  For older kernels this is a no-op.
 */
#ifndef HAVE_BLK_QUEUE_IO_OPT
#define	blk_queue_io_opt(q, x)	((void)(0))
#endif
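
/*
 * Illustrative sketch (not part of the original header): taken together,
 * the helpers above let a driver describe its queue limits identically
 * on old and new kernels, e.g. (the values are placeholders):
 *
 *	blk_queue_max_hw_sectors(q, UINT_MAX);
 *	blk_queue_max_segments(q, 65535);
 *	blk_queue_physical_block_size(q, block_size);
 *	blk_queue_io_opt(q, block_size);
 */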

#ifndef HAVE_GET_DISK_RO
static inline int
get_disk_ro(struct gendisk *disk)
{
	int policy = 0;

	if (disk->part[0])
		policy = disk->part[0]->policy;

	return (policy);
}
#endif /* HAVE_GET_DISK_RO */

#ifndef HAVE_RQ_IS_SYNC
static inline bool
rq_is_sync(struct request *req)
{
	return (req->flags & REQ_RW_SYNC);
}
#endif /* HAVE_RQ_IS_SYNC */

#ifndef HAVE_RQ_FOR_EACH_SEGMENT
struct req_iterator {
	int i;
	struct bio *bio;
};

#define	for_each_bio(_bio) \
	for (; _bio; _bio = _bio->bi_next)

#define	__rq_for_each_bio(_bio, rq) \
	if ((rq->bio)) \
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define	rq_for_each_segment(bvl, _rq, _iter) \
	__rq_for_each_bio(_iter.bio, _rq) \
		bio_for_each_segment(bvl, _iter.bio, _iter.i)
#endif /* HAVE_RQ_FOR_EACH_SEGMENT */
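
/*
 * Illustrative sketch (not part of the original header): on the kernels
 * this fallback targets, the iteration variable is a struct bio_vec
 * pointer, so walking the data pages of a request looks roughly like:
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bv;
 *
 *	rq_for_each_segment(bv, req, iter) {
 *		void *addr = kmap(bv->bv_page) + bv->bv_offset;
 *		(copy bv->bv_len bytes to or from addr)
 *		kunmap(bv->bv_page);
 *	}
 */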

/*
 * Portable helper for correctly setting the FAILFAST flags.  The
 * correct usage has changed 3 times from 2.6.12 to 2.6.38.
 */
static inline void
bio_set_flags_failfast(struct block_device *bdev, int *flags)
{
#ifdef CONFIG_BUG
	/*
	 * Disable FAILFAST for loopback devices because of the
	 * following incorrect BUG_ON() in loop_make_request().
	 * This support is also disabled for md devices because the
	 * test suite layers md devices on top of loopback devices.
	 * This may be removed when the loopback driver is fixed.
	 *
	 *	BUG_ON(!lo || (rw != READ && rw != WRITE));
	 */
	if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
	    (MAJOR(bdev->bd_dev) == MD_MAJOR))
		return;

#ifdef BLOCK_EXT_MAJOR
	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		return;
#endif /* BLOCK_EXT_MAJOR */
#endif /* CONFIG_BUG */

#ifdef HAVE_BIO_RW_FAILFAST_DTD
	/* BIO_RW_FAILFAST_* preferred interface from 2.6.28 - 2.6.35 */
	*flags |= (
	    (1 << BIO_RW_FAILFAST_DEV) |
	    (1 << BIO_RW_FAILFAST_TRANSPORT) |
	    (1 << BIO_RW_FAILFAST_DRIVER));
#else
#ifdef HAVE_BIO_RW_FAILFAST
	/* BIO_RW_FAILFAST preferred interface from 2.6.12 - 2.6.27 */
	*flags |= (1 << BIO_RW_FAILFAST);
#else
#ifdef HAVE_REQ_FAILFAST_MASK
	/*
	 * REQ_FAILFAST_* preferred interface from 2.6.36 - 2.6.xx,
	 * the BIO_* and REQ_* flags were unified under REQ_* flags.
	 */
	*flags |= REQ_FAILFAST_MASK;
#endif /* HAVE_REQ_FAILFAST_MASK */
#endif /* HAVE_BIO_RW_FAILFAST */
#endif /* HAVE_BIO_RW_FAILFAST_DTD */
}
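
/*
 * Illustrative sketch (not part of the original header): callers build up
 * an int of rw flags, let this helper add the era-appropriate FAILFAST
 * bits, and OR the result into the bio submission, e.g.:
 *
 *	int flags = 0;
 *
 *	bio_set_flags_failfast(bdev, &flags);
 *	submit_bio(READ | flags, bio);
 */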

/*
 * Maximum disk label length; it may be undefined for some kernels.
 */
#ifndef DISK_NAME_LEN
#define	DISK_NAME_LEN	32
#endif /* DISK_NAME_LEN */

/*
 * 2.6.24 API change,
 * The bio_end_io() prototype changed slightly.  These helper macros
 * ensure the correct prototype and return value are used.
 */
#ifdef HAVE_2ARGS_BIO_END_IO_T
#define	BIO_END_IO_PROTO(fn, x, y, z)	static void fn(struct bio *x, int z)
#define	BIO_END_IO_RETURN(rc)		return
#else
#define	BIO_END_IO_PROTO(fn, x, y, z)	static int fn( \
					    struct bio *x, \
					    unsigned int y, \
					    int z)
#define	BIO_END_IO_RETURN(rc)		return rc
#endif /* HAVE_2ARGS_BIO_END_IO_T */
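
/*
 * Illustrative sketch (not part of the original header): a completion
 * callback written against these macros works with either prototype.
 * The names below are hypothetical:
 *
 *	BIO_END_IO_PROTO(my_bio_done, bio, bytes_done, error)
 *	{
 *		(on pre-2.6.24 kernels the callback may be invoked before
 *		the bio is fully complete)
 *		if (bio->bi_size)
 *			BIO_END_IO_RETURN(1);
 *
 *		complete((struct completion *)bio->bi_private);
 *		BIO_END_IO_RETURN(0);
 *	}
 */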

/*
 * 2.6.38 - 2.6.x API,
 *   blkdev_get_by_path()
 *   blkdev_put()
 *
 * 2.6.28 - 2.6.37 API,
 *   open_bdev_exclusive()
 *   close_bdev_exclusive()
 *
 * 2.6.12 - 2.6.27 API,
 *   open_bdev_excl()
 *   close_bdev_excl()
 *
 * Used to exclusively open a block device from within the kernel.
 */
#if defined(HAVE_BLKDEV_GET_BY_PATH)
#define	vdev_bdev_open(path, md, hld)	blkdev_get_by_path(path, \
					    (md) | FMODE_EXCL, hld)
#define	vdev_bdev_close(bdev, md)	blkdev_put(bdev, (md) | FMODE_EXCL)
#elif defined(HAVE_OPEN_BDEV_EXCLUSIVE)
#define	vdev_bdev_open(path, md, hld)	open_bdev_exclusive(path, md, hld)
#define	vdev_bdev_close(bdev, md)	close_bdev_exclusive(bdev, md)
#else
#define	vdev_bdev_open(path, md, hld)	open_bdev_excl(path, md, hld)
#define	vdev_bdev_close(bdev, md)	close_bdev_excl(bdev)
#endif /* HAVE_BLKDEV_GET_BY_PATH | HAVE_OPEN_BDEV_EXCLUSIVE */
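
/*
 * Illustrative sketch (not part of the original header): regardless of
 * which kernel interface is selected, callers open and close a vdev's
 * block device the same way (the mode and holder values are examples):
 *
 *	struct block_device *bdev;
 *
 *	bdev = vdev_bdev_open(path, FMODE_READ | FMODE_WRITE, holder);
 *	if (IS_ERR(bdev))
 *		return (PTR_ERR(bdev));
 *	(... use bdev ...)
 *	vdev_bdev_close(bdev, FMODE_READ | FMODE_WRITE);
 */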

/*
 * 2.6.22 API change
 * The function invalidate_bdev() lost its second argument because
 * it was unused.
 */
#ifdef HAVE_1ARG_INVALIDATE_BDEV
#define	vdev_bdev_invalidate(bdev)	invalidate_bdev(bdev)
#else
#define	vdev_bdev_invalidate(bdev)	invalidate_bdev(bdev, 1)
#endif /* HAVE_1ARG_INVALIDATE_BDEV */

/*
 * 2.6.27 API change
 * The function was exported for use; prior to this it existed but the
 * symbol was not exported.
 */
#ifndef HAVE_LOOKUP_BDEV
#define	lookup_bdev(path)	ERR_PTR(-ENOTSUP)
#endif

/*
 * 2.6.30 API change
 * To ensure good performance, preferentially use the physical block size
 * for proper alignment.  The physical size is supposed to be the internal
 * sector size used by the device.  This is often 4096 bytes for AF devices,
 * while a smaller 512 byte logical size is supported for compatibility.
 *
 * Unfortunately, many drives still misreport their physical sector size.
 * For devices which are known to lie, you may need to manually set this
 * at pool creation time with 'zpool create -o ashift=12 ...'.
 *
 * When the physical block size interface isn't available, we fall back to
 * the logical block size interface and then the older hard sector size.
 */
#ifdef HAVE_BDEV_PHYSICAL_BLOCK_SIZE
#define	vdev_bdev_block_size(bdev)	bdev_physical_block_size(bdev)
#else
#ifdef HAVE_BDEV_LOGICAL_BLOCK_SIZE
#define	vdev_bdev_block_size(bdev)	bdev_logical_block_size(bdev)
#else
#define	vdev_bdev_block_size(bdev)	bdev_hardsect_size(bdev)
#endif /* HAVE_BDEV_LOGICAL_BLOCK_SIZE */
#endif /* HAVE_BDEV_PHYSICAL_BLOCK_SIZE */

/*
 * 2.6.37 API change
 * The WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags have been
 * introduced as a replacement for WRITE_BARRIER.  This was done to
 * allow richer semantics to be expressed to the block layer.  It is
 * the block layer's responsibility to choose the correct way to
 * implement these semantics.
 *
 * The existence of these flags implies that REQ_FLUSH and REQ_FUA are
 * defined.  Thus we can safely define the VDEV_REQ_FLUSH and VDEV_REQ_FUA
 * compatibility macros.
 */
#ifdef WRITE_FLUSH_FUA
#define	VDEV_WRITE_FLUSH_FUA	WRITE_FLUSH_FUA
#define	VDEV_REQ_FLUSH		REQ_FLUSH
#define	VDEV_REQ_FUA		REQ_FUA
#else
#define	VDEV_WRITE_FLUSH_FUA	WRITE_BARRIER
#define	VDEV_REQ_FLUSH		REQ_HARDBARRIER
#define	VDEV_REQ_FUA		REQ_HARDBARRIER
#endif
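
/*
 * Illustrative sketch (not part of the original header): a cache flush
 * can then be issued the same way on both old (barrier) and new
 * (flush/FUA) kernels; my_flush_done is a hypothetical completion
 * callback and error handling is omitted:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 0);
 *
 *	bio->bi_end_io = my_flush_done;
 *	bio->bi_private = priv;
 *	bio->bi_bdev = bdev;
 *	submit_bio(VDEV_WRITE_FLUSH_FUA, bio);
 */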

/*
 * 2.6.32 API change
 * Use the normal I/O path for discards.
 */
#ifdef REQ_DISCARD
#define	VDEV_REQ_DISCARD	REQ_DISCARD
#endif

/*
 * 2.6.33 API change
 * Discard granularity and alignment restrictions may now be set.  For
 * older kernels which do not support this it is safe to skip it.
 */
#ifdef HAVE_DISCARD_GRANULARITY
static inline void
blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
{
	q->limits.discard_granularity = dg;
}
#else
#define	blk_queue_discard_granularity(x, dg)	((void)0)
#endif /* HAVE_DISCARD_GRANULARITY */
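
/*
 * Illustrative sketch (not part of the original header): where supported,
 * a driver advertises its discard granularity at queue setup time, e.g.
 * using a placeholder block_size value:
 *
 *	blk_queue_discard_granularity(q, block_size);
 */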

/*
 * Default Linux IO Scheduler,
 * Setting the scheduler to noop will allow the Linux IO scheduler to
 * still perform front and back merging, while leaving the request
 * ordering and prioritization to the ZFS IO scheduler.
 */
#define	VDEV_SCHEDULER	"noop"

/*
 * A common holder for vdev_bdev_open() is used to relax the exclusive open
 * semantics slightly.  Internal vdev disk callers may pass VDEV_HOLDER to
 * allow them to open the device multiple times.  Other kernel callers and
 * user space processes which don't pass this value will get EBUSY.  This is
 * currently required for the correct operation of hot spares.
 */
#define	VDEV_HOLDER	((void *)0x2401de7)

#endif /* _ZFS_BLKDEV_H */