/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 */

#ifndef _ZFS_BLKDEV_H
#define _ZFS_BLKDEV_H

#include <linux/blkdev.h>
#include <linux/elevator.h>

#ifndef HAVE_FMODE_T
typedef unsigned __bitwise__ fmode_t;
#endif /* HAVE_FMODE_T */

#ifndef HAVE_BLK_FETCH_REQUEST
static inline struct request *
blk_fetch_request(struct request_queue *q)
{
	struct request *req;

	req = elv_next_request(q);
	if (req)
		blkdev_dequeue_request(req);

	return req;
}
#endif /* HAVE_BLK_FETCH_REQUEST */
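
/*
 * Example (illustrative sketch, not part of the compatibility API):
 * a request_fn() style dispatch loop built on blk_fetch_request().
 * The block layer invokes request_fn() with q->queue_lock held, which
 * is what blk_fetch_request() expects.  The function name is
 * hypothetical.
 */
static inline void
example_drain_queue(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		/* Hand the dequeued request to driver processing here. */
	}
}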

#ifndef HAVE_BLK_REQUEUE_REQUEST
static inline void
blk_requeue_request(request_queue_t *q, struct request *req)
{
	elv_requeue_request(q, req);
}
#endif /* HAVE_BLK_REQUEUE_REQUEST */

#ifndef HAVE_BLK_END_REQUEST
static inline bool
__blk_end_request(struct request *req, int error, unsigned int nr_bytes)
{
	LIST_HEAD(list);

	/*
	 * The request has already been dequeued, but the 2.6.18 version
	 * of end_request() unconditionally dequeues it, so add it to a
	 * local list to avoid hitting the BUG_ON.
	 */
	list_add(&req->queuelist, &list);

	/*
	 * The old API required the driver to end each segment and not
	 * the entire request.  In our case we always need to end the
	 * entire request; partial requests are not supported.
	 */
	req->hard_cur_sectors = nr_bytes >> 9;
	end_request(req, ((error == 0) ? 1 : error));

	return 0;
}

static inline bool
blk_end_request(struct request *req, int error, unsigned int nr_bytes)
{
	struct request_queue *q = req->q;
	bool rc;

	spin_lock_irq(q->queue_lock);
	rc = __blk_end_request(req, error, nr_bytes);
	spin_unlock_irq(q->queue_lock);

	return rc;
}
#else
# ifdef HAVE_BLK_END_REQUEST_GPL_ONLY
/*
 * These defines are required to avoid a conflict with the 2.6.29
 * non-static prototype for a GPL-only version of the helper.  As of
 * 2.6.31 the helper is available to non-GPL modules and is not
 * explicitly exported GPL-only.
 */
# define __blk_end_request __blk_end_request_x
# define blk_end_request blk_end_request_x

static inline bool
__blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
{
	/*
	 * The old API required the driver to end each segment and not
	 * the entire request.  In our case we always need to end the
	 * entire request; partial requests are not supported.
	 */
	req->hard_cur_sectors = nr_bytes >> 9;
	end_request(req, ((error == 0) ? 1 : error));

	return 0;
}

static inline bool
blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
{
	struct request_queue *q = req->q;
	bool rc;

	spin_lock_irq(q->queue_lock);
	rc = __blk_end_request_x(req, error, nr_bytes);
	spin_unlock_irq(q->queue_lock);

	return rc;
}
# endif /* HAVE_BLK_END_REQUEST_GPL_ONLY */
#endif /* HAVE_BLK_END_REQUEST */

#ifndef HAVE_BLK_RQ_POS
static inline sector_t
blk_rq_pos(struct request *req)
{
	return req->sector;
}
#endif /* HAVE_BLK_RQ_POS */

#ifndef HAVE_BLK_RQ_SECTORS
static inline unsigned int
blk_rq_sectors(struct request *req)
{
	return req->nr_sectors;
}
#endif /* HAVE_BLK_RQ_SECTORS */

#if !defined(HAVE_BLK_RQ_BYTES) || defined(HAVE_BLK_RQ_BYTES_GPL_ONLY)
/*
 * This define is required to avoid a conflict with the 2.6.29
 * non-static prototype for a GPL-only version of the helper.  As of
 * 2.6.31 the helper is available to non-GPL modules in the form of a
 * static inline in the header.
 */
#define blk_rq_bytes __blk_rq_bytes
static inline unsigned int
__blk_rq_bytes(struct request *req)
{
	return blk_rq_sectors(req) << 9;
}
#endif /* !HAVE_BLK_RQ_BYTES || HAVE_BLK_RQ_BYTES_GPL_ONLY */
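
/*
 * Example (illustrative only): with the accessors above, request
 * geometry can be read portably and an entire request completed in
 * one call on every supported kernel.  The function name is
 * hypothetical.
 */
static inline void
example_end_whole_request(struct request *req, int error)
{
	sector_t start = blk_rq_pos(req);	/* starting sector */
	unsigned int size = blk_rq_bytes(req);	/* total bytes */

	(void) start;	/* a real driver would perform the I/O here */
	blk_end_request(req, error, size);
}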

/*
 * Most of the blk_* macros were removed in 2.6.36.  Ostensibly this
 * was done to improve readability and allow easier grepping.  However,
 * from a portability standpoint the macros are helpful, so the needed
 * macros are redefined here if they are missing from the kernel.
 */
#ifndef blk_fs_request
#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
#endif

/*
 * 2.6.27 API change,
 * The blk_queue_stackable() queue flag was added in 2.6.27 to handle
 * dm stacking drivers.  Prior to this, request stacking drivers were
 * detected by checking (q->request_fn == NULL), so for earlier kernels
 * we revert to this legacy behavior.
 */
#ifndef blk_queue_stackable
#define blk_queue_stackable(q) ((q)->request_fn == NULL)
#endif

#ifndef HAVE_GET_DISK_RO
static inline int
get_disk_ro(struct gendisk *disk)
{
	int policy = 0;

	if (disk->part[0])
		policy = disk->part[0]->policy;

	return policy;
}
#endif /* HAVE_GET_DISK_RO */

#ifndef HAVE_RQ_IS_SYNC
static inline bool
rq_is_sync(struct request *req)
{
	return (req->flags & REQ_RW_SYNC);
}
#endif /* HAVE_RQ_IS_SYNC */

#ifndef HAVE_RQ_FOR_EACH_SEGMENT
struct req_iterator {
	int i;
	struct bio *bio;
};

# define for_each_bio(_bio) \
	for (; _bio; _bio = _bio->bi_next)

# define __rq_for_each_bio(_bio, rq) \
	if ((rq->bio)) \
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

# define rq_for_each_segment(bvl, _rq, _iter) \
	__rq_for_each_bio(_iter.bio, _rq) \
		bio_for_each_segment(bvl, _iter.bio, _iter.i)
#endif /* HAVE_RQ_FOR_EACH_SEGMENT */
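
/*
 * Example (sketch): walking every bio_vec in a request with the
 * rq_for_each_segment() macro above.  On the 2.6-era kernels this
 * header targets the iteration variable is a struct bio_vec pointer.
 * Counting bytes is illustrative; a real driver would map and copy
 * each segment.
 */
static inline unsigned int
example_count_request_bytes(struct request *req)
{
	struct req_iterator iter;
	struct bio_vec *bv;
	unsigned int bytes = 0;

	rq_for_each_segment(bv, req, iter)
		bytes += bv->bv_len;

	return bytes;
}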

/*
 * Portable helper for correctly setting the FAILFAST flags.  The
 * correct usage has changed 3 times from 2.6.12 to 2.6.38.
 */
static inline void
bio_set_flags_failfast(struct block_device *bdev, int *flags)
{
#ifdef CONFIG_BUG
	/*
	 * Disable FAILFAST for loopback devices because of the
	 * following incorrect BUG_ON() in loop_make_request().
	 * This support is also disabled for md devices because the
	 * test suite layers md devices on top of loopback devices.
	 * This may be removed when the loopback driver is fixed.
	 *
	 *   BUG_ON(!lo || (rw != READ && rw != WRITE));
	 */
	if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
	    (MAJOR(bdev->bd_dev) == MD_MAJOR))
		return;

#ifdef BLOCK_EXT_MAJOR
	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		return;
#endif /* BLOCK_EXT_MAJOR */
#endif /* CONFIG_BUG */

#ifdef HAVE_BIO_RW_FAILFAST_DTD
	/* BIO_RW_FAILFAST_* preferred interface from 2.6.28 - 2.6.35 */
	*flags |=
	    ((1 << BIO_RW_FAILFAST_DEV) |
	    (1 << BIO_RW_FAILFAST_TRANSPORT) |
	    (1 << BIO_RW_FAILFAST_DRIVER));
#else
# ifdef HAVE_BIO_RW_FAILFAST
	/* BIO_RW_FAILFAST preferred interface from 2.6.12 - 2.6.27 */
	*flags |= (1 << BIO_RW_FAILFAST);
# else
#  ifdef HAVE_REQ_FAILFAST_MASK
	/*
	 * REQ_FAILFAST_* preferred interface from 2.6.36 - 2.6.xx;
	 * the BIO_* and REQ_* flags were unified under REQ_* flags.
	 */
	*flags |= REQ_FAILFAST_MASK;
#  endif /* HAVE_REQ_FAILFAST_MASK */
# endif /* HAVE_BIO_RW_FAILFAST */
#endif /* HAVE_BIO_RW_FAILFAST_DTD */
}
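
/*
 * Example (hypothetical): applying the portable failfast flags when
 * submitting a bio to a given block device.  Bio setup and completion
 * handling are omitted from this sketch.
 */
static inline void
example_submit_failfast(struct block_device *bdev, struct bio *bio, int rw)
{
	bio_set_flags_failfast(bdev, &rw);
	submit_bio(rw, bio);
}
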
/*
 * Maximum disk name length; it may be undefined for some kernels.
 */
#ifndef DISK_NAME_LEN
#define DISK_NAME_LEN 32
#endif /* DISK_NAME_LEN */

/*
 * 2.6.24 API change,
 * The bio_end_io() prototype changed slightly.  These helper macros
 * ensure the prototype and return value are handled portably.
 */
#ifdef HAVE_2ARGS_BIO_END_IO_T
# define BIO_END_IO_PROTO(fn, x, y, z) static void fn(struct bio *x, int z)
# define BIO_END_IO_RETURN(rc) return
#else
# define BIO_END_IO_PROTO(fn, x, y, z) static int fn(struct bio *x, \
	unsigned int y, int z)
# define BIO_END_IO_RETURN(rc) return rc
#endif /* HAVE_2ARGS_BIO_END_IO_T */
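
/*
 * Example usage (illustrative only, guarded out because the macro
 * expands to a plain static function): one callback definition that
 * compiles against both bi_end_io prototypes.  The callback name and
 * body are hypothetical.
 */
#if 0
BIO_END_IO_PROTO(example_bio_end_io, bio, size, error)
{
	/* Per-bio completion handling would go here. */
	bio_put(bio);
	BIO_END_IO_RETURN(0);
}
#endif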

/*
 * 2.6.38 - 2.6.x API,
 * blkdev_get_by_path()
 * blkdev_put()
 *
 * 2.6.28 - 2.6.37 API,
 * open_bdev_exclusive()
 * close_bdev_exclusive()
 *
 * 2.6.12 - 2.6.27 API,
 * open_bdev_excl()
 * close_bdev_excl()
 *
 * Used to exclusively open a block device from within the kernel.
 */
#if defined(HAVE_BLKDEV_GET_BY_PATH)
# define vdev_bdev_open(path, md, hld) blkdev_get_by_path(path, \
	(md) | FMODE_EXCL, hld)
# define vdev_bdev_close(bdev, md) blkdev_put(bdev, (md) | FMODE_EXCL)
#elif defined(HAVE_OPEN_BDEV_EXCLUSIVE)
# define vdev_bdev_open(path, md, hld) open_bdev_exclusive(path, md, hld)
# define vdev_bdev_close(bdev, md) close_bdev_exclusive(bdev, md)
#else
# define vdev_bdev_open(path, md, hld) open_bdev_excl(path, md, hld)
# define vdev_bdev_close(bdev, md) close_bdev_excl(bdev)
#endif /* HAVE_BLKDEV_GET_BY_PATH | HAVE_OPEN_BDEV_EXCLUSIVE */
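
/*
 * Example (hypothetical): exclusively opening a block device by path
 * with the wrappers above.  The holder token is typically a stable
 * per-vdev pointer; the caller must check the result with IS_ERR().
 */
static inline struct block_device *
example_open_vdev(const char *path, void *holder)
{
	return vdev_bdev_open(path, FMODE_READ | FMODE_WRITE, holder);
}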

/*
 * 2.6.22 API change
 * The function invalidate_bdev() lost its second argument because
 * it was unused.
 */
#ifdef HAVE_1ARG_INVALIDATE_BDEV
# define vdev_bdev_invalidate(bdev) invalidate_bdev(bdev)
#else
# define vdev_bdev_invalidate(bdev) invalidate_bdev(bdev, 1)
#endif /* HAVE_1ARG_INVALIDATE_BDEV */

/*
 * 2.6.30 API change
 * The API was changed to make it explicit that this is the logical
 * block size.
 */
#ifdef HAVE_BDEV_LOGICAL_BLOCK_SIZE
# define vdev_bdev_block_size(bdev) bdev_logical_block_size(bdev)
#else
# define vdev_bdev_block_size(bdev) bdev_hardsect_size(bdev)
#endif
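
/*
 * Example (sketch): the wrappers above make cache invalidation and the
 * logical block size uniform across kernels.  The function name is
 * hypothetical.
 */
static inline unsigned int
example_prepare_bdev(struct block_device *bdev)
{
	vdev_bdev_invalidate(bdev);	/* drop any stale cached pages */
	return vdev_bdev_block_size(bdev);
}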

/*
 * 2.6.37 API change
 * The WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags have been
 * introduced as a replacement for WRITE_BARRIER.  This was done to
 * allow richer semantics to be expressed to the block layer.  It is
 * the block layer's responsibility to choose the correct way to
 * implement these semantics.
 */
#ifdef WRITE_FLUSH_FUA
# define VDEV_WRITE_FLUSH_FUA WRITE_FLUSH_FUA
#else
# define VDEV_WRITE_FLUSH_FUA WRITE_BARRIER
#endif
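
/*
 * Example (hypothetical): issuing a flush with the portable flag
 * chosen above.  Bio allocation, completion handling, and error
 * checking are omitted from this sketch.
 */
static inline void
example_issue_flush(struct block_device *bdev, struct bio *bio)
{
	bio->bi_bdev = bdev;
	submit_bio(VDEV_WRITE_FLUSH_FUA, bio);
}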

/*
 * Default Linux IO Scheduler,
 * Setting the scheduler to noop will allow the Linux IO scheduler to
 * still perform front and back merging, while leaving the request
 * ordering and prioritization to the ZFS IO scheduler.
 */
#define VDEV_SCHEDULER "noop"
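
/*
 * Example (hypothetical, assumes a kernel which exports
 * elevator_change(); it is only available in some versions):
 * requesting the noop elevator for a vdev's request queue.
 */
static inline int
example_set_scheduler(struct request_queue *q)
{
	return elevator_change(q, VDEV_SCHEDULER);
}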

#endif /* _ZFS_BLKDEV_H */