/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 */

#ifndef _SYS_BLKDEV_H
#define _SYS_BLKDEV_H

#ifdef _KERNEL

#include <linux/blkdev.h>
#include <linux/elevator.h>

#ifndef HAVE_FMODE_T
typedef unsigned __bitwise__ fmode_t;
#endif /* HAVE_FMODE_T */

#ifndef HAVE_BLK_FETCH_REQUEST
static inline struct request *
blk_fetch_request(struct request_queue *q)
{
	struct request *req;

	req = elv_next_request(q);
	if (req)
		blkdev_dequeue_request(req);

	return req;
}
#endif /* HAVE_BLK_FETCH_REQUEST */

#ifndef HAVE_BLK_REQUEUE_REQUEST
static inline void
blk_requeue_request(request_queue_t *q, struct request *req)
{
	elv_requeue_request(q, req);
}
#endif /* HAVE_BLK_REQUEUE_REQUEST */
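
/*
 * Example (illustrative sketch; the handler and device_busy() names
 * are hypothetical): draining a queue with the wrappers above and
 * requeueing a request that cannot be serviced yet.  On the older
 * kernels these wrappers target, the request function is entered
 * with q->queue_lock held, which elv_next_request() and
 * elv_requeue_request() expect.
 *
 *	static void example_request_fn(struct request_queue *q)
 *	{
 *		struct request *req;
 *
 *		while ((req = blk_fetch_request(q)) != NULL) {
 *			if (device_busy()) {
 *				blk_requeue_request(q, req);
 *				break;
 *			}
 *			(service the request)
 *		}
 *	}
 */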

#ifndef HAVE_BLK_END_REQUEST
static inline bool
__blk_end_request(struct request *req, int error, unsigned int nr_bytes)
{
	LIST_HEAD(list);

	/*
	 * The request has already been dequeued, but the 2.6.18 version
	 * of end_request() unconditionally dequeues the request, so we
	 * add it to a local list to prevent hitting the BUG_ON.
	 */
	list_add(&req->queuelist, &list);

	/*
	 * The old API required the driver to end each segment and not
	 * the entire request.  In our case we always need to end the
	 * entire request; partial requests are not supported.
	 */
	req->hard_cur_sectors = nr_bytes >> 9;
	end_request(req, ((error == 0) ? 1 : error));

	return 0;
}

static inline bool
blk_end_request(struct request *req, int error, unsigned int nr_bytes)
{
	struct request_queue *q = req->q;
	bool rc;

	spin_lock_irq(q->queue_lock);
	rc = __blk_end_request(req, error, nr_bytes);
	spin_unlock_irq(q->queue_lock);

	return rc;
}
#else
# ifdef HAVE_BLK_END_REQUEST_GPL_ONLY
/*
 * These defines are required to avoid the conflicting 2.6.29
 * non-static prototypes for the GPL-only versions of these helpers.
 * As of 2.6.31 the helpers are available to non-GPL modules and are
 * not explicitly exported GPL-only.
 */
# define __blk_end_request __blk_end_request_x
# define blk_end_request blk_end_request_x

static inline bool
__blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
{
	/*
	 * The old API required the driver to end each segment and not
	 * the entire request.  In our case we always need to end the
	 * entire request; partial requests are not supported.
	 */
	req->hard_cur_sectors = nr_bytes >> 9;
	end_request(req, ((error == 0) ? 1 : error));

	return 0;
}

static inline bool
blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
{
	struct request_queue *q = req->q;
	bool rc;

	spin_lock_irq(q->queue_lock);
	rc = __blk_end_request_x(req, error, nr_bytes);
	spin_unlock_irq(q->queue_lock);

	return rc;
}
# endif /* HAVE_BLK_END_REQUEST_GPL_ONLY */
#endif /* HAVE_BLK_END_REQUEST */
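
/*
 * Example (illustrative sketch; example_do_io() is a hypothetical
 * helper): completing an entire request in a single call.  Note that
 * blk_end_request() takes q->queue_lock itself, so it must be called
 * with the lock dropped; use __blk_end_request() when the lock is
 * already held.
 *
 *	int error = example_do_io(req);
 *
 *	blk_end_request(req, error, blk_rq_bytes(req));
 */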

#ifndef HAVE_BLK_RQ_POS
static inline sector_t
blk_rq_pos(struct request *req)
{
	return req->sector;
}
#endif /* HAVE_BLK_RQ_POS */

#ifndef HAVE_BLK_RQ_SECTORS
static inline unsigned int
blk_rq_sectors(struct request *req)
{
	return req->nr_sectors;
}
#endif /* HAVE_BLK_RQ_SECTORS */

#if !defined(HAVE_BLK_RQ_BYTES) || defined(HAVE_BLK_RQ_BYTES_GPL_ONLY)
/*
 * This define is required to avoid the conflicting 2.6.29 non-static
 * prototype for the GPL-only version of the helper.  As of 2.6.31 the
 * helper is available to non-GPL modules in the form of a static
 * inline in the header.
 */
#define blk_rq_bytes __blk_rq_bytes
static inline unsigned int
__blk_rq_bytes(struct request *req)
{
	return blk_rq_sectors(req) << 9;
}
#endif /* !HAVE_BLK_RQ_BYTES || HAVE_BLK_RQ_BYTES_GPL_ONLY */
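
/*
 * Example (illustrative sketch): converting the accessors above to a
 * byte-based offset and length.  A sector is 512 bytes, which is the
 * reason for the shift by 9 in __blk_rq_bytes().
 *
 *	u64 offset = (u64)blk_rq_pos(req) << 9;
 *	unsigned int size = blk_rq_bytes(req);
 *
 * Here size is always equal to blk_rq_sectors(req) << 9.
 */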

#ifndef HAVE_GET_DISK_RO
static inline int
get_disk_ro(struct gendisk *disk)
{
	int policy = 0;

	if (disk->part[0])
		policy = disk->part[0]->policy;

	return policy;
}
#endif /* HAVE_GET_DISK_RO */
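
/*
 * Example (illustrative sketch): rejecting a write to a read-only
 * disk before any I/O is issued.
 *
 *	if (rq_data_dir(req) == WRITE && get_disk_ro(req->rq_disk))
 *		blk_end_request(req, -EROFS, blk_rq_bytes(req));
 */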

#ifndef HAVE_RQ_IS_SYNC
static inline bool
rq_is_sync(struct request *req)
{
	return (req->flags & REQ_RW_SYNC);
}
#endif /* HAVE_RQ_IS_SYNC */

#ifndef HAVE_RQ_FOR_EACH_SEGMENT
struct req_iterator {
	int i;
	struct bio *bio;
};

# define for_each_bio(_bio) \
	for (; _bio; _bio = _bio->bi_next)

# define __rq_for_each_bio(_bio, rq) \
	if ((rq->bio)) \
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

# define rq_for_each_segment(bvl, _rq, _iter) \
	__rq_for_each_bio(_iter.bio, _rq) \
		bio_for_each_segment(bvl, _iter.bio, _iter.i)
#endif /* HAVE_RQ_FOR_EACH_SEGMENT */
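
/*
 * Example (illustrative sketch): walking every bio_vec segment of a
 * request with the iterator above.  On the older kernels this
 * emulation targets, bio_for_each_segment() yields a struct bio_vec
 * pointer.  page_address() assumes a lowmem page; a highmem page
 * would need kmap().
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bv;
 *	u64 offset = (u64)blk_rq_pos(req) << 9;
 *
 *	rq_for_each_segment(bv, req, iter) {
 *		void *data = page_address(bv->bv_page) + bv->bv_offset;
 *		(copy bv->bv_len bytes to or from the device at offset)
 *		offset += bv->bv_len;
 *	}
 */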

static inline void
bio_set_flags_failfast(struct block_device *bdev, int *flags)
{
#ifdef HAVE_BIO_RW_FAILFAST
	/*
	 * Disable BIO_RW_FAILFAST_* for loopback devices because of
	 * the following incorrect BUG_ON() in loop_make_request().
	 * This support is also disabled for md devices because the
	 * test suite layers md devices on top of loopback devices.
	 * This may be removed when the loopback driver is fixed.
	 *
	 * BUG_ON(!lo || (rw != READ && rw != WRITE));
	 */
#ifdef CONFIG_BUG
	if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
	    (MAJOR(bdev->bd_dev) == MD_MAJOR))
		return;

#ifdef BLOCK_EXT_MAJOR
	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		return;
#endif /* BLOCK_EXT_MAJOR */
#endif /* CONFIG_BUG */
	*flags |=
	    ((1 << BIO_RW_FAILFAST_DEV) |
	    (1 << BIO_RW_FAILFAST_TRANSPORT) |
	    (1 << BIO_RW_FAILFAST_DRIVER));
#else /* !HAVE_BIO_RW_FAILFAST */
	*flags |= (1 << BIO_RW_FAILFAST);
#endif /* HAVE_BIO_RW_FAILFAST */
}
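
/*
 * Example (illustrative sketch): building the read/write flags for a
 * bio submission.  bio_set_flags_failfast() ORs the failfast bits
 * into flags unless the underlying device is a loopback or md device.
 *
 *	int flags = 0;
 *
 *	bio_set_flags_failfast(bdev, &flags);
 *	submit_bio(READ | flags, bio);
 */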

#ifndef DISK_NAME_LEN
#define DISK_NAME_LEN 32
#endif /* DISK_NAME_LEN */

#endif /* _KERNEL */

#endif /* _SYS_BLKDEV_H */