git.proxmox.com Git - mirror_zfs-debian.git/blobdiff - include/linux/blkdev_compat.h
Imported Upstream version 0.6.5.3
index 8566033fcfd22525254e0fd96ca864397df5a906..868b89c55cbbad13825bf07cb1b3c5ee09e31c11 100644
 typedef unsigned __bitwise__ fmode_t;
 #endif /* HAVE_FMODE_T */
 
-#ifndef HAVE_BLK_FETCH_REQUEST
-static inline struct request *
-blk_fetch_request(struct request_queue *q)
-{
-       struct request *req;
-
-       req = elv_next_request(q);
-       if (req)
-               blkdev_dequeue_request(req);
-
-       return (req);
-}
-#endif /* HAVE_BLK_FETCH_REQUEST */
-
-#ifndef HAVE_BLK_REQUEUE_REQUEST
-static inline void
-blk_requeue_request(request_queue_t *q, struct request *req)
-{
-       elv_requeue_request(q, req);
-}
-#endif /* HAVE_BLK_REQUEUE_REQUEST */
-
-#ifndef HAVE_BLK_END_REQUEST
-static inline bool
-__blk_end_request(struct request *req, int error, unsigned int nr_bytes)
-{
-       LIST_HEAD(list);
-
-       /*
-        * The request has already been dequeued, but the 2.6.18
-        * version of end_request() unconditionally dequeues it again,
-        * so we add it to a local list to prevent hitting the BUG_ON.
-        */
-       list_add(&req->queuelist, &list);
-
-       /*
-        * The old API required the driver to end each segment and not
-        * the entire request.  In our case we always need to end the
-        * entire request; partial requests are not supported.
-        */
-       req->hard_cur_sectors = nr_bytes >> 9;
-       end_request(req, ((error == 0) ? 1 : error));
-
-       return (0);
-}
-
-static inline bool
-blk_end_request(struct request *req, int error, unsigned int nr_bytes)
-{
-       struct request_queue *q = req->q;
-       bool rc;
-
-       spin_lock_irq(q->queue_lock);
-       rc = __blk_end_request(req, error, nr_bytes);
-       spin_unlock_irq(q->queue_lock);
-
-       return (rc);
-}
-#else
-#ifdef HAVE_BLK_END_REQUEST_GPL_ONLY
-/*
- * Define required to avoid conflicting 2.6.29 non-static prototype for a
- * GPL-only version of the helper.  As of 2.6.31 the helper is available
- * to non-GPL modules and is not explicitly exported GPL-only.
- */
-#define        __blk_end_request __blk_end_request_x
-#define        blk_end_request blk_end_request_x
-
-static inline bool
-__blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
-{
-       /*
-        * The old API required the driver to end each segment and not
-        * the entire request.  In our case we always need to end the
-        * entire request; partial requests are not supported.
-        */
-       req->hard_cur_sectors = nr_bytes >> 9;
-       end_request(req, ((error == 0) ? 1 : error));
-
-       return (0);
-}
-static inline bool
-blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
-{
-       struct request_queue *q = req->q;
-       bool rc;
-
-       spin_lock_irq(q->queue_lock);
-       rc = __blk_end_request_x(req, error, nr_bytes);
-       spin_unlock_irq(q->queue_lock);
-
-       return (rc);
-}
-#endif /* HAVE_BLK_END_REQUEST_GPL_ONLY */
-#endif /* HAVE_BLK_END_REQUEST */
-
 /*
  * 2.6.36 API change,
  * The blk_queue_flush() interface has replaced blk_queue_ordered()
@@ -148,37 +52,6 @@ __blk_queue_flush(struct request_queue *q, unsigned int flags)
        q->flush_flags = flags & (REQ_FLUSH | REQ_FUA);
 }
 #endif /* HAVE_BLK_QUEUE_FLUSH && HAVE_BLK_QUEUE_FLUSH_GPL_ONLY */
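
A hedged usage sketch: assuming the GPL-only case where __blk_queue_flush()
above stands in for blk_queue_flush(), a driver would advertise flush/FUA
support on its queue roughly as below (the function name is illustrative,
not part of this header):

static inline void
example_enable_flush(struct request_queue *q)
{
	/* Advertise cache-flush and FUA support for the device. */
	__blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
}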
-
-#ifndef HAVE_BLK_RQ_POS
-static inline sector_t
-blk_rq_pos(struct request *req)
-{
-       return (req->sector);
-}
-#endif /* HAVE_BLK_RQ_POS */
-
-#ifndef HAVE_BLK_RQ_SECTORS
-static inline unsigned int
-blk_rq_sectors(struct request *req)
-{
-       return (req->nr_sectors);
-}
-#endif /* HAVE_BLK_RQ_SECTORS */
-
-#if !defined(HAVE_BLK_RQ_BYTES) || defined(HAVE_BLK_RQ_BYTES_GPL_ONLY)
-/*
- * Define required to avoid conflicting 2.6.29 non-static prototype for a
- * GPL-only version of the helper.  As of 2.6.31 the helper is available
- * to non-GPL modules in the form of a static inline in the header.
- */
-#define        blk_rq_bytes __blk_rq_bytes
-static inline unsigned int
-__blk_rq_bytes(struct request *req)
-{
-       return (blk_rq_sectors(req) << 9);
-}
-#endif /* !HAVE_BLK_RQ_BYTES || HAVE_BLK_RQ_BYTES_GPL_ONLY */
-
 /*
  * Most of the blk_* macros were removed in 2.6.36.  Ostensibly this was
  * done to improve readability and allow easier grepping.  However, from
@@ -228,25 +101,6 @@ __blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
 }
 #endif
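
For illustration, queue limits would be sized through the compat wrapper;
a minimal sketch assuming this kernel era's blk_alloc_queue() and that
blk_queue_max_segments() resolves to the shim above on old kernels
(example_alloc_queue() is hypothetical):

static struct request_queue *
example_alloc_queue(void)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	/* Allow the maximum number of segments per request. */
	if (q != NULL)
		blk_queue_max_segments(q, USHRT_MAX);

	return (q);
}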
 
-/*
- * 2.6.30 API change,
- * The blk_queue_physical_block_size() function was introduced to
- * indicate the smallest I/O the device can write without incurring
- * a read-modify-write penalty.  For older kernels this is a no-op.
- */
-#ifndef HAVE_BLK_QUEUE_PHYSICAL_BLOCK_SIZE
-#define        blk_queue_physical_block_size(q, x)     ((void)(0))
-#endif
-
-/*
- * 2.6.30 API change,
- * The blk_queue_io_opt() function was added to indicate the optimal
- * I/O size for the device.  For older kernels this is a no-op.
- */
-#ifndef HAVE_BLK_QUEUE_IO_OPT
-#define        blk_queue_io_opt(q, x)                  ((void)(0))
-#endif
-
 #ifndef HAVE_GET_DISK_RO
 static inline int
 get_disk_ro(struct gendisk *disk)
@@ -260,64 +114,20 @@ get_disk_ro(struct gendisk *disk)
 }
 #endif /* HAVE_GET_DISK_RO */
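
As a usage sketch (the helper below is hypothetical), get_disk_ro() lets
an open path reject writable opens of a read-only disk:

static int
example_check_writable(struct block_device *bdev, fmode_t mode)
{
	/* Refuse to open a read-only disk for writing. */
	if ((mode & FMODE_WRITE) && get_disk_ro(bdev->bd_disk))
		return (-EROFS);

	return (0);
}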
 
-#ifndef HAVE_RQ_IS_SYNC
-static inline bool
-rq_is_sync(struct request *req)
-{
-       return (req->flags & REQ_RW_SYNC);
-}
-#endif /* HAVE_RQ_IS_SYNC */
-
-#ifndef HAVE_RQ_FOR_EACH_SEGMENT
-struct req_iterator {
-       int i;
-       struct bio *bio;
-};
-
-#define        for_each_bio(_bio)              \
-       for (; _bio; _bio = _bio->bi_next)
-
-#define        __rq_for_each_bio(_bio, rq)     \
-       if ((rq->bio))                  \
-               for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
-
-#define        rq_for_each_segment(bvl, _rq, _iter)                    \
-       __rq_for_each_bio(_iter.bio, _rq)                       \
-               bio_for_each_segment(bvl, _iter.bio, _iter.i)
-
-#define        HAVE_RQ_FOR_EACH_SEGMENT_BVP 1
-#endif /* HAVE_RQ_FOR_EACH_SEGMENT */
-
-/*
- * 3.14 API change
- * rq_for_each_segment changed from taking bio_vec * to taking bio_vec.
- * We provide rq_for_each_segment4 which takes both.
- * You should not modify the fields in @bv and @bvp.
- *
- * Note: the if-else is just to inject the assignment before the loop body.
- */
-#ifdef HAVE_RQ_FOR_EACH_SEGMENT_BVP
-#define        rq_for_each_segment4(bv, bvp, rq, iter) \
-       rq_for_each_segment(bvp, rq, iter)      \
-               if ((bv = *bvp), 0)             \
-                       ;                       \
-               else
-#else
-#define        rq_for_each_segment4(bv, bvp, rq, iter) \
-       rq_for_each_segment(bv, rq, iter)       \
-               if ((bvp = &bv), 0)             \
-                       ;                       \
-               else
-#endif
-
 #ifdef HAVE_BIO_BVEC_ITER
 #define        BIO_BI_SECTOR(bio)      (bio)->bi_iter.bi_sector
 #define        BIO_BI_SIZE(bio)        (bio)->bi_iter.bi_size
 #define        BIO_BI_IDX(bio)         (bio)->bi_iter.bi_idx
+#define        bio_for_each_segment4(bv, bvp, b, i)    \
+       bio_for_each_segment((bv), (b), (i))
+typedef struct bvec_iter bvec_iterator_t;
 #else
 #define        BIO_BI_SECTOR(bio)      (bio)->bi_sector
 #define        BIO_BI_SIZE(bio)        (bio)->bi_size
 #define        BIO_BI_IDX(bio)         (bio)->bi_idx
+#define        bio_for_each_segment4(bv, bvp, b, i)    \
+       bio_for_each_segment((bvp), (b), (i))
+typedef int bvec_iterator_t;
 #endif
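
A hedged sketch of walking a bio's segments portably with the shims above:
on 3.14+ kernels bio_for_each_segment4() fills bv by value, on older
kernels it fills bvp, so pointing bvp at bv up front gives one code path
for both.  The copy helper and its flat buffer are illustrative only
(kmap() comes from linux/highmem.h):

static void
example_copy_bio(struct bio *bio, void *buf)
{
	bvec_iterator_t iter;
	struct bio_vec bv, *bvp = &bv;
	size_t off = 0;
	void *p;

	bio_for_each_segment4(bv, bvp, bio, iter) {
		p = kmap(bvp->bv_page);
		memcpy((char *)buf + off, p + bvp->bv_offset, bvp->bv_len);
		kunmap(bvp->bv_page);
		off += bvp->bv_len;
	}
}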
 
 /*
@@ -347,26 +157,21 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
 #endif /* BLOCK_EXT_MAJOR */
 #endif /* CONFIG_BUG */
 
-#ifdef HAVE_BIO_RW_FAILFAST_DTD
+#if defined(HAVE_BIO_RW_FAILFAST_DTD)
        /* BIO_RW_FAILFAST_* preferred interface from 2.6.28 - 2.6.35 */
        *flags |= (
            (1 << BIO_RW_FAILFAST_DEV) |
            (1 << BIO_RW_FAILFAST_TRANSPORT) |
            (1 << BIO_RW_FAILFAST_DRIVER));
-#else
-#ifdef HAVE_BIO_RW_FAILFAST
-       /* BIO_RW_FAILFAST preferred interface from 2.6.12 - 2.6.27 */
-       *flags |= (1 << BIO_RW_FAILFAST);
-#else
-#ifdef HAVE_REQ_FAILFAST_MASK
+#elif defined(HAVE_REQ_FAILFAST_MASK)
        /*
         * REQ_FAILFAST_* preferred interface from 2.6.36 - 2.6.xx;
         * the BIO_* and REQ_* flags were unified under REQ_* flags.
         */
        *flags |= REQ_FAILFAST_MASK;
-#endif /* HAVE_REQ_FAILFAST_MASK */
-#endif /* HAVE_BIO_RW_FAILFAST */
-#endif /* HAVE_BIO_RW_FAILFAST_DTD */
+#else
+#error "Undefined block IO FAILFAST interface."
+#endif
 }
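
As a hedged usage sketch (names illustrative), the mask computed by
bio_set_flags_failfast() would be ORed into the rw argument of this era's
two-argument submit_bio():

static void
example_submit_failfast(struct block_device *bdev, struct bio *bio)
{
	int flags = 0;

	bio_set_flags_failfast(bdev, &flags);
	submit_bio(READ | flags, bio);
}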
 
 /*
@@ -377,20 +182,17 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
 #endif /* DISK_NAME_LEN */
 
 /*
- * 2.6.24 API change,
- * The bio_end_io() prototype changed slightly.  These are helper
- * macros to ensure the prototype and return value are handled.
+ * 4.3 API change
+ * The bio_endio() prototype changed slightly.  These are helper
+ * macros to ensure the prototype and invocation are handled.
  */
-#ifdef HAVE_2ARGS_BIO_END_IO_T
-#define        BIO_END_IO_PROTO(fn, x, y, z)   static void fn(struct bio *x, int z)
-#define        BIO_END_IO_RETURN(rc)           return
+#ifdef HAVE_1ARG_BIO_END_IO_T
+#define        BIO_END_IO_PROTO(fn, x, z)      static void fn(struct bio *x)
+#define        BIO_END_IO(bio, error)          do {            \
+       (bio)->bi_error = (error);                              \
+       bio_endio(bio);                                         \
+} while (0)
 #else
-#define        BIO_END_IO_PROTO(fn, x, y, z)   static int fn( \
-                                           struct bio *x, \
-                                           unsigned int y, \
-                                           int z)
-#define        BIO_END_IO_RETURN(rc)           return rc
-#endif /* HAVE_2ARGS_BIO_END_IO_T */
+#define        BIO_END_IO_PROTO(fn, x, z)      static void fn(struct bio *x, int z)
+#define        BIO_END_IO(bio, error)          bio_endio(bio, error)
+#endif /* HAVE_1ARG_BIO_END_IO_T */
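
A minimal sketch of a completion callback built on these macros, assuming
bi_private carries a struct completion to signal; the callback name is
hypothetical:

BIO_END_IO_PROTO(example_bio_done, bio, error)
{
#ifdef HAVE_1ARG_BIO_END_IO_T
	int error = bio->bi_error;	/* 4.3+: the error travels in the bio */
#endif
	if (error)
		pr_warn("example: bio failed: %d\n", error);

	complete(bio->bi_private);
}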
 
 /*
  * 2.6.38 - 2.6.x API,
@@ -481,17 +283,30 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
 #define        VDEV_REQ_FUA                    REQ_FUA
 #else
 #define        VDEV_WRITE_FLUSH_FUA            WRITE_BARRIER
+#ifdef HAVE_BIO_RW_BARRIER
+#define        VDEV_REQ_FLUSH                  (1 << BIO_RW_BARRIER)
+#define        VDEV_REQ_FUA                    (1 << BIO_RW_BARRIER)
+#else
 #define        VDEV_REQ_FLUSH                  REQ_HARDBARRIER
-#define        VDEV_REQ_FUA                    REQ_HARDBARRIER
+#define        VDEV_REQ_FUA                    REQ_FUA
+#endif
 #endif
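
For illustration, a cache flush would be issued by submitting an empty bio
with VDEV_WRITE_FLUSH_FUA and this era's two-argument submit_bio(); the
wiring below is a hypothetical sketch reusing the example_bio_done()
callback sketched earlier:

static void
example_issue_flush(struct block_device *bdev, struct bio *bio,
    struct completion *done)
{
	bio->bi_end_io = example_bio_done;
	bio->bi_private = done;
	bio->bi_bdev = bdev;

	submit_bio(VDEV_WRITE_FLUSH_FUA, bio);
}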
 
 /*
  * 2.6.32 API change
  * Use the normal I/O path for discards.
  */
-#ifdef REQ_DISCARD
+#ifdef QUEUE_FLAG_DISCARD
+#ifdef HAVE_BIO_RW_DISCARD
+#define        VDEV_REQ_DISCARD                (1 << BIO_RW_DISCARD)
+#else
 #define        VDEV_REQ_DISCARD                REQ_DISCARD
 #endif
+#else
+#error "Allowing the build will cause discard requests to become writes "
+       "potentially triggering the DMU_MAX_ACCESS assertion. Please file a "
+       "an issue report at: https://github.com/zfsonlinux/zfs/issues/new"
+#endif
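
A hedged sketch of how VDEV_REQ_DISCARD would gate discard handling in a
make_request-style function (the helper is hypothetical; on kernels of
this era the request flags live in bi_rw):

static int
example_is_discard(struct bio *bio)
{
	return ((bio->bi_rw & VDEV_REQ_DISCARD) != 0);
}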
 
 /*
  * 2.6.33 API change
@@ -525,4 +340,9 @@ blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
  */
 #define        VDEV_HOLDER                     ((void *)0x2401de7)
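
As a usage sketch, assuming a 2.6.38+ kernel where blkdev_get_by_path()
accepts a holder token (the helper and open mode are illustrative):

static struct block_device *
example_claim_device(const char *path)
{
	/* The unique VDEV_HOLDER token marks ZFS as the claimant. */
	return (blkdev_get_by_path(path,
	    FMODE_READ | FMODE_WRITE | FMODE_EXCL, VDEV_HOLDER));
}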
 
+#ifndef HAVE_GENERIC_IO_ACCT
+#define        generic_start_io_acct(rw, slen, part)           ((void)0)
+#define        generic_end_io_acct(rw, part, start_jiffies)    ((void)0)
+#endif
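
A hedged sketch of bracketing an I/O with the accounting hooks; on kernels
without HAVE_GENERIC_IO_ACCT the two calls above compile away.  The disk
argument and the elided I/O body are illustrative:

static void
example_account_bio(struct gendisk *disk, struct bio *bio)
{
	unsigned long start = jiffies;
	int rw = bio_data_dir(bio);

	generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);
	/* ... service the bio here ... */
	generic_end_io_acct(rw, &disk->part0, start);
}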
+
 #endif /* _ZFS_BLKDEV_H */