cstyle: Resolve C style issues

diff --git a/include/linux/blkdev_compat.h b/include/linux/blkdev_compat.h
index ec9926ffd9fe1be50e47178958cadf6845c08e9b..e45601bc8280410407e3be578cfaad74227bf6b6 100644
--- a/include/linux/blkdev_compat.h
+++ b/include/linux/blkdev_compat.h
@@ -27,7 +27,7 @@
  */
 
 #ifndef _ZFS_BLKDEV_H
-#define _ZFS_BLKDEV_H
+#define        _ZFS_BLKDEV_H
 
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
@@ -46,7 +46,7 @@ blk_fetch_request(struct request_queue *q)
        if (req)
                blkdev_dequeue_request(req);
 
-       return req;
+       return (req);
 }
 #endif /* HAVE_BLK_FETCH_REQUEST */
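
For context, a minimal sketch of a consumer of this shim, assuming a
request_fn-style driver; the function name is hypothetical. The kernel
invokes a request_fn with q->queue_lock held, which is what
blk_fetch_request() expects:

/* Illustrative only: pull requests off the queue for processing. */
static void
zvol_request(struct request_queue *q)
{
        struct request *req;

        /* Returns the next request already dequeued, or NULL when empty. */
        while ((req = blk_fetch_request(q)) != NULL) {
                /* ... hand req off to a worker for processing ... */
        }
}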
 
@@ -79,7 +79,7 @@ __blk_end_request(struct request *req, int error, unsigned int nr_bytes)
        req->hard_cur_sectors = nr_bytes >> 9;
        end_request(req, ((error == 0) ? 1 : error));
 
-       return 0;
+       return (0);
 }
 
 static inline bool
@@ -92,17 +92,17 @@ blk_end_request(struct request *req, int error, unsigned int nr_bytes)
        rc = __blk_end_request(req, error, nr_bytes);
        spin_unlock_irq(q->queue_lock);
 
-       return rc;
+       return (rc);
 }
 #else
-# ifdef HAVE_BLK_END_REQUEST_GPL_ONLY
+#ifdef HAVE_BLK_END_REQUEST_GPL_ONLY
 /*
  * Define required to avoid conflicting 2.6.29 non-static prototype for a
  * GPL-only version of the helper.  As of 2.6.31 the helper is available
  * to non-GPL modules and is not explicitly exported GPL-only.
  */
-# define __blk_end_request __blk_end_request_x
-# define blk_end_request blk_end_request_x
+#define        __blk_end_request __blk_end_request_x
+#define        blk_end_request blk_end_request_x
 
 static inline bool
 __blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
@@ -115,7 +115,7 @@ __blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
        req->hard_cur_sectors = nr_bytes >> 9;
        end_request(req, ((error == 0) ? 1 : error));
 
-       return 0;
+       return (0);
 }
 static inline bool
 blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
@@ -127,9 +127,9 @@ blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
        rc = __blk_end_request_x(req, error, nr_bytes);
        spin_unlock_irq(q->queue_lock);
 
-       return rc;
+       return (rc);
 }
-# endif /* HAVE_BLK_END_REQUEST_GPL_ONLY */
+#endif /* HAVE_BLK_END_REQUEST_GPL_ONLY */
 #endif /* HAVE_BLK_END_REQUEST */
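
Whichever branch is compiled in, callers see a single completion
interface: blk_end_request() acquires q->queue_lock itself, while
__blk_end_request() is for callers already holding it. A hedged sketch,
with a hypothetical helper name:

/* Illustrative only: complete 'bytes' of a request without the lock held. */
static void
zvol_end_request(struct request *req, int error, unsigned int bytes)
{
        /* Returns true while the request still has bytes outstanding. */
        if (!blk_end_request(req, error, bytes)) {
                /* ... fully completed, release per-request state ... */
        }
}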
 
 /*
@@ -141,7 +141,7 @@ blk_end_request_x(struct request *req, int error, unsigned int nr_bytes)
  * that long term this function will be opened up.
  */
 #if defined(HAVE_BLK_QUEUE_FLUSH) && defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
-#define blk_queue_flush __blk_queue_flush
+#define        blk_queue_flush __blk_queue_flush
 static inline void
 __blk_queue_flush(struct request_queue *q, unsigned int flags)
 {
@@ -153,7 +153,7 @@ __blk_queue_flush(struct request_queue *q, unsigned int flags)
 static inline sector_t
 blk_rq_pos(struct request *req)
 {
-       return req->sector;
+       return (req->sector);
 }
 #endif /* HAVE_BLK_RQ_POS */
 
@@ -161,7 +161,7 @@ blk_rq_pos(struct request *req)
 static inline unsigned int
 blk_rq_sectors(struct request *req)
 {
-       return req->nr_sectors;
+       return (req->nr_sectors);
 }
 #endif /* HAVE_BLK_RQ_SECTORS */
 
@@ -171,11 +171,11 @@ blk_rq_sectors(struct request *req)
  * GPL-only version of the helper.  As of 2.6.31 the helper is available
  * to non-GPL modules in the form of a static inline in the header.
  */
-#define blk_rq_bytes __blk_rq_bytes
+#define        blk_rq_bytes __blk_rq_bytes
 static inline unsigned int
 __blk_rq_bytes(struct request *req)
 {
-       return blk_rq_sectors(req) << 9;
+       return (blk_rq_sectors(req) << 9);
 }
 #endif /* !HAVE_BLK_RQ_BYTES || HAVE_BLK_RQ_BYTES_GPL_ONLY */
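
Together these provide a version-independent way to read a request's
starting sector and length. A small illustrative helper (name
hypothetical, assuming the usual kernel headers):

/* Illustrative only: report a request's position and size. */
static void
zvol_dump_request(struct request *req)
{
        printk(KERN_DEBUG "req: sector %llu, %u sectors, %u bytes\n",
            (unsigned long long)blk_rq_pos(req),
            blk_rq_sectors(req), blk_rq_bytes(req));
}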
 
@@ -186,7 +186,7 @@ __blk_rq_bytes(struct request *req)
  * macros are redefined here if they are missing from the kernel.
  */
 #ifndef blk_fs_request
-#define blk_fs_request(rq)     ((rq)->cmd_type == REQ_TYPE_FS)
+#define        blk_fs_request(rq)      ((rq)->cmd_type == REQ_TYPE_FS)
 #endif
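
A sketch of the intended use, filtering out non-filesystem requests
before servicing them (helper name hypothetical):

/* Illustrative only: reject anything that is not normal filesystem I/O. */
static int
zvol_check_request(struct request *req)
{
        if (!blk_fs_request(req))
                return (-EIO);

        return (0);
}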
 
 /*
@@ -197,7 +197,7 @@ __blk_rq_bytes(struct request *req)
  * this legacy behavior.
  */
 #ifndef blk_queue_stackable
-#define blk_queue_stackable(q) ((q)->request_fn == NULL)
+#define        blk_queue_stackable(q)  ((q)->request_fn == NULL)
 #endif
 
 /*
@@ -205,7 +205,7 @@ __blk_rq_bytes(struct request *req)
  * The blk_queue_max_hw_sectors() function replaces blk_queue_max_sectors().
  */
 #ifndef HAVE_BLK_QUEUE_MAX_HW_SECTORS
-#define blk_queue_max_hw_sectors __blk_queue_max_hw_sectors
+#define        blk_queue_max_hw_sectors __blk_queue_max_hw_sectors
 static inline void
 __blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 {
@@ -219,7 +219,7 @@ __blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
  * blk_queue_max_hw_segments() and blk_queue_max_phys_segments().
  */
 #ifndef HAVE_BLK_QUEUE_MAX_SEGMENTS
-#define blk_queue_max_segments __blk_queue_max_segments
+#define        blk_queue_max_segments __blk_queue_max_segments
 static inline void
 __blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
 {
@@ -235,7 +235,7 @@ __blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
  * a read-modify-write penalty.  For older kernels this is a no-op.
  */
 #ifndef HAVE_BLK_QUEUE_PHYSICAL_BLOCK_SIZE
-#define blk_queue_physical_block_size(q, x)    ((void)(0))
+#define        blk_queue_physical_block_size(q, x)     ((void)(0))
 #endif
 
 /*
@@ -244,7 +244,7 @@ __blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
  * I/O size for the device.  For older kernels this is a no-op.
  */
 #ifndef HAVE_BLK_QUEUE_IO_OPT
-#define blk_queue_io_opt(q, x)                 ((void)(0))
+#define        blk_queue_io_opt(q, x)                  ((void)(0))
 #endif
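
With the no-op fallbacks above, queue limits can be configured
unconditionally; on older kernels the physical-block-size and optimal-I/O
hints simply vanish. A sketch with illustrative values:

/* Illustrative only: typical queue configuration at disk-create time. */
static void
zvol_config_queue(struct request_queue *q)
{
        blk_queue_max_hw_sectors(q, 1024);      /* 512 KiB per request */
        blk_queue_max_segments(q, 128);         /* sg entries per request */
        blk_queue_physical_block_size(q, 4096); /* no-op on older kernels */
        blk_queue_io_opt(q, 131072);            /* no-op on older kernels */
}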
 
 #ifndef HAVE_GET_DISK_RO
@@ -256,7 +256,7 @@ get_disk_ro(struct gendisk *disk)
        if (disk->part[0])
                policy = disk->part[0]->policy;
 
-       return policy;
+       return (policy);
 }
 #endif /* HAVE_GET_DISK_RO */
 
@@ -274,14 +274,14 @@ struct req_iterator {
        struct bio *bio;
 };
 
-# define for_each_bio(_bio)              \
+#define        for_each_bio(_bio)              \
        for (; _bio; _bio = _bio->bi_next)
 
-# define __rq_for_each_bio(_bio, rq)    \
+#define        __rq_for_each_bio(_bio, rq)     \
        if ((rq->bio))                  \
                for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
 
-# define rq_for_each_segment(bvl, _rq, _iter)                   \
+#define        rq_for_each_segment(bvl, _rq, _iter)                    \
        __rq_for_each_bio(_iter.bio, _rq)                       \
                bio_for_each_segment(bvl, _iter.bio, _iter.i)
 #endif /* HAVE_RQ_FOR_EACH_SEGMENT */
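
The compat macros preserve the modern iteration idiom; on kernels of
this era the segment variable is a struct bio_vec pointer. A sketch that
tallies a request's payload (name hypothetical):

/* Illustrative only: total the bytes mapped by a request's segments. */
static unsigned int
zvol_request_bytes(struct request *req)
{
        struct req_iterator iter;
        struct bio_vec *bvec;
        unsigned int bytes = 0;

        rq_for_each_segment(bvec, req, iter)
                bytes += bvec->bv_len;

        return (bytes);
}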
@@ -315,21 +315,23 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
 
 #ifdef HAVE_BIO_RW_FAILFAST_DTD
        /* BIO_RW_FAILFAST_* preferred interface from 2.6.28 - 2.6.35 */
-       *flags |=
-           ((1 << BIO_RW_FAILFAST_DEV) |
-            (1 << BIO_RW_FAILFAST_TRANSPORT) |
-            (1 << BIO_RW_FAILFAST_DRIVER));
+       *flags |= (
+           (1 << BIO_RW_FAILFAST_DEV) |
+           (1 << BIO_RW_FAILFAST_TRANSPORT) |
+           (1 << BIO_RW_FAILFAST_DRIVER));
 #else
-# ifdef HAVE_BIO_RW_FAILFAST
+#ifdef HAVE_BIO_RW_FAILFAST
        /* BIO_RW_FAILFAST preferred interface from 2.6.12 - 2.6.27 */
        *flags |= (1 << BIO_RW_FAILFAST);
-# else
-#  ifdef HAVE_REQ_FAILFAST_MASK
-       /* REQ_FAILFAST_* preferred interface from 2.6.36 - 2.6.xx,
-        * the BIO_* and REQ_* flags were unified under REQ_* flags. */
+#else
+#ifdef HAVE_REQ_FAILFAST_MASK
+       /*
+        * REQ_FAILFAST_* preferred interface from 2.6.36 - 2.6.xx,
+        * the BIO_* and REQ_* flags were unified under REQ_* flags.
+        */
        *flags |= REQ_FAILFAST_MASK;
-#  endif /* HAVE_REQ_FAILFAST_MASK */
-# endif /* HAVE_BIO_RW_FAILFAST */
+#endif /* HAVE_REQ_FAILFAST_MASK */
+#endif /* HAVE_BIO_RW_FAILFAST */
 #endif /* HAVE_BIO_RW_FAILFAST_DTD */
 }
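
A sketch of how a caller might use the helper when building bio flags
before submission; the names are hypothetical, and bi_rw plus the
two-argument submit_bio(rw, bio) are the old-style interfaces this
header targets:

/* Illustrative only: OR the supported fail-fast bits into a bio. */
static void
zvol_submit_failfast(struct block_device *bdev, struct bio *bio, int rw)
{
        int flags = 0;

        bio_set_flags_failfast(bdev, &flags);
        bio->bi_rw |= flags;    /* fail quickly, skip lengthy retries */
        submit_bio(rw, bio);
}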
 
@@ -337,7 +339,7 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
  * Maximum disk label length, it may be undefined for some kernels.
  */
 #ifndef DISK_NAME_LEN
-#define DISK_NAME_LEN  32
+#define        DISK_NAME_LEN   32
 #endif /* DISK_NAME_LEN */
 
 /*
@@ -346,12 +348,14 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
  * macros to ensure the prototype and return value are handled.
  */
 #ifdef HAVE_2ARGS_BIO_END_IO_T
-# define BIO_END_IO_PROTO(fn, x, y, z) static void fn(struct bio *x, int z)
-# define BIO_END_IO_RETURN(rc)         return
+#define        BIO_END_IO_PROTO(fn, x, y, z)   static void fn(struct bio *x, int z)
+#define        BIO_END_IO_RETURN(rc)           return
 #else
-# define BIO_END_IO_PROTO(fn, x, y, z) static int fn(struct bio *x, \
-                                                     unsigned int y, int z)
-# define BIO_END_IO_RETURN(rc)         return rc
+#define        BIO_END_IO_PROTO(fn, x, y, z)   static int fn( \
+                                           struct bio *x, \
+                                           unsigned int y, \
+                                           int z)
+#define        BIO_END_IO_RETURN(rc)           return rc
 #endif /* HAVE_2ARGS_BIO_END_IO_T */
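
A completion callback written once against both variants; 'size' must
not be referenced in the body since it does not exist in the 2-argument
prototype, and BIO_END_IO_RETURN() discards its argument there. The
name and body are illustrative:

BIO_END_IO_PROTO(zvol_end_io, bio, size, error)
{
        /* ... complete the I/O tracked in bio->bi_private, note 'error' ... */
        bio_put(bio);
        BIO_END_IO_RETURN(0);
}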
 
 /*
@@ -370,15 +374,15 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
  * Used to exclusively open a block device from within the kernel.
  */
 #if defined(HAVE_BLKDEV_GET_BY_PATH)
-# define vdev_bdev_open(path, md, hld) blkdev_get_by_path(path, \
+#define        vdev_bdev_open(path, md, hld)   blkdev_get_by_path(path, \
                                            (md) | FMODE_EXCL, hld)
-# define vdev_bdev_close(bdev, md)     blkdev_put(bdev, (md) | FMODE_EXCL)
+#define        vdev_bdev_close(bdev, md)       blkdev_put(bdev, (md) | FMODE_EXCL)
 #elif defined(HAVE_OPEN_BDEV_EXCLUSIVE)
-# define vdev_bdev_open(path, md, hld) open_bdev_exclusive(path, md, hld)
-# define vdev_bdev_close(bdev, md)     close_bdev_exclusive(bdev, md)
+#define        vdev_bdev_open(path, md, hld)   open_bdev_exclusive(path, md, hld)
+#define        vdev_bdev_close(bdev, md)       close_bdev_exclusive(bdev, md)
 #else
-# define vdev_bdev_open(path, md, hld) open_bdev_excl(path, md, hld)
-# define vdev_bdev_close(bdev, md)     close_bdev_excl(bdev)
+#define        vdev_bdev_open(path, md, hld)   open_bdev_excl(path, md, hld)
+#define        vdev_bdev_close(bdev, md)       close_bdev_excl(bdev)
 #endif /* HAVE_BLKDEV_GET_BY_PATH | HAVE_OPEN_BDEV_EXCLUSIVE */
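
All three branches reduce to the same shape: open by path with a mode
and an opaque holder, and close with the matching mode. A sketch using
the VDEV_HOLDER token defined at the end of this header (helper name
hypothetical, error handling trimmed):

/* Illustrative only: exclusively open a disk by path. */
static struct block_device *
zvol_open_bdev(const char *path, fmode_t mode)
{
        struct block_device *bdev;

        /* The holder token must match on every open of the same device. */
        bdev = vdev_bdev_open(path, mode, VDEV_HOLDER);
        if (IS_ERR(bdev))
                return (NULL);

        return (bdev);
}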
 
 /*
@@ -387,9 +391,9 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
  * it was unused.
  */
 #ifdef HAVE_1ARG_INVALIDATE_BDEV
-# define vdev_bdev_invalidate(bdev)    invalidate_bdev(bdev)
+#define        vdev_bdev_invalidate(bdev)      invalidate_bdev(bdev)
 #else
-# define vdev_bdev_invalidate(bdev)    invalidate_bdev(bdev, 1)
+#define        vdev_bdev_invalidate(bdev)      invalidate_bdev(bdev, 1)
 #endif /* HAVE_1ARG_INVALIDATE_BDEV */
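
And the matching teardown, invalidating cached pages before releasing
the exclusive open (helper name hypothetical):

/* Illustrative only: drop cached pages, then release the exclusive open. */
static void
zvol_close_bdev(struct block_device *bdev, fmode_t mode)
{
        vdev_bdev_invalidate(bdev);
        vdev_bdev_close(bdev, mode);
}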
 
 /*
@@ -398,7 +402,7 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
  * symbol was not exported.
  */
 #ifndef HAVE_LOOKUP_BDEV
-# define lookup_bdev(path)             ERR_PTR(-ENOTSUP)
+#define        lookup_bdev(path)               ERR_PTR(-ENOTSUP)
 #endif
 
 /*
@@ -416,13 +420,13 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
  * the logical block size interface and then the older hard sector size.
  */
 #ifdef HAVE_BDEV_PHYSICAL_BLOCK_SIZE
-# define vdev_bdev_block_size(bdev)    bdev_physical_block_size(bdev)
+#define        vdev_bdev_block_size(bdev)      bdev_physical_block_size(bdev)
+#else
+#ifdef HAVE_BDEV_LOGICAL_BLOCK_SIZE
+#define        vdev_bdev_block_size(bdev)      bdev_logical_block_size(bdev)
 #else
-# ifdef HAVE_BDEV_LOGICAL_BLOCK_SIZE
-#  define vdev_bdev_block_size(bdev)   bdev_logical_block_size(bdev)
-# else
-#  define vdev_bdev_block_size(bdev)   bdev_hardsect_size(bdev)
-# endif /* HAVE_BDEV_LOGICAL_BLOCK_SIZE */
+#define        vdev_bdev_block_size(bdev)      bdev_hardsect_size(bdev)
+#endif /* HAVE_BDEV_LOGICAL_BLOCK_SIZE */
 #endif /* HAVE_BDEV_PHYSICAL_BLOCK_SIZE */
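
Whichever interface the kernel provides, the macro yields a block size
in bytes; a sketch converting it to the power-of-two shift ZFS-style
code usually wants, assuming ilog2() from linux/log2.h:

/* Illustrative only: block size in bytes -> power-of-two shift. */
static int
zvol_block_shift(struct block_device *bdev)
{
        /* e.g. 512 -> 9, 4096 -> 12 */
        return (ilog2(vdev_bdev_block_size(bdev)));
}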
 
 /*
@@ -438,13 +442,13 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
  * compatibility macros.
  */
 #ifdef WRITE_FLUSH_FUA
-# define VDEV_WRITE_FLUSH_FUA          WRITE_FLUSH_FUA
-# define VDEV_REQ_FLUSH                        REQ_FLUSH
-# define VDEV_REQ_FUA                  REQ_FUA
+#define        VDEV_WRITE_FLUSH_FUA            WRITE_FLUSH_FUA
+#define        VDEV_REQ_FLUSH                  REQ_FLUSH
+#define        VDEV_REQ_FUA                    REQ_FUA
 #else
-# define VDEV_WRITE_FLUSH_FUA          WRITE_BARRIER
-# define VDEV_REQ_FLUSH                        REQ_HARDBARRIER
-# define VDEV_REQ_FUA                  REQ_HARDBARRIER
+#define        VDEV_WRITE_FLUSH_FUA            WRITE_BARRIER
+#define        VDEV_REQ_FLUSH                  REQ_HARDBARRIER
+#define        VDEV_REQ_FUA                    REQ_HARDBARRIER
 #endif
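
A sketch of issuing a cache flush through these names: an empty bio
submitted with VDEV_WRITE_FLUSH_FUA, which degrades to a write barrier
on pre-2.6.37 kernels (helper name hypothetical, completion handling
trimmed):

/* Illustrative only: issue an empty flush bio to a device. */
static void
zvol_issue_flush(struct block_device *bdev)
{
        struct bio *bio;

        bio = bio_alloc(GFP_NOIO, 0);   /* no data pages: pure flush */
        if (bio == NULL)
                return;

        bio->bi_bdev = bdev;
        /* set bi_end_io/bi_private here if completion must be tracked */
        submit_bio(VDEV_WRITE_FLUSH_FUA, bio);
}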
 
 /*
@@ -452,7 +456,7 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags)
  * Use the normal I/O path for discards.
  */
 #ifdef REQ_DISCARD
-# define VDEV_REQ_DISCARD              REQ_DISCARD
+#define        VDEV_REQ_DISCARD                REQ_DISCARD
 #endif
 
 /*
@@ -467,7 +471,7 @@ blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
        q->limits.discard_granularity = dg;
 }
 #else
-#define blk_queue_discard_granularity(x, dg)   ((void)0)
+#define        blk_queue_discard_granularity(x, dg)    ((void)0)
 #endif /* HAVE_DISCARD_GRANULARITY */
 
 /*
@@ -485,6 +489,6 @@ blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
  * user space processes which don't pass this value will get EBUSY.  This is
  * currently required for the correct operation of hot spares.
  */
-#define VDEV_HOLDER                    ((void *)0x2401de7)
+#define        VDEV_HOLDER                     ((void *)0x2401de7)
 
 #endif /* _ZFS_BLKDEV_H */