block: add QUEUE_FLAG_DAX for devices to advertise their DAX support
author    Toshi Kani <toshi.kani@hpe.com>
          Thu, 23 Jun 2016 21:05:50 +0000 (17:05 -0400)
committer Jens Axboe <axboe@fb.com>
          Thu, 21 Jul 2016 03:01:01 +0000 (21:01 -0600)
Currently, the presence of direct_access() in block_device_operations
indicates that a block device supports DAX.  Because
block_device_operations is instantiated with 'const', this DAX
capability cannot be enabled conditionally.
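As a minimal illustration (not from this patch; 'example_fops' and
'example_direct_access' are placeholder names), the ops table is a
single const object shared by all instances of a driver, so the method
is either always wired up or never:

	/*
	 * Illustrative sketch only: because the ops table is const and
	 * shared, the presence of .direct_access cannot be toggled per
	 * device instance at runtime.
	 */
	static const struct block_device_operations example_fops = {
		.owner		= THIS_MODULE,
		.direct_access	= example_direct_access,
	};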

In preparation for supporting DAX on device-mapper devices, add
QUEUE_FLAG_DAX to the request_queue flags so that devices can
advertise their DAX support.  This will allow the DAX capability to
be set based on how the mapped device is composed; a sketch of the
intended usage follows.
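A hypothetical sketch of what the flag enables ('device_supports_dax'
is a placeholder for a driver-specific check, not an interface from
this patch):

	/*
	 * Hypothetical: with QUEUE_FLAG_DAX a driver can decide at
	 * setup time whether to advertise DAX on its queue.
	 */
	if (device_supports_dax)
		queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);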

Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: <linux-s390@vger.kernel.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
drivers/block/brd.c
drivers/nvdimm/pmem.c
drivers/s390/block/dcssblk.c
fs/block_dev.c
include/linux/blkdev.h

diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index f5b0d6f4e09fab1852e8f3625076ec729624bc91..dd96a935fba04ea8a4d55dbe7d7038889cb08561 100644
@@ -509,7 +509,9 @@ static struct brd_device *brd_alloc(int i)
        blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
        brd->brd_queue->limits.discard_zeroes_data = 1;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
-
+#ifdef CONFIG_BLK_DEV_RAM_DAX
+       queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
+#endif
        disk = brd->brd_disk = alloc_disk(max_part);
        if (!disk)
                goto out_free_queue;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 608fc4464574e1d9edb4037c53b6998afdb11ebe..53b701b2f73ee7d6a75e5407a18bdce8c3cbae13 100644
@@ -283,6 +283,7 @@ static int pmem_attach_disk(struct device *dev,
        blk_queue_max_hw_sectors(q, UINT_MAX);
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+       queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
        q->queuedata = pmem;
 
        disk = alloc_disk_node(0, nid);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index bed53c46dd90657f5432dd053f4cee8e774763b8..093e9e18e7e745246454b9bf02a2a70a4898e68a 100644
@@ -618,6 +618,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
        dev_info->gd->driverfs_dev = &dev_info->dev;
        blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
        blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
+       queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
 
        seg_byte_size = (dev_info->end - dev_info->start + 1);
        set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 71ccab1d22c6133623ac640dffe30ad858afabe4..d012be4ab977970edd889653e5edc89c03aa45e6 100644
@@ -493,7 +493,7 @@ long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
 
        if (size < 0)
                return size;
-       if (!ops->direct_access)
+       if (!blk_queue_dax(bdev_get_queue(bdev)) || !ops->direct_access)
                return -EOPNOTSUPP;
        if ((sector + DIV_ROUND_UP(size, 512)) >
                                        part_nr_sects_read(bdev->bd_part))
@@ -1287,7 +1287,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                bdev->bd_disk = disk;
                bdev->bd_queue = disk->queue;
                bdev->bd_contains = bdev;
-               if (IS_ENABLED(CONFIG_BLK_DEV_DAX) && disk->fops->direct_access)
+               if (IS_ENABLED(CONFIG_BLK_DEV_DAX) &&
+                   blk_queue_dax(disk->queue))
                        bdev->bd_inode->i_flags = S_DAX;
                else
                        bdev->bd_inode->i_flags = 0;
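With the two hunks above, both bdev_direct_access() and the S_DAX
inode flag are keyed off the queue flag rather than the mere presence
of the method.  Sketched with the v4.7-era names used in this patch,
a caller-side check looks like:

	/* Sketch: gate the DAX path on the new queue flag. */
	if (blk_queue_dax(bdev_get_queue(bdev))) {
		/* safe to attempt bdev_direct_access() */
	}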
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d116d3b52c732aa966555d0e71042baee7ed650d..9d84c98b5c7974315e7161537e73b41af02c99b1 100644
@@ -505,6 +505,7 @@ struct request_queue {
 #define QUEUE_FLAG_WC         23       /* Write back caching */
 #define QUEUE_FLAG_FUA        24       /* device supports FUA writes */
 #define QUEUE_FLAG_FLUSH_NQ    25      /* flush not queueuable */
+#define QUEUE_FLAG_DAX         26      /* device supports DAX */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -594,6 +595,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_discard(q)   test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 #define blk_queue_secdiscard(q)        (blk_queue_discard(q) && \
        test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+#define blk_queue_dax(q)       test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
 
 #define blk_noretry_request(rq) \
        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
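
For the device-mapper work this prepares for, a stacked device would
advertise DAX only when every queue beneath it does.  A hypothetical
sketch of that stacking rule (the helper name and arguments are
illustrative; no such helper exists at this point in the tree):

	/*
	 * Hypothetical: advertise DAX on a composed device only if
	 * every underlying queue advertises it.  Helper name and
	 * arguments are illustrative, not from this patch.
	 */
	static bool all_queues_support_dax(struct request_queue **queues,
					   unsigned int count)
	{
		unsigned int i;

		for (i = 0; i < count; i++)
			if (!blk_queue_dax(queues[i]))
				return false;
		return true;
	}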