block: add async variant of blkdev_issue_zeroout
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 9cc8d7c5439a98422eb5a99a66ea79314ff7baa7..a9784149176902beab4b25223989c2d702819f40 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -13,6 +13,7 @@
 
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-wbt.h"
 
 struct queue_sysfs_entry {
        struct attribute attr;
@@ -41,6 +42,19 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
        return count;
 }
 
+static ssize_t queue_var_store64(s64 *var, const char *page)
+{
+       int err;
+       s64 v;
+
+       err = kstrtos64(page, 10, &v);
+       if (err < 0)
+               return err;
+
+       *var = v;
+       return 0;
+}
+
 static ssize_t queue_requests_show(struct request_queue *q, char *page)
 {
        return queue_var_show(q->nr_requests, (page));
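
The helper added above mirrors queue_var_store(), but parses a signed 64-bit value so a store method can treat -1 as a sentinel. A minimal userspace sketch of the same parsing contract, with strtoll standing in for the kernel's kstrtos64 (parse_s64 and the sample input are illustrative, not part of the patch):

    /* Base-10, signed 64-bit, at most one trailing newline -- the same
     * inputs kstrtos64() accepts in queue_var_store64(). */
    #include <errno.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int parse_s64(const char *page, int64_t *var)
    {
            char *end;
            long long v;

            errno = 0;
            v = strtoll(page, &end, 10);
            if (errno == ERANGE)
                    return -ERANGE;
            if (end == page || (*end != '\0' && strcmp(end, "\n") != 0))
                    return -EINVAL;
            *var = v;
            return 0;
    }

    int main(void)
    {
            int64_t val;

            if (parse_s64("-1\n", &val) == 0)
                    printf("parsed %" PRId64 "\n", val);  /* -1, the sentinel */
            return 0;
    }
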
@@ -130,6 +144,11 @@ static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
        return queue_var_show(queue_physical_block_size(q), page);
 }
 
+static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
+{
+       return queue_var_show(q->limits.chunk_sectors, page);
+}
+
 static ssize_t queue_io_min_show(struct request_queue *q, char *page)
 {
        return queue_var_show(queue_io_min(q), page);
@@ -257,6 +276,18 @@ QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
 QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
 #undef QUEUE_SYSFS_BIT_FNS
 
+static ssize_t queue_zoned_show(struct request_queue *q, char *page)
+{
+       switch (blk_queue_zoned_model(q)) {
+       case BLK_ZONED_HA:
+               return sprintf(page, "host-aware\n");
+       case BLK_ZONED_HM:
+               return sprintf(page, "host-managed\n");
+       default:
+               return sprintf(page, "none\n");
+       }
+}
+
 static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
 {
        return queue_var_show((blk_queue_nomerges(q) << 1) |
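
Both new read-only attributes land under /sys/block/<disk>/queue/. A small readback sketch; the device name "sda" and the show() helper are assumptions for illustration:

    /* Read the zoned model string and the chunk size (in 512-byte
     * sectors) for one disk. */
    #include <stdio.h>

    static void show(const char *attr)
    {
            char path[128], buf[32];
            FILE *f;

            snprintf(path, sizeof(path), "/sys/block/sda/queue/%s", attr);
            f = fopen(path, "r");
            if (f && fgets(buf, sizeof(buf), f))
                    printf("%s: %s", attr, buf);
            if (f)
                    fclose(f);
    }

    int main(void)
    {
            show("zoned");          /* "none", "host-aware" or "host-managed" */
            show("chunk_sectors");  /* q->limits.chunk_sectors */
            return 0;
    }
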
@@ -319,6 +350,38 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
        return ret;
 }
 
+static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
+{
+       int val;
+
+       if (q->poll_nsec == -1)
+               val = -1;
+       else
+               val = q->poll_nsec / 1000;
+
+       return sprintf(page, "%d\n", val);
+}
+
+static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
+                               size_t count)
+{
+       int err, val;
+
+       if (!q->mq_ops || !q->mq_ops->poll)
+               return -EINVAL;
+
+       err = kstrtoint(page, 10, &val);
+       if (err < 0)
+               return err;
+
+       if (val == -1)
+               q->poll_nsec = -1;
+       else
+               q->poll_nsec = val * 1000;
+
+       return count;
+}
+
 static ssize_t queue_poll_show(struct request_queue *q, char *page)
 {
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
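
queue_poll_delay_store() rejects input with -EINVAL unless the queue is blk-mq with a ->poll hook; accepted values are taken in microseconds and scaled to nanoseconds in q->poll_nsec, with -1 passed through unscaled as the sentinel. A sketch of setting the delay from userspace (the nvme0n1 path is an assumption):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path = "/sys/block/nvme0n1/queue/io_poll_delay";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror(path);   /* attribute missing or no permission */
                    return 1;
            }
            /* 10 usec, stored as 10000 ns; writing "-1" instead would
             * keep the sentinel unscaled */
            if (write(fd, "10\n", 3) < 0)
                    perror("write"); /* -EINVAL: not blk-mq or no ->poll */
            close(fd);
            return 0;
    }
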
@@ -347,6 +410,50 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
        return ret;
 }
 
+static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
+{
+       if (!q->rq_wb)
+               return -EINVAL;
+
+       return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
+}
+
+static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
+                                 size_t count)
+{
+       struct rq_wb *rwb;
+       ssize_t ret;
+       s64 val;
+
+       ret = queue_var_store64(&val, page);
+       if (ret < 0)
+               return ret;
+       if (val < -1)
+               return -EINVAL;
+
+       rwb = q->rq_wb;
+       if (!rwb) {
+               ret = wbt_init(q);
+               if (ret)
+                       return ret;
+
+               rwb = q->rq_wb;
+               if (!rwb)
+                       return -EINVAL;
+       }
+
+       if (val == -1)
+               rwb->min_lat_nsec = wbt_default_latency_nsec(q);
+       else if (val >= 0)
+               rwb->min_lat_nsec = val * 1000ULL;
+
+       if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
+               rwb->enable_state = WBT_STATE_ON_MANUAL;
+
+       wbt_update_limits(rwb);
+       return count;
+}
+
 static ssize_t queue_wc_show(struct request_queue *q, char *page)
 {
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
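
The wbt_lat_usec store path parses a signed value via queue_var_store64(), rejects anything below -1, initializes writeback throttling on demand with wbt_init() if needed, maps -1 back to wbt_default_latency_nsec(), scales positive values from usec to nsec, and flips the state to WBT_STATE_ON_MANUAL. A sketch that resets the target and reads it back (the show method reports min_lat_nsec / 1000; the sda path is an assumption):

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/block/sda/queue/wbt_lat_usec";
            char buf[32];
            FILE *f = fopen(path, "r+");

            if (!f)
                    return 1;
            fputs("-1\n", f);       /* min_lat_nsec = wbt_default_latency_nsec(q) */
            fflush(f);
            rewind(f);
            if (fgets(buf, sizeof(buf), f))
                    printf("wbt_lat_usec = %s", buf);
            fclose(f);
            return 0;
    }
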
@@ -384,6 +491,26 @@ static ssize_t queue_dax_show(struct request_queue *q, char *page)
        return queue_var_show(blk_queue_dax(q), page);
 }
 
+static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
+{
+       return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
+                       pre, (long long) stat->nr_samples,
+                       (long long) stat->mean, (long long) stat->min,
+                       (long long) stat->max);
+}
+
+static ssize_t queue_stats_show(struct request_queue *q, char *page)
+{
+       struct blk_rq_stat stat[2];
+       ssize_t ret;
+
+       blk_queue_stat_get(q, stat);
+
+       ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
+       ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
+       return ret;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
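
print_stat() fixes the line format of the new "stats" attribute: one line per direction, prefixed "read :" and "write:". A sketch that parses the two lines back in userspace ("sda" is an assumed device name):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/block/sda/queue/stats", "r");
            char dir[8];
            unsigned long long samples;
            long long mean, min, max;

            if (!f)
                    return 1;
            /* matches: "<dir> : samples=%llu, mean=%lld, min=%lld, max=%lld" */
            while (fscanf(f, "%5s%*[ :]samples=%llu, mean=%lld, min=%lld, max=%lld",
                          dir, &samples, &mean, &min, &max) == 5)
                    printf("%-5s samples=%llu mean=%lld\n", dir, samples, mean);
            fclose(f);
            return 0;
    }
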
@@ -443,6 +570,11 @@ static struct queue_sysfs_entry queue_physical_block_size_entry = {
        .show = queue_physical_block_size_show,
 };
 
+static struct queue_sysfs_entry queue_chunk_sectors_entry = {
+       .attr = {.name = "chunk_sectors", .mode = S_IRUGO },
+       .show = queue_chunk_sectors_show,
+};
+
 static struct queue_sysfs_entry queue_io_min_entry = {
        .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
        .show = queue_io_min_show,
@@ -485,6 +617,11 @@ static struct queue_sysfs_entry queue_nonrot_entry = {
        .store = queue_store_nonrot,
 };
 
+static struct queue_sysfs_entry queue_zoned_entry = {
+       .attr = {.name = "zoned", .mode = S_IRUGO },
+       .show = queue_zoned_show,
+};
+
 static struct queue_sysfs_entry queue_nomerges_entry = {
        .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
        .show = queue_nomerges_show,
@@ -515,6 +652,12 @@ static struct queue_sysfs_entry queue_poll_entry = {
        .store = queue_poll_store,
 };
 
+static struct queue_sysfs_entry queue_poll_delay_entry = {
+       .attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_poll_delay_show,
+       .store = queue_poll_delay_store,
+};
+
 static struct queue_sysfs_entry queue_wc_entry = {
        .attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
        .show = queue_wc_show,
@@ -526,6 +669,17 @@ static struct queue_sysfs_entry queue_dax_entry = {
        .show = queue_dax_show,
 };
 
+static struct queue_sysfs_entry queue_stats_entry = {
+       .attr = {.name = "stats", .mode = S_IRUGO },
+       .show = queue_stats_show,
+};
+
+static struct queue_sysfs_entry queue_wb_lat_entry = {
+       .attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_wb_lat_show,
+       .store = queue_wb_lat_store,
+};
+
 static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
@@ -538,6 +692,7 @@ static struct attribute *default_attrs[] = {
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
+       &queue_chunk_sectors_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
@@ -546,6 +701,7 @@ static struct attribute *default_attrs[] = {
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_nonrot_entry.attr,
+       &queue_zoned_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
@@ -553,6 +709,9 @@ static struct attribute *default_attrs[] = {
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
        &queue_dax_entry.attr,
+       &queue_stats_entry.attr,
+       &queue_wb_lat_entry.attr,
+       &queue_poll_delay_entry.attr,
        NULL,
 };
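
Every attribute in this file follows the same three-step pattern visible in the hunks above: a show (and optional store) method, a queue_sysfs_entry, and a slot in default_attrs[]. A hypothetical sketch of that pattern, not part of the patch:

    /* "example" is an illustrative attribute name only. */
    static ssize_t queue_example_show(struct request_queue *q, char *page)
    {
            return queue_var_show(0, page); /* a constant, for illustration */
    }

    static struct queue_sysfs_entry queue_example_entry = {
            .attr = {.name = "example", .mode = S_IRUGO },
            .show = queue_example_show,
    };
    /* ...then add &queue_example_entry.attr before the NULL terminator
     * of default_attrs[]. */
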
 
@@ -627,6 +786,7 @@ static void blk_release_queue(struct kobject *kobj)
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
 
+       wbt_exit(q);
        bdi_exit(&q->backing_dev_info);
        blkcg_exit_queue(q);
 
@@ -667,6 +827,23 @@ struct kobj_type blk_queue_ktype = {
        .release        = blk_release_queue,
 };
 
+static void blk_wb_init(struct request_queue *q)
+{
+#ifndef CONFIG_BLK_WBT_MQ
+       if (q->mq_ops)
+               return;
+#endif
+#ifndef CONFIG_BLK_WBT_SQ
+       if (q->request_fn)
+               return;
+#endif
+
+       /*
+        * If this fails, we don't get throttling
+        */
+       wbt_init(q);
+}
+
 int blk_register_queue(struct gendisk *disk)
 {
        int ret;
@@ -706,6 +883,8 @@ int blk_register_queue(struct gendisk *disk)
        if (q->mq_ops)
                blk_mq_register_dev(dev, q);
 
+       blk_wb_init(q);
+
        if (!q->request_fn)
                return 0;
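
blk_wb_init() compiles the automatic wbt setup out unless the matching config option is set: CONFIG_BLK_WBT_MQ gates blk-mq queues and CONFIG_BLK_WBT_SQ gates legacy request_fn queues. Because queue_wb_lat_show() returns -EINVAL while q->rq_wb is NULL, userspace can probe whether throttling actually came up; a sketch (the sda path is an assumption):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[32];
            int fd = open("/sys/block/sda/queue/wbt_lat_usec", O_RDONLY);
            ssize_t n;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            n = read(fd, buf, sizeof(buf) - 1);
            if (n < 0 && errno == EINVAL)
                    puts("wbt not initialized on this queue");
            else if (n >= 0) {
                    buf[n] = '\0';
                    printf("wbt target latency (usec): %s", buf);
            }
            close(fd);
            return 0;
    }
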