OpenZFS 9102 - zfs should be able to initialize storage devices
index 4f29d0043b070b38e7ec618da5102e530e5ed091..939699cb837349f159867e7d032d05cfa68aac14 100644
@@ -24,7 +24,7 @@
  */
 
 /*
- * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
@@ -154,6 +154,8 @@ uint32_t zfs_vdev_scrub_min_active = 1;
 uint32_t zfs_vdev_scrub_max_active = 2;
 uint32_t zfs_vdev_removal_min_active = 1;
 uint32_t zfs_vdev_removal_max_active = 2;
+uint32_t zfs_vdev_initializing_min_active = 1;
+uint32_t zfs_vdev_initializing_max_active = 1;
 
 /*
  * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
@@ -191,6 +193,15 @@ int zfs_vdev_queue_depth_pct = 1000;
 int zfs_vdev_queue_depth_pct = 300;
 #endif
 
+/*
+ * When performing allocations for a given metaslab, we want to make sure that
+ * there are enough IOs to aggregate together to improve throughput. We want to
+ * ensure that there are at least 128k worth of IOs that can be aggregated, and
+ * we assume that the average allocation size is 4k, so we need the queue depth
+ * to be 32 per allocator to get good aggregation of sequential writes.
+ */
+int zfs_vdev_def_queue_depth = 32;
+
 
 int
 vdev_queue_offset_compare(const void *x1, const void *x2)
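
The new default can be sanity-checked directly from the comment's arithmetic: 128k of aggregatable I/O at an assumed 4k average allocation size works out to 131072 / 4096 = 32 entries per allocator. A minimal standalone C check of that computation (illustrative only; none of these names appear in the patch):

#include <assert.h>

int
main(void)
{
	/* Values taken from the comment above. */
	const int aggregate_bytes = 128 * 1024;	/* at least 128k to aggregate */
	const int avg_alloc_bytes = 4 * 1024;	/* assumed average allocation */

	/* 131072 / 4096 == 32, matching zfs_vdev_def_queue_depth. */
	assert(aggregate_bytes / avg_alloc_bytes == 32);
	return (0);
}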
@@ -252,6 +263,8 @@ vdev_queue_class_min_active(zio_priority_t p)
                return (zfs_vdev_scrub_min_active);
        case ZIO_PRIORITY_REMOVAL:
                return (zfs_vdev_removal_min_active);
+       case ZIO_PRIORITY_INITIALIZING:
+               return (zfs_vdev_initializing_min_active);
        default:
                panic("invalid priority %u", p);
                return (0);
@@ -322,6 +335,8 @@ vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
                return (zfs_vdev_scrub_max_active);
        case ZIO_PRIORITY_REMOVAL:
                return (zfs_vdev_removal_max_active);
+       case ZIO_PRIORITY_INITIALIZING:
+               return (zfs_vdev_initializing_max_active);
        default:
                panic("invalid priority %u", p);
                return (0);
@@ -420,16 +435,16 @@ static void
 vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
 {
        spa_t *spa = zio->io_spa;
-       spa_stats_history_t *ssh = &spa->spa_stats.io_history;
+       spa_history_kstat_t *shk = &spa->spa_stats.io_history;
 
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
        avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);
 
-       if (ssh->kstat != NULL) {
-               mutex_enter(&ssh->lock);
-               kstat_waitq_enter(ssh->kstat->ks_data);
-               mutex_exit(&ssh->lock);
+       if (shk->kstat != NULL) {
+               mutex_enter(&shk->lock);
+               kstat_waitq_enter(shk->kstat->ks_data);
+               mutex_exit(&shk->lock);
        }
 }
 
@@ -437,16 +452,16 @@ static void
 vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
 {
        spa_t *spa = zio->io_spa;
-       spa_stats_history_t *ssh = &spa->spa_stats.io_history;
+       spa_history_kstat_t *shk = &spa->spa_stats.io_history;
 
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
        avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);
 
-       if (ssh->kstat != NULL) {
-               mutex_enter(&ssh->lock);
-               kstat_waitq_exit(ssh->kstat->ks_data);
-               mutex_exit(&ssh->lock);
+       if (shk->kstat != NULL) {
+               mutex_enter(&shk->lock);
+               kstat_waitq_exit(shk->kstat->ks_data);
+               mutex_exit(&shk->lock);
        }
 }
 
@@ -454,17 +469,17 @@ static void
 vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
 {
        spa_t *spa = zio->io_spa;
-       spa_stats_history_t *ssh = &spa->spa_stats.io_history;
+       spa_history_kstat_t *shk = &spa->spa_stats.io_history;
 
        ASSERT(MUTEX_HELD(&vq->vq_lock));
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        vq->vq_class[zio->io_priority].vqc_active++;
        avl_add(&vq->vq_active_tree, zio);
 
-       if (ssh->kstat != NULL) {
-               mutex_enter(&ssh->lock);
-               kstat_runq_enter(ssh->kstat->ks_data);
-               mutex_exit(&ssh->lock);
+       if (shk->kstat != NULL) {
+               mutex_enter(&shk->lock);
+               kstat_runq_enter(shk->kstat->ks_data);
+               mutex_exit(&shk->lock);
        }
 }
 
@@ -472,17 +487,17 @@ static void
 vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
 {
        spa_t *spa = zio->io_spa;
-       spa_stats_history_t *ssh = &spa->spa_stats.io_history;
+       spa_history_kstat_t *shk = &spa->spa_stats.io_history;
 
        ASSERT(MUTEX_HELD(&vq->vq_lock));
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        vq->vq_class[zio->io_priority].vqc_active--;
        avl_remove(&vq->vq_active_tree, zio);
 
-       if (ssh->kstat != NULL) {
-               kstat_io_t *ksio = ssh->kstat->ks_data;
+       if (shk->kstat != NULL) {
+               kstat_io_t *ksio = shk->kstat->ks_data;
 
-               mutex_enter(&ssh->lock);
+               mutex_enter(&shk->lock);
                kstat_runq_exit(ksio);
                if (zio->io_type == ZIO_TYPE_READ) {
                        ksio->reads++;
@@ -491,7 +506,7 @@ vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
                        ksio->writes++;
                        ksio->nwritten += zio->io_size;
                }
-               mutex_exit(&ssh->lock);
+               mutex_exit(&shk->lock);
        }
 }
 
@@ -709,8 +724,8 @@ again:
        }
 
        /*
-        * For LBA-ordered queues (async / scrub), issue the i/o which follows
-        * the most recently issued i/o in LBA (offset) order.
+        * For LBA-ordered queues (async / scrub / initializing), issue the
+        * i/o which follows the most recently issued i/o in LBA (offset) order.
         *
         * For FIFO queues (sync), issue the i/o with the lowest timestamp.
         */
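
As a rough illustration of the policy this comment describes (a simplified toy model, not the actual AVL-tree code in ZFS's vdev_queue_io_to_issue()), the two selection rules can be sketched like this:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t offset;	/* LBA of the queued i/o */
	uint64_t timestamp;	/* time the i/o was queued */
} toy_zio_t;

/*
 * LBA policy: pick the queued i/o at or beyond the last issued offset,
 * wrapping back to the lowest offset when nothing lies beyond it.
 */
static toy_zio_t *
pick_lba(toy_zio_t *q, size_t n, uint64_t last_offset)
{
	toy_zio_t *next = NULL, *lowest = NULL;

	for (size_t i = 0; i < n; i++) {
		if (lowest == NULL || q[i].offset < lowest->offset)
			lowest = &q[i];
		if (q[i].offset >= last_offset &&
		    (next == NULL || q[i].offset < next->offset))
			next = &q[i];
	}
	return (next != NULL ? next : lowest);
}

/* FIFO policy: pick the queued i/o with the lowest timestamp. */
static toy_zio_t *
pick_fifo(toy_zio_t *q, size_t n)
{
	toy_zio_t *oldest = NULL;

	for (size_t i = 0; i < n; i++) {
		if (oldest == NULL || q[i].timestamp < oldest->timestamp)
			oldest = &q[i];
	}
	return (oldest);
}

int
main(void)
{
	toy_zio_t q[] = {
		{ .offset = 100, .timestamp = 3 },
		{ .offset = 400, .timestamp = 1 },
		{ .offset = 700, .timestamp = 2 },
	};

	/*
	 * LBA order after issuing at offset 300 picks offset 400; FIFO
	 * picks the oldest i/o, timestamp 1 (also offset 400 here).
	 */
	printf("lba:  offset %llu\n",
	    (unsigned long long)pick_lba(q, 3, 300)->offset);
	printf("fifo: offset %llu\n",
	    (unsigned long long)pick_fifo(q, 3)->offset);
	return (0);
}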
@@ -766,13 +781,15 @@ vdev_queue_io(zio_t *zio)
                if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
                    zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
                    zio->io_priority != ZIO_PRIORITY_SCRUB &&
-                   zio->io_priority != ZIO_PRIORITY_REMOVAL)
+                   zio->io_priority != ZIO_PRIORITY_REMOVAL &&
+                   zio->io_priority != ZIO_PRIORITY_INITIALIZING)
                        zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
        } else {
                ASSERT(zio->io_type == ZIO_TYPE_WRITE);
                if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
                    zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
-                   zio->io_priority != ZIO_PRIORITY_REMOVAL)
+                   zio->io_priority != ZIO_PRIORITY_REMOVAL &&
+                   zio->io_priority != ZIO_PRIORITY_INITIALIZING)
                        zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
        }
 
@@ -829,6 +846,15 @@ vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority)
        vdev_queue_t *vq = &zio->io_vd->vdev_queue;
        avl_tree_t *tree;
 
+       /*
+        * ZIO_PRIORITY_NOW is used by the vdev cache code and the aggregate zio
+        * code to issue IOs without adding them to the vdev queue. In this
+        * case, the zio is already going to be issued as quickly as possible
+        * and so it doesn't need any reprioritization to help.
+        */
+       if (zio->io_priority == ZIO_PRIORITY_NOW)
+               return;
+
        ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
        ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
 
@@ -920,11 +946,29 @@ module_param(zfs_vdev_async_write_min_active, int, 0644);
 MODULE_PARM_DESC(zfs_vdev_async_write_min_active,
        "Min active async write I/Os per vdev");
 
+module_param(zfs_vdev_initializing_max_active, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_initializing_max_active,
+       "Max active initializing I/Os per vdev");
+
+module_param(zfs_vdev_initializing_min_active, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_initializing_min_active,
+       "Min active initializing I/Os per vdev");
+
+module_param(zfs_vdev_removal_max_active, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_removal_max_active,
+       "Max active removal I/Os per vdev");
+
+module_param(zfs_vdev_removal_min_active, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_removal_min_active,
+       "Min active removal I/Os per vdev");
+
 module_param(zfs_vdev_scrub_max_active, int, 0644);
-MODULE_PARM_DESC(zfs_vdev_scrub_max_active, "Max active scrub I/Os per vdev");
+MODULE_PARM_DESC(zfs_vdev_scrub_max_active,
+       "Max active scrub I/Os per vdev");
 
 module_param(zfs_vdev_scrub_min_active, int, 0644);
-MODULE_PARM_DESC(zfs_vdev_scrub_min_active, "Min active scrub I/Os per vdev");
+MODULE_PARM_DESC(zfs_vdev_scrub_min_active,
+       "Min active scrub I/Os per vdev");
 
 module_param(zfs_vdev_sync_read_max_active, int, 0644);
 MODULE_PARM_DESC(zfs_vdev_sync_read_max_active,