git.proxmox.com Git - mirror_zfs.git/blobdiff - module/zfs/vdev_queue.c
OpenZFS 9102 - zfs should be able to initialize storage devices
[mirror_zfs.git] / module / zfs / vdev_queue.c
index 89cdf7d81099708cc726cc24744dfe2037e6da37..939699cb837349f159867e7d032d05cfa68aac14 100644 (file)
@@ -154,6 +154,8 @@ uint32_t zfs_vdev_scrub_min_active = 1;
 uint32_t zfs_vdev_scrub_max_active = 2;
 uint32_t zfs_vdev_removal_min_active = 1;
 uint32_t zfs_vdev_removal_max_active = 2;
+uint32_t zfs_vdev_initializing_min_active = 1;
+uint32_t zfs_vdev_initializing_max_active = 1;
 
 /*
  * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
@@ -261,6 +263,8 @@ vdev_queue_class_min_active(zio_priority_t p)
                return (zfs_vdev_scrub_min_active);
        case ZIO_PRIORITY_REMOVAL:
                return (zfs_vdev_removal_min_active);
+       case ZIO_PRIORITY_INITIALIZING:
+               return (zfs_vdev_initializing_min_active);
        default:
                panic("invalid priority %u", p);
                return (0);
@@ -331,6 +335,8 @@ vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
                return (zfs_vdev_scrub_max_active);
        case ZIO_PRIORITY_REMOVAL:
                return (zfs_vdev_removal_max_active);
+       case ZIO_PRIORITY_INITIALIZING:
+               return (zfs_vdev_initializing_max_active);
        default:
                panic("invalid priority %u", p);
                return (0);
@@ -718,8 +724,8 @@ again:
        }
 
        /*
-        * For LBA-ordered queues (async / scrub), issue the i/o which follows
-        * the most recently issued i/o in LBA (offset) order.
+        * For LBA-ordered queues (async / scrub / initializing), issue the
+        * i/o which follows the most recently issued i/o in LBA (offset) order.
         *
         * For FIFO queues (sync), issue the i/o with the lowest timestamp.
         */
@@ -775,13 +781,15 @@ vdev_queue_io(zio_t *zio)
                if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
                    zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
                    zio->io_priority != ZIO_PRIORITY_SCRUB &&
-                   zio->io_priority != ZIO_PRIORITY_REMOVAL)
+                   zio->io_priority != ZIO_PRIORITY_REMOVAL &&
+                   zio->io_priority != ZIO_PRIORITY_INITIALIZING)
                        zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
        } else {
                ASSERT(zio->io_type == ZIO_TYPE_WRITE);
                if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
                    zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
-                   zio->io_priority != ZIO_PRIORITY_REMOVAL)
+                   zio->io_priority != ZIO_PRIORITY_REMOVAL &&
+                   zio->io_priority != ZIO_PRIORITY_INITIALIZING)
                        zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
        }
 
@@ -938,11 +946,29 @@ module_param(zfs_vdev_async_write_min_active, int, 0644);
 MODULE_PARM_DESC(zfs_vdev_async_write_min_active,
        "Min active async write I/Os per vdev");
 
+module_param(zfs_vdev_initializing_max_active, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_initializing_max_active,
+       "Max active initializing I/Os per vdev");
+
+module_param(zfs_vdev_initializing_min_active, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_initializing_min_active,
+       "Min active initializing I/Os per vdev");
+
+module_param(zfs_vdev_removal_max_active, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_removal_max_active,
+       "Max active removal I/Os per vdev");
+
+module_param(zfs_vdev_removal_min_active, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_removal_min_active,
+       "Min active removal I/Os per vdev");
+
 module_param(zfs_vdev_scrub_max_active, int, 0644);
-MODULE_PARM_DESC(zfs_vdev_scrub_max_active, "Max active scrub I/Os per vdev");
+MODULE_PARM_DESC(zfs_vdev_scrub_max_active,
+       "Max active scrub I/Os per vdev");
 
 module_param(zfs_vdev_scrub_min_active, int, 0644);
-MODULE_PARM_DESC(zfs_vdev_scrub_min_active, "Min active scrub I/Os per vdev");
+MODULE_PARM_DESC(zfs_vdev_scrub_min_active,
+       "Min active scrub I/Os per vdev");
 
 module_param(zfs_vdev_sync_read_max_active, int, 0644);
 MODULE_PARM_DESC(zfs_vdev_sync_read_max_active,