Extend zpool-iostat to account for ZIO_PRIORITY_REBUILD (#12319)
author Trevor Bautista <trevrb@trevrb.net>
Thu, 26 Aug 2021 18:26:49 +0000 (11:26 -0700)
committer GitHub <noreply@github.com>
Thu, 26 Aug 2021 18:26:49 +0000 (11:26 -0700)
Previously, zpool-iostat did not display any data regarding rebuild I/Os
in either the latency/size histograms (-w/-l/-r) or the queue data (-q).
This fix uses the existing infrastructure for tracking rebuild queue
data and displays that data in the proper places within zpool-iostat's
output.

Signed-off-by: Trevor Bautista <tbautista@newmexicoconsortium.org>
Signed-off-by: Trevor Bautista <tbautista@lanl.gov>
Co-authored-by: Trevor Bautista <tbautista@newmexicoconsortium.org>
Reviewed-by: Richard Elling <Richard.Elling@RichardElling.com>
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
cmd/zpool/zpool_main.c
cmd/zpool_influxdb/zpool_influxdb.c
include/sys/fs/zfs.h
man/man8/zpool-iostat.8
module/zfs/vdev.c
module/zfs/vdev_label.c

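As the commit message notes, the rebuild statistics ride on the existing
extended-stats nvlist plumbing. Before the diffs, here is a minimal
consumer-side sketch (not part of this commit) of reading the new keys with
libnvpair. The key strings match the ZPOOL_CONFIG_VDEV_REBUILD_* defines
added in include/sys/fs/zfs.h below; obtaining the per-vdev extended-stats
nvlist (nvx) is assumed to have happened elsewhere and is not shown:

    #include <stdio.h>
    #include <libnvpair.h>

    static void
    print_rebuild_stats(nvlist_t *nvx)
    {
            uint64_t active, pend;
            uint64_t *histo;
            uint_t buckets;

            /* Rebuild queue depths (scalar entries). */
            if (nvlist_lookup_uint64(nvx, "vdev_rebuild_active_queue",
                &active) == 0 &&
                nvlist_lookup_uint64(nvx, "vdev_rebuild_pend_queue",
                &pend) == 0) {
                    (void) printf("rebuild queue: %llu active, %llu pending\n",
                        (unsigned long long)active, (unsigned long long)pend);
            }

            /* Rebuild latency histogram (one counter per latency bucket). */
            if (nvlist_lookup_uint64_array(nvx, "vdev_rebuild_histo",
                &histo, &buckets) == 0) {
                    (void) printf("rebuild latency histogram: %u buckets\n",
                        buckets);
            }
    }
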
diff --git a/cmd/zpool/zpool_main.c b/cmd/zpool/zpool_main.c
index a053bd65dbdbf1057e92585d4eec460c19512fa9..884f0a62f50953e694bb2ec911092525b6adea87 100644
@@ -211,7 +211,7 @@ enum iostat_type {
  * of all the nvlists a flag requires.  Also specifies the order in
  * which data gets printed in zpool iostat.
  */
-static const char *vsx_type_to_nvlist[IOS_COUNT][13] = {
+static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
        [IOS_L_HISTO] = {
            ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
@@ -223,6 +223,7 @@ static const char *vsx_type_to_nvlist[IOS_COUNT][13] = {
            ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
+           ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
            NULL},
        [IOS_LATENCY] = {
            ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
@@ -230,6 +231,7 @@ static const char *vsx_type_to_nvlist[IOS_COUNT][13] = {
            ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
            ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
+           ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
            NULL},
        [IOS_QUEUES] = {
            ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
@@ -238,6 +240,7 @@ static const char *vsx_type_to_nvlist[IOS_COUNT][13] = {
            ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
            ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
            ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
+           ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
            NULL},
        [IOS_RQ_HISTO] = {
            ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
@@ -252,6 +255,8 @@ static const char *vsx_type_to_nvlist[IOS_COUNT][13] = {
            ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
            ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
            ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
+           ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
+           ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
            NULL},
 };
 
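The comment above notes that each row of vsx_type_to_nvlist both determines
how many nvlists a flag requires and fixes the order in which they print.
Because the rows are NULL-terminated, that count reduces to a simple walk;
the following is an illustrative standalone helper, not the actual
zpool_main.c code:

    static unsigned int
    vsx_row_count(const char *const row[])
    {
            unsigned int n = 0;

            /* Count entries up to the NULL terminator. */
            while (row[n] != NULL)
                    n++;
            return (n);
    }

Bumping the second dimension from 13 to 15 leaves room for the widest row,
IOS_RQ_HISTO, which now holds fourteen histogram names (seven ind/agg pairs,
including the new rebuild pair) plus the terminator.
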
@@ -3844,22 +3849,23 @@ typedef struct name_and_columns {
        unsigned int columns;   /* Center name to this number of columns */
 } name_and_columns_t;
 
-#define        IOSTAT_MAX_LABELS       13      /* Max number of labels on one line */
+#define        IOSTAT_MAX_LABELS       15      /* Max number of labels on one line */
 
 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
 {
        [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
            {NULL}},
        [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
-           {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {NULL}},
+           {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1},
+           {NULL}},
        [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
            {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
-           {"trimq_write", 2}, {NULL}},
+           {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}},
        [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
            {"asyncq_wait", 2}, {NULL}},
        [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
            {"async_read", 2}, {"async_write", 2}, {"scrub", 2},
-           {"trim", 2}, {NULL}},
+           {"trim", 2}, {"rebuild", 2}, {NULL}},
 };
 
 /* Shorthand - if "columns" field not set, default to 1 column */
@@ -3868,14 +3874,17 @@ static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
        [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
            {"write"}, {NULL}},
        [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
-           {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {NULL}},
+           {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"},
+           {NULL}},
        [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
            {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
-           {"pend"}, {"activ"}, {NULL}},
+           {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
        [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
-           {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {NULL}},
+           {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"},
+           {NULL}},
        [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
-           {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, {NULL}},
+           {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
+           {"ind"}, {"agg"}, {NULL}},
 };
 
 static const char *histo_to_title[] = {
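For orientation on the label tables above: each top-row heading spans the
number of data fields given by its columns value, with that many one-word
labels in the bottom row beneath it -- so the new rebuildq_write heading sits
over a pend/activ pair, rebuild under -l over a single wait column, and
rebuild under -r over an ind/agg pair. A self-contained sketch of that
centering idea follows; the field width and printing details are assumptions,
not the zpool_main.c implementation:

    #include <stdio.h>
    #include <string.h>

    typedef struct name_and_columns {
            const char *name;
            unsigned int columns;   /* center name over this many fields */
    } name_and_columns_t;

    #define FIELD_WIDTH     7       /* assumed width of one printed field */

    static void
    print_centered(const char *s, int width)
    {
            int len = (int)strlen(s);
            int pad = (width > len) ? (width - len) / 2 : 0;

            (void) printf("%*s%-*s", pad, "", width - pad, s);
    }

    int
    main(void)
    {
            /* The -q header fragment added by this commit. */
            const name_and_columns_t top = { "rebuildq_write", 2 };
            const char *bottom[] = { "pend", "activ" };

            print_centered(top.name, (int)top.columns * FIELD_WIDTH);
            (void) printf("\n");
            for (size_t i = 0; i < sizeof (bottom) / sizeof (bottom[0]); i++)
                    print_centered(bottom[i], FIELD_WIDTH);
            (void) printf("\n");
            return (0);
    }
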
@@ -4507,6 +4516,8 @@ print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *oldnv,
                ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
                ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
                ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
+               ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
+               ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
        };
 
        struct stat_array *nva;
@@ -4546,6 +4557,7 @@ print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
                ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
                ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
                ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
+               ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
        };
        struct stat_array *nva;
 
diff --git a/cmd/zpool_influxdb/zpool_influxdb.c b/cmd/zpool_influxdb/zpool_influxdb.c
index 8fc8e7717c538140f993678b630f70d73e1bf290..417d48f3aab1f5eb42d623270e6585b28191f046 100644
@@ -411,6 +411,7 @@ print_vdev_latency_stats(nvlist_t *nvroot, const char *pool_name,
 #ifdef ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO
            {ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,    "trim", 0},
 #endif
+           {ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,    "rebuild", 0},
            {NULL,      NULL}
        };
 
@@ -506,6 +507,8 @@ print_vdev_size_stats(nvlist_t *nvroot, const char *pool_name,
            {ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,    "trim_write_ind"},
            {ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,    "trim_write_agg"},
 #endif
+           {ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,    "rebuild_write_ind"},
+           {ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,    "rebuild_write_agg"},
            {NULL,      NULL}
        };
 
@@ -585,11 +588,13 @@ print_queue_stats(nvlist_t *nvroot, const char *pool_name,
            {ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,    "async_r_active"},
            {ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,    "async_w_active"},
            {ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,      "async_scrub_active"},
+           {ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,    "rebuild_active"},
            {ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,       "sync_r_pend"},
            {ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,       "sync_w_pend"},
            {ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,      "async_r_pend"},
            {ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,      "async_w_pend"},
            {ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,        "async_scrub_pend"},
+           {ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,      "rebuild_pend"},
            {NULL,      NULL}
        };
 
@@ -636,11 +641,13 @@ print_top_level_vdev_stats(nvlist_t *nvroot, const char *pool_name)
            {ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE, "async_r_active_queue"},
            {ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE, "async_w_active_queue"},
            {ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE, "async_scrub_active_queue"},
+           {ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE, "rebuild_active_queue"},
            {ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE, "sync_r_pend_queue"},
            {ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE, "sync_w_pend_queue"},
            {ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE, "async_r_pend_queue"},
            {ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE, "async_w_pend_queue"},
            {ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE, "async_scrub_pend_queue"},
+           {ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE, "rebuild_pend_queue"},
            {NULL, NULL}
        };
 
diff --git a/include/sys/fs/zfs.h b/include/sys/fs/zfs.h
index a6b704ec87a0c59701de5b54b023695b87d3e641..2af11fc7196ddefac4823a1bd769ae34973c2761 100644
@@ -643,6 +643,7 @@ typedef struct zpool_load_policy {
 #define        ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE  "vdev_async_w_active_queue"
 #define        ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE    "vdev_async_scrub_active_queue"
 #define        ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE     "vdev_async_trim_active_queue"
+#define        ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE  "vdev_rebuild_active_queue"
 
 /* Queue sizes */
 #define        ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE     "vdev_sync_r_pend_queue"
@@ -651,6 +652,7 @@ typedef struct zpool_load_policy {
 #define        ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE    "vdev_async_w_pend_queue"
 #define        ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE      "vdev_async_scrub_pend_queue"
 #define        ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE       "vdev_async_trim_pend_queue"
+#define        ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE    "vdev_rebuild_pend_queue"
 
 /* Latency read/write histogram stats */
 #define        ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO       "vdev_tot_r_lat_histo"
@@ -663,6 +665,7 @@ typedef struct zpool_load_policy {
 #define        ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO     "vdev_async_w_lat_histo"
 #define        ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO       "vdev_scrub_histo"
 #define        ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO        "vdev_trim_histo"
+#define        ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO     "vdev_rebuild_histo"
 
 /* Request size histograms */
 #define        ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO      "vdev_sync_ind_r_histo"
@@ -671,12 +674,14 @@ typedef struct zpool_load_policy {
 #define        ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO     "vdev_async_ind_w_histo"
 #define        ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO       "vdev_ind_scrub_histo"
 #define        ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO        "vdev_ind_trim_histo"
+#define        ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO     "vdev_ind_rebuild_histo"
 #define        ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO      "vdev_sync_agg_r_histo"
 #define        ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO      "vdev_sync_agg_w_histo"
 #define        ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO     "vdev_async_agg_r_histo"
 #define        ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO     "vdev_async_agg_w_histo"
 #define        ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO       "vdev_agg_scrub_histo"
 #define        ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO        "vdev_agg_trim_histo"
+#define        ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO     "vdev_agg_rebuild_histo"
 
 /* Number of slow IOs */
 #define        ZPOOL_CONFIG_VDEV_SLOW_IOS              "vdev_slow_ios"
diff --git a/man/man8/zpool-iostat.8 b/man/man8/zpool-iostat.8
index 0e64aa71b1d60eb0e2601261d467807933172041..969c74cf398e68817a3260fb2c12be3a5684f548 100644
@@ -201,6 +201,9 @@ Does not include disk time.
 .It Sy scrub
 Amount of time I/O spent in scrub queue.
 Does not include disk time.
+.It Sy rebuild
+Amount of time I/O spent in rebuild queue.
+Does not include disk time.
 .El
 .It Fl l
 Include average latency statistics:
@@ -221,6 +224,9 @@ Does not include disk time.
 .It Sy trim
 Average queuing time in trim queue.
 Does not include disk time.
+.It Sy rebuild
+Average queuing time in rebuild queue.
+Does not include disk time.
 .El
 .It Fl q
 Include active queue statistics.
@@ -242,6 +248,8 @@ Current number of entries in asynchronous priority queues.
 Current number of entries in scrub queue.
 .It Sy trimq_write
 Current number of entries in trim queue.
+.It Sy rebuildq_write
+Current number of entries in rebuild queue.
 .El
 .Pp
 All queue statistics are instantaneous measurements of the number of
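In command-line terms, the entries documented above surface as follows:
zpool iostat -l gains a rebuild wait column, zpool iostat -q gains a
rebuildq_write pend/activ pair, and the -w/-r histogram views gain rebuild
latency and ind/agg request-size columns. (Column names are taken from the
label tables earlier in this diff; exact output spacing is not reproduced
here.)
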
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 47a475135302b7c426882e9ea41316785dbaab79..2763bd8de1c498d02744079a125d7f1ca2320f63 100644
@@ -4583,13 +4583,10 @@ vdev_stat_update(zio_t *zio, uint64_t psize)
                         *   ZIO_PRIORITY_ASYNC_READ,
                         *   ZIO_PRIORITY_ASYNC_WRITE,
                         *   ZIO_PRIORITY_SCRUB,
-                        *   ZIO_PRIORITY_TRIM.
+                        *   ZIO_PRIORITY_TRIM,
+                        *   ZIO_PRIORITY_REBUILD.
                         */
-                       if (priority == ZIO_PRIORITY_REBUILD) {
-                               priority = ((type == ZIO_TYPE_WRITE) ?
-                                   ZIO_PRIORITY_ASYNC_WRITE :
-                                   ZIO_PRIORITY_SCRUB);
-                       } else if (priority == ZIO_PRIORITY_INITIALIZING) {
+                       if (priority == ZIO_PRIORITY_INITIALIZING) {
                                ASSERT3U(type, ==, ZIO_TYPE_WRITE);
                                priority = ZIO_PRIORITY_ASYNC_WRITE;
                        } else if (priority == ZIO_PRIORITY_REMOVAL) {
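This vdev.c hunk is what gives the new keys their data: previously, rebuild
I/O was remapped to the async-write or scrub bucket before the extended queue
and latency stats were updated, so ZIO_PRIORITY_REBUILD never appeared on its
own. With the remap removed, rebuild I/O is accounted for under its own
priority, which vdev_label.c below then exports through the new nvlist
entries.
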
diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c
index 04202a9f89600bc1fcbe93576fe37ea5c7b2a9f9..cdb4cb6e565fce986ff39f9c163afc6ce846f615 100644
@@ -256,6 +256,9 @@ vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
        fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
            vsx->vsx_active_queue[ZIO_PRIORITY_TRIM]);
 
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
+           vsx->vsx_active_queue[ZIO_PRIORITY_REBUILD]);
+
        /* ZIOs pending */
        fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
            vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_READ]);
@@ -275,6 +278,9 @@ vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
        fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
            vsx->vsx_pend_queue[ZIO_PRIORITY_TRIM]);
 
+       fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
+           vsx->vsx_pend_queue[ZIO_PRIORITY_REBUILD]);
+
        /* Histograms */
        fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
            vsx->vsx_total_histo[ZIO_TYPE_READ],
@@ -316,6 +322,10 @@ vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
            vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM],
            ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM]));
 
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
+           vsx->vsx_queue_histo[ZIO_PRIORITY_REBUILD],
+           ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_REBUILD]));
+
        /* Request sizes */
        fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
            vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ],
@@ -341,6 +351,10 @@ vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
            vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM],
            ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM]));
 
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
+           vsx->vsx_ind_histo[ZIO_PRIORITY_REBUILD],
+           ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_REBUILD]));
+
        fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
            vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ],
            ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ]));
@@ -365,6 +379,10 @@ vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
            vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM],
            ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM]));
 
+       fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
+           vsx->vsx_agg_histo[ZIO_PRIORITY_REBUILD],
+           ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_REBUILD]));
+
        /* IO delays */
        fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SLOW_IOS, vs->vs_slow_ios);