/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
 */

#include <libintl.h>
#include <libuutil.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <thread_pool.h>

#include <libzfs.h>
#include <libzutil.h>
#include <sys/zfs_context.h>
#include <sys/wait.h>

#include "zpool_util.h"

/*
 * Private interface for iterating over pools specified on the command line.
 * Most consumers will call for_each_pool, but in order to support iostat, we
 * allow fine-grained control through the zpool_list_t interface.
 */

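/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * hypothetical iostat-style consumer of the fine-grained interface might
 * hold the list across passes like this:
 *
 *	int err = 0;
 *	zpool_list_t *list = pool_list_get(argc, argv, NULL, &err);
 *
 *	while (keep_polling) {
 *		pool_list_update(list);
 *		(void) pool_list_iter(list, B_FALSE, print_stats_cb, NULL);
 *		(void) sleep(interval);
 *	}
 *	pool_list_free(list);
 *
 * Here print_stats_cb, keep_polling, and interval are placeholders for the
 * caller's own callback, loop condition, and polling interval.
 */
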
typedef struct zpool_node {
	zpool_handle_t	*zn_handle;
	uu_avl_node_t	zn_avlnode;
	int		zn_mark;
} zpool_node_t;

struct zpool_list {
	boolean_t	zl_findall;
	uu_avl_t	*zl_avl;
	uu_avl_pool_t	*zl_pool;
	zprop_list_t	**zl_proplist;
};

/* ARGSUSED */
static int
zpool_compare(const void *larg, const void *rarg, void *unused)
{
	zpool_handle_t *l = ((zpool_node_t *)larg)->zn_handle;
	zpool_handle_t *r = ((zpool_node_t *)rarg)->zn_handle;
	const char *lname = zpool_get_name(l);
	const char *rname = zpool_get_name(r);

	return (strcmp(lname, rname));
}

/*
 * Callback function for pool_list_get(). Adds the given pool to the AVL tree
 * of known pools.
 */
static int
add_pool(zpool_handle_t *zhp, void *data)
{
	zpool_list_t *zlp = data;
	zpool_node_t *node = safe_malloc(sizeof (zpool_node_t));
	uu_avl_index_t idx;

	node->zn_handle = zhp;
	uu_avl_node_init(node, &node->zn_avlnode, zlp->zl_pool);
	if (uu_avl_find(zlp->zl_avl, node, NULL, &idx) == NULL) {
		if (zlp->zl_proplist &&
		    zpool_expand_proplist(zhp, zlp->zl_proplist) != 0) {
			zpool_close(zhp);
			free(node);
			return (-1);
		}
		uu_avl_insert(zlp->zl_avl, node, idx);
	} else {
		zpool_close(zhp);
		free(node);
		return (-1);
	}

	return (0);
}

/*
 * Create a list of pools based on the given arguments. If we're given no
 * arguments, then iterate over all pools in the system and add them to the AVL
 * tree. Otherwise, add only those pools explicitly specified on the command
 * line.
 */
zpool_list_t *
pool_list_get(int argc, char **argv, zprop_list_t **proplist, int *err)
{
	zpool_list_t *zlp;

	zlp = safe_malloc(sizeof (zpool_list_t));

	zlp->zl_pool = uu_avl_pool_create("zfs_pool", sizeof (zpool_node_t),
	    offsetof(zpool_node_t, zn_avlnode), zpool_compare, UU_DEFAULT);

	if (zlp->zl_pool == NULL)
		zpool_no_memory();

	if ((zlp->zl_avl = uu_avl_create(zlp->zl_pool, NULL,
	    UU_DEFAULT)) == NULL)
		zpool_no_memory();

	zlp->zl_proplist = proplist;

	if (argc == 0) {
		(void) zpool_iter(g_zfs, add_pool, zlp);
		zlp->zl_findall = B_TRUE;
	} else {
		int i;

		for (i = 0; i < argc; i++) {
			zpool_handle_t *zhp;

			if ((zhp = zpool_open_canfail(g_zfs, argv[i])) !=
			    NULL) {
				if (add_pool(zhp, zlp) != 0)
					*err = B_TRUE;
			} else {
				*err = B_TRUE;
			}
		}
	}

	return (zlp);
}

/*
 * Search for any new pools, adding them to the list. We only add pools when
 * no pool arguments were given on the command line. Otherwise, we keep the
 * list fixed as those that were explicitly specified.
 */
void
pool_list_update(zpool_list_t *zlp)
{
	if (zlp->zl_findall)
		(void) zpool_iter(g_zfs, add_pool, zlp);
}

/*
 * Iterate over all pools in the list, executing the callback for each
 */
int
pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func,
    void *data)
{
	zpool_node_t *node, *next_node;
	int ret = 0;

	for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next_node) {
		next_node = uu_avl_next(zlp->zl_avl, node);
		if (zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL ||
		    unavail)
			ret |= func(node->zn_handle, data);
	}

	return (ret);
}

/*
 * Remove the given pool from the list. When running iostat, we want to remove
 * those pools that no longer exist.
 */
void
pool_list_remove(zpool_list_t *zlp, zpool_handle_t *zhp)
{
	zpool_node_t search, *node;

	search.zn_handle = zhp;
	if ((node = uu_avl_find(zlp->zl_avl, &search, NULL, NULL)) != NULL) {
		uu_avl_remove(zlp->zl_avl, node);
		zpool_close(node->zn_handle);
		free(node);
	}
}

/*
 * Free all the handles associated with this list.
 */
void
pool_list_free(zpool_list_t *zlp)
{
	uu_avl_walk_t *walk;
	zpool_node_t *node;

	if ((walk = uu_avl_walk_start(zlp->zl_avl, UU_WALK_ROBUST)) == NULL) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory"));
		exit(1);
	}

	while ((node = uu_avl_walk_next(walk)) != NULL) {
		uu_avl_remove(zlp->zl_avl, node);
		zpool_close(node->zn_handle);
		free(node);
	}

	uu_avl_walk_end(walk);
	uu_avl_destroy(zlp->zl_avl);
	uu_avl_pool_destroy(zlp->zl_pool);

	free(zlp);
}

/*
 * Returns the number of elements in the pool list.
 */
int
pool_list_count(zpool_list_t *zlp)
{
	return (uu_avl_numnodes(zlp->zl_avl));
}

/*
 * High level function which iterates over all pools given on the command line,
 * using the pool_list_* interfaces.
 */
int
for_each_pool(int argc, char **argv, boolean_t unavail,
    zprop_list_t **proplist, zpool_iter_f func, void *data)
{
	zpool_list_t *list;
	int ret = 0;

	if ((list = pool_list_get(argc, argv, proplist, &ret)) == NULL)
		return (1);

	if (pool_list_iter(list, unavail, func, data) != 0)
		ret = 1;

	pool_list_free(list);

	return (ret);
}
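
/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * minimal consumer of for_each_pool().  The callback name and body below are
 * hypothetical; the calling convention matches the function above:
 *
 *	static int
 *	show_name_cb(zpool_handle_t *zhp, void *data)
 *	{
 *		(void) printf("%s\n", zpool_get_name(zhp));
 *		return (0);
 *	}
 *
 *	// With argc == 0 every imported pool is visited; unavail == B_FALSE
 *	// skips pools in the UNAVAIL state.
 *	ret = for_each_pool(0, NULL, B_FALSE, NULL, show_name_cb, NULL);
 */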

static int
for_each_vdev_cb(zpool_handle_t *zhp, nvlist_t *nv, pool_vdev_iter_f func,
    void *data)
{
	nvlist_t **child;
	uint_t c, children;
	int ret = 0;
	int i;
	char *type;

	const char *list[] = {
	    ZPOOL_CONFIG_SPARES,
	    ZPOOL_CONFIG_L2CACHE,
	    ZPOOL_CONFIG_CHILDREN
	};

	for (i = 0; i < ARRAY_SIZE(list); i++) {
		if (nvlist_lookup_nvlist_array(nv, list[i], &child,
		    &children) == 0) {
			for (c = 0; c < children; c++) {
				uint64_t ishole = 0;

				(void) nvlist_lookup_uint64(child[c],
				    ZPOOL_CONFIG_IS_HOLE, &ishole);

				if (ishole)
					continue;

				ret |= for_each_vdev_cb(zhp, child[c], func,
				    data);
			}
		}
	}

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (ret);

	/* Don't run our function on root vdevs */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0) {
		ret |= func(zhp, nv, data);
	}

	return (ret);
}

/*
 * This is the equivalent of for_each_pool() for vdevs. It iterates through
 * all vdevs in the pool, ignoring root vdevs and holes, calling func() on
 * each one.
 *
 * @zhp: Zpool handle
 * @func: Function to call on each vdev
 * @data: Custom data to pass to the function
 */
int
for_each_vdev(zpool_handle_t *zhp, pool_vdev_iter_f func, void *data)
{
	nvlist_t *config, *nvroot = NULL;

	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
	}
	return (for_each_vdev_cb(zhp, nvroot, func, data));
}
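
/*
 * Illustrative sketch (not part of the original file, not compiled): a
 * hypothetical per-vdev callback for for_each_vdev().  Each callback gets the
 * vdev's config nvlist, so it can look up entries such as ZPOOL_CONFIG_PATH:
 *
 *	static int
 *	print_vdev_path_cb(zpool_handle_t *zhp, nvlist_t *nv, void *data)
 *	{
 *		char *path;
 *
 *		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0)
 *			(void) printf("%s\n", path);
 *		return (0);
 *	}
 *
 *	ret = for_each_vdev(zhp, print_vdev_path_cb, NULL);
 */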

/*
 * Process the vcdl->vdev_cmd_data[] array to figure out all the unique column
 * names and their widths. When this function is done, vcdl->uniq_cols,
 * vcdl->uniq_cols_cnt, and vcdl->uniq_cols_width will be filled in.
 */
static void
process_unique_cmd_columns(vdev_cmd_data_list_t *vcdl)
{
	char **uniq_cols = NULL, **tmp = NULL;
	int *uniq_cols_width;
	vdev_cmd_data_t *data;
	int cnt = 0;
	int k;

	/* For each vdev */
	for (int i = 0; i < vcdl->count; i++) {
		data = &vcdl->data[i];
		/* For each column the vdev reported */
		for (int j = 0; j < data->cols_cnt; j++) {
			/* Is this column in our list of unique column names? */
			for (k = 0; k < cnt; k++) {
				if (strcmp(data->cols[j], uniq_cols[k]) == 0)
					break; /* yes it is */
			}
			if (k == cnt) {
				/* No entry for column, add to list */
				tmp = realloc(uniq_cols, sizeof (*uniq_cols) *
				    (cnt + 1));
				if (tmp == NULL)
					break; /* Nothing we can do... */
				uniq_cols = tmp;
				uniq_cols[cnt] = data->cols[j];
				cnt++;
			}
		}
	}

	/*
	 * We now have a list of all the unique column names. Figure out the
	 * max width of each column by looking at the column name and all its
	 * values.
	 */
	uniq_cols_width = safe_malloc(sizeof (*uniq_cols_width) * cnt);
	for (int i = 0; i < cnt; i++) {
		/* Start off with the column title's width */
		uniq_cols_width[i] = strlen(uniq_cols[i]);
		/* For each vdev */
		for (int j = 0; j < vcdl->count; j++) {
			/* For each of the vdev's values in a column */
			data = &vcdl->data[j];
			for (k = 0; k < data->cols_cnt; k++) {
				/* Does this vdev have a value for this col? */
				if (strcmp(data->cols[k], uniq_cols[i]) == 0) {
					/* Is the value width larger? */
					uniq_cols_width[i] =
					    MAX(uniq_cols_width[i],
					    strlen(data->lines[k]));
				}
			}
		}
	}

	vcdl->uniq_cols = uniq_cols;
	vcdl->uniq_cols_cnt = cnt;
	vcdl->uniq_cols_width = uniq_cols_width;
}
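
/*
 * Worked example with hypothetical data: if vdev 0 reported the column
 * "vendor=ATA" and vdev 1 reported "vendor=SEAGATE" and "size=4T", then
 * after process_unique_cmd_columns():
 *
 *	uniq_cols	= { "vendor", "size" }
 *	uniq_cols_cnt	= 2
 *	uniq_cols_width	= { 7, 4 }
 *
 * i.e. each width is the larger of the column title's length and the length
 * of its widest value (strlen("SEAGATE") == 7, strlen("size") == 4).
 */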


/*
 * Process a line of command output
 *
 * When running 'zpool iostat|status -c' the lines of output can either be
 * in the form of:
 *
 *	column_name=value
 *
 * Or just:
 *
 *	value
 *
 * Process the column_name (if any) and value.
 *
 * Returns 0 if the line was processed and more lines can still be processed.
 *
 * Returns 1 if this was the last line to process, or on error.
 */
static int
vdev_process_cmd_output(vdev_cmd_data_t *data, char *line)
{
	char *col = NULL;
	char *val = line;
	char *equals;
	char **tmp;

	if (line == NULL)
		return (1);

	equals = strchr(line, '=');
	if (equals != NULL) {
		/*
		 * We have a 'column=value' type line. Split it into the
		 * column and value strings by turning the '=' into a '\0'.
		 */
		*equals = '\0';
		col = line;
		val = equals + 1;
	} else {
		val = line;
	}

	/* Do we already have a column by this name? If so, skip it. */
	if (col != NULL) {
		for (int i = 0; i < data->cols_cnt; i++) {
			if (strcmp(col, data->cols[i]) == 0)
				return (0); /* Duplicate, skip */
		}
	}

	if (val != NULL) {
		tmp = realloc(data->lines,
		    (data->lines_cnt + 1) * sizeof (*data->lines));
		if (tmp == NULL)
			return (1);

		data->lines = tmp;
		data->lines[data->lines_cnt] = strdup(val);
		data->lines_cnt++;
	}

	if (col != NULL) {
		tmp = realloc(data->cols,
		    (data->cols_cnt + 1) * sizeof (*data->cols));
		if (tmp == NULL)
			return (1);

		data->cols = tmp;
		data->cols[data->cols_cnt] = strdup(col);
		data->cols_cnt++;
	}

	if (val != NULL && col == NULL)
		return (1);

	return (0);
}
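
/*
 * Illustrative example with hypothetical input: feeding the lines
 * "med_temp=34" and then "34" (a bare value with no column name) into
 * vdev_process_cmd_output() would leave *data as:
 *
 *	cols  = { "med_temp" }		cols_cnt  = 1
 *	lines = { "34", "34" }		lines_cnt = 2
 *
 * and the second call returns 1, since a bare value marks the last line
 * that will be processed for this vdev.
 */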

/*
 * Run the cmd and store results in *data.
 */
static void
vdev_run_cmd(vdev_cmd_data_t *data, char *cmd)
{
	int rc;
	char *argv[2] = {cmd, 0};
	char *env[5] = {"PATH=/bin:/sbin:/usr/bin:/usr/sbin", NULL, NULL, NULL,
	    NULL};
	char **lines = NULL;
	int lines_cnt = 0;
	int i;

	/* Setup our custom environment variables */
	rc = asprintf(&env[1], "VDEV_PATH=%s",
	    data->path ? data->path : "");
	if (rc == -1)
		goto out;

	rc = asprintf(&env[2], "VDEV_UPATH=%s",
	    data->upath ? data->upath : "");
	if (rc == -1)
		goto out;

	rc = asprintf(&env[3], "VDEV_ENC_SYSFS_PATH=%s",
	    data->vdev_enc_sysfs_path ?
	    data->vdev_enc_sysfs_path : "");
	if (rc == -1)
		goto out;

	/* Run the command */
	rc = libzfs_run_process_get_stdout_nopath(cmd, argv, env, &lines,
	    &lines_cnt);
	if (rc != 0)
		goto out;

	/* Process the output we got */
	for (i = 0; i < lines_cnt; i++)
		if (vdev_process_cmd_output(data, lines[i]) != 0)
			break;

out:
	if (lines != NULL)
		libzfs_free_str_array(lines, lines_cnt);

	/* Start with i = 1 since env[0] was statically allocated */
	for (i = 1; i < ARRAY_SIZE(env); i++)
		if (env[i] != NULL)
			free(env[i]);
}
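
/*
 * Illustrative note (hypothetical device paths): for a vdev whose path is
 * /dev/sda1, sitting on whole disk /dev/sda and with no enclosure sysfs
 * path, the env[] array built above would end up looking like:
 *
 *	{ "PATH=/bin:/sbin:/usr/bin:/usr/sbin",
 *	  "VDEV_PATH=/dev/sda1",
 *	  "VDEV_UPATH=/dev/sda",
 *	  "VDEV_ENC_SYSFS_PATH=",
 *	  NULL }
 *
 * so 'zpool iostat|status -c' scripts can rely on those variables instead
 * of parsing arguments.
 */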

/*
 * Generate the search path for zpool iostat/status -c scripts.
 * The string returned must be freed.
 */
char *
zpool_get_cmd_search_path(void)
{
	const char *env;
	char *sp = NULL;

	env = getenv("ZPOOL_SCRIPTS_PATH");
	if (env != NULL)
		return (strdup(env));

	env = getenv("HOME");
	if (env != NULL) {
		if (asprintf(&sp, "%s/.zpool.d:%s",
		    env, ZPOOL_SCRIPTS_DIR) != -1) {
			return (sp);
		}
	}

	if (asprintf(&sp, "%s", ZPOOL_SCRIPTS_DIR) != -1)
		return (sp);

	return (NULL);
}
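/*
 * Illustrative example (hypothetical environment): with ZPOOL_SCRIPTS_PATH
 * unset and HOME=/root, the function above returns the string
 *
 *	"/root/.zpool.d:<ZPOOL_SCRIPTS_DIR>"
 *
 * where ZPOOL_SCRIPTS_DIR is the build-time script directory (commonly
 * /etc/zfs/zpool.d on Linux, but configuration dependent).  Setting
 * ZPOOL_SCRIPTS_PATH overrides the whole search path.
 */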

/* Thread function run for each vdev */
static void
vdev_run_cmd_thread(void *cb_cmd_data)
{
	vdev_cmd_data_t *data = cb_cmd_data;
	char *cmd = NULL, *cmddup, *cmdrest;

	cmddup = strdup(data->cmd);
	if (cmddup == NULL)
		return;

	cmdrest = cmddup;
	while ((cmd = strtok_r(cmdrest, ",", &cmdrest))) {
		char *dir = NULL, *sp, *sprest;
		char fullpath[MAXPATHLEN];

		if (strchr(cmd, '/') != NULL)
			continue;

		sp = zpool_get_cmd_search_path();
		if (sp == NULL)
			continue;

		sprest = sp;
		while ((dir = strtok_r(sprest, ":", &sprest))) {
			if (snprintf(fullpath, sizeof (fullpath),
			    "%s/%s", dir, cmd) == -1)
				continue;

			if (access(fullpath, X_OK) == 0) {
				vdev_run_cmd(data, fullpath);
				break;
			}
		}
		free(sp);
	}
	free(cmddup);
}

/* For each vdev in the pool run a command */
static int
for_each_vdev_run_cb(zpool_handle_t *zhp, nvlist_t *nv, void *cb_vcdl)
{
	vdev_cmd_data_list_t *vcdl = cb_vcdl;
	vdev_cmd_data_t *data;
	char *path = NULL;
	char *vname = NULL;
	char *vdev_enc_sysfs_path = NULL;
	int i, match = 0;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		return (1);

	nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &vdev_enc_sysfs_path);

	/* Spares show up more than once if in use, so skip duplicates */
	for (i = 0; i < vcdl->count; i++) {
		if ((strcmp(vcdl->data[i].path, path) == 0) &&
		    (strcmp(vcdl->data[i].pool, zpool_get_name(zhp)) == 0)) {
			/* vdev already exists, skip it */
			return (0);
		}
	}

	/* Check for whitelisted vdevs here, if any */
	for (i = 0; i < vcdl->vdev_names_count; i++) {
		vname = zpool_vdev_name(g_zfs, zhp, nv, vcdl->cb_name_flags);
		if (strcmp(vcdl->vdev_names[i], vname) == 0) {
			free(vname);
			match = 1;
			break; /* match */
		}
		free(vname);
	}

	/* If we whitelisted vdevs, and this isn't one of them, then bail out */
	if (!match && vcdl->vdev_names_count)
		return (0);

	/*
	 * Resize our array and add in the new element.
	 */
	if (!(vcdl->data = realloc(vcdl->data,
	    sizeof (*vcdl->data) * (vcdl->count + 1))))
		return (ENOMEM);	/* couldn't realloc */

	data = &vcdl->data[vcdl->count];

	data->pool = strdup(zpool_get_name(zhp));
	data->path = strdup(path);
	data->upath = zfs_get_underlying_path(path);
	data->cmd = vcdl->cmd;
	data->lines = data->cols = NULL;
	data->lines_cnt = data->cols_cnt = 0;
	if (vdev_enc_sysfs_path)
		data->vdev_enc_sysfs_path = strdup(vdev_enc_sysfs_path);
	else
		data->vdev_enc_sysfs_path = NULL;

	vcdl->count++;

	return (0);
}

/* Get the names and count of the vdevs */
static int
all_pools_for_each_vdev_gather_cb(zpool_handle_t *zhp, void *cb_vcdl)
{
	return (for_each_vdev(zhp, for_each_vdev_run_cb, cb_vcdl));
}

/*
 * Now that vcdl is populated with our complete list of vdevs, spawn
 * off the commands.
 */
static void
all_pools_for_each_vdev_run_vcdl(vdev_cmd_data_list_t *vcdl)
{
	tpool_t *t;

	t = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN), 0, NULL);
	if (t == NULL)
		return;

	/* Spawn off the command for each vdev */
	for (int i = 0; i < vcdl->count; i++) {
		(void) tpool_dispatch(t, vdev_run_cmd_thread,
		    (void *) &vcdl->data[i]);
	}

	/* Wait for threads to finish */
	tpool_wait(t);
	tpool_destroy(t);
}

/*
 * Run command 'cmd' on all vdevs in all pools in argv. Saves the output lines
 * from the command in vcdl->data[].lines for all vdevs. If you want to run
 * the command on only certain vdevs, fill in g_zfs, vdev_names,
 * vdev_names_count, and cb_name_flags. Otherwise leave them as zero.
 *
 * Returns a vdev_cmd_data_list_t that must be freed with
 * free_vdev_cmd_data_list().
 */
vdev_cmd_data_list_t *
all_pools_for_each_vdev_run(int argc, char **argv, char *cmd,
    libzfs_handle_t *g_zfs, char **vdev_names, int vdev_names_count,
    int cb_name_flags)
{
	vdev_cmd_data_list_t *vcdl;
	vcdl = safe_malloc(sizeof (vdev_cmd_data_list_t));
	vcdl->cmd = cmd;

	vcdl->vdev_names = vdev_names;
	vcdl->vdev_names_count = vdev_names_count;
	vcdl->cb_name_flags = cb_name_flags;
	vcdl->g_zfs = g_zfs;

	/* Gather our list of all vdevs in all pools */
	for_each_pool(argc, argv, B_TRUE, NULL,
	    all_pools_for_each_vdev_gather_cb, vcdl);

	/* Run command on all vdevs in all pools */
	all_pools_for_each_vdev_run_vcdl(vcdl);

	/*
	 * vcdl->data[] now contains all the column names and values for each
	 * vdev. We need to process that into a master list of unique column
	 * names, and figure out the width of each column.
	 */
	process_unique_cmd_columns(vcdl);

	return (vcdl);
}
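
/*
 * Illustrative sketch (not part of the original file, not compiled): the
 * expected call pattern for the function above, with a hypothetical script
 * name "smart" and no vdev whitelist:
 *
 *	vdev_cmd_data_list_t *vcdl;
 *
 *	vcdl = all_pools_for_each_vdev_run(argc, argv, "smart", g_zfs,
 *	    NULL, 0, 0);
 *	// ... print vcdl->uniq_cols[] headers and vcdl->data[].lines[] ...
 *	free_vdev_cmd_data_list(vcdl);
 */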

/*
 * Free the vdev_cmd_data_list_t created by all_pools_for_each_vdev_run()
 */
void
free_vdev_cmd_data_list(vdev_cmd_data_list_t *vcdl)
{
	free(vcdl->uniq_cols);
	free(vcdl->uniq_cols_width);

	for (int i = 0; i < vcdl->count; i++) {
		free(vcdl->data[i].path);
		free(vcdl->data[i].pool);
		free(vcdl->data[i].upath);

		for (int j = 0; j < vcdl->data[i].lines_cnt; j++)
			free(vcdl->data[i].lines[j]);

		free(vcdl->data[i].lines);

		for (int j = 0; j < vcdl->data[i].cols_cnt; j++)
			free(vcdl->data[i].cols[j]);

		free(vcdl->data[i].cols);
		free(vcdl->data[i].vdev_enc_sysfs_path);
	}
	free(vcdl->data);
	free(vcdl);
}