cmd/zpool/zpool_vdev.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * Functions to convert between a list of vdevs and an nvlist representing the
28 * configuration. Each entry in the list can be one of:
29 *
30 * Device vdevs
31 * disk=(path=..., devid=...)
32 * file=(path=...)
33 *
34 * Group vdevs
35 * raidz[1|2]=(...)
36 * mirror=(...)
37 *
38 * Hot spares
39 *
40 * While the underlying implementation supports it, group vdevs cannot contain
41 * other group vdevs. All userland verification of devices is contained within
42 * this file. If successful, the nvlist returned can be passed directly to the
43 * kernel; we've done as much verification as possible in userland.
44 *
45 * Hot spares are a special case, and passed down as an array of disk vdevs, at
46 * the same level as the root of the vdev tree.
47 *
48  * The primary function exported by this file is 'make_root_vdev'.  The
49 * function performs several passes:
50 *
51 * 1. Construct the vdev specification. Performs syntax validation and
52 * makes sure each device is valid.
53  *	2. Check for devices in use.  Uses libblkid to make sure that no
54  *	   devices are already in use.  Some can be overridden using the 'force'
55 * flag, others cannot.
56 * 3. Check for replication errors if the 'force' flag is not specified.
57  *	   Validates that the replication level is consistent across the
58 * entire pool.
59 * 4. Call libzfs to label any whole disks with an EFI label.
60 */
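
/*
 * Illustrative sketch (kept under "#if 0", not compiled): roughly how a
 * command line reaches this file and what construct_spec() builds from it.
 * The device names below are hypothetical, and the resolved paths depend on
 * where the shorthand names are found.
 */
#if 0
	/* "zpool create tank mirror sda sdb" arrives here as: */
	char *argv[] = { "mirror", "sda", "sdb" };
	nvlist_t *nvroot = construct_spec(props, 3, argv);	/* props from caller */
	/*
	 * nvroot (type "root")
	 *	mirror (ZPOOL_CONFIG_IS_LOG = 0)
	 *		disk (ZPOOL_CONFIG_PATH = "/dev/sda", whole_disk = 1)
	 *		disk (ZPOOL_CONFIG_PATH = "/dev/sdb", whole_disk = 1)
	 */
#endif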
61
62 #include <assert.h>
63 #include <ctype.h>
64 #include <devid.h>
65 #include <errno.h>
66 #include <fcntl.h>
67 #include <libintl.h>
68 #include <libnvpair.h>
69 #include <limits.h>
70 #include <scsi/scsi.h>
71 #include <scsi/sg.h>
72 #include <stdio.h>
73 #include <string.h>
74 #include <unistd.h>
75 #include <sys/efi_partition.h>
76 #include <sys/stat.h>
77 #include <sys/vtoc.h>
78 #include <sys/mntent.h>
79 #include <uuid/uuid.h>
80 #ifdef HAVE_LIBBLKID
81 #include <blkid/blkid.h>
82 #else
83 #define blkid_cache void *
84 #endif /* HAVE_LIBBLKID */
85
86 #include "zpool_util.h"
87 #include <sys/zfs_context.h>
88
89 /*
90 * For any given vdev specification, we can have multiple errors. The
91 * vdev_error() function keeps track of whether we have seen an error yet, and
92  * prints out a header if it's the first error we've seen.
93 */
94 boolean_t error_seen;
95 boolean_t is_force;
96
97 typedef struct vdev_disk_db_entry
98 {
99 char id[24];
100 int sector_size;
101 } vdev_disk_db_entry_t;
102
103 /*
104 * Database of block devices that lie about physical sector sizes. The
105 * identification string must be precisely 24 characters to avoid false
106  * negatives.
107 */
108 static vdev_disk_db_entry_t vdev_disk_database[] = {
109 {"ATA ADATA SSD S396 3", 8192},
110 {"ATA APPLE SSD SM128E", 8192},
111 {"ATA APPLE SSD SM256E", 8192},
112 {"ATA APPLE SSD SM512E", 8192},
113 {"ATA APPLE SSD SM768E", 8192},
114 {"ATA C400-MTFDDAC064M", 8192},
115 {"ATA C400-MTFDDAC128M", 8192},
116 {"ATA C400-MTFDDAC256M", 8192},
117 {"ATA C400-MTFDDAC512M", 8192},
118 {"ATA Corsair Force 3 ", 8192},
119 {"ATA Corsair Force GS", 8192},
120 {"ATA INTEL SSDSA2CT04", 8192},
121 {"ATA INTEL SSDSA2BZ10", 8192},
122 {"ATA INTEL SSDSA2BZ20", 8192},
123 {"ATA INTEL SSDSA2BZ30", 8192},
124 {"ATA INTEL SSDSA2CW04", 8192},
125 {"ATA INTEL SSDSA2CW08", 8192},
126 {"ATA INTEL SSDSA2CW12", 8192},
127 {"ATA INTEL SSDSA2CW16", 8192},
128 {"ATA INTEL SSDSA2CW30", 8192},
129 {"ATA INTEL SSDSA2CW60", 8192},
130 {"ATA INTEL SSDSC2BA10", 8192},
131 {"ATA INTEL SSDSC2BA20", 8192},
132 {"ATA INTEL SSDSC2BA40", 8192},
133 {"ATA INTEL SSDSC2BA80", 8192},
134 {"ATA INTEL SSDSC2BB08", 8192},
135 {"ATA INTEL SSDSC2BB12", 8192},
136 {"ATA INTEL SSDSC2BB16", 8192},
137 {"ATA INTEL SSDSC2BB24", 8192},
138 {"ATA INTEL SSDSC2BB30", 8192},
139 {"ATA INTEL SSDSC2BB40", 8192},
140 {"ATA INTEL SSDSC2BB48", 8192},
141 {"ATA INTEL SSDSC2BB60", 8192},
142 {"ATA INTEL SSDSC2BB80", 8192},
143 {"ATA INTEL SSDSC2CT06", 8192},
144 {"ATA INTEL SSDSC2CT12", 8192},
145 {"ATA INTEL SSDSC2CT18", 8192},
146 {"ATA INTEL SSDSC2CT24", 8192},
147 {"ATA INTEL SSDSC2CW06", 8192},
148 {"ATA INTEL SSDSC2CW12", 8192},
149 {"ATA INTEL SSDSC2CW18", 8192},
150 {"ATA INTEL SSDSC2CW24", 8192},
151 {"ATA INTEL SSDSC2CW48", 8192},
152 {"ATA KINGSTON SH100S3", 8192},
153 {"ATA KINGSTON SH103S3", 8192},
154 {"ATA M4-CT064M4SSD2 ", 8192},
155 {"ATA M4-CT128M4SSD2 ", 8192},
156 {"ATA M4-CT256M4SSD2 ", 8192},
157 {"ATA M4-CT512M4SSD2 ", 8192},
158 {"ATA OCZ-AGILITY2 ", 8192},
159 {"ATA OCZ-AGILITY3 ", 8192},
160 {"ATA OCZ-VERTEX2 3.5 ", 8192},
161 {"ATA OCZ-VERTEX3 ", 8192},
162 {"ATA OCZ-VERTEX3 LT ", 8192},
163 {"ATA OCZ-VERTEX3 MI ", 8192},
164 {"ATA OCZ-VERTEX4 ", 8192},
165 {"ATA SAMSUNG MZ7WD120", 8192},
166 {"ATA SAMSUNG MZ7WD240", 8192},
167 {"ATA SAMSUNG MZ7WD480", 8192},
168 {"ATA SAMSUNG MZ7WD960", 8192},
169 {"ATA SAMSUNG SSD 830 ", 8192},
170 {"ATA Samsung SSD 840 ", 8192},
171 {"ATA SanDisk SSD U100", 8192},
172 {"ATA TOSHIBA THNSNH06", 8192},
173 {"ATA TOSHIBA THNSNH12", 8192},
174 {"ATA TOSHIBA THNSNH25", 8192},
175 {"ATA TOSHIBA THNSNH51", 8192},
176 {"ATA APPLE SSD TS064C", 4096},
177 {"ATA APPLE SSD TS128C", 4096},
178 {"ATA APPLE SSD TS256C", 4096},
179 {"ATA APPLE SSD TS512C", 4096},
180 {"ATA INTEL SSDSA2M040", 4096},
181 {"ATA INTEL SSDSA2M080", 4096},
182 {"ATA INTEL SSDSA2M160", 4096},
183 {"ATA INTEL SSDSC2MH12", 4096},
184 {"ATA INTEL SSDSC2MH25", 4096},
185 {"ATA OCZ CORE_SSD ", 4096},
186 {"ATA OCZ-VERTEX ", 4096},
187 {"ATA SAMSUNG MCCOE32G", 4096},
188 {"ATA SAMSUNG MCCOE64G", 4096},
189 {"ATA SAMSUNG SSD PM80", 4096},
190 /* Imported from Open Solaris */
191 {"ATA MARVELL SD88SA02", 4096},
192 /* Advanced format Hard drives */
193 {"ATA Hitachi HDS5C303", 4096},
194 {"ATA SAMSUNG HD204UI ", 4096},
195 {"ATA ST2000DL004 HD20", 4096},
196 {"ATA WDC WD10EARS-00M", 4096},
197 {"ATA WDC WD10EARS-00S", 4096},
198 {"ATA WDC WD10EARS-00Z", 4096},
199 {"ATA WDC WD15EARS-00M", 4096},
200 {"ATA WDC WD15EARS-00S", 4096},
201 {"ATA WDC WD15EARS-00Z", 4096},
202 {"ATA WDC WD20EARS-00M", 4096},
203 {"ATA WDC WD20EARS-00S", 4096},
204 {"ATA WDC WD20EARS-00Z", 4096},
205 /* Virtual disks: Assume zvols with default volblocksize */
206 #if 0
207 {"ATA QEMU HARDDISK ", 8192},
208 {"IET VIRTUAL-DISK ", 8192},
209 {"OI COMSTAR ", 8192},
210 {"SUN COMSTAR ", 8192},
211 {"NETAPP LUN ", 8192},
212 #endif
213 };
214
215 static const int vdev_disk_database_size =
216 sizeof (vdev_disk_database) / sizeof (vdev_disk_database[0]);
217
218 #define INQ_REPLY_LEN 96
219 #define INQ_CMD_LEN 6
220
221 static boolean_t
222 check_sector_size_database(char *path, int *sector_size)
223 {
224 unsigned char inq_buff[INQ_REPLY_LEN];
225 unsigned char sense_buffer[32];
226 unsigned char inq_cmd_blk[INQ_CMD_LEN] =
227 {INQUIRY, 0, 0, 0, INQ_REPLY_LEN, 0};
228 sg_io_hdr_t io_hdr;
229 int error;
230 int fd;
231 int i;
232
233 /* Prepare INQUIRY command */
234 memset(&io_hdr, 0, sizeof (sg_io_hdr_t));
235 io_hdr.interface_id = 'S';
236 io_hdr.cmd_len = sizeof (inq_cmd_blk);
237 io_hdr.mx_sb_len = sizeof (sense_buffer);
238 io_hdr.dxfer_direction = SG_DXFER_FROM_DEV;
239 io_hdr.dxfer_len = INQ_REPLY_LEN;
240 io_hdr.dxferp = inq_buff;
241 io_hdr.cmdp = inq_cmd_blk;
242 io_hdr.sbp = sense_buffer;
243 io_hdr.timeout = 10; /* 10 milliseconds is ample time */
244
245 if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
246 return (B_FALSE);
247
248 error = ioctl(fd, SG_IO, (unsigned long) &io_hdr);
249
250 (void) close(fd);
251
252 if (error < 0)
253 return (B_FALSE);
254
255 if ((io_hdr.info & SG_INFO_OK_MASK) != SG_INFO_OK)
256 return (B_FALSE);
257
258 for (i = 0; i < vdev_disk_database_size; i++) {
259 if (memcmp(inq_buff + 8, vdev_disk_database[i].id, 24))
260 continue;
261
262 *sector_size = vdev_disk_database[i].sector_size;
263 return (B_TRUE);
264 }
265
266 return (B_FALSE);
267 }
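
/*
 * Usage sketch (illustrative, not compiled): callers map a matched physical
 * sector size to an ashift value, as make_leaf_vdev() does below.  The
 * device path is a hypothetical example.
 */
#if 0
	int sector_size;
	uint64_t ashift = 0;

	if (check_sector_size_database("/dev/sda", &sector_size) == B_TRUE)
		ashift = highbit(sector_size) - 1;	/* 4096 -> 12, 8192 -> 13 */
#endif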
268
269 /*PRINTFLIKE1*/
270 static void
271 vdev_error(const char *fmt, ...)
272 {
273 va_list ap;
274
275 if (!error_seen) {
276 (void) fprintf(stderr, gettext("invalid vdev specification\n"));
277 if (!is_force)
278 (void) fprintf(stderr, gettext("use '-f' to override "
279 "the following errors:\n"));
280 else
281 (void) fprintf(stderr, gettext("the following errors "
282 "must be manually repaired:\n"));
283 error_seen = B_TRUE;
284 }
285
286 va_start(ap, fmt);
287 (void) vfprintf(stderr, fmt, ap);
288 va_end(ap);
289 }
290
291 /*
292 * Check that a file is valid. All we can do in this case is check that it's
293 * not in use by another pool, and not in use by swap.
294 */
295 static int
296 check_file(const char *file, boolean_t force, boolean_t isspare)
297 {
298 char *name;
299 int fd;
300 int ret = 0;
301 pool_state_t state;
302 boolean_t inuse;
303
304 if ((fd = open(file, O_RDONLY)) < 0)
305 return (0);
306
307 if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) == 0 && inuse) {
308 const char *desc;
309
310 switch (state) {
311 case POOL_STATE_ACTIVE:
312 desc = gettext("active");
313 break;
314
315 case POOL_STATE_EXPORTED:
316 desc = gettext("exported");
317 break;
318
319 case POOL_STATE_POTENTIALLY_ACTIVE:
320 desc = gettext("potentially active");
321 break;
322
323 default:
324 desc = gettext("unknown");
325 break;
326 }
327
328 /*
329 * Allow hot spares to be shared between pools.
330 */
331 if (state == POOL_STATE_SPARE && isspare)
332 return (0);
333
334 if (state == POOL_STATE_ACTIVE ||
335 state == POOL_STATE_SPARE || !force) {
336 switch (state) {
337 case POOL_STATE_SPARE:
338 vdev_error(gettext("%s is reserved as a hot "
339 "spare for pool %s\n"), file, name);
340 break;
341 default:
342 vdev_error(gettext("%s is part of %s pool "
343 "'%s'\n"), file, desc, name);
344 break;
345 }
346 ret = -1;
347 }
348
349 free(name);
350 }
351
352 (void) close(fd);
353 return (ret);
354 }
355
356 static void
357 check_error(int err)
358 {
359 (void) fprintf(stderr, gettext("warning: device in use checking "
360 "failed: %s\n"), strerror(err));
361 }
362
363 static int
364 check_slice(const char *path, blkid_cache cache, int force, boolean_t isspare)
365 {
366 int err;
367 #ifdef HAVE_LIBBLKID
368 char *value;
369
370 	/* No valid type detected; the device is safe to use */
371 value = blkid_get_tag_value(cache, "TYPE", path);
372 if (value == NULL)
373 return (0);
374
375 /*
376 * If libblkid detects a ZFS device, we check the device
377 * using check_file() to see if it's safe. The one safe
378 * case is a spare device shared between multiple pools.
379 */
380 if (strcmp(value, "zfs") == 0) {
381 err = check_file(path, force, isspare);
382 } else {
383 if (force) {
384 err = 0;
385 } else {
386 err = -1;
387 vdev_error(gettext("%s contains a filesystem of "
388 "type '%s'\n"), path, value);
389 }
390 }
391
392 free(value);
393 #else
394 err = check_file(path, force, isspare);
395 #endif /* HAVE_LIBBLKID */
396
397 return (err);
398 }
399
400 /*
401 * Validate a whole disk. Iterate over all slices on the disk and make sure
402 * that none is in use by calling check_slice().
403 */
404 static int
405 check_disk(const char *path, blkid_cache cache, int force,
406 boolean_t isspare, boolean_t iswholedisk)
407 {
408 struct dk_gpt *vtoc;
409 char slice_path[MAXPATHLEN];
410 int err = 0;
411 int fd, i;
412
413 	/* This is not a whole disk; we only check the given partition */
414 if (!iswholedisk)
415 return (check_slice(path, cache, force, isspare));
416
417 /*
418 	 * When the device is a whole disk, try to read the EFI partition
419 	 * label.  If this is successful we can safely check all of the
420 	 * partitions.  However, when it fails it may simply be because
421 	 * the disk is partitioned via the MBR.  Since we currently cannot
422 	 * easily decode the MBR, return a failure and prompt the user to
423 	 * use the force option, since we cannot check the partitions.
424 */
425 if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0) {
426 check_error(errno);
427 return (-1);
428 }
429
430 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
431 (void) close(fd);
432
433 if (force) {
434 return (0);
435 } else {
436 vdev_error(gettext("%s does not contain an EFI "
437 "label but it may contain partition\n"
438 "information in the MBR.\n"), path);
439 return (-1);
440 }
441 }
442
443 /*
444 	 * The primary EFI partition label is damaged, but the secondary
445 	 * label at the end of the device is intact.  Rather than use the
446 	 * backup label, we play it safe and treat this as a non-EFI device.
447 */
448 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
449 efi_free(vtoc);
450 (void) close(fd);
451
452 if (force) {
453 			/* Partitions will not be created using the backup label */
454 return (0);
455 } else {
456 vdev_error(gettext("%s contains a corrupt primary "
457 "EFI label.\n"), path);
458 return (-1);
459 }
460 }
461
462 for (i = 0; i < vtoc->efi_nparts; i++) {
463
464 if (vtoc->efi_parts[i].p_tag == V_UNASSIGNED ||
465 uuid_is_null((uchar_t *)&vtoc->efi_parts[i].p_guid))
466 continue;
467
468 if (strncmp(path, UDISK_ROOT, strlen(UDISK_ROOT)) == 0)
469 (void) snprintf(slice_path, sizeof (slice_path),
470 "%s%s%d", path, "-part", i+1);
471 else
472 (void) snprintf(slice_path, sizeof (slice_path),
473 "%s%s%d", path, isdigit(path[strlen(path)-1]) ?
474 "p" : "", i+1);
475
476 err = check_slice(slice_path, cache, force, isspare);
477 if (err)
478 break;
479 }
480
481 efi_free(vtoc);
482 (void) close(fd);
483
484 return (err);
485 }
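
/*
 * Example of the slice paths checked above (hypothetical device names):
 * a whole disk "/dev/sda" yields "/dev/sda1", "/dev/sda2", ...; a path
 * under /dev/disk such as "/dev/disk/by-id/<name>" yields
 * "/dev/disk/by-id/<name>-part1", ...; and a path ending in a digit such
 * as "/dev/loop0" gets a "p" separator ("/dev/loop0p1").
 */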
486
487 static int
488 check_device(const char *path, boolean_t force,
489 boolean_t isspare, boolean_t iswholedisk)
490 {
491 static blkid_cache cache = NULL;
492
493 #ifdef HAVE_LIBBLKID
494 /*
495 	 * There is no easy way to add a correct blkid_put_cache() call;
496 	 * the memory will be reclaimed when the command exits.
497 */
498 if (cache == NULL) {
499 int err;
500
501 if ((err = blkid_get_cache(&cache, NULL)) != 0) {
502 check_error(err);
503 return (-1);
504 }
505
506 if ((err = blkid_probe_all(cache)) != 0) {
507 blkid_put_cache(cache);
508 check_error(err);
509 return (-1);
510 }
511 }
512 #endif /* HAVE_LIBBLKID */
513
514 return (check_disk(path, cache, force, isspare, iswholedisk));
515 }
516
517 /*
518 * By "whole disk" we mean an entire physical disk (something we can
519 * label, toggle the write cache on, etc.) as opposed to the full
520 * capacity of a pseudo-device such as lofi or did. We act as if we
521 * are labeling the disk, which should be a pretty good test of whether
522 * it's a viable device or not. Returns B_TRUE if it is and B_FALSE if
523 * it isn't.
524 */
525 static boolean_t
526 is_whole_disk(const char *path)
527 {
528 struct dk_gpt *label;
529 int fd;
530
531 if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
532 return (B_FALSE);
533 if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
534 (void) close(fd);
535 return (B_FALSE);
536 }
537 efi_free(label);
538 (void) close(fd);
539 return (B_TRUE);
540 }
541
542 /*
543 * This may be a shorthand device path or it could be total gibberish.
544 * Check to see if it is a known device available in zfs_vdev_paths.
545 * As part of this check, see if we've been given an entire disk
546 * (minus the slice number).
547 */
548 static int
549 is_shorthand_path(const char *arg, char *path,
550 struct stat64 *statbuf, boolean_t *wholedisk)
551 {
552 int error;
553
554 error = zfs_resolve_shortname(arg, path, MAXPATHLEN);
555 if (error == 0) {
556 *wholedisk = is_whole_disk(path);
557 if (*wholedisk || (stat64(path, statbuf) == 0))
558 return (0);
559 }
560
561 	strlcpy(path, arg, MAXPATHLEN);
562 memset(statbuf, 0, sizeof (*statbuf));
563 *wholedisk = B_FALSE;
564
565 return (error);
566 }
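
/*
 * Usage sketch (illustrative, not compiled; device names hypothetical):
 * a shorthand such as "sda" may resolve to "/dev/sda" and be flagged as a
 * whole disk, while "sda1" resolves to that partition with *wholedisk left
 * B_FALSE.
 */
#if 0
	char path[MAXPATHLEN];
	struct stat64 statbuf;
	boolean_t wholedisk;

	if (is_shorthand_path("sda", path, &statbuf, &wholedisk) == 0)
		(void) printf("%s (whole disk: %d)\n", path, (int)wholedisk);
#endif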
567
568 /*
569 * Determine if the given path is a hot spare within the given configuration.
570 * If no configuration is given we rely solely on the label.
571 */
572 static boolean_t
573 is_spare(nvlist_t *config, const char *path)
574 {
575 int fd;
576 pool_state_t state;
577 char *name = NULL;
578 nvlist_t *label;
579 uint64_t guid, spareguid;
580 nvlist_t *nvroot;
581 nvlist_t **spares;
582 uint_t i, nspares;
583 boolean_t inuse;
584
585 if ((fd = open(path, O_RDONLY)) < 0)
586 return (B_FALSE);
587
588 if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0 ||
589 !inuse ||
590 state != POOL_STATE_SPARE ||
591 zpool_read_label(fd, &label) != 0) {
592 free(name);
593 (void) close(fd);
594 return (B_FALSE);
595 }
596 free(name);
597 (void) close(fd);
598
599 if (config == NULL)
600 return (B_TRUE);
601
602 verify(nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) == 0);
603 nvlist_free(label);
604
605 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
606 &nvroot) == 0);
607 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
608 &spares, &nspares) == 0) {
609 for (i = 0; i < nspares; i++) {
610 verify(nvlist_lookup_uint64(spares[i],
611 ZPOOL_CONFIG_GUID, &spareguid) == 0);
612 if (spareguid == guid)
613 return (B_TRUE);
614 }
615 }
616
617 return (B_FALSE);
618 }
619
620 /*
621 * Create a leaf vdev. Determine if this is a file or a device. If it's a
622 * device, fill in the device id to make a complete nvlist. Valid forms for a
623 * leaf vdev are:
624 *
625 * /dev/xxx Complete disk path
626 * /xxx Full path to file
627 * xxx Shorthand for <zfs_vdev_paths>/xxx
628 */
629 static nvlist_t *
630 make_leaf_vdev(nvlist_t *props, const char *arg, uint64_t is_log)
631 {
632 char path[MAXPATHLEN];
633 struct stat64 statbuf;
634 nvlist_t *vdev = NULL;
635 char *type = NULL;
636 boolean_t wholedisk = B_FALSE;
637 uint64_t ashift = 0;
638 int err;
639
640 /*
641 * Determine what type of vdev this is, and put the full path into
642 	 * 'path'.  We detect whether this is a device or file afterwards by
643 * checking the st_mode of the file.
644 */
645 if (arg[0] == '/') {
646 /*
647 * Complete device or file path. Exact type is determined by
648 * examining the file descriptor afterwards. Symbolic links
649 * are resolved to their real paths for the is_whole_disk()
650 * and S_ISBLK/S_ISREG type checks. However, we are careful
651 * to store the given path as ZPOOL_CONFIG_PATH to ensure we
652 * can leverage udev's persistent device labels.
653 */
654 if (realpath(arg, path) == NULL) {
655 (void) fprintf(stderr,
656 gettext("cannot resolve path '%s'\n"), arg);
657 return (NULL);
658 }
659
660 wholedisk = is_whole_disk(path);
661 if (!wholedisk && (stat64(path, &statbuf) != 0)) {
662 (void) fprintf(stderr,
663 gettext("cannot open '%s': %s\n"),
664 path, strerror(errno));
665 return (NULL);
666 }
667
668 		/* After the is_whole_disk() check, restore the original passed path */
669 strlcpy(path, arg, MAXPATHLEN);
670 } else {
671 err = is_shorthand_path(arg, path, &statbuf, &wholedisk);
672 if (err != 0) {
673 /*
674 * If we got ENOENT, then the user gave us
675 * gibberish, so try to direct them with a
676 * reasonable error message. Otherwise,
677 * regurgitate strerror() since it's the best we
678 * can do.
679 */
680 if (err == ENOENT) {
681 (void) fprintf(stderr,
682 gettext("cannot open '%s': no such "
683 "device in %s\n"), arg, DISK_ROOT);
684 (void) fprintf(stderr,
685 gettext("must be a full path or "
686 "shorthand device name\n"));
687 return (NULL);
688 } else {
689 (void) fprintf(stderr,
690 gettext("cannot open '%s': %s\n"),
691 path, strerror(errno));
692 return (NULL);
693 }
694 }
695 }
696
697 /*
698 * Determine whether this is a device or a file.
699 */
700 if (wholedisk || S_ISBLK(statbuf.st_mode)) {
701 type = VDEV_TYPE_DISK;
702 } else if (S_ISREG(statbuf.st_mode)) {
703 type = VDEV_TYPE_FILE;
704 } else {
705 (void) fprintf(stderr, gettext("cannot use '%s': must be a "
706 "block device or regular file\n"), path);
707 return (NULL);
708 }
709
710 /*
711 * Finally, we have the complete device or file, and we know that it is
712 * acceptable to use. Construct the nvlist to describe this vdev. All
713 * vdevs have a 'path' element, and devices also have a 'devid' element.
714 */
715 verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
716 verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, path) == 0);
717 verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, type) == 0);
718 verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_LOG, is_log) == 0);
719 if (strcmp(type, VDEV_TYPE_DISK) == 0)
720 verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK,
721 (uint64_t)wholedisk) == 0);
722
723 /*
724 * Override defaults if custom properties are provided.
725 */
726 if (props != NULL) {
727 char *value = NULL;
728
729 if (nvlist_lookup_string(props,
730 zpool_prop_to_name(ZPOOL_PROP_ASHIFT), &value) == 0)
731 zfs_nicestrtonum(NULL, value, &ashift);
732 }
733
734 /*
735 * If the device is known to incorrectly report its physical sector
736 * size explicitly provide the known correct value.
737 */
738 if (ashift == 0) {
739 int sector_size;
740
741 if (check_sector_size_database(path, &sector_size) == B_TRUE)
742 ashift = highbit(sector_size) - 1;
743 }
744
745 if (ashift > 0)
746 nvlist_add_uint64(vdev, ZPOOL_CONFIG_ASHIFT, ashift);
747
748 return (vdev);
749 }
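
/*
 * Illustrative sketch (not compiled): the kind of leaf nvlist returned.
 * The paths are hypothetical examples.
 */
#if 0
	nvlist_t *nv;

	/* A plain file becomes a 'file' vdev: */
	nv = make_leaf_vdev(props, "/tank/file1", B_FALSE);
	/* nv: { path = "/tank/file1", type = "file", is_log = 0 } */

	/*
	 * A disk leaf additionally carries 'whole_disk' and, when the ashift
	 * property is given or the disk appears in vdev_disk_database, an
	 * explicit 'ashift'.
	 */
#endif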
750
751 /*
752 * Go through and verify the replication level of the pool is consistent.
753 * Performs the following checks:
754 *
755 * For the new spec, verifies that devices in mirrors and raidz are the
756 * same size.
757 *
758 * If the current configuration already has inconsistent replication
759 * levels, ignore any other potential problems in the new spec.
760 *
761 * Otherwise, make sure that the current spec (if there is one) and the new
762 * spec have consistent replication levels.
763 */
764 typedef struct replication_level {
765 char *zprl_type;
766 uint64_t zprl_children;
767 uint64_t zprl_parity;
768 } replication_level_t;
769
770 #define ZPOOL_FUZZ (16 * 1024 * 1024)
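
/*
 * Example (illustrative, not compiled; hypothetical devices): how a couple
 * of common specs are summarized by get_replication() below.
 */
#if 0
	/* "raidz2 sda sdb sdc sdd" */
	replication_level_t r1 = { "raidz", 4, 2 };
	/* "mirror sda sdb" */
	replication_level_t r2 = { "mirror", 2, 0 };
#endif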
771
772 /*
773 * Given a list of toplevel vdevs, return the current replication level. If
774 * the config is inconsistent, then NULL is returned. If 'fatal' is set, then
775 * an error message will be displayed for each self-inconsistent vdev.
776 */
777 static replication_level_t *
778 get_replication(nvlist_t *nvroot, boolean_t fatal)
779 {
780 nvlist_t **top;
781 uint_t t, toplevels;
782 nvlist_t **child;
783 uint_t c, children;
784 nvlist_t *nv;
785 char *type;
786 replication_level_t lastrep = { 0 }, rep, *ret;
787 boolean_t dontreport;
788
789 ret = safe_malloc(sizeof (replication_level_t));
790
791 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
792 &top, &toplevels) == 0);
793
794 lastrep.zprl_type = NULL;
795 for (t = 0; t < toplevels; t++) {
796 uint64_t is_log = B_FALSE;
797
798 nv = top[t];
799
800 /*
801 * For separate logs we ignore the top level vdev replication
802 * constraints.
803 */
804 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log);
805 if (is_log)
806 continue;
807
808 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE,
809 &type) == 0);
810 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
811 &child, &children) != 0) {
812 /*
813 * This is a 'file' or 'disk' vdev.
814 */
815 rep.zprl_type = type;
816 rep.zprl_children = 1;
817 rep.zprl_parity = 0;
818 } else {
819 uint64_t vdev_size;
820
821 /*
822 * This is a mirror or RAID-Z vdev. Go through and make
823 * sure the contents are all the same (files vs. disks),
824 * keeping track of the number of elements in the
825 * process.
826 *
827 * We also check that the size of each vdev (if it can
828 * be determined) is the same.
829 */
830 rep.zprl_type = type;
831 rep.zprl_children = 0;
832
833 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
834 verify(nvlist_lookup_uint64(nv,
835 ZPOOL_CONFIG_NPARITY,
836 &rep.zprl_parity) == 0);
837 assert(rep.zprl_parity != 0);
838 } else {
839 rep.zprl_parity = 0;
840 }
841
842 /*
843 * The 'dontreport' variable indicates that we've
844 * already reported an error for this spec, so don't
845 * bother doing it again.
846 */
847 type = NULL;
848 dontreport = 0;
849 vdev_size = -1ULL;
850 for (c = 0; c < children; c++) {
851 nvlist_t *cnv = child[c];
852 char *path;
853 struct stat64 statbuf;
854 uint64_t size = -1ULL;
855 char *childtype;
856 int fd, err;
857
858 rep.zprl_children++;
859
860 verify(nvlist_lookup_string(cnv,
861 ZPOOL_CONFIG_TYPE, &childtype) == 0);
862
863 /*
864 * If this is a replacing or spare vdev, then
865 * get the real first child of the vdev.
866 */
867 if (strcmp(childtype,
868 VDEV_TYPE_REPLACING) == 0 ||
869 strcmp(childtype, VDEV_TYPE_SPARE) == 0) {
870 nvlist_t **rchild;
871 uint_t rchildren;
872
873 verify(nvlist_lookup_nvlist_array(cnv,
874 ZPOOL_CONFIG_CHILDREN, &rchild,
875 &rchildren) == 0);
876 assert(rchildren == 2);
877 cnv = rchild[0];
878
879 verify(nvlist_lookup_string(cnv,
880 ZPOOL_CONFIG_TYPE,
881 &childtype) == 0);
882 }
883
884 verify(nvlist_lookup_string(cnv,
885 ZPOOL_CONFIG_PATH, &path) == 0);
886
887 /*
888 * If we have a raidz/mirror that combines disks
889 * with files, report it as an error.
890 */
891 if (!dontreport && type != NULL &&
892 strcmp(type, childtype) != 0) {
893 if (ret != NULL)
894 free(ret);
895 ret = NULL;
896 if (fatal)
897 vdev_error(gettext(
898 "mismatched replication "
899 "level: %s contains both "
900 "files and devices\n"),
901 rep.zprl_type);
902 else
903 return (NULL);
904 dontreport = B_TRUE;
905 }
906
907 /*
908 * According to stat(2), the value of 'st_size'
909 * is undefined for block devices and character
910 * devices. But there is no effective way to
911 * determine the real size in userland.
912 *
913 * Instead, we'll take advantage of an
914 * implementation detail of spec_size(). If the
915 * device is currently open, then we (should)
916 * return a valid size.
917 *
918 * If we still don't get a valid size (indicated
919 * by a size of 0 or MAXOFFSET_T), then ignore
920 * this device altogether.
921 */
922 if ((fd = open(path, O_RDONLY)) >= 0) {
923 err = fstat64(fd, &statbuf);
924 (void) close(fd);
925 } else {
926 err = stat64(path, &statbuf);
927 }
928
929 if (err != 0 ||
930 statbuf.st_size == 0 ||
931 statbuf.st_size == MAXOFFSET_T)
932 continue;
933
934 size = statbuf.st_size;
935
936 /*
937 * Also make sure that devices and
938 * slices have a consistent size. If
939 * they differ by a significant amount
940 * (~16MB) then report an error.
941 */
942 if (!dontreport &&
943 (vdev_size != -1ULL &&
944 (labs(size - vdev_size) >
945 ZPOOL_FUZZ))) {
946 if (ret != NULL)
947 free(ret);
948 ret = NULL;
949 if (fatal)
950 vdev_error(gettext(
951 "%s contains devices of "
952 "different sizes\n"),
953 rep.zprl_type);
954 else
955 return (NULL);
956 dontreport = B_TRUE;
957 }
958
959 type = childtype;
960 vdev_size = size;
961 }
962 }
963
964 /*
965 * At this point, we have the replication of the last toplevel
966 		 * vdev in 'rep'.  Compare it to 'lastrep' to see if it's
967 * different.
968 */
969 if (lastrep.zprl_type != NULL) {
970 if (strcmp(lastrep.zprl_type, rep.zprl_type) != 0) {
971 if (ret != NULL)
972 free(ret);
973 ret = NULL;
974 if (fatal)
975 vdev_error(gettext(
976 "mismatched replication level: "
977 "both %s and %s vdevs are "
978 "present\n"),
979 lastrep.zprl_type, rep.zprl_type);
980 else
981 return (NULL);
982 } else if (lastrep.zprl_parity != rep.zprl_parity) {
983 if (ret)
984 free(ret);
985 ret = NULL;
986 if (fatal)
987 vdev_error(gettext(
988 "mismatched replication level: "
989 "both %llu and %llu device parity "
990 "%s vdevs are present\n"),
991 lastrep.zprl_parity,
992 rep.zprl_parity,
993 rep.zprl_type);
994 else
995 return (NULL);
996 } else if (lastrep.zprl_children != rep.zprl_children) {
997 if (ret)
998 free(ret);
999 ret = NULL;
1000 if (fatal)
1001 vdev_error(gettext(
1002 "mismatched replication level: "
1003 "both %llu-way and %llu-way %s "
1004 "vdevs are present\n"),
1005 lastrep.zprl_children,
1006 rep.zprl_children,
1007 rep.zprl_type);
1008 else
1009 return (NULL);
1010 }
1011 }
1012 lastrep = rep;
1013 }
1014
1015 if (ret != NULL)
1016 *ret = rep;
1017
1018 return (ret);
1019 }
1020
1021 /*
1022 * Check the replication level of the vdev spec against the current pool. Calls
1023 * get_replication() to make sure the new spec is self-consistent. If the pool
1024  * already has an inconsistent replication level, we ignore any errors.  Otherwise,
1025 * report any difference between the two.
1026 */
1027 static int
1028 check_replication(nvlist_t *config, nvlist_t *newroot)
1029 {
1030 nvlist_t **child;
1031 uint_t children;
1032 replication_level_t *current = NULL, *new;
1033 int ret;
1034
1035 /*
1036 * If we have a current pool configuration, check to see if it's
1037 * self-consistent. If not, simply return success.
1038 */
1039 if (config != NULL) {
1040 nvlist_t *nvroot;
1041
1042 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1043 &nvroot) == 0);
1044 if ((current = get_replication(nvroot, B_FALSE)) == NULL)
1045 return (0);
1046 }
1047 /*
1048 * for spares there may be no children, and therefore no
1049 * replication level to check
1050 */
1051 if ((nvlist_lookup_nvlist_array(newroot, ZPOOL_CONFIG_CHILDREN,
1052 &child, &children) != 0) || (children == 0)) {
1053 free(current);
1054 return (0);
1055 }
1056
1057 /*
1058 * If all we have is logs then there's no replication level to check.
1059 */
1060 if (num_logs(newroot) == children) {
1061 free(current);
1062 return (0);
1063 }
1064
1065 /*
1066 * Get the replication level of the new vdev spec, reporting any
1067 * inconsistencies found.
1068 */
1069 if ((new = get_replication(newroot, B_TRUE)) == NULL) {
1070 free(current);
1071 return (-1);
1072 }
1073
1074 /*
1075 * Check to see if the new vdev spec matches the replication level of
1076 * the current pool.
1077 */
1078 ret = 0;
1079 if (current != NULL) {
1080 if (strcmp(current->zprl_type, new->zprl_type) != 0) {
1081 vdev_error(gettext(
1082 "mismatched replication level: pool uses %s "
1083 "and new vdev is %s\n"),
1084 current->zprl_type, new->zprl_type);
1085 ret = -1;
1086 } else if (current->zprl_parity != new->zprl_parity) {
1087 vdev_error(gettext(
1088 "mismatched replication level: pool uses %llu "
1089 "device parity and new vdev uses %llu\n"),
1090 current->zprl_parity, new->zprl_parity);
1091 ret = -1;
1092 } else if (current->zprl_children != new->zprl_children) {
1093 vdev_error(gettext(
1094 "mismatched replication level: pool uses %llu-way "
1095 "%s and new vdev uses %llu-way %s\n"),
1096 current->zprl_children, current->zprl_type,
1097 new->zprl_children, new->zprl_type);
1098 ret = -1;
1099 }
1100 }
1101
1102 free(new);
1103 if (current != NULL)
1104 free(current);
1105
1106 return (ret);
1107 }
1108
1109 static int
1110 zero_label(char *path)
1111 {
1112 const int size = 4096;
1113 char buf[size];
1114 int err, fd;
1115
1116 if ((fd = open(path, O_WRONLY|O_EXCL)) < 0) {
1117 (void) fprintf(stderr, gettext("cannot open '%s': %s\n"),
1118 path, strerror(errno));
1119 return (-1);
1120 }
1121
1122 memset(buf, 0, size);
1123 err = write(fd, buf, size);
1124 (void) fdatasync(fd);
1125 (void) close(fd);
1126
1127 if (err == -1) {
1128 (void) fprintf(stderr, gettext("cannot zero first %d bytes "
1129 "of '%s': %s\n"), size, path, strerror(errno));
1130 return (-1);
1131 }
1132
1133 if (err != size) {
1134 (void) fprintf(stderr, gettext("could only zero %d/%d bytes "
1135 "of '%s'\n"), err, size, path);
1136 return (-1);
1137 }
1138
1139 return (0);
1140 }
1141
1142 /*
1143 * Go through and find any whole disks in the vdev specification, labelling them
1144 * as appropriate. When constructing the vdev spec, we were unable to open this
1145 * device in order to provide a devid. Now that we have labelled the disk and
1146  * know that slice 0 is valid, we can construct the devid.
1147 *
1148 * If the disk was already labeled with an EFI label, we will have gotten the
1149 * devid already (because we were able to open the whole disk). Otherwise, we
1150 * need to get the devid after we label the disk.
1151 */
1152 static int
1153 make_disks(zpool_handle_t *zhp, nvlist_t *nv)
1154 {
1155 nvlist_t **child;
1156 uint_t c, children;
1157 char *type, *path;
1158 char devpath[MAXPATHLEN];
1159 char udevpath[MAXPATHLEN];
1160 uint64_t wholedisk;
1161 struct stat64 statbuf;
1162 int is_exclusive = 0;
1163 int fd;
1164 int ret;
1165
1166 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
1167
1168 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1169 &child, &children) != 0) {
1170
1171 if (strcmp(type, VDEV_TYPE_DISK) != 0)
1172 return (0);
1173
1174 /*
1175 * We have a disk device. If this is a whole disk write
1176 		 * out the EFI partition table, otherwise write zeros to
1177 * the first 4k of the partition. This is to ensure that
1178 * libblkid will not misidentify the partition due to a
1179 * magic value left by the previous filesystem.
1180 */
1181 verify(!nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path));
1182 verify(!nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1183 &wholedisk));
1184
1185 if (!wholedisk) {
1186 (void) zero_label(path);
1187 return (0);
1188 }
1189
1190 if (realpath(path, devpath) == NULL) {
1191 ret = errno;
1192 (void) fprintf(stderr,
1193 gettext("cannot resolve path '%s'\n"), path);
1194 return (ret);
1195 }
1196
1197 /*
1198 * Remove any previously existing symlink from a udev path to
1199 * the device before labeling the disk. This makes
1200 * zpool_label_disk_wait() truly wait for the new link to show
1201 * up instead of returning if it finds an old link still in
1202 * place. Otherwise there is a window between when udev
1203 * deletes and recreates the link during which access attempts
1204 * will fail with ENOENT.
1205 */
1206 strncpy(udevpath, path, MAXPATHLEN);
1207 (void) zfs_append_partition(udevpath, MAXPATHLEN);
1208
1209 fd = open(devpath, O_RDWR|O_EXCL);
1210 if (fd == -1) {
1211 if (errno == EBUSY)
1212 is_exclusive = 1;
1213 } else {
1214 (void) close(fd);
1215 }
1216
1217 /*
1218 * If the partition exists, contains a valid spare label,
1219 * and is opened exclusively there is no need to partition
1220 * it. Hot spares have already been partitioned and are
1221 * held open exclusively by the kernel as a safety measure.
1222 *
1223 * If the provided path is for a /dev/disk/ device its
1224 * symbolic link will be removed, partition table created,
1225 * and then block until udev creates the new link.
1226 */
1227 if (!is_exclusive || !is_spare(NULL, udevpath)) {
1228 ret = strncmp(udevpath, UDISK_ROOT, strlen(UDISK_ROOT));
1229 if (ret == 0) {
1230 ret = lstat64(udevpath, &statbuf);
1231 if (ret == 0 && S_ISLNK(statbuf.st_mode))
1232 (void) unlink(udevpath);
1233 }
1234
1235 if (zpool_label_disk(g_zfs, zhp,
1236 strrchr(devpath, '/') + 1) == -1)
1237 return (-1);
1238
1239 ret = zpool_label_disk_wait(udevpath, DISK_LABEL_WAIT);
1240 if (ret) {
1241 (void) fprintf(stderr, gettext("cannot "
1242 "resolve path '%s': %d\n"), udevpath, ret);
1243 return (-1);
1244 }
1245
1246 (void) zero_label(udevpath);
1247 }
1248
1249 /*
1250 * Update the path to refer to the partition. The presence of
1251 * the 'whole_disk' field indicates to the CLI that we should
1252 * chop off the partition number when displaying the device in
1253 * future output.
1254 */
1255 verify(nvlist_add_string(nv, ZPOOL_CONFIG_PATH, udevpath) == 0);
1256
1257 return (0);
1258 }
1259
1260 for (c = 0; c < children; c++)
1261 if ((ret = make_disks(zhp, child[c])) != 0)
1262 return (ret);
1263
1264 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1265 &child, &children) == 0)
1266 for (c = 0; c < children; c++)
1267 if ((ret = make_disks(zhp, child[c])) != 0)
1268 return (ret);
1269
1270 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1271 &child, &children) == 0)
1272 for (c = 0; c < children; c++)
1273 if ((ret = make_disks(zhp, child[c])) != 0)
1274 return (ret);
1275
1276 return (0);
1277 }
1278
1279 /*
1280  * Go through and find any devices that are in use.  We rely on libblkid for
1281 * the majority of this task.
1282 */
1283 static int
1284 check_in_use(nvlist_t *config, nvlist_t *nv, boolean_t force,
1285 boolean_t replacing, boolean_t isspare)
1286 {
1287 nvlist_t **child;
1288 uint_t c, children;
1289 char *type, *path;
1290 int ret = 0;
1291 char buf[MAXPATHLEN];
1292 uint64_t wholedisk = B_FALSE;
1293
1294 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
1295
1296 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1297 &child, &children) != 0) {
1298
1299 verify(!nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path));
1300 if (strcmp(type, VDEV_TYPE_DISK) == 0)
1301 verify(!nvlist_lookup_uint64(nv,
1302 ZPOOL_CONFIG_WHOLE_DISK, &wholedisk));
1303
1304 /*
1305 * As a generic check, we look to see if this is a replace of a
1306 * hot spare within the same pool. If so, we allow it
1307 * regardless of what libblkid or zpool_in_use() says.
1308 */
1309 if (replacing) {
1310 (void) strlcpy(buf, path, sizeof (buf));
1311 if (wholedisk) {
1312 ret = zfs_append_partition(buf, sizeof (buf));
1313 if (ret == -1)
1314 return (-1);
1315 }
1316
1317 if (is_spare(config, buf))
1318 return (0);
1319 }
1320
1321 if (strcmp(type, VDEV_TYPE_DISK) == 0)
1322 ret = check_device(path, force, isspare, wholedisk);
1323
1324 if (strcmp(type, VDEV_TYPE_FILE) == 0)
1325 ret = check_file(path, force, isspare);
1326
1327 return (ret);
1328 }
1329
1330 for (c = 0; c < children; c++)
1331 if ((ret = check_in_use(config, child[c], force,
1332 replacing, B_FALSE)) != 0)
1333 return (ret);
1334
1335 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1336 &child, &children) == 0)
1337 for (c = 0; c < children; c++)
1338 if ((ret = check_in_use(config, child[c], force,
1339 replacing, B_TRUE)) != 0)
1340 return (ret);
1341
1342 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1343 &child, &children) == 0)
1344 for (c = 0; c < children; c++)
1345 if ((ret = check_in_use(config, child[c], force,
1346 replacing, B_FALSE)) != 0)
1347 return (ret);
1348
1349 return (0);
1350 }
1351
1352 static const char *
1353 is_grouping(const char *type, int *mindev, int *maxdev)
1354 {
1355 if (strncmp(type, "raidz", 5) == 0) {
1356 const char *p = type + 5;
1357 char *end;
1358 long nparity;
1359
1360 if (*p == '\0') {
1361 nparity = 1;
1362 } else if (*p == '0') {
1363 return (NULL); /* no zero prefixes allowed */
1364 } else {
1365 errno = 0;
1366 nparity = strtol(p, &end, 10);
1367 if (errno != 0 || nparity < 1 || nparity >= 255 ||
1368 *end != '\0')
1369 return (NULL);
1370 }
1371
1372 if (mindev != NULL)
1373 *mindev = nparity + 1;
1374 if (maxdev != NULL)
1375 *maxdev = 255;
1376 return (VDEV_TYPE_RAIDZ);
1377 }
1378
1379 if (maxdev != NULL)
1380 *maxdev = INT_MAX;
1381
1382 if (strcmp(type, "mirror") == 0) {
1383 if (mindev != NULL)
1384 *mindev = 2;
1385 return (VDEV_TYPE_MIRROR);
1386 }
1387
1388 if (strcmp(type, "spare") == 0) {
1389 if (mindev != NULL)
1390 *mindev = 1;
1391 return (VDEV_TYPE_SPARE);
1392 }
1393
1394 if (strcmp(type, "log") == 0) {
1395 if (mindev != NULL)
1396 *mindev = 1;
1397 return (VDEV_TYPE_LOG);
1398 }
1399
1400 if (strcmp(type, "cache") == 0) {
1401 if (mindev != NULL)
1402 *mindev = 1;
1403 return (VDEV_TYPE_L2CACHE);
1404 }
1405
1406 return (NULL);
1407 }
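
/*
 * Examples (illustrative, not compiled) of how grouping keywords are parsed.
 */
#if 0
	const char *type;
	int mindev, maxdev;

	type = is_grouping("raidz3", &mindev, &maxdev);
	/* VDEV_TYPE_RAIDZ, mindev = 4, maxdev = 255 */
	type = is_grouping("mirror", &mindev, &maxdev);
	/* VDEV_TYPE_MIRROR, mindev = 2, maxdev = INT_MAX */
	type = is_grouping("sda", &mindev, &maxdev);
	/* NULL: not a grouping keyword, treated as a leaf device */
#endif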
1408
1409 /*
1410 * Construct a syntactically valid vdev specification,
1411 * and ensure that all devices and files exist and can be opened.
1412 * Note: we don't bother freeing anything in the error paths
1413 * because the program is just going to exit anyway.
1414 */
1415 nvlist_t *
1416 construct_spec(nvlist_t *props, int argc, char **argv)
1417 {
1418 nvlist_t *nvroot, *nv, **top, **spares, **l2cache;
1419 int t, toplevels, mindev, maxdev, nspares, nlogs, nl2cache;
1420 const char *type;
1421 uint64_t is_log;
1422 boolean_t seen_logs;
1423
1424 top = NULL;
1425 toplevels = 0;
1426 spares = NULL;
1427 l2cache = NULL;
1428 nspares = 0;
1429 nlogs = 0;
1430 nl2cache = 0;
1431 is_log = B_FALSE;
1432 seen_logs = B_FALSE;
1433
1434 while (argc > 0) {
1435 nv = NULL;
1436
1437 /*
1438 * If it's a mirror or raidz, the subsequent arguments are
1439 * its leaves -- until we encounter the next mirror or raidz.
1440 */
1441 if ((type = is_grouping(argv[0], &mindev, &maxdev)) != NULL) {
1442 nvlist_t **child = NULL;
1443 int c, children = 0;
1444
1445 if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
1446 if (spares != NULL) {
1447 (void) fprintf(stderr,
1448 gettext("invalid vdev "
1449 "specification: 'spare' can be "
1450 "specified only once\n"));
1451 return (NULL);
1452 }
1453 is_log = B_FALSE;
1454 }
1455
1456 if (strcmp(type, VDEV_TYPE_LOG) == 0) {
1457 if (seen_logs) {
1458 (void) fprintf(stderr,
1459 gettext("invalid vdev "
1460 "specification: 'log' can be "
1461 "specified only once\n"));
1462 return (NULL);
1463 }
1464 seen_logs = B_TRUE;
1465 is_log = B_TRUE;
1466 argc--;
1467 argv++;
1468 /*
1469 * A log is not a real grouping device.
1470 * We just set is_log and continue.
1471 */
1472 continue;
1473 }
1474
1475 if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
1476 if (l2cache != NULL) {
1477 (void) fprintf(stderr,
1478 gettext("invalid vdev "
1479 "specification: 'cache' can be "
1480 "specified only once\n"));
1481 return (NULL);
1482 }
1483 is_log = B_FALSE;
1484 }
1485
1486 if (is_log) {
1487 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
1488 (void) fprintf(stderr,
1489 gettext("invalid vdev "
1490 "specification: unsupported 'log' "
1491 "device: %s\n"), type);
1492 return (NULL);
1493 }
1494 nlogs++;
1495 }
1496
1497 for (c = 1; c < argc; c++) {
1498 if (is_grouping(argv[c], NULL, NULL) != NULL)
1499 break;
1500 children++;
1501 child = realloc(child,
1502 children * sizeof (nvlist_t *));
1503 if (child == NULL)
1504 zpool_no_memory();
1505 if ((nv = make_leaf_vdev(props, argv[c],
1506 B_FALSE)) == NULL)
1507 return (NULL);
1508 child[children - 1] = nv;
1509 }
1510
1511 if (children < mindev) {
1512 (void) fprintf(stderr, gettext("invalid vdev "
1513 "specification: %s requires at least %d "
1514 "devices\n"), argv[0], mindev);
1515 return (NULL);
1516 }
1517
1518 if (children > maxdev) {
1519 (void) fprintf(stderr, gettext("invalid vdev "
1520 "specification: %s supports no more than "
1521 "%d devices\n"), argv[0], maxdev);
1522 return (NULL);
1523 }
1524
1525 argc -= c;
1526 argv += c;
1527
1528 if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
1529 spares = child;
1530 nspares = children;
1531 continue;
1532 } else if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
1533 l2cache = child;
1534 nl2cache = children;
1535 continue;
1536 } else {
1537 verify(nvlist_alloc(&nv, NV_UNIQUE_NAME,
1538 0) == 0);
1539 verify(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE,
1540 type) == 0);
1541 verify(nvlist_add_uint64(nv,
1542 ZPOOL_CONFIG_IS_LOG, is_log) == 0);
1543 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
1544 verify(nvlist_add_uint64(nv,
1545 ZPOOL_CONFIG_NPARITY,
1546 mindev - 1) == 0);
1547 }
1548 verify(nvlist_add_nvlist_array(nv,
1549 ZPOOL_CONFIG_CHILDREN, child,
1550 children) == 0);
1551
1552 for (c = 0; c < children; c++)
1553 nvlist_free(child[c]);
1554 free(child);
1555 }
1556 } else {
1557 /*
1558 * We have a device. Pass off to make_leaf_vdev() to
1559 * construct the appropriate nvlist describing the vdev.
1560 */
1561 if ((nv = make_leaf_vdev(props, argv[0],
1562 is_log)) == NULL)
1563 return (NULL);
1564 if (is_log)
1565 nlogs++;
1566 argc--;
1567 argv++;
1568 }
1569
1570 toplevels++;
1571 top = realloc(top, toplevels * sizeof (nvlist_t *));
1572 if (top == NULL)
1573 zpool_no_memory();
1574 top[toplevels - 1] = nv;
1575 }
1576
1577 if (toplevels == 0 && nspares == 0 && nl2cache == 0) {
1578 (void) fprintf(stderr, gettext("invalid vdev "
1579 "specification: at least one toplevel vdev must be "
1580 "specified\n"));
1581 return (NULL);
1582 }
1583
1584 if (seen_logs && nlogs == 0) {
1585 (void) fprintf(stderr, gettext("invalid vdev specification: "
1586 "log requires at least 1 device\n"));
1587 return (NULL);
1588 }
1589
1590 /*
1591 * Finally, create nvroot and add all top-level vdevs to it.
1592 */
1593 verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
1594 verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
1595 VDEV_TYPE_ROOT) == 0);
1596 verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1597 top, toplevels) == 0);
1598 if (nspares != 0)
1599 verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1600 spares, nspares) == 0);
1601 if (nl2cache != 0)
1602 verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1603 l2cache, nl2cache) == 0);
1604
1605 for (t = 0; t < toplevels; t++)
1606 nvlist_free(top[t]);
1607 for (t = 0; t < nspares; t++)
1608 nvlist_free(spares[t]);
1609 for (t = 0; t < nl2cache; t++)
1610 nvlist_free(l2cache[t]);
1611 if (spares)
1612 free(spares);
1613 if (l2cache)
1614 free(l2cache);
1615 free(top);
1616
1617 return (nvroot);
1618 }
1619
1620 nvlist_t *
1621 split_mirror_vdev(zpool_handle_t *zhp, char *newname, nvlist_t *props,
1622 splitflags_t flags, int argc, char **argv)
1623 {
1624 nvlist_t *newroot = NULL, **child;
1625 uint_t c, children;
1626
1627 if (argc > 0) {
1628 if ((newroot = construct_spec(props, argc, argv)) == NULL) {
1629 (void) fprintf(stderr, gettext("Unable to build a "
1630 "pool from the specified devices\n"));
1631 return (NULL);
1632 }
1633
1634 if (!flags.dryrun && make_disks(zhp, newroot) != 0) {
1635 nvlist_free(newroot);
1636 return (NULL);
1637 }
1638
1639 /* avoid any tricks in the spec */
1640 verify(nvlist_lookup_nvlist_array(newroot,
1641 ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
1642 for (c = 0; c < children; c++) {
1643 char *path;
1644 const char *type;
1645 int min, max;
1646
1647 verify(nvlist_lookup_string(child[c],
1648 ZPOOL_CONFIG_PATH, &path) == 0);
1649 if ((type = is_grouping(path, &min, &max)) != NULL) {
1650 (void) fprintf(stderr, gettext("Cannot use "
1651 "'%s' as a device for splitting\n"), type);
1652 nvlist_free(newroot);
1653 return (NULL);
1654 }
1655 }
1656 }
1657
1658 if (zpool_vdev_split(zhp, newname, &newroot, props, flags) != 0) {
1659 if (newroot != NULL)
1660 nvlist_free(newroot);
1661 return (NULL);
1662 }
1663
1664 return (newroot);
1665 }
1666
1667 /*
1668 * Get and validate the contents of the given vdev specification. This ensures
1669 * that the nvlist returned is well-formed, that all the devices exist, and that
1670 * they are not currently in use by any other known consumer. The 'poolconfig'
1671  * parameter is the current configuration of the pool when adding devices to an
1672  * existing pool, and is used to perform additional checks, such as validating the
1673 * replication level of the pool. It can be 'NULL' to indicate that this is a
1674 * new pool. The 'force' flag controls whether devices should be forcefully
1675 * added, even if they appear in use.
1676 */
1677 nvlist_t *
1678 make_root_vdev(zpool_handle_t *zhp, nvlist_t *props, int force, int check_rep,
1679 boolean_t replacing, boolean_t dryrun, int argc, char **argv)
1680 {
1681 nvlist_t *newroot;
1682 nvlist_t *poolconfig = NULL;
1683 is_force = force;
1684
1685 /*
1686 * Construct the vdev specification. If this is successful, we know
1687 * that we have a valid specification, and that all devices can be
1688 * opened.
1689 */
1690 if ((newroot = construct_spec(props, argc, argv)) == NULL)
1691 return (NULL);
1692
1693 if (zhp && ((poolconfig = zpool_get_config(zhp, NULL)) == NULL))
1694 return (NULL);
1695
1696 /*
1697 	 * Validate each device to make sure that it's not shared with another
1698 * subsystem. We do this even if 'force' is set, because there are some
1699 * uses (such as a dedicated dump device) that even '-f' cannot
1700 * override.
1701 */
1702 if (check_in_use(poolconfig, newroot, force, replacing, B_FALSE) != 0) {
1703 nvlist_free(newroot);
1704 return (NULL);
1705 }
1706
1707 /*
1708 * Check the replication level of the given vdevs and report any errors
1709 * found. We include the existing pool spec, if any, as we need to
1710 * catch changes against the existing replication level.
1711 */
1712 if (check_rep && check_replication(poolconfig, newroot) != 0) {
1713 nvlist_free(newroot);
1714 return (NULL);
1715 }
1716
1717 /*
1718 * Run through the vdev specification and label any whole disks found.
1719 */
1720 if (!dryrun && make_disks(zhp, newroot) != 0) {
1721 nvlist_free(newroot);
1722 return (NULL);
1723 }
1724
1725 return (newroot);
1726 }