/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2016 Intel Corporation.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
 */

/*
 * Functions to convert between a list of vdevs and an nvlist representing the
 * configuration.  Each entry in the list can be one of:
 *
 *	Device vdevs
 *		disk=(path=..., devid=...)
 *		file=(path=...)
 *
 *	Group vdevs
 *		raidz[1|2]=(...)
 *		mirror=(...)
 *
 *	Hot spares
 *
 * While the underlying implementation supports it, group vdevs cannot contain
 * other group vdevs.  All userland verification of devices is contained within
 * this file.  If successful, the nvlist returned can be passed directly to the
 * kernel; we've done as much verification as possible in userland.
 *
 * Hot spares are a special case, and passed down as an array of disk vdevs, at
 * the same level as the root of the vdev tree.
 *
 * The only function exported by this file is 'make_root_vdev'.  The
 * function performs several passes:
 *
 *	1. Construct the vdev specification.  Performs syntax validation and
 *	   makes sure each device is valid.
 *	2. Check for devices in use.  Uses libblkid to make sure that no
 *	   devices are also in use.  Some can be overridden using the 'force'
 *	   flag, others cannot.
 *	3. Check for replication errors if the 'force' flag is not specified.
 *	   Validates that the replication level is consistent across the
 *	   entire pool.
 *	4. Call libzfs to label any whole disks with an EFI label.
 */

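/*
 * For illustration (not part of the original sources; device names are
 * hypothetical): a command line such as
 *
 *	zpool create tank mirror sda sdb log mirror sdc sdd
 *
 * is parsed by construct_spec() below into an nvlist tree roughly like:
 *
 *	root
 *	    mirror (is_log=0)
 *		disk=(path=/dev/sda)
 *		disk=(path=/dev/sdb)
 *	    mirror (is_log=1)
 *		disk=(path=/dev/sdc)
 *		disk=(path=/dev/sdd)
 *
 * The authoritative encoding is built up in make_leaf_vdev() and
 * construct_spec().
 */
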
#include <assert.h>
#include <ctype.h>
#include <devid.h>
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <libnvpair.h>
#include <limits.h>
#include <sys/spa.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/efi_partition.h>
#include <sys/stat.h>
#include <sys/vtoc.h>
#include <sys/mntent.h>
#include <uuid/uuid.h>
#include <blkid/blkid.h>
#include "zpool_util.h"
#include <sys/zfs_context.h>

/*
 * For any given vdev specification, we can have multiple errors.  The
 * vdev_error() function keeps track of whether we have seen an error yet, and
 * prints out a header if it's the first error we've seen.
 */
boolean_t error_seen;
boolean_t is_force;

typedef struct vdev_disk_db_entry
{
	char id[24];
	int sector_size;
} vdev_disk_db_entry_t;

/*
 * Database of block devices that lie about physical sector sizes.  The
 * identification string must be precisely 24 characters to avoid false
 * negatives
 */
static vdev_disk_db_entry_t vdev_disk_database[] = {
	{"ATA     ADATA SSD S396 3", 8192},
	{"ATA     APPLE SSD SM128E", 8192},
	{"ATA     APPLE SSD SM256E", 8192},
	{"ATA     APPLE SSD SM512E", 8192},
	{"ATA     APPLE SSD SM768E", 8192},
	{"ATA     C400-MTFDDAC064M", 8192},
	{"ATA     C400-MTFDDAC128M", 8192},
	{"ATA     C400-MTFDDAC256M", 8192},
	{"ATA     C400-MTFDDAC512M", 8192},
	{"ATA     Corsair Force 3 ", 8192},
	{"ATA     Corsair Force GS", 8192},
	{"ATA     INTEL SSDSA2CT04", 8192},
	{"ATA     INTEL SSDSA2BZ10", 8192},
	{"ATA     INTEL SSDSA2BZ20", 8192},
	{"ATA     INTEL SSDSA2BZ30", 8192},
	{"ATA     INTEL SSDSA2CW04", 8192},
	{"ATA     INTEL SSDSA2CW08", 8192},
	{"ATA     INTEL SSDSA2CW12", 8192},
	{"ATA     INTEL SSDSA2CW16", 8192},
	{"ATA     INTEL SSDSA2CW30", 8192},
	{"ATA     INTEL SSDSA2CW60", 8192},
	{"ATA     INTEL SSDSC2CT06", 8192},
	{"ATA     INTEL SSDSC2CT12", 8192},
	{"ATA     INTEL SSDSC2CT18", 8192},
	{"ATA     INTEL SSDSC2CT24", 8192},
	{"ATA     INTEL SSDSC2CW06", 8192},
	{"ATA     INTEL SSDSC2CW12", 8192},
	{"ATA     INTEL SSDSC2CW18", 8192},
	{"ATA     INTEL SSDSC2CW24", 8192},
	{"ATA     INTEL SSDSC2CW48", 8192},
	{"ATA     KINGSTON SH100S3", 8192},
	{"ATA     KINGSTON SH103S3", 8192},
	{"ATA     M4-CT064M4SSD2  ", 8192},
	{"ATA     M4-CT128M4SSD2  ", 8192},
	{"ATA     M4-CT256M4SSD2  ", 8192},
	{"ATA     M4-CT512M4SSD2  ", 8192},
	{"ATA     OCZ-AGILITY2    ", 8192},
	{"ATA     OCZ-AGILITY3    ", 8192},
	{"ATA     OCZ-VERTEX2 3.5 ", 8192},
	{"ATA     OCZ-VERTEX3     ", 8192},
	{"ATA     OCZ-VERTEX3 LT  ", 8192},
	{"ATA     OCZ-VERTEX3 MI  ", 8192},
	{"ATA     OCZ-VERTEX4     ", 8192},
	{"ATA     SAMSUNG MZ7WD120", 8192},
	{"ATA     SAMSUNG MZ7WD240", 8192},
	{"ATA     SAMSUNG MZ7WD480", 8192},
	{"ATA     SAMSUNG MZ7WD960", 8192},
	{"ATA     SAMSUNG SSD 830 ", 8192},
	{"ATA     Samsung SSD 840 ", 8192},
	{"ATA     SanDisk SSD U100", 8192},
	{"ATA     TOSHIBA THNSNH06", 8192},
	{"ATA     TOSHIBA THNSNH12", 8192},
	{"ATA     TOSHIBA THNSNH25", 8192},
	{"ATA     TOSHIBA THNSNH51", 8192},
	{"ATA     APPLE SSD TS064C", 4096},
	{"ATA     APPLE SSD TS128C", 4096},
	{"ATA     APPLE SSD TS256C", 4096},
	{"ATA     APPLE SSD TS512C", 4096},
	{"ATA     INTEL SSDSA2M040", 4096},
	{"ATA     INTEL SSDSA2M080", 4096},
	{"ATA     INTEL SSDSA2M160", 4096},
	{"ATA     INTEL SSDSC2MH12", 4096},
	{"ATA     INTEL SSDSC2MH25", 4096},
	{"ATA     OCZ CORE_SSD    ", 4096},
	{"ATA     OCZ-VERTEX      ", 4096},
	{"ATA     SAMSUNG MCCOE32G", 4096},
	{"ATA     SAMSUNG MCCOE64G", 4096},
	{"ATA     SAMSUNG SSD PM80", 4096},
	/* Flash drives optimized for 4KB IOs on larger pages */
	{"ATA     INTEL SSDSC2BA10", 4096},
	{"ATA     INTEL SSDSC2BA20", 4096},
	{"ATA     INTEL SSDSC2BA40", 4096},
	{"ATA     INTEL SSDSC2BA80", 4096},
	{"ATA     INTEL SSDSC2BB08", 4096},
	{"ATA     INTEL SSDSC2BB12", 4096},
	{"ATA     INTEL SSDSC2BB16", 4096},
	{"ATA     INTEL SSDSC2BB24", 4096},
	{"ATA     INTEL SSDSC2BB30", 4096},
	{"ATA     INTEL SSDSC2BB40", 4096},
	{"ATA     INTEL SSDSC2BB48", 4096},
	{"ATA     INTEL SSDSC2BB60", 4096},
	{"ATA     INTEL SSDSC2BB80", 4096},
	{"ATA     INTEL SSDSC2BW24", 4096},
	{"ATA     INTEL SSDSC2BP24", 4096},
	{"ATA     INTEL SSDSC2BP48", 4096},
	{"NA      SmrtStorSDLKAE9W", 4096},
	{"NVMe    Amazon EC2 NVMe ", 4096},
	/* Imported from Open Solaris */
	{"ATA     MARVELL SD88SA02", 4096},
	/* Advanced format Hard drives */
	{"ATA     Hitachi HDS5C303", 4096},
	{"ATA     SAMSUNG HD204UI ", 4096},
	{"ATA     ST2000DL004 HD20", 4096},
	{"ATA     WDC WD10EARS-00M", 4096},
	{"ATA     WDC WD10EARS-00S", 4096},
	{"ATA     WDC WD10EARS-00Z", 4096},
	{"ATA     WDC WD15EARS-00M", 4096},
	{"ATA     WDC WD15EARS-00S", 4096},
	{"ATA     WDC WD15EARS-00Z", 4096},
	{"ATA     WDC WD20EARS-00M", 4096},
	{"ATA     WDC WD20EARS-00S", 4096},
	{"ATA     WDC WD20EARS-00Z", 4096},
	{"ATA     WDC WD1600BEVT-0", 4096},
	{"ATA     WDC WD2500BEVT-0", 4096},
	{"ATA     WDC WD3200BEVT-0", 4096},
	{"ATA     WDC WD5000BEVT-0", 4096},
	/* Virtual disks: Assume zvols with default volblocksize */
#if 0
	{"ATA     QEMU HARDDISK   ", 8192},
	{"IET     VIRTUAL-DISK    ", 8192},
	{"OI      COMSTAR         ", 8192},
	{"SUN     COMSTAR         ", 8192},
	{"NETAPP  LUN             ", 8192},
#endif
};

static const int vdev_disk_database_size =
	sizeof (vdev_disk_database) / sizeof (vdev_disk_database[0]);

#define	INQ_REPLY_LEN	96
#define	INQ_CMD_LEN	6

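/*
 * Note (added for clarity): the 24-character identification strings above
 * are matched against bytes 8-31 of the standard INQUIRY response, which
 * hold the space-padded 8-byte vendor ID followed by the 16-byte product
 * ID.  That is why every entry must be padded to exactly 24 characters.
 */
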
static boolean_t
check_sector_size_database(char *path, int *sector_size)
{
	unsigned char inq_buff[INQ_REPLY_LEN];
	unsigned char sense_buffer[32];
	unsigned char inq_cmd_blk[INQ_CMD_LEN] =
	    {INQUIRY, 0, 0, 0, INQ_REPLY_LEN, 0};
	sg_io_hdr_t io_hdr;
	int error;
	int fd;
	int i;

	/* Prepare INQUIRY command */
	memset(&io_hdr, 0, sizeof (sg_io_hdr_t));
	io_hdr.interface_id = 'S';
	io_hdr.cmd_len = sizeof (inq_cmd_blk);
	io_hdr.mx_sb_len = sizeof (sense_buffer);
	io_hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	io_hdr.dxfer_len = INQ_REPLY_LEN;
	io_hdr.dxferp = inq_buff;
	io_hdr.cmdp = inq_cmd_blk;
	io_hdr.sbp = sense_buffer;
	io_hdr.timeout = 10;		/* 10 milliseconds is ample time */

	if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
		return (B_FALSE);

	error = ioctl(fd, SG_IO, (unsigned long) &io_hdr);

	(void) close(fd);

	if (error < 0)
		return (B_FALSE);

	if ((io_hdr.info & SG_INFO_OK_MASK) != SG_INFO_OK)
		return (B_FALSE);

	for (i = 0; i < vdev_disk_database_size; i++) {
		if (memcmp(inq_buff + 8, vdev_disk_database[i].id, 24))
			continue;

		*sector_size = vdev_disk_database[i].sector_size;
		return (B_TRUE);
	}

	return (B_FALSE);
}

/*PRINTFLIKE1*/
static void
vdev_error(const char *fmt, ...)
{
	va_list ap;

	if (!error_seen) {
		(void) fprintf(stderr, gettext("invalid vdev specification\n"));
		if (!is_force)
			(void) fprintf(stderr, gettext("use '-f' to override "
			    "the following errors:\n"));
		else
			(void) fprintf(stderr, gettext("the following errors "
			    "must be manually repaired:\n"));
		error_seen = B_TRUE;
	}

	va_start(ap, fmt);
	(void) vfprintf(stderr, fmt, ap);
	va_end(ap);
}

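/*
 * Example output (for illustration only): the first vdev_error() call made
 * without the force flag prints a header followed by the specific message,
 * e.g.
 *
 *	invalid vdev specification
 *	use '-f' to override the following errors:
 *	/dev/sdb1 is part of active pool 'tank'
 *
 * Subsequent calls append only their own message.
 */
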
/*
 * Check that a file is valid.  All we can do in this case is check that it's
 * not in use by another pool, and not in use by swap.
 */
static int
check_file(const char *file, boolean_t force, boolean_t isspare)
{
	char *name;
	int fd;
	int ret = 0;
	pool_state_t state;
	boolean_t inuse;

	if ((fd = open(file, O_RDONLY)) < 0)
		return (0);

	if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) == 0 && inuse) {
		const char *desc;

		switch (state) {
		case POOL_STATE_ACTIVE:
			desc = gettext("active");
			break;

		case POOL_STATE_EXPORTED:
			desc = gettext("exported");
			break;

		case POOL_STATE_POTENTIALLY_ACTIVE:
			desc = gettext("potentially active");
			break;

		default:
			desc = gettext("unknown");
			break;
		}

		/*
		 * Allow hot spares to be shared between pools.
		 */
		if (state == POOL_STATE_SPARE && isspare) {
			free(name);
			(void) close(fd);
			return (0);
		}

		if (state == POOL_STATE_ACTIVE ||
		    state == POOL_STATE_SPARE || !force) {
			switch (state) {
			case POOL_STATE_SPARE:
				vdev_error(gettext("%s is reserved as a hot "
				    "spare for pool %s\n"), file, name);
				break;
			default:
				vdev_error(gettext("%s is part of %s pool "
				    "'%s'\n"), file, desc, name);
				break;
			}
			ret = -1;
		}

		free(name);
	}

	(void) close(fd);
	return (ret);
}

static int
check_slice(const char *path, blkid_cache cache, int force, boolean_t isspare)
{
	int err;
	char *value;

	/* If no valid type was detected, the device is safe to use. */
	value = blkid_get_tag_value(cache, "TYPE", path);
	if (value == NULL)
		return (0);

	/*
	 * If libblkid detects a ZFS device, we check the device
	 * using check_file() to see if it's safe.  The one safe
	 * case is a spare device shared between multiple pools.
	 */
	if (strcmp(value, "zfs_member") == 0) {
		err = check_file(path, force, isspare);
	} else {
		if (force) {
			err = 0;
		} else {
			err = -1;
			vdev_error(gettext("%s contains a filesystem of "
			    "type '%s'\n"), path, value);
		}
	}

	free(value);

	return (err);
}

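/*
 * For example (illustrative): a slice that still carries an ext4
 * superblock is reported by libblkid as TYPE "ext4", and without -f the
 * error "/dev/sdb1 contains a filesystem of type 'ext4'" is emitted.
 */
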
/*
 * Validate that a disk, including all of its partitions, is safe to use.
 *
 * For EFI labeled disks this can be done relatively easily with the libefi
 * library.  The partition numbers are extracted from the label and used
 * to generate the expected /dev/ paths.  Each partition can then be
 * checked for conflicts.
 *
 * For non-EFI labeled disks (MBR/EBR/etc) the same process is possible
 * but due to the lack of readily available libraries this scanning is
 * not implemented.  Instead only the device path as given is checked.
 */
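/*
 * Partition paths are derived as follows (examples added for clarity;
 * device names are illustrative):
 *
 *	/dev/sda		->  /dev/sda1, /dev/sda2, ...
 *	/dev/nvme0n1		->  /dev/nvme0n1p1, ...  (trailing digit)
 *	/dev/disk/by-id/X	->  /dev/disk/by-id/X-part1, ...
 *
 * assuming UDISK_ROOT names the /dev/disk hierarchy.
 */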
static int
check_disk(const char *path, blkid_cache cache, int force,
    boolean_t isspare, boolean_t iswholedisk)
{
	struct dk_gpt *vtoc;
	char slice_path[MAXPATHLEN];
	int err = 0;
	int fd, i;

	if (!iswholedisk)
		return (check_slice(path, cache, force, isspare));

	if ((fd = open(path, O_RDONLY|O_DIRECT|O_EXCL)) < 0) {
		char *value = blkid_get_tag_value(cache, "TYPE", path);
		(void) fprintf(stderr, gettext("%s is in use and contains "
		    "a %s filesystem.\n"), path, value ? value : "unknown");
		return (-1);
	}

	/*
	 * Expected to fail for non-EFI labeled disks.  Just check the device
	 * as given and do not attempt to detect and scan partitions.
	 */
	err = efi_alloc_and_read(fd, &vtoc);
	if (err) {
		(void) close(fd);
		return (check_slice(path, cache, force, isspare));
	}

	/*
	 * If the primary EFI partition label is damaged but the secondary
	 * label at the end of the device is intact, then rather than use the
	 * backup label we play it safe and treat this as a non-EFI device.
	 */
	if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
		efi_free(vtoc);
		(void) close(fd);

		if (force) {
			/* Partitions will now be created using the backup */
			return (0);
		} else {
			vdev_error(gettext("%s contains a corrupt primary "
			    "EFI label.\n"), path);
			return (-1);
		}
	}

	for (i = 0; i < vtoc->efi_nparts; i++) {

		if (vtoc->efi_parts[i].p_tag == V_UNASSIGNED ||
		    uuid_is_null((uchar_t *)&vtoc->efi_parts[i].p_guid))
			continue;

		if (strncmp(path, UDISK_ROOT, strlen(UDISK_ROOT)) == 0)
			(void) snprintf(slice_path, sizeof (slice_path),
			    "%s%s%d", path, "-part", i+1);
		else
			(void) snprintf(slice_path, sizeof (slice_path),
			    "%s%s%d", path, isdigit(path[strlen(path)-1]) ?
			    "p" : "", i+1);

		err = check_slice(slice_path, cache, force, isspare);
		if (err)
			break;
	}

	efi_free(vtoc);
	(void) close(fd);

	return (err);
}

static int
check_device(const char *path, boolean_t force,
    boolean_t isspare, boolean_t iswholedisk)
{
	blkid_cache cache;
	int error;

	error = blkid_get_cache(&cache, NULL);
	if (error != 0) {
		(void) fprintf(stderr, gettext("unable to access the blkid "
		    "cache.\n"));
		return (-1);
	}

	error = check_disk(path, cache, force, isspare, iswholedisk);
	blkid_put_cache(cache);

	return (error);
}

/*
 * This may be a shorthand device path or it could be total gibberish.
 * Check to see if it is a known device available in zfs_vdev_paths.
 * As part of this check, see if we've been given an entire disk
 * (minus the slice number).
 */
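/*
 * For example (names illustrative): the shorthand "sda" would resolve to
 * "/dev/sda" and set *wholedisk, while "sda1" would resolve to "/dev/sda1"
 * with *wholedisk clear.
 */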
static int
is_shorthand_path(const char *arg, char *path, size_t path_size,
    struct stat64 *statbuf, boolean_t *wholedisk)
{
	int error;

	error = zfs_resolve_shortname(arg, path, path_size);
	if (error == 0) {
		*wholedisk = zfs_dev_is_whole_disk(path);
		if (*wholedisk || (stat64(path, statbuf) == 0))
			return (0);
	}

	strlcpy(path, arg, path_size);
	memset(statbuf, 0, sizeof (*statbuf));
	*wholedisk = B_FALSE;

	return (error);
}

/*
 * Determine if the given path is a hot spare within the given configuration.
 * If no configuration is given we rely solely on the label.
 */
static boolean_t
is_spare(nvlist_t *config, const char *path)
{
	int fd;
	pool_state_t state;
	char *name = NULL;
	nvlist_t *label;
	uint64_t guid, spareguid;
	nvlist_t *nvroot;
	nvlist_t **spares;
	uint_t i, nspares;
	boolean_t inuse;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (B_FALSE);

	if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0 ||
	    !inuse ||
	    state != POOL_STATE_SPARE ||
	    zpool_read_label(fd, &label, NULL) != 0) {
		free(name);
		(void) close(fd);
		return (B_FALSE);
	}
	free(name);
	(void) close(fd);

	if (config == NULL) {
		nvlist_free(label);
		return (B_TRUE);
	}

	verify(nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) == 0);
	nvlist_free(label);

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		for (i = 0; i < nspares; i++) {
			verify(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &spareguid) == 0);
			if (spareguid == guid)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

/*
 * Create a leaf vdev.  Determine if this is a file or a device.  If it's a
 * device, fill in the device id to make a complete nvlist.  Valid forms for a
 * leaf vdev are:
 *
 *	/dev/xxx	Complete disk path
 *	/xxx		Full path to file
 *	xxx		Shorthand for <zfs_vdev_paths>/xxx
 */
static nvlist_t *
make_leaf_vdev(nvlist_t *props, const char *arg, uint64_t is_log)
{
	char path[MAXPATHLEN];
	struct stat64 statbuf;
	nvlist_t *vdev = NULL;
	char *type = NULL;
	boolean_t wholedisk = B_FALSE;
	uint64_t ashift = 0;
	int err;

	/*
	 * Determine what type of vdev this is, and put the full path into
	 * 'path'.  We detect whether this is a device or file afterwards by
	 * checking the st_mode of the file.
	 */
	if (arg[0] == '/') {
		/*
		 * Complete device or file path.  Exact type is determined by
		 * examining the file descriptor afterwards.  Symbolic links
		 * are resolved to their real paths to determine whole disk
		 * and S_ISBLK/S_ISREG type checks.  However, we are careful
		 * to store the given path as ZPOOL_CONFIG_PATH to ensure we
		 * can leverage udev's persistent device labels.
		 */
		if (realpath(arg, path) == NULL) {
			(void) fprintf(stderr,
			    gettext("cannot resolve path '%s'\n"), arg);
			return (NULL);
		}

		wholedisk = zfs_dev_is_whole_disk(path);
		if (!wholedisk && (stat64(path, &statbuf) != 0)) {
			(void) fprintf(stderr,
			    gettext("cannot open '%s': %s\n"),
			    path, strerror(errno));
			return (NULL);
		}

		/* After whole disk check restore original passed path */
		strlcpy(path, arg, sizeof (path));
	} else {
		err = is_shorthand_path(arg, path, sizeof (path),
		    &statbuf, &wholedisk);
		if (err != 0) {
			/*
			 * If we got ENOENT, then the user gave us
			 * gibberish, so try to direct them with a
			 * reasonable error message.  Otherwise,
			 * regurgitate strerror() since it's the best we
			 * can do.
			 */
			if (err == ENOENT) {
				(void) fprintf(stderr,
				    gettext("cannot open '%s': no such "
				    "device in %s\n"), arg, DISK_ROOT);
				(void) fprintf(stderr,
				    gettext("must be a full path or "
				    "shorthand device name\n"));
				return (NULL);
			} else {
				(void) fprintf(stderr,
				    gettext("cannot open '%s': %s\n"),
				    path, strerror(errno));
				return (NULL);
			}
		}
	}

	/*
	 * Determine whether this is a device or a file.
	 */
	if (wholedisk || S_ISBLK(statbuf.st_mode)) {
		type = VDEV_TYPE_DISK;
	} else if (S_ISREG(statbuf.st_mode)) {
		type = VDEV_TYPE_FILE;
	} else {
		(void) fprintf(stderr, gettext("cannot use '%s': must be a "
		    "block device or regular file\n"), path);
		return (NULL);
	}

	/*
	 * Finally, we have the complete device or file, and we know that it is
	 * acceptable to use.  Construct the nvlist to describe this vdev.  All
	 * vdevs have a 'path' element, and devices also have a 'devid' element.
	 */
	verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, path) == 0);
	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, type) == 0);
	verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_LOG, is_log) == 0);
	if (strcmp(type, VDEV_TYPE_DISK) == 0)
		verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK,
		    (uint64_t)wholedisk) == 0);

	/*
	 * Override defaults if custom properties are provided.
	 */
	if (props != NULL) {
		char *value = NULL;

		if (nvlist_lookup_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ASHIFT), &value) == 0) {
			if (zfs_nicestrtonum(NULL, value, &ashift) != 0) {
				(void) fprintf(stderr,
				    gettext("ashift must be a number.\n"));
				return (NULL);
			}
			if (ashift != 0 &&
			    (ashift < ASHIFT_MIN || ashift > ASHIFT_MAX)) {
				(void) fprintf(stderr,
				    gettext("invalid 'ashift=%" PRIu64 "' "
				    "property: only values between %" PRId32 " "
				    "and %" PRId32 " are allowed.\n"),
				    ashift, ASHIFT_MIN, ASHIFT_MAX);
				return (NULL);
			}
		}
	}

	/*
	 * If the device is known to incorrectly report its physical sector
	 * size explicitly provide the known correct value.
	 */
	if (ashift == 0) {
		int sector_size;

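		/*
		 * Note (added for clarity): highbit64() returns the 1-based
		 * index of the highest set bit, so a reported 4096-byte
		 * sector gives highbit64(4096) - 1 == 12, i.e. ashift=12,
		 * and 8192 gives ashift=13.
		 */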
		if (check_sector_size_database(path, &sector_size) == B_TRUE)
			ashift = highbit64(sector_size) - 1;
	}

	if (ashift > 0)
		(void) nvlist_add_uint64(vdev, ZPOOL_CONFIG_ASHIFT, ashift);

	return (vdev);
}

/*
 * Go through and verify the replication level of the pool is consistent.
 * Performs the following checks:
 *
 *	For the new spec, verifies that devices in mirrors and raidz are the
 *	same size.
 *
 *	If the current configuration already has inconsistent replication
 *	levels, ignore any other potential problems in the new spec.
 *
 *	Otherwise, make sure that the current spec (if there is one) and the new
 *	spec have consistent replication levels.
 */
typedef struct replication_level {
	char *zprl_type;
	uint64_t zprl_children;
	uint64_t zprl_parity;
} replication_level_t;

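/*
 * Examples (added for clarity): a two-way mirror is described as
 * { "mirror", 2, 0 }, and a six-disk raidz2 as { "raidz", 6, 2 }.
 */
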
#define	ZPOOL_FUZZ	(16 * 1024 * 1024)

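/*
 * If 'a' is a raidz level and 'b' is a mirror, return them through 'raidz'
 * and 'mirror'.  A raidz with parity p and an n-way mirror tolerate the
 * same number of disk failures when p == n - 1, which is the comparison
 * the callers below perform.  (Comment added for clarity.)
 */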
static boolean_t
is_raidz_mirror(replication_level_t *a, replication_level_t *b,
    replication_level_t **raidz, replication_level_t **mirror)
{
	if (strcmp(a->zprl_type, "raidz") == 0 &&
	    strcmp(b->zprl_type, "mirror") == 0) {
		*raidz = a;
		*mirror = b;
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Given a list of toplevel vdevs, return the current replication level.  If
 * the config is inconsistent, then NULL is returned.  If 'fatal' is set, then
 * an error message will be displayed for each self-inconsistent vdev.
 */
static replication_level_t *
get_replication(nvlist_t *nvroot, boolean_t fatal)
{
	nvlist_t **top;
	uint_t t, toplevels;
	nvlist_t **child;
	uint_t c, children;
	nvlist_t *nv;
	char *type;
	replication_level_t lastrep = {0};
	replication_level_t rep;
	replication_level_t *ret;
	replication_level_t *raidz, *mirror;
	boolean_t dontreport;

	ret = safe_malloc(sizeof (replication_level_t));

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);

	for (t = 0; t < toplevels; t++) {
		uint64_t is_log = B_FALSE;

		nv = top[t];

		/*
		 * For separate logs we ignore the top level vdev replication
		 * constraints.
		 */
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log);
		if (is_log)
			continue;

		/* Ignore holes introduced by removing aux devices */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
		if (strcmp(type, VDEV_TYPE_HOLE) == 0)
			continue;

		if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
		    &child, &children) != 0) {
			/*
			 * This is a 'file' or 'disk' vdev.
			 */
			rep.zprl_type = type;
			rep.zprl_children = 1;
			rep.zprl_parity = 0;
		} else {
			uint64_t vdev_size;

			/*
			 * This is a mirror or RAID-Z vdev.  Go through and make
			 * sure the contents are all the same (files vs. disks),
			 * keeping track of the number of elements in the
			 * process.
			 *
			 * We also check that the size of each vdev (if it can
			 * be determined) is the same.
			 */
			rep.zprl_type = type;
			rep.zprl_children = 0;

			if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
				verify(nvlist_lookup_uint64(nv,
				    ZPOOL_CONFIG_NPARITY,
				    &rep.zprl_parity) == 0);
				assert(rep.zprl_parity != 0);
			} else {
				rep.zprl_parity = 0;
			}

			/*
			 * The 'dontreport' variable indicates that we've
			 * already reported an error for this spec, so don't
			 * bother doing it again.
			 */
			type = NULL;
			dontreport = 0;
			vdev_size = -1ULL;
			for (c = 0; c < children; c++) {
				nvlist_t *cnv = child[c];
				char *path;
				struct stat64 statbuf;
				uint64_t size = -1ULL;
				char *childtype;
				int fd, err;

				rep.zprl_children++;

				verify(nvlist_lookup_string(cnv,
				    ZPOOL_CONFIG_TYPE, &childtype) == 0);

				/*
				 * If this is a replacing or spare vdev, then
				 * get the real first child of the vdev: do this
				 * in a loop because replacing and spare vdevs
				 * can be nested.
				 */
				while (strcmp(childtype,
				    VDEV_TYPE_REPLACING) == 0 ||
				    strcmp(childtype, VDEV_TYPE_SPARE) == 0) {
					nvlist_t **rchild;
					uint_t rchildren;

					verify(nvlist_lookup_nvlist_array(cnv,
					    ZPOOL_CONFIG_CHILDREN, &rchild,
					    &rchildren) == 0);
					assert(rchildren == 2);
					cnv = rchild[0];

					verify(nvlist_lookup_string(cnv,
					    ZPOOL_CONFIG_TYPE,
					    &childtype) == 0);
				}

				verify(nvlist_lookup_string(cnv,
				    ZPOOL_CONFIG_PATH, &path) == 0);

				/*
				 * If we have a raidz/mirror that combines disks
				 * with files, report it as an error.
				 */
				if (!dontreport && type != NULL &&
				    strcmp(type, childtype) != 0) {
					if (ret != NULL)
						free(ret);
					ret = NULL;
					if (fatal)
						vdev_error(gettext(
						    "mismatched replication "
						    "level: %s contains both "
						    "files and devices\n"),
						    rep.zprl_type);
					else
						return (NULL);
					dontreport = B_TRUE;
				}

				/*
				 * According to stat(2), the value of 'st_size'
				 * is undefined for block devices and character
				 * devices.  But there is no effective way to
				 * determine the real size in userland.
				 *
				 * Instead, we'll take advantage of an
				 * implementation detail of spec_size().  If the
				 * device is currently open, then we (should)
				 * return a valid size.
				 *
				 * If we still don't get a valid size (indicated
				 * by a size of 0 or MAXOFFSET_T), then ignore
				 * this device altogether.
				 */
				if ((fd = open(path, O_RDONLY)) >= 0) {
					err = fstat64_blk(fd, &statbuf);
					(void) close(fd);
				} else {
					err = stat64(path, &statbuf);
				}

				if (err != 0 ||
				    statbuf.st_size == 0 ||
				    statbuf.st_size == MAXOFFSET_T)
					continue;

				size = statbuf.st_size;

				/*
				 * Also make sure that devices and
				 * slices have a consistent size.  If
				 * they differ by a significant amount
				 * (~16MB) then report an error.
				 */
				if (!dontreport &&
				    (vdev_size != -1ULL &&
				    (labs(size - vdev_size) >
				    ZPOOL_FUZZ))) {
					if (ret != NULL)
						free(ret);
					ret = NULL;
					if (fatal)
						vdev_error(gettext(
						    "%s contains devices of "
						    "different sizes\n"),
						    rep.zprl_type);
					else
						return (NULL);
					dontreport = B_TRUE;
				}

				type = childtype;
				vdev_size = size;
			}
		}

		/*
		 * At this point, we have the replication of the last toplevel
		 * vdev in 'rep'.  Compare it to 'lastrep' to see if it's
		 * different.
		 */
		if (lastrep.zprl_type != NULL) {
			if (is_raidz_mirror(&lastrep, &rep, &raidz, &mirror) ||
			    is_raidz_mirror(&rep, &lastrep, &raidz, &mirror)) {
				/*
				 * Accept raidz and mirror together when they
				 * can handle the same number of disk failures.
				 */
				if (raidz->zprl_parity !=
				    mirror->zprl_children - 1) {
					if (ret != NULL)
						free(ret);
					ret = NULL;
					if (fatal)
						vdev_error(gettext(
						    "mismatched replication "
						    "level: "
						    "%s and %s vdevs with "
						    "different redundancy, "
						    "%llu vs. %llu (%llu-way) "
						    "are present\n"),
						    raidz->zprl_type,
						    mirror->zprl_type,
						    raidz->zprl_parity,
						    mirror->zprl_children - 1,
						    mirror->zprl_children);
					else
						return (NULL);
				}
			} else if (strcmp(lastrep.zprl_type, rep.zprl_type) !=
			    0) {
				if (ret != NULL)
					free(ret);
				ret = NULL;
				if (fatal)
					vdev_error(gettext(
					    "mismatched replication level: "
					    "both %s and %s vdevs are "
					    "present\n"),
					    lastrep.zprl_type, rep.zprl_type);
				else
					return (NULL);
			} else if (lastrep.zprl_parity != rep.zprl_parity) {
				if (ret)
					free(ret);
				ret = NULL;
				if (fatal)
					vdev_error(gettext(
					    "mismatched replication level: "
					    "both %llu and %llu device parity "
					    "%s vdevs are present\n"),
					    lastrep.zprl_parity,
					    rep.zprl_parity,
					    rep.zprl_type);
				else
					return (NULL);
			} else if (lastrep.zprl_children != rep.zprl_children) {
				if (ret)
					free(ret);
				ret = NULL;
				if (fatal)
					vdev_error(gettext(
					    "mismatched replication level: "
					    "both %llu-way and %llu-way %s "
					    "vdevs are present\n"),
					    lastrep.zprl_children,
					    rep.zprl_children,
					    rep.zprl_type);
				else
					return (NULL);
			}
		}
		lastrep = rep;
	}

	if (ret != NULL)
		*ret = rep;

	return (ret);
}

/*
 * Check the replication level of the vdev spec against the current pool.  Calls
 * get_replication() to make sure the new spec is self-consistent.  If the
 * current pool configuration is itself inconsistent, we ignore any errors in
 * the new spec.  Otherwise, report any difference between the two.
 */
static int
check_replication(nvlist_t *config, nvlist_t *newroot)
{
	nvlist_t **child;
	uint_t children;
	replication_level_t *current = NULL, *new;
	replication_level_t *raidz, *mirror;
	int ret;

	/*
	 * If we have a current pool configuration, check to see if it's
	 * self-consistent.  If not, simply return success.
	 */
	if (config != NULL) {
		nvlist_t *nvroot;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if ((current = get_replication(nvroot, B_FALSE)) == NULL)
			return (0);
	}
	/*
	 * For spares there may be no children, and therefore no
	 * replication level to check.
	 */
	if ((nvlist_lookup_nvlist_array(newroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) || (children == 0)) {
		free(current);
		return (0);
	}

	/*
	 * If all we have is logs then there's no replication level to check.
	 */
	if (num_logs(newroot) == children) {
		free(current);
		return (0);
	}

	/*
	 * Get the replication level of the new vdev spec, reporting any
	 * inconsistencies found.
	 */
	if ((new = get_replication(newroot, B_TRUE)) == NULL) {
		free(current);
		return (-1);
	}

	/*
	 * Check to see if the new vdev spec matches the replication level of
	 * the current pool.
	 */
	ret = 0;
	if (current != NULL) {
		if (is_raidz_mirror(current, new, &raidz, &mirror) ||
		    is_raidz_mirror(new, current, &raidz, &mirror)) {
			if (raidz->zprl_parity != mirror->zprl_children - 1) {
				vdev_error(gettext(
				    "mismatched replication level: pool and "
				    "new vdev with different redundancy, %s "
				    "and %s vdevs, %llu vs. %llu (%llu-way)\n"),
				    raidz->zprl_type,
				    mirror->zprl_type,
				    raidz->zprl_parity,
				    mirror->zprl_children - 1,
				    mirror->zprl_children);
				ret = -1;
			}
		} else if (strcmp(current->zprl_type, new->zprl_type) != 0) {
			vdev_error(gettext(
			    "mismatched replication level: pool uses %s "
			    "and new vdev is %s\n"),
			    current->zprl_type, new->zprl_type);
			ret = -1;
		} else if (current->zprl_parity != new->zprl_parity) {
			vdev_error(gettext(
			    "mismatched replication level: pool uses %llu "
			    "device parity and new vdev uses %llu\n"),
			    current->zprl_parity, new->zprl_parity);
			ret = -1;
		} else if (current->zprl_children != new->zprl_children) {
			vdev_error(gettext(
			    "mismatched replication level: pool uses %llu-way "
			    "%s and new vdev uses %llu-way %s\n"),
			    current->zprl_children, current->zprl_type,
			    new->zprl_children, new->zprl_type);
			ret = -1;
		}
	}

	free(new);
	if (current != NULL)
		free(current);

	return (ret);
}

static int
zero_label(char *path)
{
	const int size = 4096;
	char buf[size];
	int err, fd;

	if ((fd = open(path, O_WRONLY|O_EXCL)) < 0) {
		(void) fprintf(stderr, gettext("cannot open '%s': %s\n"),
		    path, strerror(errno));
		return (-1);
	}

	memset(buf, 0, size);
	err = write(fd, buf, size);
	(void) fdatasync(fd);
	(void) close(fd);

	if (err == -1) {
		(void) fprintf(stderr, gettext("cannot zero first %d bytes "
		    "of '%s': %s\n"), size, path, strerror(errno));
		return (-1);
	}

	if (err != size) {
		(void) fprintf(stderr, gettext("could only zero %d/%d bytes "
		    "of '%s'\n"), err, size, path);
		return (-1);
	}

	return (0);
}

/*
 * Go through and find any whole disks in the vdev specification, labeling them
 * as appropriate.  When constructing the vdev spec, we were unable to open this
 * device in order to provide a devid.  Now that we have labeled the disk and
 * know that slice 0 is valid, we can construct the devid now.
 *
 * If the disk was already labeled with an EFI label, we will have gotten the
 * devid already (because we were able to open the whole disk).  Otherwise, we
 * need to get the devid after we label the disk.
 */
static int
make_disks(zpool_handle_t *zhp, nvlist_t *nv)
{
	nvlist_t **child;
	uint_t c, children;
	char *type, *path;
	char devpath[MAXPATHLEN];
	char udevpath[MAXPATHLEN];
	uint64_t wholedisk;
	struct stat64 statbuf;
	int is_exclusive = 0;
	int fd;
	int ret;

	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {

		if (strcmp(type, VDEV_TYPE_DISK) != 0)
			return (0);

		/*
		 * We have a disk device.  If this is a whole disk write
		 * out the efi partition table, otherwise write zeros to
		 * the first 4k of the partition.  This is to ensure that
		 * libblkid will not misidentify the partition due to a
		 * magic value left by the previous filesystem.
		 */
		verify(!nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path));
		verify(!nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk));

		if (!wholedisk) {
			/*
			 * Update device id string for mpath nodes (Linux only)
			 */
			if (is_mpath_whole_disk(path))
				update_vdev_config_dev_strs(nv);

			if (!is_spare(NULL, path))
				(void) zero_label(path);
			return (0);
		}

		if (realpath(path, devpath) == NULL) {
			ret = errno;
			(void) fprintf(stderr,
			    gettext("cannot resolve path '%s'\n"), path);
			return (ret);
		}

		/*
		 * Remove any previously existing symlink from a udev path to
		 * the device before labeling the disk.  This ensures that
		 * only newly created links are used.  Otherwise there is a
		 * window between when udev deletes and recreates the link
		 * during which access attempts will fail with ENOENT.
		 */
		strlcpy(udevpath, path, MAXPATHLEN);
		(void) zfs_append_partition(udevpath, MAXPATHLEN);

		fd = open(devpath, O_RDWR|O_EXCL);
		if (fd == -1) {
			if (errno == EBUSY)
				is_exclusive = 1;
		} else {
			(void) close(fd);
		}

		/*
		 * If the partition exists, contains a valid spare label,
		 * and is opened exclusively there is no need to partition
		 * it.  Hot spares have already been partitioned and are
		 * held open exclusively by the kernel as a safety measure.
		 *
		 * If the provided path is for a /dev/disk/ device its
		 * symbolic link will be removed, partition table created,
		 * and then block until udev creates the new link.
		 */
		if (!is_exclusive || !is_spare(NULL, udevpath)) {
			char *devnode = strrchr(devpath, '/') + 1;

			ret = strncmp(udevpath, UDISK_ROOT, strlen(UDISK_ROOT));
			if (ret == 0) {
				ret = lstat64(udevpath, &statbuf);
				if (ret == 0 && S_ISLNK(statbuf.st_mode))
					(void) unlink(udevpath);
			}

			/*
			 * When labeling a pool the raw device node name
			 * is provided as it appears under /dev/.
			 */
			if (zpool_label_disk(g_zfs, zhp, devnode) == -1)
				return (-1);

			/*
			 * Wait for udev to signal the device is available
			 * by the provided path.
			 */
			ret = zpool_label_disk_wait(udevpath, DISK_LABEL_WAIT);
			if (ret) {
				(void) fprintf(stderr,
				    gettext("missing link: %s was "
				    "partitioned but %s is missing\n"),
				    devnode, udevpath);
				return (ret);
			}

			ret = zero_label(udevpath);
			if (ret)
				return (ret);
		}

		/*
		 * Update the path to refer to the partition.  The presence of
		 * the 'whole_disk' field indicates to the CLI that we should
		 * chop off the partition number when displaying the device in
		 * future output.
		 */
		verify(nvlist_add_string(nv, ZPOOL_CONFIG_PATH, udevpath) == 0);

		/*
		 * Update device id strings for whole disks (Linux only)
		 */
		update_vdev_config_dev_strs(nv);

		return (0);
	}

	for (c = 0; c < children; c++)
		if ((ret = make_disks(zhp, child[c])) != 0)
			return (ret);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0)
		for (c = 0; c < children; c++)
			if ((ret = make_disks(zhp, child[c])) != 0)
				return (ret);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0)
		for (c = 0; c < children; c++)
			if ((ret = make_disks(zhp, child[c])) != 0)
				return (ret);

	return (0);
}

/*
 * Go through and find any devices that are in use.  We rely on libblkid for
 * the majority of this task.
 */
static boolean_t
is_device_in_use(nvlist_t *config, nvlist_t *nv, boolean_t force,
    boolean_t replacing, boolean_t isspare)
{
	nvlist_t **child;
	uint_t c, children;
	char *type, *path;
	int ret = 0;
	char buf[MAXPATHLEN];
	uint64_t wholedisk = B_FALSE;
	boolean_t anyinuse = B_FALSE;

	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {

		verify(!nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path));
		if (strcmp(type, VDEV_TYPE_DISK) == 0)
			verify(!nvlist_lookup_uint64(nv,
			    ZPOOL_CONFIG_WHOLE_DISK, &wholedisk));

		/*
		 * As a generic check, we look to see if this is a replace of a
		 * hot spare within the same pool.  If so, we allow it
		 * regardless of what libblkid or zpool_in_use() says.
		 */
		if (replacing) {
			(void) strlcpy(buf, path, sizeof (buf));
			if (wholedisk) {
				ret = zfs_append_partition(buf, sizeof (buf));
				if (ret == -1)
					return (-1);
			}

			if (is_spare(config, buf))
				return (B_FALSE);
		}

		if (strcmp(type, VDEV_TYPE_DISK) == 0)
			ret = check_device(path, force, isspare, wholedisk);

		else if (strcmp(type, VDEV_TYPE_FILE) == 0)
			ret = check_file(path, force, isspare);

		return (ret != 0);
	}

	for (c = 0; c < children; c++)
		if (is_device_in_use(config, child[c], force, replacing,
		    B_FALSE))
			anyinuse = B_TRUE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0)
		for (c = 0; c < children; c++)
			if (is_device_in_use(config, child[c], force, replacing,
			    B_TRUE))
				anyinuse = B_TRUE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0)
		for (c = 0; c < children; c++)
			if (is_device_in_use(config, child[c], force, replacing,
			    B_FALSE))
				anyinuse = B_TRUE;

	return (anyinuse);
}

static const char *
is_grouping(const char *type, int *mindev, int *maxdev)
{
	if (strncmp(type, "raidz", 5) == 0) {
		const char *p = type + 5;
		char *end;
		long nparity;

		if (*p == '\0') {
			nparity = 1;
		} else if (*p == '0') {
			return (NULL); /* no zero prefixes allowed */
		} else {
			errno = 0;
			nparity = strtol(p, &end, 10);
			if (errno != 0 || nparity < 1 || nparity >= 255 ||
			    *end != '\0')
				return (NULL);
		}

		if (mindev != NULL)
			*mindev = nparity + 1;
		if (maxdev != NULL)
			*maxdev = 255;
		return (VDEV_TYPE_RAIDZ);
	}

	if (maxdev != NULL)
		*maxdev = INT_MAX;

	if (strcmp(type, "mirror") == 0) {
		if (mindev != NULL)
			*mindev = 2;
		return (VDEV_TYPE_MIRROR);
	}

	if (strcmp(type, "spare") == 0) {
		if (mindev != NULL)
			*mindev = 1;
		return (VDEV_TYPE_SPARE);
	}

	if (strcmp(type, "log") == 0) {
		if (mindev != NULL)
			*mindev = 1;
		return (VDEV_TYPE_LOG);
	}

	if (strcmp(type, "cache") == 0) {
		if (mindev != NULL)
			*mindev = 1;
		return (VDEV_TYPE_L2CACHE);
	}

	return (NULL);
}

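/*
 * Examples (added for clarity): is_grouping("raidz", ...) behaves like
 * "raidz1" and yields mindev=2; "raidz2" yields mindev=3 and maxdev=255;
 * "mirror" yields mindev=2 with no practical maxdev; a plain device name
 * such as "sda" returns NULL and is treated as a leaf vdev.
 */
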
/*
 * Construct a syntactically valid vdev specification,
 * and ensure that all devices and files exist and can be opened.
 * Note: we don't bother freeing anything in the error paths
 * because the program is just going to exit anyway.
 */
nvlist_t *
construct_spec(nvlist_t *props, int argc, char **argv)
{
	nvlist_t *nvroot, *nv, **top, **spares, **l2cache;
	int t, toplevels, mindev, maxdev, nspares, nlogs, nl2cache;
	const char *type;
	uint64_t is_log;
	boolean_t seen_logs;

	top = NULL;
	toplevels = 0;
	spares = NULL;
	l2cache = NULL;
	nspares = 0;
	nlogs = 0;
	nl2cache = 0;
	is_log = B_FALSE;
	seen_logs = B_FALSE;
	nvroot = NULL;

	while (argc > 0) {
		nv = NULL;

		/*
		 * If it's a mirror or raidz, the subsequent arguments are
		 * its leaves -- until we encounter the next mirror or raidz.
		 */
		if ((type = is_grouping(argv[0], &mindev, &maxdev)) != NULL) {
			nvlist_t **child = NULL;
			int c, children = 0;

			if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
				if (spares != NULL) {
					(void) fprintf(stderr,
					    gettext("invalid vdev "
					    "specification: 'spare' can be "
					    "specified only once\n"));
					goto spec_out;
				}
				is_log = B_FALSE;
			}

			if (strcmp(type, VDEV_TYPE_LOG) == 0) {
				if (seen_logs) {
					(void) fprintf(stderr,
					    gettext("invalid vdev "
					    "specification: 'log' can be "
					    "specified only once\n"));
					goto spec_out;
				}
				seen_logs = B_TRUE;
				is_log = B_TRUE;
				argc--;
				argv++;
				/*
				 * A log is not a real grouping device.
				 * We just set is_log and continue.
				 */
				continue;
			}

			if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
				if (l2cache != NULL) {
					(void) fprintf(stderr,
					    gettext("invalid vdev "
					    "specification: 'cache' can be "
					    "specified only once\n"));
					goto spec_out;
				}
				is_log = B_FALSE;
			}

			if (is_log) {
				if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
					(void) fprintf(stderr,
					    gettext("invalid vdev "
					    "specification: unsupported 'log' "
					    "device: %s\n"), type);
					goto spec_out;
				}
				nlogs++;
			}

			for (c = 1; c < argc; c++) {
				if (is_grouping(argv[c], NULL, NULL) != NULL)
					break;
				children++;
				child = realloc(child,
				    children * sizeof (nvlist_t *));
				if (child == NULL)
					zpool_no_memory();
				if ((nv = make_leaf_vdev(props, argv[c],
				    B_FALSE)) == NULL) {
					for (c = 0; c < children - 1; c++)
						nvlist_free(child[c]);
					free(child);
					goto spec_out;
				}

				child[children - 1] = nv;
			}

			if (children < mindev) {
				(void) fprintf(stderr, gettext("invalid vdev "
				    "specification: %s requires at least %d "
				    "devices\n"), argv[0], mindev);
				for (c = 0; c < children; c++)
					nvlist_free(child[c]);
				free(child);
				goto spec_out;
			}

			if (children > maxdev) {
				(void) fprintf(stderr, gettext("invalid vdev "
				    "specification: %s supports no more than "
				    "%d devices\n"), argv[0], maxdev);
				for (c = 0; c < children; c++)
					nvlist_free(child[c]);
				free(child);
				goto spec_out;
			}

			argc -= c;
			argv += c;

			if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
				spares = child;
				nspares = children;
				continue;
			} else if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
				l2cache = child;
				nl2cache = children;
				continue;
			} else {
				verify(nvlist_alloc(&nv, NV_UNIQUE_NAME,
				    0) == 0);
				verify(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE,
				    type) == 0);
				verify(nvlist_add_uint64(nv,
				    ZPOOL_CONFIG_IS_LOG, is_log) == 0);
				if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
					verify(nvlist_add_uint64(nv,
					    ZPOOL_CONFIG_NPARITY,
					    mindev - 1) == 0);
				}
				verify(nvlist_add_nvlist_array(nv,
				    ZPOOL_CONFIG_CHILDREN, child,
				    children) == 0);

				for (c = 0; c < children; c++)
					nvlist_free(child[c]);
				free(child);
			}
		} else {
			/*
			 * We have a device.  Pass off to make_leaf_vdev() to
			 * construct the appropriate nvlist describing the vdev.
			 */
			if ((nv = make_leaf_vdev(props, argv[0],
			    is_log)) == NULL)
				goto spec_out;

			if (is_log)
				nlogs++;
			argc--;
			argv++;
		}

		toplevels++;
		top = realloc(top, toplevels * sizeof (nvlist_t *));
		if (top == NULL)
			zpool_no_memory();
		top[toplevels - 1] = nv;
	}

	if (toplevels == 0 && nspares == 0 && nl2cache == 0) {
		(void) fprintf(stderr, gettext("invalid vdev "
		    "specification: at least one toplevel vdev must be "
		    "specified\n"));
		goto spec_out;
	}

	if (seen_logs && nlogs == 0) {
		(void) fprintf(stderr, gettext("invalid vdev specification: "
		    "log requires at least 1 device\n"));
		goto spec_out;
	}

	/*
	 * Finally, create nvroot and add all top-level vdevs to it.
	 */
	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    top, toplevels) == 0);
	if (nspares != 0)
		verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    spares, nspares) == 0);
	if (nl2cache != 0)
		verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    l2cache, nl2cache) == 0);

spec_out:
	for (t = 0; t < toplevels; t++)
		nvlist_free(top[t]);
	for (t = 0; t < nspares; t++)
		nvlist_free(spares[t]);
	for (t = 0; t < nl2cache; t++)
		nvlist_free(l2cache[t]);

	free(spares);
	free(l2cache);
	free(top);

	return (nvroot);
}

nvlist_t *
split_mirror_vdev(zpool_handle_t *zhp, char *newname, nvlist_t *props,
    splitflags_t flags, int argc, char **argv)
{
	nvlist_t *newroot = NULL, **child;
	uint_t c, children;

	if (argc > 0) {
		if ((newroot = construct_spec(props, argc, argv)) == NULL) {
			(void) fprintf(stderr, gettext("Unable to build a "
			    "pool from the specified devices\n"));
			return (NULL);
		}

		if (!flags.dryrun && make_disks(zhp, newroot) != 0) {
			nvlist_free(newroot);
			return (NULL);
		}

		/* avoid any tricks in the spec */
		verify(nvlist_lookup_nvlist_array(newroot,
		    ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
		for (c = 0; c < children; c++) {
			char *path;
			const char *type;
			int min, max;

			verify(nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_PATH, &path) == 0);
			if ((type = is_grouping(path, &min, &max)) != NULL) {
				(void) fprintf(stderr, gettext("Cannot use "
				    "'%s' as a device for splitting\n"), type);
				nvlist_free(newroot);
				return (NULL);
			}
		}
	}

	if (zpool_vdev_split(zhp, newname, &newroot, props, flags) != 0) {
		nvlist_free(newroot);
		return (NULL);
	}

	return (newroot);
}

/*
 * Get and validate the contents of the given vdev specification.  This ensures
 * that the nvlist returned is well-formed, that all the devices exist, and that
 * they are not currently in use by any other known consumer.  The 'poolconfig'
 * parameter is the current configuration of the pool when adding devices to an
 * existing pool, and is used to perform additional checks, such as changing the
 * replication level of the pool.  It can be 'NULL' to indicate that this is a
 * new pool.  The 'force' flag controls whether devices should be forcefully
 * added, even if they appear in use.
 */
nvlist_t *
make_root_vdev(zpool_handle_t *zhp, nvlist_t *props, int force, int check_rep,
    boolean_t replacing, boolean_t dryrun, int argc, char **argv)
{
	nvlist_t *newroot;
	nvlist_t *poolconfig = NULL;
	is_force = force;

	/*
	 * Construct the vdev specification.  If this is successful, we know
	 * that we have a valid specification, and that all devices can be
	 * opened.
	 */
	if ((newroot = construct_spec(props, argc, argv)) == NULL)
		return (NULL);

	if (zhp && ((poolconfig = zpool_get_config(zhp, NULL)) == NULL)) {
		nvlist_free(newroot);
		return (NULL);
	}

	/*
	 * Validate each device to make sure that it's not shared with another
	 * subsystem.  We do this even if 'force' is set, because there are some
	 * uses (such as a dedicated dump device) that even '-f' cannot
	 * override.
	 */
	if (is_device_in_use(poolconfig, newroot, force, replacing, B_FALSE)) {
		nvlist_free(newroot);
		return (NULL);
	}

	/*
	 * Check the replication level of the given vdevs and report any errors
	 * found.  We include the existing pool spec, if any, as we need to
	 * catch changes against the existing replication level.
	 */
	if (check_rep && check_replication(poolconfig, newroot) != 0) {
		nvlist_free(newroot);
		return (NULL);
	}

	/*
	 * Run through the vdev specification and label any whole disks found.
	 */
	if (!dryrun && make_disks(zhp, newroot) != 0) {
		nvlist_free(newroot);
		return (NULL);
	}

	return (newroot);
}