/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2016, 2017, Intel Corporation.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 */

/*
 * ZFS syseventd module.
 *
 * file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
 *
 * The purpose of this module is to identify when devices are added to the
 * system, and appropriately online or replace the affected vdevs.
 *
 * When a device is added to the system:
 *
 * 1. Search for any vdevs whose devid matches that of the newly added
 *    device.
 *
 * 2. If no vdevs are found, then search for any vdevs whose udev path
 *    matches that of the new device.
 *
 * 3. If no vdevs match by either method, then ignore the event.
 *
 * 4. Attempt to online the device with a flag to indicate that it should
 *    be unspared when resilvering completes. If this succeeds, then the
 *    same device was inserted and we should continue normally.
 *
 * 5. If the pool does not have the 'autoreplace' property set, attempt to
 *    online the device again without the unspare flag, which will
 *    generate a FMA fault.
 *
 * 6. If the pool has the 'autoreplace' property set, and the matching vdev
 *    is a whole disk, then label the new disk and attempt a 'zpool
 *    replace'.
 *
 * The module responds to EC_DEV_ADD events. The special ESC_ZFS_VDEV_CHECK
 * event indicates that a device failed to open during pool load, but the
 * autoreplace property was set. In this case, we deferred the associated
 * FMA fault until our module had a chance to process the autoreplace logic.
 * If the device could not be replaced, then the second online attempt will
 * trigger the FMA fault that we skipped earlier.
 *
 * ZFS on Linux porting notes:
 * Linux udev provides a disk insert event for both the disk and each
 * of its partitions.
 */

#include <ctype.h>
#include <devid.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libzfs.h>
#include <libzutil.h>
#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <sys/list.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include "zfs_agents.h"
#include "../zed_log.h"

#define	DEV_BYID_PATH	"/dev/disk/by-id/"
#define	DEV_BYPATH_PATH	"/dev/disk/by-path/"
#define	DEV_BYVDEV_PATH	"/dev/disk/by-vdev/"

typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);

libzfs_handle_t *g_zfshdl;
list_t g_pool_list;	/* list of unavailable pools at initialization */
list_t g_device_list;	/* list of disks with asynchronous label request */
tpool_t *g_tpool;
boolean_t g_enumeration_done;
pthread_t g_zfs_tid;	/* zfs_enum_pools() thread */

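/*
 * A pool that was unavailable when first enumerated; its datasets are
 * enabled later, once the pool's top-level vdev becomes available.
 */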
typedef struct unavailpool {
	zpool_handle_t	*uap_zhp;
	list_node_t	uap_node;
} unavailpool_t;

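/*
 * A disk with an asynchronous zpool_label_disk() request outstanding,
 * tracked by physical path until the matching partition-add event arrives.
 */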
typedef struct pendingdev {
	char		pd_physpath[128];
	list_node_t	pd_node;
} pendingdev_t;

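/*
 * Return the state of the pool's top-level (root) vdev.
 */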
static int
zfs_toplevel_state(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	unsigned int c;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	return (vs->vs_state);
}

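/*
 * zpool_iter() callback: queue any pool whose top-level vdev is below
 * DEGRADED (i.e. unavailable) so its datasets can be enabled once the
 * missing devices arrive; close the handles of healthy pools.
 */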
static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
	zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
	    zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));

	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
		unavailpool_t *uap;
		uap = malloc(sizeof (unavailpool_t));
		uap->uap_zhp = zhp;
		list_insert_tail((list_t *)data, uap);
	} else {
		zpool_close(zhp);
	}
	return (0);
}

/*
 * Two-stage replace on Linux:
 * since we get disk notifications, we can wait for the partitioned
 * disk slice to show up.
 *
 * First stage tags the disk, initiates async partitioning, and returns.
 * Second stage finds the tag and proceeds to ZFS labeling/replace.
 *
 * disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
 *
 * 1. physical match with no fs and no partition:
 *	tag it, partition the disk
 *
 * 2. physical match again: see the partition and the tag
 */

/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system. If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk. This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished). If this succeeds, then we're done. If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected. If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'. If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	pendingdev_t *device;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	uint64_t guid = 0ULL;
	char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	char devpath[PATH_MAX];
	int ret;
	int is_dm = 0;
	int is_sd = 0;
	uint_t c;
	vdev_stat_t *vs;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	/* Skip healthy disks */
	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
		    __func__, path);
		return;
	}

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &enc_sysfs_path);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);

	if (offline)
		return; /* don't intervene if it was taken offline */

	is_dm = zfs_dev_is_dm(path);
	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
	    " wholedisk %d, dm %d (%llu)", zpool_get_name(zhp), path,
	    physpath ? physpath : "NULL", wholedisk, is_dm,
	    (long long unsigned int)guid);

	/*
	 * The VDEV guid is preferred for identification (gets passed in path)
	 */
	if (guid != 0) {
		(void) snprintf(fullpath, sizeof (fullpath), "%llu",
		    (long long unsigned int)guid);
	} else {
		/*
		 * otherwise use path sans partition suffix for whole disks
		 */
		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk) {
			char *spath = zfs_strip_partition(fullpath);
			if (!spath) {
				zed_log_msg(LOG_INFO, "%s: Can't alloc",
				    __func__);
				return;
			}

			(void) strlcpy(fullpath, spath, sizeof (fullpath));
			free(spath);
		}
	}

	/*
	 * Attempt to online the device.
	 */
	if (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)) {
		zed_log_msg(LOG_INFO, " zpool_vdev_online: vdev %s is %s",
		    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
		    "HEALTHY" : "DEGRADED");
		return;
	}

	/*
	 * vdev_id alias rule for using scsi_debug devices (FMA automated
	 * testing)
	 */
	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
		is_sd = 1;

	/*
	 * If the pool doesn't have the autoreplace property set, then use
	 * vdev online to trigger a FMA fault by posting an ereport.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    !(wholedisk || is_dm) || (physpath == NULL)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
		    "not a whole disk for '%s'", fullpath);
		return;
	}

	/*
	 * Convert physical path into its current device node. Rawpath
	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
	 * /dev/disk/by-path will not be present.
	 */
	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);

	if (realpath(rawpath, devpath) == NULL && !is_dm) {
		zed_log_msg(LOG_INFO, " realpath: %s failed (%s)",
		    rawpath, strerror(errno));

		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);

		zed_log_msg(LOG_INFO, " zpool_vdev_online: %s FORCEFAULT (%s)",
		    fullpath, libzfs_error_description(g_zfshdl));
		return;
	}

	/* Only autoreplace bad disks */
	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
	    (vs->vs_state != VDEV_STATE_FAULTED) &&
	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
		return;
	}

	nvlist_lookup_string(vdev, "new_devid", &new_devid);

	if (is_dm) {
		/* Don't label device mapper or multipath disks. */
	} else if (!labeled) {
		/*
		 * We're auto-replacing a raw disk, so label it first.
		 */
		char *leafname;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label. Before we can label the disk, we need
		 * to map the physical string that was matched on to the
		 * underlying device node.
		 *
		 * If any part of this process fails, then do a force online
		 * to trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		leafname = strrchr(devpath, '/') + 1;

		if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
			zed_log_msg(LOG_INFO, " zpool_label_disk: could not "
			    "label '%s' (%s)", leafname,
			    libzfs_error_description(g_zfshdl));

			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		/*
		 * The disk labeling is asynchronous on Linux. Just record
		 * this label request and return as there will be another
		 * disk add event for the partition after the labeling is
		 * completed.
		 */
		device = malloc(sizeof (pendingdev_t));
		(void) strlcpy(device->pd_physpath, physpath,
		    sizeof (device->pd_physpath));
		list_insert_tail(&g_device_list, device);

		zed_log_msg(LOG_INFO, " zpool_label_disk: async '%s' (%llu)",
		    leafname, (u_longlong_t)guid);

		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */

	} else /* labeled */ {
		boolean_t found = B_FALSE;
		/*
		 * match up with request above to label the disk
		 */
		for (device = list_head(&g_device_list); device != NULL;
		    device = list_next(&g_device_list, device)) {
			if (strcmp(physpath, device->pd_physpath) == 0) {
				list_remove(&g_device_list, device);
				free(device);
				found = B_TRUE;
				break;
			}
			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
			    physpath, device->pd_physpath);
		}
		if (!found) {
			/* unexpected partition slice encountered */
			zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
			    fullpath);
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		zed_log_msg(LOG_INFO, " zpool_label_disk: resume '%s' (%llu)",
		    physpath, (u_longlong_t)guid);

		(void) snprintf(devpath, sizeof (devpath), "%s%s",
		    DEV_BYID_PATH, new_devid);
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach(). While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		return;
	}
	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	/*
	 * Wait for udev to verify the links exist, then auto-replace
	 * the leaf disk at same physical location.
	 */
	if (zpool_label_disk_wait(path, 3000) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
		    "disk %s is missing", path);
		nvlist_free(nvroot);
		return;
	}

	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	zed_log_msg(LOG_INFO, " zpool_vdev_replace: %s with %s (%s)",
	    fullpath, path, (ret == 0) ? "no errors" :
	    libzfs_error_description(g_zfshdl));

	nvlist_free(nvroot);
}

/*
 * Utility functions to find a vdev matching given criteria.
 */
typedef struct dev_data {
	const char		*dd_compare;	/* value to match dd_prop on */
	const char		*dd_prop;	/* vdev config key compared */
	zfs_process_func_t	dd_func;	/* applied to matching vdevs */
	boolean_t		dd_found;	/* set once a vdev has matched */
	boolean_t		dd_islabeled;	/* event was for a partition */
	uint64_t		dd_pool_guid;	/* limit to this pool (0 = any) */
	uint64_t		dd_vdev_guid;	/* match by guid when nonzero */
	const char		*dd_new_devid;	/* passed to the replace code */
} dev_data_t;

static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
	dev_data_t *dp = data;
	char *path = NULL;
	uint_t c, children;
	nvlist_t **child;

	/*
	 * First iterate over any children.
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
		return;
	}

	/* once a vdev was matched and processed there is nothing left to do */
	if (dp->dd_found)
		return;

	/*
	 * Match by GUID if available; otherwise fall back to devid or
	 * physical path.
	 */
	if (dp->dd_vdev_guid != 0) {
		uint64_t guid;

		if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid != dp->dd_vdev_guid) {
			return;
		}
		zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched on %llu", guid);
		dp->dd_found = B_TRUE;

	} else if (dp->dd_compare != NULL) {
		/*
		 * NOTE: On Linux there is an event for each partition, so
		 * unlike illumos, substring matching is not required to
		 * accommodate the partition suffix. An exact match will be
		 * present in the dp->dd_compare value.
		 */
		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
		    strcmp(dp->dd_compare, path) != 0)
			return;

		zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched %s on %s",
		    dp->dd_prop, path);
		dp->dd_found = B_TRUE;

		/* pass the new devid for use by replacing code */
		if (dp->dd_new_devid != NULL) {
			(void) nvlist_add_string(nvl, "new_devid",
			    dp->dd_new_devid);
		}
	}

	(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
}

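/*
 * Thread-pool task: mount and share the datasets of a pool that has
 * transitioned from unavailable to available.
 */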
void
zfs_enable_ds(void *arg)
{
	unavailpool_t *pool = (unavailpool_t *)arg;

	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
	zpool_close(pool->uap_zhp);
	free(pool);
}

static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvl;
	dev_data_t *dp = data;
	uint64_t pool_guid;
	unavailpool_t *pool;

	zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
	    zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);

	/*
	 * For each vdev in this pool, look for a match to apply dd_func
	 */
	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
		}
	}

	/*
	 * if this pool was originally unavailable,
	 * then enable its datasets asynchronously
	 */
	if (g_enumeration_done) {
		for (pool = list_head(&g_pool_list); pool != NULL;
		    pool = list_next(&g_pool_list, pool)) {

			if (strcmp(zpool_get_name(zhp),
			    zpool_get_name(pool->uap_zhp)))
				continue;
			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
				list_remove(&g_pool_list, pool);
				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
				    pool);
				break;
			}
		}
	}

	zpool_close(zhp);
	return (dp->dd_found);	/* cease iteration after a match */
}

/*
 * Given a physical device location, iterate over all
 * (pool, vdev) pairs which correspond to that location.
 */
static boolean_t
devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
    boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_compare = physical;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;	/* used by auto replace code */

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a device identifier, find any vdevs with a matching devid.
 * On Linux we can match devid directly which is always a whole disk.
 */
static boolean_t
devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_compare = devid;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Handle an EC_DEV_ADD.ESC_DISK event.
 *
 * illumos
 *	Expects: DEV_PHYS_PATH string in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *	path: '/dev/dsk/c0t1d0s0' (persistent)
 *	devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
 *	phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
 *
 * linux
 *	Provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *	path: '/dev/sdc1' (not persistent)
 *	devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
 *	phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
 */
static int
zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
{
	char *devpath = NULL, *devid;
	boolean_t is_slice;

	/*
	 * Expecting a devid string and an optional physical location
	 */
	if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0)
		return (-1);

	(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);

	is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);

	zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
	    devid, devpath ? devpath : "NULL", is_slice);

	/*
	 * Iterate over all vdevs looking for a match in the following order:
	 * 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
	 * 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
	 *
	 * For disks, we only want to pay attention to vdevs marked as whole
	 * disks or multipath devices.
	 */
	if (!devid_iter(devid, zfs_process_add, is_slice) && devpath != NULL)
		(void) devphys_iter(devpath, devid, zfs_process_add, is_slice);

	return (0);
}

/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could not
 * be opened during initial pool open, but the autoreplace property was set on
 * the pool. In this case, we treat it as if it were an add event.
 */
static int
zfs_deliver_check(nvlist_t *nvl)
{
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0 ||
	    data.dd_vdev_guid == 0)
		return (0);

	zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
	    data.dd_pool_guid, data.dd_vdev_guid);

	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (0);
}

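/*
 * zpool_iter() callback for the ESC_DEV_DLE event: locate the vdev whose
 * physical path matches, reopen the pool for whole disks so the kernel
 * sees the new device size, and online the device if autoexpand is set.
 */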
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
	char *devname = data;
	boolean_t avail_spare, l2cache;
	nvlist_t *tgt;
	int error;

	zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
	    devname, zpool_get_name(zhp));

	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
	    &avail_spare, &l2cache, NULL)) != NULL) {
		char *path, fullpath[MAXPATHLEN];
		uint64_t wholedisk;

		error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
		if (error) {
			zpool_close(zhp);
			return (0);
		}

		error = nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (error)
			wholedisk = 0;

		if (wholedisk) {
			path = strrchr(path, '/');
			if (path != NULL) {
				path = zfs_strip_partition(path + 1);
				if (path == NULL) {
					zpool_close(zhp);
					return (0);
				}
			} else {
				zpool_close(zhp);
				return (0);
			}

			(void) strlcpy(fullpath, path, sizeof (fullpath));
			free(path);

			/*
			 * We need to reopen the pool associated with this
			 * device so that the kernel can update the size of
			 * the expanded device. When expanding there is no
			 * need to restart the scrub from the beginning.
			 */
			boolean_t scrub_restart = B_FALSE;
			(void) zpool_reopen_one(zhp, &scrub_restart);
		} else {
			(void) strlcpy(fullpath, path, sizeof (fullpath));
		}

		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
			vdev_state_t newstate;

			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
				error = zpool_vdev_online(zhp, fullpath, 0,
				    &newstate);
				zed_log_msg(LOG_INFO, "zfsdle_vdev_online: "
				    "setting device '%s' to ONLINE state "
				    "in pool '%s': %d", fullpath,
				    zpool_get_name(zhp), error);
			}
		}
		zpool_close(zhp);
		return (1);
	}
	zpool_close(zhp);
	return (0);
}

/*
 * This function handles the ESC_DEV_DLE device change event. Use the
 * provided vdev guid when looking up a disk or partition; when the guid
 * is not present, assume the entire disk is owned by ZFS, append the
 * expected -part1 partition information, and then look up by physical
 * path.
 */
static int
zfs_deliver_dle(nvlist_t *nvl)
{
	char *devname, name[MAXPATHLEN];
	uint64_t guid;

	if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
		sprintf(name, "%llu", (u_longlong_t)guid);
	} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
		strlcpy(name, devname, MAXPATHLEN);
		zfs_append_partition(name, MAXPATHLEN);
	} else {
		/* Without a guid or physpath there is nothing to look up. */
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
		return (-1);
	}

	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, name) != 1) {
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
		    "found", name);
		return (1);
	}

	return (0);
}

/*
 * syseventd daemon module event handler
 *
 * Handles syseventd daemon ZFS device-related events:
 *
 *	EC_DEV_ADD.ESC_DISK
 *	EC_DEV_STATUS.ESC_DEV_DLE
 *	EC_ZFS.ESC_ZFS_VDEV_CHECK
 *
 * Note: assumes only one thread active at a time (not thread safe)
 */
static int
zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
{
	int ret;
	boolean_t is_lofi = B_FALSE, is_check = B_FALSE, is_dle = B_FALSE;

	if (strcmp(class, EC_DEV_ADD) == 0) {
		/*
		 * We're mainly interested in disk additions, but we also listen
		 * for new loop devices, to allow for simplified testing.
		 */
		if (strcmp(subclass, ESC_DISK) == 0)
			is_lofi = B_FALSE;
		else if (strcmp(subclass, ESC_LOFI) == 0)
			is_lofi = B_TRUE;
		else
			return (0);

		is_check = B_FALSE;
	} else if (strcmp(class, EC_ZFS) == 0 &&
	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
		/*
		 * This event signifies that a device failed to open
		 * during pool load, but the 'autoreplace' property was
		 * set, so we should pretend it's just been added.
		 */
		is_check = B_TRUE;
	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
	    strcmp(subclass, ESC_DEV_DLE) == 0) {
		is_dle = B_TRUE;
	} else {
		return (0);
	}

	if (is_dle)
		ret = zfs_deliver_dle(nvl);
	else if (is_check)
		ret = zfs_deliver_check(nvl);
	else
		ret = zfs_deliver_add(nvl, is_lofi);

	return (ret);
}

/*ARGSUSED*/
static void *
zfs_enum_pools(void *arg)
{
	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
	/*
	 * Linux - instead of using a thread pool, each list entry
	 * will spawn a thread when an unavailable pool transitions
	 * to available. zfs_slm_fini will wait for these threads.
	 */
	g_enumeration_done = B_TRUE;
	return (NULL);
}

/*
 * Called from the zed daemon at startup.
 *
 * Messages are delivered from zevents or the udev monitor.
 *
 * For now, each agent has its own libzfs instance.
 */
int
zfs_slm_init()
{
	if ((g_zfshdl = libzfs_init()) == NULL)
		return (-1);

	/*
	 * collect a list of unavailable pools (asynchronously,
	 * since this can take a while)
	 */
	list_create(&g_pool_list, sizeof (struct unavailpool),
	    offsetof(struct unavailpool, uap_node));

	if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
		list_destroy(&g_pool_list);
		libzfs_fini(g_zfshdl);
		return (-1);
	}

	list_create(&g_device_list, sizeof (struct pendingdev),
	    offsetof(struct pendingdev, pd_node));

	return (0);
}

void
zfs_slm_fini()
{
	unavailpool_t *pool;
	pendingdev_t *device;

	/* wait for zfs_enum_pools thread to complete */
	(void) pthread_join(g_zfs_tid, NULL);
	/* destroy the thread pool */
	if (g_tpool != NULL) {
		tpool_wait(g_tpool);
		tpool_destroy(g_tpool);
	}

	while ((pool = (list_head(&g_pool_list))) != NULL) {
		list_remove(&g_pool_list, pool);
		zpool_close(pool->uap_zhp);
		free(pool);
	}
	list_destroy(&g_pool_list);

	while ((device = (list_head(&g_device_list))) != NULL) {
		list_remove(&g_device_list, device);
		free(device);
	}
	list_destroy(&g_device_list);

	libzfs_fini(g_zfshdl);
}

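/*
 * Entry point called by zed for each event: log the class/subclass pair
 * and hand the event to the delivery logic above.
 */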
void
zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
{
	zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
	(void) zfs_slm_deliver_event(class, subclass, nvl);
}