1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2016, 2017, Intel Corporation.
26 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
27 */
28
29 /*
30 * ZFS syseventd module.
31 *
32 * file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
33 *
34 * The purpose of this module is to identify when devices are added to the
35 * system, and appropriately online or replace the affected vdevs.
36 *
37 * When a device is added to the system:
38 *
39 * 1. Search for any vdevs whose devid matches that of the newly added
40 * device.
41 *
42 * 2. If no vdevs are found, then search for any vdevs whose udev path
43 * matches that of the new device.
44 *
45 * 3. If no vdevs match by either method, then ignore the event.
46 *
47 * 4. Attempt to online the device with a flag to indicate that it should
48 * be unspared when resilvering completes. If this succeeds, then the
49 * same device was inserted and we should continue normally.
50 *
51 * 5. If the pool does not have the 'autoreplace' property set, attempt to
52 * online the device again without the unspare flag, which will
53 * generate a FMA fault.
54 *
55 * 6. If the pool has the 'autoreplace' property set, and the matching vdev
56 * is a whole disk, then label the new disk and attempt a 'zpool
57 * replace'.
58 *
59 * The module responds to EC_DEV_ADD events. The special ESC_ZFS_VDEV_CHECK
60 * event indicates that a device failed to open during pool load, but the
61 * autoreplace property was set. In this case, we deferred the associated
62 * FMA fault until our module had a chance to process the autoreplace logic.
63 * If the device could not be replaced, then the second online attempt will
64 * trigger the FMA fault that we skipped earlier.
65 *
66 * ZFS on Linux porting notes:
67 * In lieu of a thread pool, just spawn a thread on demand.
68 * Linux udev provides a disk insert event for both the disk and the partition.
69 *
70 */
71
72 #include <ctype.h>
73 #include <devid.h>
74 #include <fcntl.h>
75 #include <libnvpair.h>
76 #include <libzfs.h>
77 #include <limits.h>
78 #include <stddef.h>
79 #include <stdlib.h>
80 #include <string.h>
81 #include <syslog.h>
82 #include <sys/list.h>
83 #include <sys/sunddi.h>
84 #include <sys/sysevent/eventdefs.h>
85 #include <sys/sysevent/dev.h>
86 #include <pthread.h>
87 #include <unistd.h>
88 #include "zfs_agents.h"
89 #include "../zed_log.h"
90
91 #define DEV_BYID_PATH "/dev/disk/by-id/"
92 #define DEV_BYPATH_PATH "/dev/disk/by-path/"
93 #define DEV_BYVDEV_PATH "/dev/disk/by-vdev/"
94
95 typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
96
97 libzfs_handle_t *g_zfshdl;
98 list_t g_pool_list; /* list of unavailable pools at initialization */
99 list_t g_device_list; /* list of disks with asynchronous label request */
100 boolean_t g_enumeration_done;
101 pthread_t g_zfs_tid;
102
103 typedef struct unavailpool {
104 zpool_handle_t *uap_zhp;
105 pthread_t uap_enable_tid; /* dataset enable thread if activated */
106 list_node_t uap_node;
107 } unavailpool_t;
108
109 typedef struct pendingdev {
110 char pd_physpath[128];
111 list_node_t pd_node;
112 } pendingdev_t;
113
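/*
 * Return the current state of the pool's top-level (root) vdev.
 */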
114 static int
115 zfs_toplevel_state(zpool_handle_t *zhp)
116 {
117 nvlist_t *nvroot;
118 vdev_stat_t *vs;
119 unsigned int c;
120
121 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
122 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
123 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
124 (uint64_t **)&vs, &c) == 0);
125 return (vs->vs_state);
126 }
127
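/*
 * zpool_iter() callback: remember pools whose top-level vdev is below
 * DEGRADED so their datasets can be enabled once their devices show up.
 */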
128 static int
129 zfs_unavail_pool(zpool_handle_t *zhp, void *data)
130 {
131 zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
132 zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));
133
134 if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
135 unavailpool_t *uap;
136 uap = malloc(sizeof (unavailpool_t));
137 uap->uap_zhp = zhp;
138 uap->uap_enable_tid = 0;
139 list_insert_tail((list_t *)data, uap);
140 } else {
141 zpool_close(zhp);
142 }
143 return (0);
144 }
145
146 /*
147 * Two-stage replace on Linux:
148 * since we get disk notifications, we can wait for the
149 * partitioned disk slice to show up.
150 *
151 * First stage tags the disk, initiates async partitioning, and returns
152 * Second stage finds the tag and proceeds to ZFS labeling/replace
153 *
154 * disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
155 *
156 * 1. physical match with no fs, no partition
157 * tag it top, partition disk
158 *
159 * 2. physical match again, see partition and tag
160 *
161 */
162
163 /*
164 * The device associated with the given vdev (either by devid or physical path)
165 * has been added to the system. If 'isdisk' is set, then we only attempt a
166 * replacement if it's a whole disk. This also implies that we should label the
167 * disk first.
168 *
169 * First, we attempt to online the device (making sure to undo any spare
170 * operation when finished). If this succeeds, then we're done. If it fails,
171 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
172 * but that the label was not what we expected. If the 'autoreplace' property
173 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
174 * replace'. If the online is successful, but the new state is something else
175 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
176 * race, and we should avoid attempting to relabel the disk.
177 *
178 * We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
179 */
180 static void
181 zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
182 {
183 char *path;
184 vdev_state_t newstate;
185 nvlist_t *nvroot, *newvd;
186 pendingdev_t *device;
187 uint64_t wholedisk = 0ULL;
188 uint64_t offline = 0ULL;
189 uint64_t guid = 0ULL;
190 char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
191 char rawpath[PATH_MAX], fullpath[PATH_MAX];
192 char devpath[PATH_MAX];
193 int ret;
194 int is_dm = 0;
195 int is_sd = 0;
196 uint_t c;
197 vdev_stat_t *vs;
198
199 if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
200 return;
201
202 /* Skip healthy disks */
203 verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
204 (uint64_t **)&vs, &c) == 0);
205 if (vs->vs_state == VDEV_STATE_HEALTHY) {
206 zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
207 __func__, path);
208 return;
209 }
210
211 (void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
212 (void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
213 &enc_sysfs_path);
214 (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
215 (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
216 (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);
217
218 if (offline)
219 return; /* don't intervene if it was taken offline */
220
221 is_dm = zfs_dev_is_dm(path);
222 zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
223 " wholedisk %d, dm %d (%llu)", zpool_get_name(zhp), path,
224 physpath ? physpath : "NULL", (int)wholedisk, is_dm,
225 (long long unsigned int)guid);
226
227 /*
228 * The VDEV guid is preferred for identification (gets passed in path)
229 */
230 if (guid != 0) {
231 (void) snprintf(fullpath, sizeof (fullpath), "%llu",
232 (long long unsigned int)guid);
233 } else {
234 /*
235 * otherwise use path sans partition suffix for whole disks
236 */
237 (void) strlcpy(fullpath, path, sizeof (fullpath));
238 if (wholedisk) {
239 char *spath = zfs_strip_partition(fullpath);
240 if (!spath) {
241 zed_log_msg(LOG_INFO, "%s: Can't alloc",
242 __func__);
243 return;
244 }
245
246 (void) strlcpy(fullpath, spath, sizeof (fullpath));
247 free(spath);
248 }
249 }
250
251 /*
252 * Attempt to online the device.
253 */
254 if (zpool_vdev_online(zhp, fullpath,
255 ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
256 (newstate == VDEV_STATE_HEALTHY ||
257 newstate == VDEV_STATE_DEGRADED)) {
258 zed_log_msg(LOG_INFO, " zpool_vdev_online: vdev %s is %s",
259 fullpath, (newstate == VDEV_STATE_HEALTHY) ?
260 "HEALTHY" : "DEGRADED");
261 return;
262 }
263
264 /*
265 * vdev_id alias rule for using scsi_debug devices (FMA automated
266 * testing)
267 */
268 if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
269 is_sd = 1;
270
271 /*
272 * If the pool doesn't have the autoreplace property set, then use
273 * vdev online to trigger a FMA fault by posting an ereport.
274 */
275 if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
276 !(wholedisk || is_dm) || (physpath == NULL)) {
277 (void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
278 &newstate);
279 zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
280 "not a whole disk for '%s'", fullpath);
281 return;
282 }
283
284 /*
285 * Convert physical path into its current device node. Rawpath
286 * needs to be /dev/disk/by-vdev for a scsi_debug device since
287 * /dev/disk/by-path will not be present.
288 */
289 (void) snprintf(rawpath, sizeof (rawpath), "%s%s",
290 is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);
291
292 if (realpath(rawpath, devpath) == NULL && !is_dm) {
293 zed_log_msg(LOG_INFO, " realpath: %s failed (%s)",
294 rawpath, strerror(errno));
295
296 (void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
297 &newstate);
298
299 zed_log_msg(LOG_INFO, " zpool_vdev_online: %s FORCEFAULT (%s)",
300 fullpath, libzfs_error_description(g_zfshdl));
301 return;
302 }
303
304 /* Only autoreplace bad disks */
305 if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
306 (vs->vs_state != VDEV_STATE_FAULTED) &&
307 (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
308 return;
309 }
310
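/* devid of the newly added device, stashed on the vdev nvlist by zfs_iter_vdev() */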
311 nvlist_lookup_string(vdev, "new_devid", &new_devid);
312
313 if (is_dm) {
314 /* Don't label device mapper or multipath disks. */
315 } else if (!labeled) {
316 /*
317 * we're auto-replacing a raw disk, so label it first
318 */
319 char *leafname;
320
321 /*
322 * If this is a request to label a whole disk, then attempt to
323 * write out the label. Before we can label the disk, we need
324 * to map the physical string that was matched on to the
325 * underlying device node.
326 *
327 * If any part of this process fails, then do a force online
328 * to trigger a ZFS fault for the device (and any hot spare
329 * replacement).
330 */
331 leafname = strrchr(devpath, '/') + 1;
332
337 if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
338 zed_log_msg(LOG_INFO, " zpool_label_disk: could not "
339 "label '%s' (%s)", leafname,
340 libzfs_error_description(g_zfshdl));
341
342 (void) zpool_vdev_online(zhp, fullpath,
343 ZFS_ONLINE_FORCEFAULT, &newstate);
344 return;
345 }
346
347 /*
348 * The disk labeling is asynchronous on Linux. Just record
349 * this label request and return as there will be another
350 * disk add event for the partition after the labeling is
351 * completed.
352 */
353 device = malloc(sizeof (pendingdev_t));
354 (void) strlcpy(device->pd_physpath, physpath,
355 sizeof (device->pd_physpath));
356 list_insert_tail(&g_device_list, device);
357
358 zed_log_msg(LOG_INFO, " zpool_label_disk: async '%s' (%llu)",
359 leafname, (u_longlong_t)guid);
360
361 return; /* resumes at EC_DEV_ADD.ESC_DISK for partition */
362
363 } else /* labeled */ {
364 boolean_t found = B_FALSE;
365 /*
366 * match up with request above to label the disk
367 */
368 for (device = list_head(&g_device_list); device != NULL;
369 device = list_next(&g_device_list, device)) {
370 if (strcmp(physpath, device->pd_physpath) == 0) {
371 list_remove(&g_device_list, device);
372 free(device);
373 found = B_TRUE;
374 break;
375 }
376 zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
377 physpath, device->pd_physpath);
378 }
379 if (!found) {
380 /* unexpected partition slice encountered */
381 zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
382 fullpath);
383 (void) zpool_vdev_online(zhp, fullpath,
384 ZFS_ONLINE_FORCEFAULT, &newstate);
385 return;
386 }
387
388 zed_log_msg(LOG_INFO, " zpool_label_disk: resume '%s' (%llu)",
389 physpath, (u_longlong_t)guid);
390
391 (void) snprintf(devpath, sizeof (devpath), "%s%s",
392 DEV_BYID_PATH, new_devid);
393 }
394
395 /*
396 * Construct the root vdev to pass to zpool_vdev_attach(). While adding
397 * the entire vdev structure is harmless, we construct a reduced set of
398 * path/physpath/wholedisk to keep it simple.
399 */
400 if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
401 zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
402 return;
403 }
404 if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
405 zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
406 nvlist_free(nvroot);
407 return;
408 }
409
410 if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
411 nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
412 nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
413 (physpath != NULL && nvlist_add_string(newvd,
414 ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
415 (enc_sysfs_path != NULL && nvlist_add_string(newvd,
416 ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
417 nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
418 nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
419 nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
420 1) != 0) {
421 zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
422 nvlist_free(newvd);
423 nvlist_free(nvroot);
424 return;
425 }
426
427 nvlist_free(newvd);
428
429 /*
430 * auto-replace a leaf disk at the same physical location
431 */
432 ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);
433
434 zed_log_msg(LOG_INFO, " zpool_vdev_replace: %s with %s (%s)",
435 fullpath, path, (ret == 0) ? "no errors" :
436 libzfs_error_description(g_zfshdl));
437
438 nvlist_free(nvroot);
439 }
440
441 /*
442 * Utility functions to find a vdev matching given criteria.
443 */
444 typedef struct dev_data {
445 const char *dd_compare;
446 const char *dd_prop;
447 zfs_process_func_t dd_func;
448 boolean_t dd_found;
449 boolean_t dd_islabeled;
450 uint64_t dd_pool_guid;
451 uint64_t dd_vdev_guid;
452 const char *dd_new_devid;
453 } dev_data_t;
454
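/*
 * Recursively walk a vdev tree and apply dd_func to the leaf vdev matching
 * the search criteria (vdev GUID, or the devid/physical path in dd_compare).
 * Once a match has been processed, the remaining leaves are skipped.
 */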
455 static void
456 zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
457 {
458 dev_data_t *dp = data;
459 char *path = NULL;
460 uint_t c, children;
461 nvlist_t **child;
462
463 /*
464 * First iterate over any children.
465 */
466 if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
467 &child, &children) == 0) {
468 for (c = 0; c < children; c++)
469 zfs_iter_vdev(zhp, child[c], data);
470 return;
471 }
472
473 /* once a vdev has been matched and processed there is nothing left to do */
474 if (dp->dd_found)
475 return;
476
477 /*
478 * Match by GUID if available; otherwise fall back to devid or physical path
479 */
480 if (dp->dd_vdev_guid != 0) {
481 uint64_t guid;
482
483 if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
484 &guid) != 0 || guid != dp->dd_vdev_guid) {
485 return;
486 }
487 zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched on %llu", guid);
488 dp->dd_found = B_TRUE;
489
490 } else if (dp->dd_compare != NULL) {
491 /*
492 * NOTE: On Linux there is an event for each partition, so unlike
493 * illumos, substring matching is not required to accommodate
494 * the partition suffix. An exact match will be present in
495 * the dp->dd_compare value.
496 */
497 if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
498 strcmp(dp->dd_compare, path) != 0)
499 return;
500
501 zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched %s on %s",
502 dp->dd_prop, path);
503 dp->dd_found = B_TRUE;
504
505 /* pass the new devid for use by replacing code */
506 if (dp->dd_new_devid != NULL) {
507 (void) nvlist_add_string(nvl, "new_devid",
508 dp->dd_new_devid);
509 }
510 }
511
512 (dp->dd_func)(zhp, nvl, dp->dd_islabeled);
513 }
514
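/*
 * Thread body: mount and share the datasets of a pool that has become
 * available again (spawned from zfs_iter_pool() below).
 */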
515 static void *
516 zfs_enable_ds(void *arg)
517 {
518 unavailpool_t *pool = (unavailpool_t *)arg;
519
520 assert(pool->uap_enable_tid = pthread_self());
521
522 (void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
523 zpool_close(pool->uap_zhp);
524 pool->uap_zhp = NULL;
525
526 /* Note: zfs_slm_fini() will cleanup this pool entry on exit */
527 return (NULL);
528 }
529
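/*
 * zpool_iter() callback driven by the dev_data_t search criteria: walk this
 * pool's vdev tree looking for a match, and if the pool was unavailable at
 * startup, enable its datasets asynchronously once it is healthy enough.
 */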
530 static int
531 zfs_iter_pool(zpool_handle_t *zhp, void *data)
532 {
533 nvlist_t *config, *nvl;
534 dev_data_t *dp = data;
535 uint64_t pool_guid;
536 unavailpool_t *pool;
537
538 zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
539 zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);
540
541 /*
542 * For each vdev in this pool, look for a match to apply dd_func
543 */
544 if ((config = zpool_get_config(zhp, NULL)) != NULL) {
545 if (dp->dd_pool_guid == 0 ||
546 (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
547 &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
548 (void) nvlist_lookup_nvlist(config,
549 ZPOOL_CONFIG_VDEV_TREE, &nvl);
550 zfs_iter_vdev(zhp, nvl, data);
551 }
552 }
553
554 /*
555 * if this pool was originally unavailable,
556 * then enable its datasets asynchronously
557 */
558 if (g_enumeration_done) {
559 for (pool = list_head(&g_pool_list); pool != NULL;
560 pool = list_next(&g_pool_list, pool)) {
561
562 if (pool->uap_enable_tid != 0)
563 continue; /* entry already processed */
564 if (strcmp(zpool_get_name(zhp),
565 zpool_get_name(pool->uap_zhp)))
566 continue;
567 if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
568 /* send to a background thread; keep on list */
569 (void) pthread_create(&pool->uap_enable_tid,
570 NULL, zfs_enable_ds, pool);
571 break;
572 }
573 }
574 }
575
576 zpool_close(zhp);
577 return (dp->dd_found); /* cease iteration after a match */
578 }
579
580 /*
581 * Given a physical device location, iterate over all
582 * (pool, vdev) pairs which correspond to that location.
583 */
584 static boolean_t
585 devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
586 boolean_t is_slice)
587 {
588 dev_data_t data = { 0 };
589
590 data.dd_compare = physical;
591 data.dd_func = func;
592 data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
593 data.dd_found = B_FALSE;
594 data.dd_islabeled = is_slice;
595 data.dd_new_devid = devid; /* used by auto replace code */
596
597 (void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
598
599 return (data.dd_found);
600 }
601
602 /*
603 * Given a device identifier, find any vdevs with a matching devid.
604 * On Linux we can match devid directly which is always a whole disk.
605 */
606 static boolean_t
607 devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
608 {
609 dev_data_t data = { 0 };
610
611 data.dd_compare = devid;
612 data.dd_func = func;
613 data.dd_prop = ZPOOL_CONFIG_DEVID;
614 data.dd_found = B_FALSE;
615 data.dd_islabeled = is_slice;
616 data.dd_new_devid = devid;
617
618 (void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
619
620 return (data.dd_found);
621 }
622
623 /*
624 * Handle an EC_DEV_ADD.ESC_DISK event.
625 *
626 * illumos
627 * Expects: DEV_PHYS_PATH string in schema
628 * Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
629 *
630 * path: '/dev/dsk/c0t1d0s0' (persistent)
631 * devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
632 * phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
633 *
634 * linux
635 * provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
636 * Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
637 *
638 * path: '/dev/sdc1' (not persistent)
639 * devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
640 * phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
641 */
642 static int
643 zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
644 {
645 char *devpath = NULL, *devid;
646 boolean_t is_slice;
647
648 /*
649 * Expecting a devid string and an optional physical location
650 */
651 if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0)
652 return (-1);
653
654 (void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);
655
656 is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);
657
658 zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
659 devid, devpath ? devpath : "NULL", is_slice);
660
661 /*
662 * Iterate over all vdevs looking for a match in the following order:
663 * 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
664 * 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
665 *
666 * For disks, we only want to pay attention to vdevs marked as whole
667 * disks or as multipath devices.
668 */
669 if (!devid_iter(devid, zfs_process_add, is_slice) && devpath != NULL)
670 (void) devphys_iter(devpath, devid, zfs_process_add, is_slice);
671
672 return (0);
673 }
674
675 /*
676 * Called when we receive a VDEV_CHECK event, which indicates a device could not
677 * be opened during initial pool open, but the autoreplace property was set on
678 * the pool. In this case, we treat it as if it were an add event.
679 */
680 static int
681 zfs_deliver_check(nvlist_t *nvl)
682 {
683 dev_data_t data = { 0 };
684
685 if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
686 &data.dd_pool_guid) != 0 ||
687 nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
688 &data.dd_vdev_guid) != 0 ||
689 data.dd_vdev_guid == 0)
690 return (0);
691
692 zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
693 data.dd_pool_guid, data.dd_vdev_guid);
694
695 data.dd_func = zfs_process_add;
696
697 (void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
698
699 return (0);
700 }
701
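/*
 * zpool_iter() callback for the ESC_DEV_DLE handler below: locate the vdev
 * by its physical path; for whole-disk vdevs, reopen the pool so the kernel
 * can pick up the new device size, then online the vdev if the pool's
 * autoexpand property is set.
 */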
702 static int
703 zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
704 {
705 char *devname = data;
706 boolean_t avail_spare, l2cache;
707 vdev_state_t newstate;
708 nvlist_t *tgt;
709
710 zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
711 devname, zpool_get_name(zhp));
712
713 if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
714 &avail_spare, &l2cache, NULL)) != NULL) {
715 char *path, fullpath[MAXPATHLEN];
716 uint64_t wholedisk = 0ULL;
717
718 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
719 &path) == 0);
720 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
721 &wholedisk) == 0);
722
723 (void) strlcpy(fullpath, path, sizeof (fullpath));
724 if (wholedisk) {
725 char *spath = zfs_strip_partition(fullpath);
726 boolean_t scrub_restart = B_TRUE;
727
728 if (!spath) {
729 zed_log_msg(LOG_INFO, "%s: Can't alloc",
730 __func__);
731 return (0);
732 }
733
734 (void) strlcpy(fullpath, spath, sizeof (fullpath));
735 free(spath);
736
737 /*
738 * We need to reopen the pool associated with this
739 * device so that the kernel can update the size
740 * of the expanded device.
741 */
742 (void) zpool_reopen_one(zhp, &scrub_restart);
743 }
744
745 if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
746 zed_log_msg(LOG_INFO, "zfsdle_vdev_online: setting "
747 "device '%s' to ONLINE state in pool '%s'",
748 fullpath, zpool_get_name(zhp));
749 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL)
750 (void) zpool_vdev_online(zhp, fullpath, 0,
751 &newstate);
752 }
753 zpool_close(zhp);
754 return (1);
755 }
756 zpool_close(zhp);
757 return (0);
758 }
759
760 /*
761 * This function handles the ESC_DEV_DLE event, which signals that a device may have changed size.
762 */
763 static int
764 zfs_deliver_dle(nvlist_t *nvl)
765 {
766 char *devname;
767
768 if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) != 0) {
769 zed_log_msg(LOG_INFO, "zfs_deliver_dle: no physpath");
770 return (-1);
771 }
772
773 if (zpool_iter(g_zfshdl, zfsdle_vdev_online, devname) != 1) {
774 zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
775 "found", devname);
776 return (1);
777 }
778 return (0);
779 }
780
781 /*
782 * syseventd daemon module event handler
783 *
784 * Handles syseventd daemon zfs device related events:
785 *
786 * EC_DEV_ADD.ESC_DISK
787 * EC_DEV_STATUS.ESC_DEV_DLE
788 * EC_ZFS.ESC_ZFS_VDEV_CHECK
789 *
790 * Note: assumes only one thread active at a time (not thread safe)
791 */
792 static int
793 zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
794 {
795 int ret;
796 boolean_t is_lofi = B_FALSE, is_check = B_FALSE, is_dle = B_FALSE;
797
798 if (strcmp(class, EC_DEV_ADD) == 0) {
799 /*
800 * We're mainly interested in disk additions, but we also listen
801 * for new loop devices, to allow for simplified testing.
802 */
803 if (strcmp(subclass, ESC_DISK) == 0)
804 is_lofi = B_FALSE;
805 else if (strcmp(subclass, ESC_LOFI) == 0)
806 is_lofi = B_TRUE;
807 else
808 return (0);
809
810 is_check = B_FALSE;
811 } else if (strcmp(class, EC_ZFS) == 0 &&
812 strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
813 /*
814 * This event signifies that a device failed to open
815 * during pool load, but the 'autoreplace' property was
816 * set, so we should pretend it's just been added.
817 */
818 is_check = B_TRUE;
819 } else if (strcmp(class, EC_DEV_STATUS) == 0 &&
820 strcmp(subclass, ESC_DEV_DLE) == 0) {
821 is_dle = B_TRUE;
822 } else {
823 return (0);
824 }
825
826 if (is_dle)
827 ret = zfs_deliver_dle(nvl);
828 else if (is_check)
829 ret = zfs_deliver_check(nvl);
830 else
831 ret = zfs_deliver_add(nvl, is_lofi);
832
833 return (ret);
834 }
835
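/*
 * Startup thread: collect the pools that are unavailable at initialization
 * into g_pool_list so their datasets can be enabled as devices arrive.
 */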
836 /*ARGSUSED*/
837 static void *
838 zfs_enum_pools(void *arg)
839 {
840 (void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
841 /*
842 * Linux - instead of using a thread pool, each list entry
843 * will spawn a thread when an unavailable pool transitions
844 * to available. zfs_slm_fini will wait for these threads.
845 */
846 g_enumeration_done = B_TRUE;
847 return (NULL);
848 }
849
850 /*
851 * called from the zed daemon at startup
852 *
853 * handles messages sent from zevents or the udev monitor
854 *
855 * For now, each agent has its own libzfs instance
856 */
857 int
858 zfs_slm_init()
859 {
860 if ((g_zfshdl = __libzfs_init()) == NULL)
861 return (-1);
862
863 /*
864 * collect a list of unavailable pools (asynchronously,
865 * since this can take a while)
866 */
867 list_create(&g_pool_list, sizeof (struct unavailpool),
868 offsetof(struct unavailpool, uap_node));
869
870 if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
871 list_destroy(&g_pool_list);
872 __libzfs_fini(g_zfshdl);
873 return (-1);
874 }
875
876 list_create(&g_device_list, sizeof (struct pendingdev),
877 offsetof(struct pendingdev, pd_node));
878
879 return (0);
880 }
881
882 void
883 zfs_slm_fini()
884 {
885 unavailpool_t *pool;
886 pendingdev_t *device;
887
888 /* wait for zfs_enum_pools thread to complete */
889 (void) pthread_join(g_zfs_tid, NULL);
890
891 while ((pool = (list_head(&g_pool_list))) != NULL) {
892 /*
893 * each pool entry has two possibilities
894 * 1. was made available (so wait for zfs_enable_ds thread)
895 * 2. still unavailable (just close the pool)
896 */
897 if (pool->uap_enable_tid)
898 (void) pthread_join(pool->uap_enable_tid, NULL);
899 else if (pool->uap_zhp != NULL)
900 zpool_close(pool->uap_zhp);
901
902 list_remove(&g_pool_list, pool);
903 free(pool);
904 }
905 list_destroy(&g_pool_list);
906
907 while ((device = (list_head(&g_device_list))) != NULL) {
908 list_remove(&g_device_list, device);
909 free(device);
910 }
911 list_destroy(&g_device_list);
912
913 __libzfs_fini(g_zfshdl);
914 }
915
916 void
917 zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
918 {
919 zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
920 (void) zfs_slm_deliver_event(class, subclass, nvl);
921 }