/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2016, 2017, Intel Corporation.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 */

/*
 * ZFS syseventd module.
 *
 * file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
 *
 * The purpose of this module is to identify when devices are added to the
 * system, and appropriately online or replace the affected vdevs.
 *
 * When a device is added to the system:
 *
 *	1. Search for any vdevs whose devid matches that of the newly added
 *	   device.
 *
 *	2. If no vdevs are found, then search for any vdevs whose udev path
 *	   matches that of the new device.
 *
 *	3. If no vdevs match by either method, then ignore the event.
 *
 *	4. Attempt to online the device with a flag to indicate that it should
 *	   be unspared when resilvering completes.  If this succeeds, then the
 *	   same device was inserted and we should continue normally.
 *
 *	5. If the pool does not have the 'autoreplace' property set, attempt to
 *	   online the device again without the unspare flag, which will
 *	   generate a FMA fault.
 *
 *	6. If the pool has the 'autoreplace' property set, and the matching vdev
 *	   is a whole disk, then label the new disk and attempt a 'zpool
 *	   replace'.
 *
 * The module responds to EC_DEV_ADD events.  The special ESC_ZFS_VDEV_CHECK
 * event indicates that a device failed to open during pool load, but the
 * autoreplace property was set.  In this case, we deferred the associated
 * FMA fault until our module had a chance to process the autoreplace logic.
 * If the device could not be replaced, then the second online attempt will
 * trigger the FMA fault that we skipped earlier.
 *
 * On Linux, udev provides a disk insert event for both the disk and each
 * partition.
 */
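
/*
 * Example (illustrative): the autoreplace path in steps 5 and 6 above is
 * only taken when the pool property is enabled, e.g.:
 *
 *	zpool set autoreplace=on tank
 *
 * where 'tank' is a placeholder pool name.  With autoreplace off, a newly
 * inserted device is onlined (or faulted) but never labeled and attached
 * automatically.
 */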

#include <ctype.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libzfs.h>
#include <libzutil.h>
#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <sys/list.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include "zfs_agents.h"
#include "../zed_log.h"

#define	DEV_BYID_PATH	"/dev/disk/by-id/"
#define	DEV_BYPATH_PATH	"/dev/disk/by-path/"
#define	DEV_BYVDEV_PATH	"/dev/disk/by-vdev/"

typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);

libzfs_handle_t *g_zfshdl;
list_t g_pool_list;	/* list of unavailable pools at initialization */
list_t g_device_list;	/* list of disks with asynchronous label request */
tpool_t *g_tpool;
boolean_t g_enumeration_done;
pthread_t g_zfs_tid;	/* zfs_enum_pools() thread */

typedef struct unavailpool {
	zpool_handle_t	*uap_zhp;
	list_node_t	uap_node;
} unavailpool_t;

typedef struct pendingdev {
	char		pd_physpath[128];
	list_node_t	pd_node;
} pendingdev_t;

static int
zfs_toplevel_state(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	unsigned int c;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	return (vs->vs_state);
}

static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
	zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
	    zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));

	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
		unavailpool_t *uap;
		uap = malloc(sizeof (unavailpool_t));
		if (uap == NULL) {	/* ENOMEM: skip tracking this pool */
			zpool_close(zhp);
			return (0);
		}
		uap->uap_zhp = zhp;
		list_insert_tail((list_t *)data, uap);
	} else {
		zpool_close(zhp);
	}
	return (0);
}

/*
 * Two-stage replace on Linux: because we receive disk notifications for
 * both the disk and its partitions, we can wait for the partitioned disk
 * slice to show up.
 *
 * The first stage tags the disk and initiates asynchronous partitioning,
 * then returns.  The second stage finds the tag and proceeds to the ZFS
 * labeling/replace:
 *
 * disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
 *
 * 1. physical match with no fs, no partition
 *	tag it top, partition disk
 *
 * 2. physical match again, see partition and tag
 */

/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	pendingdev_t *device;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	uint64_t guid = 0ULL;
	char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	char devpath[PATH_MAX];
	int ret;
	boolean_t is_dm = B_FALSE;
	boolean_t is_sd = B_FALSE;
	uint_t c;
	vdev_stat_t *vs;

	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	/* Skip healthy disks */
	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
		    __func__, path);
		return;
	}

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &enc_sysfs_path);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);

	if (offline)
		return;  /* don't intervene if it was taken offline */

	is_dm = zfs_dev_is_dm(path);
	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
	    " wholedisk %d, %s dm (guid %llu)", zpool_get_name(zhp), path,
	    physpath ? physpath : "NULL", wholedisk, is_dm ? "is" : "not",
	    (long long unsigned int)guid);

	/*
	 * The vdev GUID is preferred for identification (it is passed in as
	 * the path).
	 */
	if (guid != 0) {
		(void) snprintf(fullpath, sizeof (fullpath), "%llu",
		    (long long unsigned int)guid);
	} else {
		/*
		 * otherwise use path sans partition suffix for whole disks
		 */
		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk) {
			char *spath = zfs_strip_partition(fullpath);
			if (!spath) {
				zed_log_msg(LOG_INFO, "%s: Can't alloc",
				    __func__);
				return;
			}

			(void) strlcpy(fullpath, spath, sizeof (fullpath));
			free(spath);
		}
	}

	/*
	 * Attempt to online the device.
	 */
	if (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)) {
		zed_log_msg(LOG_INFO, "  zpool_vdev_online: vdev %s is %s",
		    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
		    "HEALTHY" : "DEGRADED");
		return;
	}

	/*
	 * vdev_id alias rule for using scsi_debug devices (FMA automated
	 * testing)
	 */
	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
		is_sd = B_TRUE;

	/*
	 * If the pool doesn't have the autoreplace property set, then use
	 * vdev online to trigger a FMA fault by posting an ereport.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    !(wholedisk || is_dm) || (physpath == NULL)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
		    "not a whole disk for '%s'", fullpath);
		return;
	}

	/*
	 * Convert physical path into its current device node.  Rawpath
	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
	 * /dev/disk/by-path will not be present.
	 */
	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);

	if (realpath(rawpath, devpath) == NULL && !is_dm) {
		zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
		    rawpath, strerror(errno));

		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);

		zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
		    fullpath, libzfs_error_description(g_zfshdl));
		return;
	}

	/* Only autoreplace bad disks */
	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
	    (vs->vs_state != VDEV_STATE_FAULTED) &&
	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
		return;
	}

	(void) nvlist_lookup_string(vdev, "new_devid", &new_devid);

	if (is_dm) {
		/* Don't label device mapper or multipath disks. */
	} else if (!labeled) {
		/*
		 * We're auto-replacing a raw disk, so label it first.
		 */
		char *leafname;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * to map the physical string that was matched on to the
		 * underlying device node.
		 *
		 * If any part of this process fails, then do a force online
		 * to trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		leafname = strrchr(devpath, '/') + 1;

		if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
			zed_log_msg(LOG_INFO, "  zpool_label_disk: could not "
			    "label '%s' (%s)", leafname,
			    libzfs_error_description(g_zfshdl));

			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		/*
		 * The disk labeling is asynchronous on Linux. Just record
		 * this label request and return as there will be another
		 * disk add event for the partition after the labeling is
		 * completed.
		 */
		if ((device = malloc(sizeof (pendingdev_t))) == NULL) {
			zed_log_msg(LOG_WARNING, "zfs_mod: malloc failed");
			return;
		}
		(void) strlcpy(device->pd_physpath, physpath,
		    sizeof (device->pd_physpath));
		list_insert_tail(&g_device_list, device);

		zed_log_msg(LOG_INFO, "  zpool_label_disk: async '%s' (%llu)",
		    leafname, (u_longlong_t)guid);

		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */

	} else /* labeled */ {
		boolean_t found = B_FALSE;
		/*
		 * match up with request above to label the disk
		 */
		for (device = list_head(&g_device_list); device != NULL;
		    device = list_next(&g_device_list, device)) {
			if (strcmp(physpath, device->pd_physpath) == 0) {
				list_remove(&g_device_list, device);
				free(device);
				found = B_TRUE;
				break;
			}
			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
			    physpath, device->pd_physpath);
		}
		if (!found) {
			/* unexpected partition slice encountered */
			zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
			    fullpath);
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
		    physpath, (u_longlong_t)guid);

		(void) snprintf(devpath, sizeof (devpath), "%s%s",
		    DEV_BYID_PATH, new_devid);
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		return;
	}
	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	/*
	 * Wait for udev to verify the links exist, then auto-replace
	 * the leaf disk at the same physical location.
	 */
	if (zpool_label_disk_wait(path, 3000) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
		    "disk %s is missing", path);
		nvlist_free(nvroot);
		return;
	}

	/*
	 * Prefer sequential resilvering when supported (mirrors and dRAID),
	 * otherwise fall back to a traditional healing resilver.
	 */
	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_TRUE);
	if (ret != 0) {
		ret = zpool_vdev_attach(zhp, fullpath, path, nvroot,
		    B_TRUE, B_FALSE);
	}

	zed_log_msg(LOG_INFO, "  zpool_vdev_replace: %s with %s (%s)",
	    fullpath, path, (ret == 0) ? "no errors" :
	    libzfs_error_description(g_zfshdl));

	nvlist_free(nvroot);
}

/*
 * Utility functions to find a vdev matching given criteria.
 */
typedef struct dev_data {
	const char		*dd_compare;
	const char		*dd_prop;
	zfs_process_func_t	dd_func;
	boolean_t		dd_found;
	boolean_t		dd_islabeled;
	uint64_t		dd_pool_guid;
	uint64_t		dd_vdev_guid;
	const char		*dd_new_devid;
} dev_data_t;

static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
	dev_data_t *dp = data;
	char *path = NULL;
	uint_t c, children;
	nvlist_t **child;

	/*
	 * First iterate over any children.
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}

	/*
	 * Iterate over any spares and cache devices
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}

	/* once a vdev was matched and processed there is nothing left to do */
	if (dp->dd_found)
		return;

	/*
	 * Match by GUID if available; otherwise fall back to devid or
	 * physical path.
	 */
	if (dp->dd_vdev_guid != 0) {
		uint64_t guid;

		if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid != dp->dd_vdev_guid) {
			return;
		}
		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched on %llu", guid);
		dp->dd_found = B_TRUE;

	} else if (dp->dd_compare != NULL) {
		/*
		 * NOTE: On Linux there is an event for partition, so unlike
		 * illumos, substring matching is not required to accommodate
		 * the partition suffix. An exact match will be present in
		 * the dp->dd_compare value.
		 */
		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
		    strcmp(dp->dd_compare, path) != 0)
			return;

		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched %s on %s",
		    dp->dd_prop, path);
		dp->dd_found = B_TRUE;

		/* pass the new devid for use by replacing code */
		if (dp->dd_new_devid != NULL) {
			(void) nvlist_add_string(nvl, "new_devid",
			    dp->dd_new_devid);
		}
	}

	(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
}

static void
zfs_enable_ds(void *arg)
{
	unavailpool_t *pool = (unavailpool_t *)arg;

	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
	zpool_close(pool->uap_zhp);
	free(pool);
}

static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvl;
	dev_data_t *dp = data;
	uint64_t pool_guid;
	unavailpool_t *pool;

	zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
	    zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);

	/*
	 * For each vdev in this pool, look for a match to apply dd_func
	 */
	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
		}
	}

	/*
	 * if this pool was originally unavailable,
	 * then enable its datasets asynchronously
	 */
	if (g_enumeration_done) {
		for (pool = list_head(&g_pool_list); pool != NULL;
		    pool = list_next(&g_pool_list, pool)) {

			if (strcmp(zpool_get_name(zhp),
			    zpool_get_name(pool->uap_zhp)))
				continue;
			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
				list_remove(&g_pool_list, pool);
				/* create the thread pool on first use */
				if (g_tpool == NULL)
					g_tpool = tpool_create(1, 4, 0, NULL);
				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
				    pool);
				break;
			}
		}
	}

	zpool_close(zhp);
	return (dp->dd_found);	/* cease iteration after a match */
}

/*
 * Given a physical device location, iterate over all
 * (pool, vdev) pairs which correspond to that location.
 */
static boolean_t
devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
    boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_compare = physical;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;	/* used by auto replace code */

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a device identifier, find any vdevs with a matching devid.
 * On Linux we can match devid directly which is always a whole disk.
 */
static boolean_t
devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_compare = devid;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a device guid, find any vdevs with a matching guid.
 */
static boolean_t
guid_iter(uint64_t pool_guid, uint64_t vdev_guid, const char *devid,
    zfs_process_func_t func, boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_func = func;
	data.dd_found = B_FALSE;
	data.dd_pool_guid = pool_guid;
	data.dd_vdev_guid = vdev_guid;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Handle an EC_DEV_ADD.ESC_DISK event.
 *
 * illumos
 *	Expects: DEV_PHYS_PATH string in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *      path: '/dev/dsk/c0t1d0s0' (persistent)
 *     devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
 * phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
 *
 * linux
 *	provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *      path: '/dev/sdc1' (not persistent)
 *     devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
 * phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
 */
static int
zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
{
	char *devpath = NULL, *devid;
	uint64_t pool_guid = 0, vdev_guid = 0;
	boolean_t is_slice;

	/*
	 * Expecting a devid string and an optional physical location and guid
	 */
	if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0)
		return (-1);

	(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);
	(void) nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID, &pool_guid);
	(void) nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &vdev_guid);

	is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);

	zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
	    devid, devpath ? devpath : "NULL", is_slice);

	/*
	 * Iterate over all vdevs looking for a match in the following order:
	 * 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
	 * 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
	 * 3. ZPOOL_CONFIG_GUID (identifies unique vdev).
	 */
	if (devid_iter(devid, zfs_process_add, is_slice))
		return (0);
	if (devpath != NULL && devphys_iter(devpath, devid, zfs_process_add,
	    is_slice))
		return (0);
	if (vdev_guid != 0)
		(void) guid_iter(pool_guid, vdev_guid, devid, zfs_process_add,
		    is_slice);

	return (0);
}
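
/*
 * For illustration only: a minimal sketch, compiled out below, of the kind
 * of nvlist a test harness might hand to zfs_deliver_add() to simulate the
 * Linux disk-add event documented above.  The keys are the real schema
 * names; the values are hypothetical (borrowed from the example above) and
 * the helper itself is not part of this module.
 */
#if 0
static nvlist_t *
make_example_add_event(void)
{
	nvlist_t *nvl = NULL;

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (NULL);
	/* unique device identifier (matched against ZPOOL_CONFIG_DEVID) */
	(void) nvlist_add_string(nvl, DEV_IDENTIFIER,
	    "ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1");
	/* physical location (matched against ZPOOL_CONFIG_PHYS_PATH) */
	(void) nvlist_add_string(nvl, DEV_PHYS_PATH,
	    "pci-0000:04:00.0-sas-0x4433221106000000-lun-0");
	/* the presence of this key marks the device as a partition */
	(void) nvlist_add_boolean(nvl, DEV_IS_PART);
	return (nvl);
}
#endif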

/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could not
 * be opened during initial pool open, but the autoreplace property was set on
 * the pool.  In this case, we treat it as if it were an add event.
 */
static int
zfs_deliver_check(nvlist_t *nvl)
{
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0 ||
	    data.dd_vdev_guid == 0)
		return (0);

	zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
	    (u_longlong_t)data.dd_pool_guid, (u_longlong_t)data.dd_vdev_guid);

	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (0);
}

static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
	char *devname = data;
	boolean_t avail_spare, l2cache;
	nvlist_t *tgt;
	int error;

	zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
	    devname, zpool_get_name(zhp));

	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
	    &avail_spare, &l2cache, NULL)) != NULL) {
		char *path, fullpath[MAXPATHLEN];
		uint64_t wholedisk;

		error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
		if (error) {
			zpool_close(zhp);
			return (0);
		}

		error = nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (error)
			wholedisk = 0;

		if (wholedisk) {
			path = strrchr(path, '/');
			if (path != NULL) {
				path = zfs_strip_partition(path + 1);
				if (path == NULL) {
					zpool_close(zhp);
					return (0);
				}
			} else {
				zpool_close(zhp);
				return (0);
			}

			(void) strlcpy(fullpath, path, sizeof (fullpath));
			free(path);

			/*
			 * We need to reopen the pool associated with this
			 * device so that the kernel can update the size of
			 * the expanded device.  When expanding there is no
			 * need to restart the scrub from the beginning.
			 */
			boolean_t scrub_restart = B_FALSE;
			(void) zpool_reopen_one(zhp, &scrub_restart);
		} else {
			(void) strlcpy(fullpath, path, sizeof (fullpath));
		}

		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
			vdev_state_t newstate;

			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
				error = zpool_vdev_online(zhp, fullpath, 0,
				    &newstate);
				zed_log_msg(LOG_INFO, "zfsdle_vdev_online: "
				    "setting device '%s' to ONLINE state "
				    "in pool '%s': %d", fullpath,
				    zpool_get_name(zhp), error);
			}
		}
		zpool_close(zhp);
		return (1);
	}
	zpool_close(zhp);
	return (0);
}
/*
 * This function handles the ESC_DEV_DLE device change event.  Use the
 * provided vdev guid when looking up a disk or partition; when the guid
 * is not present, assume the entire disk is owned by ZFS, append the
 * expected -part1 partition information, and then look it up by physical
 * path.
 */
static int
zfs_deliver_dle(nvlist_t *nvl)
{
	char *devname, name[MAXPATHLEN];
	uint64_t guid;

	if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
		(void) snprintf(name, sizeof (name), "%llu",
		    (u_longlong_t)guid);
	} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
		strlcpy(name, devname, MAXPATHLEN);
		zfs_append_partition(name, MAXPATHLEN);
	} else {
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
		return (-1);
	}

	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, name) != 1) {
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
		    "found", name);
		return (1);
	}

	return (0);
}
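
/*
 * For example (illustrative values): a DLE event carrying vdev guid
 * 13841988406148111973 is looked up as the string '13841988406148111973',
 * while one carrying only the physical path
 * 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0' is looked up as
 * 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0-part1', assuming
 * zfs_append_partition() appends '-part1' for such names.
 */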

/*
 * syseventd daemon module event handler
 *
 * Handles syseventd daemon zfs device related events:
 *
 *	EC_DEV_ADD.ESC_DISK
 *	EC_DEV_STATUS.ESC_DEV_DLE
 *	EC_ZFS.ESC_ZFS_VDEV_CHECK
 *
 * Note: assumes only one thread active at a time (not thread safe)
 */
static int
zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
{
	int ret;
	boolean_t is_lofi = B_FALSE, is_check = B_FALSE, is_dle = B_FALSE;

	if (strcmp(class, EC_DEV_ADD) == 0) {
		/*
		 * We're mainly interested in disk additions, but we also listen
		 * for new loop devices, to allow for simplified testing.
		 */
		if (strcmp(subclass, ESC_DISK) == 0)
			is_lofi = B_FALSE;
		else if (strcmp(subclass, ESC_LOFI) == 0)
			is_lofi = B_TRUE;
		else
			return (0);

		is_check = B_FALSE;
	} else if (strcmp(class, EC_ZFS) == 0 &&
	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
		/*
		 * This event signifies that a device failed to open
		 * during pool load, but the 'autoreplace' property was
		 * set, so we should pretend it's just been added.
		 */
		is_check = B_TRUE;
	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
	    strcmp(subclass, ESC_DEV_DLE) == 0) {
		is_dle = B_TRUE;
	} else {
		return (0);
	}

	if (is_dle)
		ret = zfs_deliver_dle(nvl);
	else if (is_check)
		ret = zfs_deliver_check(nvl);
	else
		ret = zfs_deliver_add(nvl, is_lofi);

	return (ret);
}

/*ARGSUSED*/
static void *
zfs_enum_pools(void *arg)
{
	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
	/*
	 * Linux - rather than processing the list here, each list entry
	 * is handed to the thread pool when an unavailable pool
	 * transitions to available.  zfs_slm_fini will wait for these
	 * tasks to complete.
	 */
	g_enumeration_done = B_TRUE;
	return (NULL);
}

/*
 * Called from the zed daemon at startup.
 *
 * Messages are delivered to this module from zevents or the udev monitor.
 *
 * For now, each agent has its own libzfs instance.
 */
int
zfs_slm_init(void)
{
	if ((g_zfshdl = libzfs_init()) == NULL)
		return (-1);

	/*
	 * collect a list of unavailable pools (asynchronously,
	 * since this can take a while)
	 */
	list_create(&g_pool_list, sizeof (struct unavailpool),
	    offsetof(struct unavailpool, uap_node));

	if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
		list_destroy(&g_pool_list);
		libzfs_fini(g_zfshdl);
		return (-1);
	}

	pthread_setname_np(g_zfs_tid, "enum-pools");
	list_create(&g_device_list, sizeof (struct pendingdev),
	    offsetof(struct pendingdev, pd_node));

	return (0);
}

void
zfs_slm_fini(void)
{
	unavailpool_t *pool;
	pendingdev_t *device;

	/* wait for zfs_enum_pools thread to complete */
	(void) pthread_join(g_zfs_tid, NULL);
	/* destroy the thread pool */
	if (g_tpool != NULL) {
		tpool_wait(g_tpool);
		tpool_destroy(g_tpool);
	}

	while ((pool = (list_head(&g_pool_list))) != NULL) {
		list_remove(&g_pool_list, pool);
		zpool_close(pool->uap_zhp);
		free(pool);
	}
	list_destroy(&g_pool_list);

	while ((device = (list_head(&g_device_list))) != NULL) {
		list_remove(&g_device_list, device);
		free(device);
	}
	list_destroy(&g_device_list);

	libzfs_fini(g_zfshdl);
}

void
zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
{
	zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
	(void) zfs_slm_deliver_event(class, subclass, nvl);
}