1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <ctype.h>
28 #include <errno.h>
29 #include <devid.h>
30 #include <fcntl.h>
31 #include <libintl.h>
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <strings.h>
35 #include <unistd.h>
36 #include <sys/efi_partition.h>
37 #include <sys/vtoc.h>
38 #include <sys/zfs_ioctl.h>
39 #include <dlfcn.h>
40 
41 #include "zfs_namecheck.h"
42 #include "zfs_prop.h"
43 #include "libzfs_impl.h"
44 #include "zfs_comutil.h"
45 
46 const char *hist_event_table[LOG_END] = {
47 	"invalid event",
48 	"pool create",
49 	"vdev add",
50 	"pool remove",
51 	"pool destroy",
52 	"pool export",
53 	"pool import",
54 	"vdev attach",
55 	"vdev replace",
56 	"vdev detach",
57 	"vdev online",
58 	"vdev offline",
59 	"vdev upgrade",
60 	"pool clear",
61 	"pool scrub",
62 	"pool property set",
63 	"create",
64 	"clone",
65 	"destroy",
66 	"destroy_begin_sync",
67 	"inherit",
68 	"property set",
69 	"quota set",
70 	"permission update",
71 	"permission remove",
72 	"permission who remove",
73 	"promote",
74 	"receive",
75 	"rename",
76 	"reservation set",
77 	"replay_inc_sync",
78 	"replay_full_sync",
79 	"rollback",
80 	"snapshot",
81 	"filesystem version upgrade",
82 	"refquota set",
83 	"refreservation set",
84 	"pool scrub done",
85 	"user hold",
86 	"user release",
87 	"pool split",
88 };
89 
90 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
91 
92 #if defined(__i386) || defined(__amd64)
93 #define	BOOTCMD	"installgrub(1M)"
94 #else
95 #define	BOOTCMD	"installboot(1M)"
96 #endif
97 
98 #define	DISK_ROOT	"/dev/dsk"
99 #define	RDISK_ROOT	"/dev/rdsk"
100 #define	BACKUP_SLICE	"s2"
101 
102 /*
103  * ====================================================================
104  *   zpool property functions
105  * ====================================================================
106  */
107 
108 static int
109 zpool_get_all_props(zpool_handle_t *zhp)
110 {
111 	zfs_cmd_t zc = { 0 };
112 	libzfs_handle_t *hdl = zhp->zpool_hdl;
113 
114 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
115 
116 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
117 		return (-1);
118 
119 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
120 		if (errno == ENOMEM) {
121 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
122 				zcmd_free_nvlists(&zc);
123 				return (-1);
124 			}
125 		} else {
126 			zcmd_free_nvlists(&zc);
127 			return (-1);
128 		}
129 	}
130 
131 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
132 		zcmd_free_nvlists(&zc);
133 		return (-1);
134 	}
135 
136 	zcmd_free_nvlists(&zc);
137 
138 	return (0);
139 }
140 
141 static int
142 zpool_props_refresh(zpool_handle_t *zhp)
143 {
144 	nvlist_t *old_props;
145 
146 	old_props = zhp->zpool_props;
147 
148 	if (zpool_get_all_props(zhp) != 0)
149 		return (-1);
150 
151 	nvlist_free(old_props);
152 	return (0);
153 }
154 
155 static char *
156 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
157     zprop_source_t *src)
158 {
159 	nvlist_t *nv, *nvl;
160 	uint64_t ival;
161 	char *value;
162 	zprop_source_t source;
163 
164 	nvl = zhp->zpool_props;
165 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
166 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
167 		source = ival;
168 		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
169 	} else {
170 		source = ZPROP_SRC_DEFAULT;
171 		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
172 			value = "-";
173 	}
174 
175 	if (src)
176 		*src = source;
177 
178 	return (value);
179 }
180 
181 uint64_t
182 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
183 {
184 	nvlist_t *nv, *nvl;
185 	uint64_t value;
186 	zprop_source_t source;
187 
188 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
189 		/*
190 		 * zpool_get_all_props() has most likely failed because
191 		 * the pool is faulted, but if all we need is the top level
192 		 * vdev's guid then get it from the zhp config nvlist.
193 		 */
194 		if ((prop == ZPOOL_PROP_GUID) &&
195 		    (nvlist_lookup_nvlist(zhp->zpool_config,
196 		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
197 		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
198 		    == 0)) {
199 			return (value);
200 		}
201 		return (zpool_prop_default_numeric(prop));
202 	}
203 
204 	nvl = zhp->zpool_props;
205 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
206 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
207 		source = value;
208 		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
209 	} else {
210 		source = ZPROP_SRC_DEFAULT;
211 		value = zpool_prop_default_numeric(prop);
212 	}
213 
214 	if (src)
215 		*src = source;
216 
217 	return (value);
218 }
219 
220 /*
221  * Map VDEV STATE to printed strings.
222  */
223 char *
224 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
225 {
226 	switch (state) {
227 	case VDEV_STATE_CLOSED:
228 	case VDEV_STATE_OFFLINE:
229 		return (gettext("OFFLINE"));
230 	case VDEV_STATE_REMOVED:
231 		return (gettext("REMOVED"));
232 	case VDEV_STATE_CANT_OPEN:
233 		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
234 			return (gettext("FAULTED"));
235 		else if (aux == VDEV_AUX_SPLIT_POOL)
236 			return (gettext("SPLIT"));
237 		else
238 			return (gettext("UNAVAIL"));
239 	case VDEV_STATE_FAULTED:
240 		return (gettext("FAULTED"));
241 	case VDEV_STATE_DEGRADED:
242 		return (gettext("DEGRADED"));
243 	case VDEV_STATE_HEALTHY:
244 		return (gettext("ONLINE"));
245 	}
246 
247 	return (gettext("UNKNOWN"));
248 }
249 
250 /*
251  * Get a zpool property value for 'prop' and return the value in
252  * a pre-allocated buffer.
253  */
254 int
255 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
256     zprop_source_t *srctype)
257 {
258 	uint64_t intval;
259 	const char *strval;
260 	zprop_source_t src = ZPROP_SRC_NONE;
261 	nvlist_t *nvroot;
262 	vdev_stat_t *vs;
263 	uint_t vsc;
264 
265 	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
266 		switch (prop) {
267 		case ZPOOL_PROP_NAME:
268 			(void) strlcpy(buf, zpool_get_name(zhp), len);
269 			break;
270 
271 		case ZPOOL_PROP_HEALTH:
272 			(void) strlcpy(buf, "FAULTED", len);
273 			break;
274 
275 		case ZPOOL_PROP_GUID:
276 			intval = zpool_get_prop_int(zhp, prop, &src);
277 			(void) snprintf(buf, len, "%llu", intval);
278 			break;
279 
280 		case ZPOOL_PROP_ALTROOT:
281 		case ZPOOL_PROP_CACHEFILE:
282 			if (zhp->zpool_props != NULL ||
283 			    zpool_get_all_props(zhp) == 0) {
284 				(void) strlcpy(buf,
285 				    zpool_get_prop_string(zhp, prop, &src),
286 				    len);
287 				if (srctype != NULL)
288 					*srctype = src;
289 				return (0);
290 			}
291 			/* FALLTHROUGH */
292 		default:
293 			(void) strlcpy(buf, "-", len);
294 			break;
295 		}
296 
297 		if (srctype != NULL)
298 			*srctype = src;
299 		return (0);
300 	}
301 
302 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
303 	    prop != ZPOOL_PROP_NAME)
304 		return (-1);
305 
306 	switch (zpool_prop_get_type(prop)) {
307 	case PROP_TYPE_STRING:
308 		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
309 		    len);
310 		break;
311 
312 	case PROP_TYPE_NUMBER:
313 		intval = zpool_get_prop_int(zhp, prop, &src);
314 
315 		switch (prop) {
316 		case ZPOOL_PROP_SIZE:
317 		case ZPOOL_PROP_ALLOCATED:
318 		case ZPOOL_PROP_FREE:
319 			(void) zfs_nicenum(intval, buf, len);
320 			break;
321 
322 		case ZPOOL_PROP_CAPACITY:
323 			(void) snprintf(buf, len, "%llu%%",
324 			    (u_longlong_t)intval);
325 			break;
326 
327 		case ZPOOL_PROP_DEDUPRATIO:
328 			(void) snprintf(buf, len, "%llu.%02llux",
329 			    (u_longlong_t)(intval / 100),
330 			    (u_longlong_t)(intval % 100));
331 			break;
332 
333 		case ZPOOL_PROP_HEALTH:
334 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
335 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
336 			verify(nvlist_lookup_uint64_array(nvroot,
337 			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
338 
339 			(void) strlcpy(buf, zpool_state_to_name(intval,
340 			    vs->vs_aux), len);
341 			break;
342 		default:
343 			(void) snprintf(buf, len, "%llu", intval);
344 		}
345 		break;
346 
347 	case PROP_TYPE_INDEX:
348 		intval = zpool_get_prop_int(zhp, prop, &src);
349 		if (zpool_prop_index_to_string(prop, intval, &strval)
350 		    != 0)
351 			return (-1);
352 		(void) strlcpy(buf, strval, len);
353 		break;
354 
355 	default:
356 		abort();
357 	}
358 
359 	if (srctype)
360 		*srctype = src;
361 
362 	return (0);
363 }
364 
365 /*
366  * Check if the bootfs name has the same pool name as it is set to.
367  * Assuming bootfs is a valid dataset name.
368  */
369 static boolean_t
370 bootfs_name_valid(const char *pool, char *bootfs)
371 {
372 	int len = strlen(pool);
373 
374 	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
375 		return (B_FALSE);
376 
377 	if (strncmp(pool, bootfs, len) == 0 &&
378 	    (bootfs[len] == '/' || bootfs[len] == '\0'))
379 		return (B_TRUE);
380 
381 	return (B_FALSE);
382 }
383 
384 /*
385  * Inspect the configuration to determine if any of the devices contain
386  * an EFI label.
387  */
388 static boolean_t
389 pool_uses_efi(nvlist_t *config)
390 {
391 	nvlist_t **child;
392 	uint_t c, children;
393 
394 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
395 	    &child, &children) != 0)
396 		return (read_efi_label(config, NULL) >= 0);
397 
398 	for (c = 0; c < children; c++) {
399 		if (pool_uses_efi(child[c]))
400 			return (B_TRUE);
401 	}
402 	return (B_FALSE);
403 }
404 
405 static boolean_t
406 pool_is_bootable(zpool_handle_t *zhp)
407 {
408 	char bootfs[ZPOOL_MAXNAMELEN];
409 
410 	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
411 	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
412 	    sizeof (bootfs)) != 0);
413 }
414 
415 
416 /*
417  * Given an nvlist of zpool properties to be set, validate that they are
418  * correct, and parse any numeric properties (index, boolean, etc) if they are
419  * specified as strings.
420  */
421 static nvlist_t *
422 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
423     nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
424 {
425 	nvpair_t *elem;
426 	nvlist_t *retprops;
427 	zpool_prop_t prop;
428 	char *strval;
429 	uint64_t intval;
430 	char *slash;
431 	struct stat64 statbuf;
432 	zpool_handle_t *zhp;
433 	nvlist_t *nvroot;
434 
435 	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
436 		(void) no_memory(hdl);
437 		return (NULL);
438 	}
439 
440 	elem = NULL;
441 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
442 		const char *propname = nvpair_name(elem);
443 
444 		/*
445 		 * Make sure this property is valid and applies to this type.
446 		 */
447 		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
448 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
449 			    "invalid property '%s'"), propname);
450 			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
451 			goto error;
452 		}
453 
454 		if (zpool_prop_readonly(prop)) {
455 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
456 			    "is readonly"), propname);
457 			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
458 			goto error;
459 		}
460 
461 		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
462 		    &strval, &intval, errbuf) != 0)
463 			goto error;
464 
465 		/*
466 		 * Perform additional checking for specific properties.
467 		 */
468 		switch (prop) {
469 		case ZPOOL_PROP_VERSION:
470 			if (intval < version || intval > SPA_VERSION) {
471 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
472 				    "property '%s' number %d is invalid."),
473 				    propname, intval);
474 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
475 				goto error;
476 			}
477 			break;
478 
479 		case ZPOOL_PROP_BOOTFS:
480 			if (create_or_import) {
481 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
482 				    "property '%s' cannot be set at creation "
483 				    "or import time"), propname);
484 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
485 				goto error;
486 			}
487 
488 			if (version < SPA_VERSION_BOOTFS) {
489 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
490 				    "pool must be upgraded to support "
491 				    "'%s' property"), propname);
492 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
493 				goto error;
494 			}
495 
496 			/*
497 			 * bootfs property value has to be a dataset name and
498 			 * the dataset has to be in the same pool as it sets to.
499 			 */
500 			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
501 			    strval)) {
502 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
503 				    "is an invalid name"), strval);
504 				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
505 				goto error;
506 			}
507 
508 			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
509 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
510 				    "could not open pool '%s'"), poolname);
511 				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
512 				goto error;
513 			}
514 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
515 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
516 
517 			/*
518 			 * bootfs property cannot be set on a disk which has
519 			 * been EFI labeled.
520 			 */
521 			if (pool_uses_efi(nvroot)) {
522 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
523 				    "property '%s' not supported on "
524 				    "EFI labeled devices"), propname);
525 				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
526 				zpool_close(zhp);
527 				goto error;
528 			}
529 			zpool_close(zhp);
530 			break;
531 
532 		case ZPOOL_PROP_ALTROOT:
533 			if (!create_or_import) {
534 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
535 				    "property '%s' can only be set during pool "
536 				    "creation or import"), propname);
537 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
538 				goto error;
539 			}
540 
541 			if (strval[0] != '/') {
542 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
543 				    "bad alternate root '%s'"), strval);
544 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
545 				goto error;
546 			}
547 			break;
548 
549 		case ZPOOL_PROP_CACHEFILE:
550 			if (strval[0] == '\0')
551 				break;
552 
553 			if (strcmp(strval, "none") == 0)
554 				break;
555 
556 			if (strval[0] != '/') {
557 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
558 				    "property '%s' must be empty, an "
559 				    "absolute path, or 'none'"), propname);
560 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
561 				goto error;
562 			}
563 
564 			slash = strrchr(strval, '/');
565 
566 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
567 			    strcmp(slash, "/..") == 0) {
568 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
569 				    "'%s' is not a valid file"), strval);
570 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
571 				goto error;
572 			}
573 
574 			*slash = '\0';
575 
576 			if (strval[0] != '\0' &&
577 			    (stat64(strval, &statbuf) != 0 ||
578 			    !S_ISDIR(statbuf.st_mode))) {
579 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
580 				    "'%s' is not a valid directory"),
581 				    strval);
582 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
583 				goto error;
584 			}
585 
586 			*slash = '/';
587 			break;
588 		}
589 	}
590 
591 	return (retprops);
592 error:
593 	nvlist_free(retprops);
594 	return (NULL);
595 }
596 
597 /*
598  * Set zpool property : propname=propval.
599  */
600 int
601 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
602 {
603 	zfs_cmd_t zc = { 0 };
604 	int ret = -1;
605 	char errbuf[1024];
606 	nvlist_t *nvl = NULL;
607 	nvlist_t *realprops;
608 	uint64_t version;
609 
610 	(void) snprintf(errbuf, sizeof (errbuf),
611 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
612 	    zhp->zpool_name);
613 
614 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
615 		return (no_memory(zhp->zpool_hdl));
616 
617 	if (nvlist_add_string(nvl, propname, propval) != 0) {
618 		nvlist_free(nvl);
619 		return (no_memory(zhp->zpool_hdl));
620 	}
621 
622 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
623 	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
624 	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
625 		nvlist_free(nvl);
626 		return (-1);
627 	}
628 
629 	nvlist_free(nvl);
630 	nvl = realprops;
631 
632 	/*
633 	 * Execute the corresponding ioctl() to set this property.
634 	 */
635 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
636 
637 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
638 		nvlist_free(nvl);
639 		return (-1);
640 	}
641 
642 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
643 
644 	zcmd_free_nvlists(&zc);
645 	nvlist_free(nvl);
646 
647 	if (ret)
648 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
649 	else
650 		(void) zpool_props_refresh(zhp);
651 
652 	return (ret);
653 }
654 
655 int
656 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
657 {
658 	libzfs_handle_t *hdl = zhp->zpool_hdl;
659 	zprop_list_t *entry;
660 	char buf[ZFS_MAXPROPLEN];
661 
662 	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
663 		return (-1);
664 
665 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
666 
667 		if (entry->pl_fixed)
668 			continue;
669 
670 		if (entry->pl_prop != ZPROP_INVAL &&
671 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
672 		    NULL) == 0) {
673 			if (strlen(buf) > entry->pl_width)
674 				entry->pl_width = strlen(buf);
675 		}
676 	}
677 
678 	return (0);
679 }
680 
681 
682 /*
683  * Don't start the slice at the default block of 34; many storage
684  * devices will use a stripe width of 128k, so start there instead.
685  */
686 #define	NEW_START_BLOCK	256
687 
688 /*
689  * Validate the given pool name, optionally putting an extended error message in
690  * 'buf'.
691  */
692 boolean_t
693 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
694 {
695 	namecheck_err_t why;
696 	char what;
697 	int ret;
698 
699 	ret = pool_namecheck(pool, &why, &what);
700 
701 	/*
702 	 * The rules for reserved pool names were extended at a later point.
703 	 * But we need to support users with existing pools that may now be
704 	 * invalid.  So we only check for this expanded set of names during a
705 	 * create (or import), and only in userland.
706 	 */
707 	if (ret == 0 && !isopen &&
708 	    (strncmp(pool, "mirror", 6) == 0 ||
709 	    strncmp(pool, "raidz", 5) == 0 ||
710 	    strncmp(pool, "spare", 5) == 0 ||
711 	    strcmp(pool, "log") == 0)) {
712 		if (hdl != NULL)
713 			zfs_error_aux(hdl,
714 			    dgettext(TEXT_DOMAIN, "name is reserved"));
715 		return (B_FALSE);
716 	}
717 
718 
719 	if (ret != 0) {
720 		if (hdl != NULL) {
721 			switch (why) {
722 			case NAME_ERR_TOOLONG:
723 				zfs_error_aux(hdl,
724 				    dgettext(TEXT_DOMAIN, "name is too long"));
725 				break;
726 
727 			case NAME_ERR_INVALCHAR:
728 				zfs_error_aux(hdl,
729 				    dgettext(TEXT_DOMAIN, "invalid character "
730 				    "'%c' in pool name"), what);
731 				break;
732 
733 			case NAME_ERR_NOLETTER:
734 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
735 				    "name must begin with a letter"));
736 				break;
737 
738 			case NAME_ERR_RESERVED:
739 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
740 				    "name is reserved"));
741 				break;
742 
743 			case NAME_ERR_DISKLIKE:
744 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
745 				    "pool name is reserved"));
746 				break;
747 
748 			case NAME_ERR_LEADING_SLASH:
749 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
750 				    "leading slash in name"));
751 				break;
752 
753 			case NAME_ERR_EMPTY_COMPONENT:
754 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
755 				    "empty component in name"));
756 				break;
757 
758 			case NAME_ERR_TRAILING_SLASH:
759 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
760 				    "trailing slash in name"));
761 				break;
762 
763 			case NAME_ERR_MULTIPLE_AT:
764 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
765 				    "multiple '@' delimiters in name"));
766 				break;
767 
768 			}
769 		}
770 		return (B_FALSE);
771 	}
772 
773 	return (B_TRUE);
774 }
775 
776 /*
777  * Open a handle to the given pool, even if the pool is currently in the FAULTED
778  * state.
779  */
780 zpool_handle_t *
781 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
782 {
783 	zpool_handle_t *zhp;
784 	boolean_t missing;
785 
786 	/*
787 	 * Make sure the pool name is valid.
788 	 */
789 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
790 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
791 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
792 		    pool);
793 		return (NULL);
794 	}
795 
796 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
797 		return (NULL);
798 
799 	zhp->zpool_hdl = hdl;
800 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
801 
802 	if (zpool_refresh_stats(zhp, &missing) != 0) {
803 		zpool_close(zhp);
804 		return (NULL);
805 	}
806 
807 	if (missing) {
808 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
809 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
810 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
811 		zpool_close(zhp);
812 		return (NULL);
813 	}
814 
815 	return (zhp);
816 }
817 
818 /*
819  * Like the above, but silent on error.  Used when iterating over pools (because
820  * the configuration cache may be out of date).
821  */
822 int
823 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
824 {
825 	zpool_handle_t *zhp;
826 	boolean_t missing;
827 
828 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
829 		return (-1);
830 
831 	zhp->zpool_hdl = hdl;
832 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
833 
834 	if (zpool_refresh_stats(zhp, &missing) != 0) {
835 		zpool_close(zhp);
836 		return (-1);
837 	}
838 
839 	if (missing) {
840 		zpool_close(zhp);
841 		*ret = NULL;
842 		return (0);
843 	}
844 
845 	*ret = zhp;
846 	return (0);
847 }
848 
849 /*
850  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
851  * state.
852  */
853 zpool_handle_t *
854 zpool_open(libzfs_handle_t *hdl, const char *pool)
855 {
856 	zpool_handle_t *zhp;
857 
858 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
859 		return (NULL);
860 
861 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
862 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
863 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
864 		zpool_close(zhp);
865 		return (NULL);
866 	}
867 
868 	return (zhp);
869 }
870 
871 /*
872  * Close the handle.  Simply frees the memory associated with the handle.
873  */
874 void
875 zpool_close(zpool_handle_t *zhp)
876 {
877 	if (zhp->zpool_config)
878 		nvlist_free(zhp->zpool_config);
879 	if (zhp->zpool_old_config)
880 		nvlist_free(zhp->zpool_old_config);
881 	if (zhp->zpool_props)
882 		nvlist_free(zhp->zpool_props);
883 	free(zhp);
884 }
885 
886 /*
887  * Return the name of the pool.
888  */
889 const char *
890 zpool_get_name(zpool_handle_t *zhp)
891 {
892 	return (zhp->zpool_name);
893 }
894 
895 
896 /*
897  * Return the state of the pool (ACTIVE or UNAVAILABLE)
898  */
899 int
900 zpool_get_state(zpool_handle_t *zhp)
901 {
902 	return (zhp->zpool_state);
903 }
904 
905 /*
906  * Create the named pool, using the provided vdev list.  It is assumed
907  * that the consumer has already validated the contents of the nvlist, so we
908  * don't have to worry about error semantics.
909  */
910 int
911 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
912     nvlist_t *props, nvlist_t *fsprops)
913 {
914 	zfs_cmd_t zc = { 0 };
915 	nvlist_t *zc_fsprops = NULL;
916 	nvlist_t *zc_props = NULL;
917 	char msg[1024];
918 	char *altroot;
919 	int ret = -1;
920 
921 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
922 	    "cannot create '%s'"), pool);
923 
924 	if (!zpool_name_valid(hdl, B_FALSE, pool))
925 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
926 
927 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
928 		return (-1);
929 
930 	if (props) {
931 		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
932 		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
933 			goto create_failed;
934 		}
935 	}
936 
937 	if (fsprops) {
938 		uint64_t zoned;
939 		char *zonestr;
940 
941 		zoned = ((nvlist_lookup_string(fsprops,
942 		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
943 		    strcmp(zonestr, "on") == 0);
944 
945 		if ((zc_fsprops = zfs_valid_proplist(hdl,
946 		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
947 			goto create_failed;
948 		}
949 		if (!zc_props &&
950 		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
951 			goto create_failed;
952 		}
953 		if (nvlist_add_nvlist(zc_props,
954 		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
955 			goto create_failed;
956 		}
957 	}
958 
959 	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
960 		goto create_failed;
961 
962 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
963 
964 	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
965 
966 		zcmd_free_nvlists(&zc);
967 		nvlist_free(zc_props);
968 		nvlist_free(zc_fsprops);
969 
970 		switch (errno) {
971 		case EBUSY:
972 			/*
973 			 * This can happen if the user has specified the same
974 			 * device multiple times.  We can't reliably detect this
975 			 * until we try to add it and see we already have a
976 			 * label.
977 			 */
978 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
979 			    "one or more vdevs refer to the same device"));
980 			return (zfs_error(hdl, EZFS_BADDEV, msg));
981 
982 		case EOVERFLOW:
983 			/*
984 			 * This occurs when one of the devices is below
985 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
986 			 * device was the problem device since there's no
987 			 * reliable way to determine device size from userland.
988 			 */
989 			{
990 				char buf[64];
991 
992 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
993 
994 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
995 				    "one or more devices is less than the "
996 				    "minimum size (%s)"), buf);
997 			}
998 			return (zfs_error(hdl, EZFS_BADDEV, msg));
999 
1000 		case ENOSPC:
1001 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1002 			    "one or more devices is out of space"));
1003 			return (zfs_error(hdl, EZFS_BADDEV, msg));
1004 
1005 		case ENOTBLK:
1006 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1007 			    "cache device must be a disk or disk slice"));
1008 			return (zfs_error(hdl, EZFS_BADDEV, msg));
1009 
1010 		default:
1011 			return (zpool_standard_error(hdl, errno, msg));
1012 		}
1013 	}
1014 
1015 	/*
1016 	 * If this is an alternate root pool, then we automatically set the
1017 	 * mountpoint of the root dataset to be '/'.
1018 	 */
1019 	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
1020 	    &altroot) == 0) {
1021 		zfs_handle_t *zhp;
1022 
1023 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
1024 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
1025 		    "/") == 0);
1026 
1027 		zfs_close(zhp);
1028 	}
1029 
1030 create_failed:
1031 	zcmd_free_nvlists(&zc);
1032 	nvlist_free(zc_props);
1033 	nvlist_free(zc_fsprops);
1034 	return (ret);
1035 }
1036 
1037 /*
1038  * Destroy the given pool.  It is up to the caller to ensure that there are no
1039  * datasets left in the pool.
1040  */
1041 int
1042 zpool_destroy(zpool_handle_t *zhp)
1043 {
1044 	zfs_cmd_t zc = { 0 };
1045 	zfs_handle_t *zfp = NULL;
1046 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1047 	char msg[1024];
1048 
1049 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1050 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
1051 	    ZFS_TYPE_FILESYSTEM)) == NULL)
1052 		return (-1);
1053 
1054 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1055 
1056 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1057 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1058 		    "cannot destroy '%s'"), zhp->zpool_name);
1059 
1060 		if (errno == EROFS) {
1061 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1062 			    "one or more devices is read only"));
1063 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1064 		} else {
1065 			(void) zpool_standard_error(hdl, errno, msg);
1066 		}
1067 
1068 		if (zfp)
1069 			zfs_close(zfp);
1070 		return (-1);
1071 	}
1072 
1073 	if (zfp) {
1074 		remove_mountpoint(zfp);
1075 		zfs_close(zfp);
1076 	}
1077 
1078 	return (0);
1079 }
1080 
1081 /*
1082  * Add the given vdevs to the pool.  The caller must have already performed the
1083  * necessary verification to ensure that the vdev specification is well-formed.
1084  */
1085 int
1086 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1087 {
1088 	zfs_cmd_t zc = { 0 };
1089 	int ret;
1090 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1091 	char msg[1024];
1092 	nvlist_t **spares, **l2cache;
1093 	uint_t nspares, nl2cache;
1094 
1095 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1096 	    "cannot add to '%s'"), zhp->zpool_name);
1097 
1098 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1099 	    SPA_VERSION_SPARES &&
1100 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1101 	    &spares, &nspares) == 0) {
1102 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1103 		    "upgraded to add hot spares"));
1104 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1105 	}
1106 
1107 	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1108 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1109 		uint64_t s;
1110 
1111 		for (s = 0; s < nspares; s++) {
1112 			char *path;
1113 
1114 			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1115 			    &path) == 0 && pool_uses_efi(spares[s])) {
1116 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1117 				    "device '%s' contains an EFI label and "
1118 				    "cannot be used on root pools."),
1119 				    zpool_vdev_name(hdl, NULL, spares[s],
1120 				    B_FALSE));
1121 				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1122 			}
1123 		}
1124 	}
1125 
1126 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1127 	    SPA_VERSION_L2CACHE &&
1128 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1129 	    &l2cache, &nl2cache) == 0) {
1130 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1131 		    "upgraded to add cache devices"));
1132 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1133 	}
1134 
1135 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1136 		return (-1);
1137 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1138 
1139 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1140 		switch (errno) {
1141 		case EBUSY:
1142 			/*
1143 			 * This can happen if the user has specified the same
1144 			 * device multiple times.  We can't reliably detect this
1145 			 * until we try to add it and see we already have a
1146 			 * label.
1147 			 */
1148 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1149 			    "one or more vdevs refer to the same device"));
1150 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1151 			break;
1152 
1153 		case EOVERFLOW:
1154 			/*
1155 			 * This occurrs when one of the devices is below
1156 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
1157 			 * device was the problem device since there's no
1158 			 * reliable way to determine device size from userland.
1159 			 */
1160 			{
1161 				char buf[64];
1162 
1163 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1164 
1165 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1166 				    "device is less than the minimum "
1167 				    "size (%s)"), buf);
1168 			}
1169 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1170 			break;
1171 
1172 		case ENOTSUP:
1173 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1174 			    "pool must be upgraded to add these vdevs"));
1175 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
1176 			break;
1177 
1178 		case EDOM:
1179 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1180 			    "root pool can not have multiple vdevs"
1181 			    " or separate logs"));
1182 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1183 			break;
1184 
1185 		case ENOTBLK:
1186 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1187 			    "cache device must be a disk or disk slice"));
1188 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1189 			break;
1190 
1191 		default:
1192 			(void) zpool_standard_error(hdl, errno, msg);
1193 		}
1194 
1195 		ret = -1;
1196 	} else {
1197 		ret = 0;
1198 	}
1199 
1200 	zcmd_free_nvlists(&zc);
1201 
1202 	return (ret);
1203 }
1204 
1205 /*
1206  * Exports the pool from the system.  The caller must ensure that there are no
1207  * mounted datasets in the pool.
1208  */
1209 int
1210 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
1211 {
1212 	zfs_cmd_t zc = { 0 };
1213 	char msg[1024];
1214 
1215 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1216 	    "cannot export '%s'"), zhp->zpool_name);
1217 
1218 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1219 	zc.zc_cookie = force;
1220 	zc.zc_guid = hardforce;
1221 
1222 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1223 		switch (errno) {
1224 		case EXDEV:
1225 			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1226 			    "use '-f' to override the following errors:\n"
1227 			    "'%s' has an active shared spare which could be"
1228 			    " used by other pools once '%s' is exported."),
1229 			    zhp->zpool_name, zhp->zpool_name);
1230 			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1231 			    msg));
1232 		default:
1233 			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1234 			    msg));
1235 		}
1236 	}
1237 
1238 	return (0);
1239 }
1240 
1241 int
1242 zpool_export(zpool_handle_t *zhp, boolean_t force)
1243 {
1244 	return (zpool_export_common(zhp, force, B_FALSE));
1245 }
1246 
1247 int
1248 zpool_export_force(zpool_handle_t *zhp)
1249 {
1250 	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
1251 }
1252 
1253 static void
1254 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1255     nvlist_t *rbi)
1256 {
1257 	uint64_t rewindto;
1258 	int64_t loss = -1;
1259 	struct tm t;
1260 	char timestr[128];
1261 
1262 	if (!hdl->libzfs_printerr || rbi == NULL)
1263 		return;
1264 
1265 	if (nvlist_lookup_uint64(rbi, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1266 		return;
1267 	(void) nvlist_lookup_int64(rbi, ZPOOL_CONFIG_REWIND_TIME, &loss);
1268 
1269 	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1270 	    strftime(timestr, 128, 0, &t) != 0) {
1271 		if (dryrun) {
1272 			(void) printf(dgettext(TEXT_DOMAIN,
1273 			    "Would be able to return %s "
1274 			    "to its state as of %s.\n"),
1275 			    name, timestr);
1276 		} else {
1277 			(void) printf(dgettext(TEXT_DOMAIN,
1278 			    "Pool %s returned to its state as of %s.\n"),
1279 			    name, timestr);
1280 		}
1281 		if (loss > 120) {
1282 			(void) printf(dgettext(TEXT_DOMAIN,
1283 			    "%s approximately %lld "),
1284 			    dryrun ? "Would discard" : "Discarded",
1285 			    (loss + 30) / 60);
1286 			(void) printf(dgettext(TEXT_DOMAIN,
1287 			    "minutes of transactions.\n"));
1288 		} else if (loss > 0) {
1289 			(void) printf(dgettext(TEXT_DOMAIN,
1290 			    "%s approximately %lld "),
1291 			    dryrun ? "Would discard" : "Discarded", loss);
1292 			(void) printf(dgettext(TEXT_DOMAIN,
1293 			    "seconds of transactions.\n"));
1294 		}
1295 	}
1296 }
1297 
1298 void
1299 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1300     nvlist_t *config)
1301 {
1302 	int64_t loss = -1;
1303 	uint64_t edata = UINT64_MAX;
1304 	uint64_t rewindto;
1305 	struct tm t;
1306 	char timestr[128];
1307 
1308 	if (!hdl->libzfs_printerr)
1309 		return;
1310 
1311 	if (reason >= 0)
1312 		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
1313 	else
1314 		(void) printf(dgettext(TEXT_DOMAIN, "\t"));
1315 
1316 	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1317 	if (nvlist_lookup_uint64(config,
1318 	    ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1319 		goto no_info;
1320 
1321 	(void) nvlist_lookup_int64(config, ZPOOL_CONFIG_REWIND_TIME, &loss);
1322 	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1323 	    &edata);
1324 
1325 	(void) printf(dgettext(TEXT_DOMAIN,
1326 	    "Recovery is possible, but will result in some data loss.\n"));
1327 
1328 	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1329 	    strftime(timestr, 128, 0, &t) != 0) {
1330 		(void) printf(dgettext(TEXT_DOMAIN,
1331 		    "\tReturning the pool to its state as of %s\n"
1332 		    "\tshould correct the problem.  "),
1333 		    timestr);
1334 	} else {
1335 		(void) printf(dgettext(TEXT_DOMAIN,
1336 		    "\tReverting the pool to an earlier state "
1337 		    "should correct the problem.\n\t"));
1338 	}
1339 
1340 	if (loss > 120) {
1341 		(void) printf(dgettext(TEXT_DOMAIN,
1342 		    "Approximately %lld minutes of data\n"
1343 		    "\tmust be discarded, irreversibly.  "), (loss + 30) / 60);
1344 	} else if (loss > 0) {
1345 		(void) printf(dgettext(TEXT_DOMAIN,
1346 		    "Approximately %lld seconds of data\n"
1347 		    "\tmust be discarded, irreversibly.  "), loss);
1348 	}
1349 	if (edata != 0 && edata != UINT64_MAX) {
1350 		if (edata == 1) {
1351 			(void) printf(dgettext(TEXT_DOMAIN,
1352 			    "After rewind, at least\n"
1353 			    "\tone persistent user-data error will remain.  "));
1354 		} else {
1355 			(void) printf(dgettext(TEXT_DOMAIN,
1356 			    "After rewind, several\n"
1357 			    "\tpersistent user-data errors will remain.  "));
1358 		}
1359 	}
1360 	(void) printf(dgettext(TEXT_DOMAIN,
1361 	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
1362 	    reason >= 0 ? "clear" : "import", name);
1363 
1364 	(void) printf(dgettext(TEXT_DOMAIN,
1365 	    "A scrub of the pool\n"
1366 	    "\tis strongly recommended after recovery.\n"));
1367 	return;
1368 
1369 no_info:
1370 	(void) printf(dgettext(TEXT_DOMAIN,
1371 	    "Destroy and re-create the pool from\n\ta backup source.\n"));
1372 }
1373 
1374 /*
1375  * zpool_import() is a contracted interface. Should be kept the same
1376  * if possible.
1377  *
1378  * Applications should use zpool_import_props() to import a pool with
1379  * new properties value to be set.
1380  */
1381 int
1382 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1383     char *altroot)
1384 {
1385 	nvlist_t *props = NULL;
1386 	int ret;
1387 
1388 	if (altroot != NULL) {
1389 		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1390 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1391 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1392 			    newname));
1393 		}
1394 
1395 		if (nvlist_add_string(props,
1396 		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1397 		    nvlist_add_string(props,
1398 		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1399 			nvlist_free(props);
1400 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1401 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1402 			    newname));
1403 		}
1404 	}
1405 
1406 	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
1407 	if (props)
1408 		nvlist_free(props);
1409 	return (ret);
1410 }
1411 
1412 /*
1413  * Import the given pool using the known configuration and a list of
1414  * properties to be set. The configuration should have come from
1415  * zpool_find_import(). The 'newname' parameters control whether the pool
1416  * is imported with a different name.
1417  */
1418 int
1419 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1420     nvlist_t *props, boolean_t importfaulted)
1421 {
1422 	zfs_cmd_t zc = { 0 };
1423 	zpool_rewind_policy_t policy;
1424 	nvlist_t *nvi = NULL;
1425 	char *thename;
1426 	char *origname;
1427 	uint64_t returned_size;
1428 	int ret;
1429 	char errbuf[1024];
1430 
1431 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1432 	    &origname) == 0);
1433 
1434 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1435 	    "cannot import pool '%s'"), origname);
1436 
1437 	if (newname != NULL) {
1438 		if (!zpool_name_valid(hdl, B_FALSE, newname))
1439 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1440 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1441 			    newname));
1442 		thename = (char *)newname;
1443 	} else {
1444 		thename = origname;
1445 	}
1446 
1447 	if (props) {
1448 		uint64_t version;
1449 
1450 		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1451 		    &version) == 0);
1452 
1453 		if ((props = zpool_valid_proplist(hdl, origname,
1454 		    props, version, B_TRUE, errbuf)) == NULL) {
1455 			return (-1);
1456 		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1457 			nvlist_free(props);
1458 			return (-1);
1459 		}
1460 	}
1461 
1462 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1463 
1464 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1465 	    &zc.zc_guid) == 0);
1466 
1467 	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1468 		nvlist_free(props);
1469 		return (-1);
1470 	}
1471 	returned_size =  zc.zc_nvlist_conf_size + 512;
1472 	if (zcmd_alloc_dst_nvlist(hdl, &zc, returned_size) != 0) {
1473 		nvlist_free(props);
1474 		return (-1);
1475 	}
1476 
1477 	zc.zc_cookie = (uint64_t)importfaulted;
1478 	ret = 0;
1479 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
1480 		char desc[1024];
1481 
1482 		(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
1483 		zpool_get_rewind_policy(config, &policy);
1484 		/*
1485 		 * Dry-run failed, but we print out what success
1486 		 * looks like if we found a best txg
1487 		 */
1488 		if ((policy.zrp_request & ZPOOL_TRY_REWIND) && nvi) {
1489 			zpool_rewind_exclaim(hdl, newname ? origname : thename,
1490 			    B_TRUE, nvi);
1491 			nvlist_free(nvi);
1492 			return (-1);
1493 		}
1494 
1495 		if (newname == NULL)
1496 			(void) snprintf(desc, sizeof (desc),
1497 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1498 			    thename);
1499 		else
1500 			(void) snprintf(desc, sizeof (desc),
1501 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1502 			    origname, thename);
1503 
1504 		switch (errno) {
1505 		case ENOTSUP:
1506 			/*
1507 			 * Unsupported version.
1508 			 */
1509 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
1510 			break;
1511 
1512 		case EINVAL:
1513 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1514 			break;
1515 
1516 		case EROFS:
1517 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1518 			    "one or more devices is read only"));
1519 			(void) zfs_error(hdl, EZFS_BADDEV, desc);
1520 			break;
1521 
1522 		default:
1523 			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
1524 			(void) zpool_standard_error(hdl, errno, desc);
1525 			zpool_explain_recover(hdl,
1526 			    newname ? origname : thename, -errno, nvi);
1527 			nvlist_free(nvi);
1528 			break;
1529 		}
1530 
1531 		ret = -1;
1532 	} else {
1533 		zpool_handle_t *zhp;
1534 
1535 		/*
1536 		 * This should never fail, but play it safe anyway.
1537 		 */
1538 		if (zpool_open_silent(hdl, thename, &zhp) != 0)
1539 			ret = -1;
1540 		else if (zhp != NULL)
1541 			zpool_close(zhp);
1542 		(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
1543 		zpool_get_rewind_policy(config, &policy);
1544 		if (policy.zrp_request &
1545 		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1546 			zpool_rewind_exclaim(hdl, newname ? origname : thename,
1547 			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
1548 			    nvi);
1549 		}
1550 		nvlist_free(nvi);
1551 		return (0);
1552 	}
1553 
1554 	zcmd_free_nvlists(&zc);
1555 	nvlist_free(props);
1556 
1557 	return (ret);
1558 }
1559 
1560 /*
1561  * Scrub the pool.
1562  */
1563 int
1564 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
1565 {
1566 	zfs_cmd_t zc = { 0 };
1567 	char msg[1024];
1568 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1569 
1570 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1571 	zc.zc_cookie = type;
1572 
1573 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
1574 		return (0);
1575 
1576 	(void) snprintf(msg, sizeof (msg),
1577 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1578 
1579 	if (errno == EBUSY)
1580 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
1581 	else
1582 		return (zpool_standard_error(hdl, errno, msg));
1583 }
1584 
1585 /*
1586  * Find a vdev that matches the search criteria specified. We use the
1587  * the nvpair name to determine how we should look for the device.
1588  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1589  * spare; but FALSE if its an INUSE spare.
1590  */
1591 static nvlist_t *
1592 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1593     boolean_t *l2cache, boolean_t *log)
1594 {
1595 	uint_t c, children;
1596 	nvlist_t **child;
1597 	nvlist_t *ret;
1598 	uint64_t is_log;
1599 	char *srchkey;
1600 	nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1601 
1602 	/* Nothing to look for */
1603 	if (search == NULL || pair == NULL)
1604 		return (NULL);
1605 
1606 	/* Obtain the key we will use to search */
1607 	srchkey = nvpair_name(pair);
1608 
1609 	switch (nvpair_type(pair)) {
1610 	case DATA_TYPE_UINT64: {
1611 		uint64_t srchval, theguid, present;
1612 
1613 		verify(nvpair_value_uint64(pair, &srchval) == 0);
1614 		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1615 			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1616 			    &present) == 0) {
1617 				/*
1618 				 * If the device has never been present since
1619 				 * import, the only reliable way to match the
1620 				 * vdev is by GUID.
1621 				 */
1622 				verify(nvlist_lookup_uint64(nv,
1623 				    ZPOOL_CONFIG_GUID, &theguid) == 0);
1624 				if (theguid == srchval)
1625 					return (nv);
1626 			}
1627 		}
1628 		break;
1629 	}
1630 
1631 	case DATA_TYPE_STRING: {
1632 		char *srchval, *val;
1633 
1634 		verify(nvpair_value_string(pair, &srchval) == 0);
1635 		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1636 			break;
1637 
1638 		/*
1639 		 * Search for the requested value. We special case the search
1640 		 * for ZPOOL_CONFIG_PATH when it's a wholedisk and when
1641 		 * Looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1642 		 * Otherwise, all other searches are simple string compares.
1643 		 */
1644 		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && val) {
1645 			uint64_t wholedisk = 0;
1646 
1647 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1648 			    &wholedisk);
1649 			if (wholedisk) {
1650 				/*
1651 				 * For whole disks, the internal path has 's0',
1652 				 * but the path passed in by the user doesn't.
1653 				 */
1654 				if (strlen(srchval) == strlen(val) - 2 &&
1655 				    strncmp(srchval, val, strlen(srchval)) == 0)
1656 					return (nv);
1657 				break;
1658 			}
1659 		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
1660 			char *type, *idx, *end, *p;
1661 			uint64_t id, vdev_id;
1662 
1663 			/*
1664 			 * Determine our vdev type, keeping in mind
1665 			 * that the srchval is composed of a type and
1666 			 * vdev id pair (i.e. mirror-4).
1667 			 */
1668 			if ((type = strdup(srchval)) == NULL)
1669 				return (NULL);
1670 
1671 			if ((p = strrchr(type, '-')) == NULL) {
1672 				free(type);
1673 				break;
1674 			}
1675 			idx = p + 1;
1676 			*p = '\0';
1677 
1678 			/*
1679 			 * If the types don't match then keep looking.
1680 			 */
1681 			if (strncmp(val, type, strlen(val)) != 0) {
1682 				free(type);
1683 				break;
1684 			}
1685 
1686 			verify(strncmp(type, VDEV_TYPE_RAIDZ,
1687 			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1688 			    strncmp(type, VDEV_TYPE_MIRROR,
1689 			    strlen(VDEV_TYPE_MIRROR)) == 0);
1690 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
1691 			    &id) == 0);
1692 
1693 			errno = 0;
1694 			vdev_id = strtoull(idx, &end, 10);
1695 
1696 			free(type);
1697 			if (errno != 0)
1698 				return (NULL);
1699 
1700 			/*
1701 			 * Now verify that we have the correct vdev id.
1702 			 */
1703 			if (vdev_id == id)
1704 				return (nv);
1705 		}
1706 
1707 		/*
1708 		 * Common case
1709 		 */
1710 		if (strcmp(srchval, val) == 0)
1711 			return (nv);
1712 		break;
1713 	}
1714 
1715 	default:
1716 		break;
1717 	}
1718 
1719 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1720 	    &child, &children) != 0)
1721 		return (NULL);
1722 
1723 	for (c = 0; c < children; c++) {
1724 		if ((ret = vdev_to_nvlist_iter(child[c], search,
1725 		    avail_spare, l2cache, NULL)) != NULL) {
1726 			/*
1727 			 * The 'is_log' value is only set for the toplevel
1728 			 * vdev, not the leaf vdevs.  So we always lookup the
1729 			 * log device from the root of the vdev tree (where
1730 			 * 'log' is non-NULL).
1731 			 */
1732 			if (log != NULL &&
1733 			    nvlist_lookup_uint64(child[c],
1734 			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
1735 			    is_log) {
1736 				*log = B_TRUE;
1737 			}
1738 			return (ret);
1739 		}
1740 	}
1741 
1742 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1743 	    &child, &children) == 0) {
1744 		for (c = 0; c < children; c++) {
1745 			if ((ret = vdev_to_nvlist_iter(child[c], search,
1746 			    avail_spare, l2cache, NULL)) != NULL) {
1747 				*avail_spare = B_TRUE;
1748 				return (ret);
1749 			}
1750 		}
1751 	}
1752 
1753 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1754 	    &child, &children) == 0) {
1755 		for (c = 0; c < children; c++) {
1756 			if ((ret = vdev_to_nvlist_iter(child[c], search,
1757 			    avail_spare, l2cache, NULL)) != NULL) {
1758 				*l2cache = B_TRUE;
1759 				return (ret);
1760 			}
1761 		}
1762 	}
1763 
1764 	return (NULL);
1765 }
1766 
1767 /*
1768  * Given a physical path (minus the "/devices" prefix), find the
1769  * associated vdev.
1770  */
1771 nvlist_t *
1772 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
1773     boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
1774 {
1775 	nvlist_t *search, *nvroot, *ret;
1776 
1777 	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1778 	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
1779 
1780 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1781 	    &nvroot) == 0);
1782 
1783 	*avail_spare = B_FALSE;
1784 	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1785 	nvlist_free(search);
1786 
1787 	return (ret);
1788 }
1789 
1790 /*
1791  * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
1792  */
1793 boolean_t
1794 zpool_vdev_is_interior(const char *name)
1795 {
1796 	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1797 	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
1798 		return (B_TRUE);
1799 	return (B_FALSE);
1800 }
1801 
1802 nvlist_t *
1803 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1804     boolean_t *l2cache, boolean_t *log)
1805 {
1806 	char buf[MAXPATHLEN];
1807 	char *end;
1808 	nvlist_t *nvroot, *search, *ret;
1809 	uint64_t guid;
1810 
1811 	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1812 
1813 	guid = strtoull(path, &end, 10);
1814 	if (guid != 0 && *end == '\0') {
1815 		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
1816 	} else if (zpool_vdev_is_interior(path)) {
1817 		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
1818 	} else if (path[0] != '/') {
1819 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
1820 		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
1821 	} else {
1822 		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
1823 	}
1824 
1825 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1826 	    &nvroot) == 0);
1827 
1828 	*avail_spare = B_FALSE;
1829 	*l2cache = B_FALSE;
1830 	if (log != NULL)
1831 		*log = B_FALSE;
1832 	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1833 	nvlist_free(search);
1834 
1835 	return (ret);
1836 }
1837 
1838 static int
1839 vdev_online(nvlist_t *nv)
1840 {
1841 	uint64_t ival;
1842 
1843 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
1844 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
1845 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
1846 		return (0);
1847 
1848 	return (1);
1849 }
1850 
1851 /*
1852  * Helper function for zpool_get_physpaths().
1853  */
1854 static int
1855 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
1856     size_t *bytes_written)
1857 {
1858 	size_t bytes_left, pos, rsz;
1859 	char *tmppath;
1860 	const char *format;
1861 
1862 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
1863 	    &tmppath) != 0)
1864 		return (EZFS_NODEVICE);
1865 
1866 	pos = *bytes_written;
1867 	bytes_left = physpath_size - pos;
1868 	format = (pos == 0) ? "%s" : " %s";
1869 
1870 	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
1871 	*bytes_written += rsz;
1872 
1873 	if (rsz >= bytes_left) {
1874 		/* if physpath was not copied properly, clear it */
1875 		if (bytes_left != 0) {
1876 			physpath[pos] = 0;
1877 		}
1878 		return (EZFS_NOSPC);
1879 	}
1880 	return (0);
1881 }
1882 
1883 static int
1884 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
1885     size_t *rsz, boolean_t is_spare)
1886 {
1887 	char *type;
1888 	int ret;
1889 
1890 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
1891 		return (EZFS_INVALCONFIG);
1892 
1893 	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
1894 		/*
1895 		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
1896 		 * For a spare vdev, we only want to boot from the active
1897 		 * spare device.
1898 		 */
1899 		if (is_spare) {
1900 			uint64_t spare = 0;
1901 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
1902 			    &spare);
1903 			if (!spare)
1904 				return (EZFS_INVALCONFIG);
1905 		}
1906 
1907 		if (vdev_online(nv)) {
1908 			if ((ret = vdev_get_one_physpath(nv, physpath,
1909 			    phypath_size, rsz)) != 0)
1910 				return (ret);
1911 		}
1912 	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
1913 	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
1914 	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
1915 		nvlist_t **child;
1916 		uint_t count;
1917 		int i, ret;
1918 
1919 		if (nvlist_lookup_nvlist_array(nv,
1920 		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
1921 			return (EZFS_INVALCONFIG);
1922 
1923 		for (i = 0; i < count; i++) {
1924 			ret = vdev_get_physpaths(child[i], physpath,
1925 			    phypath_size, rsz, is_spare);
1926 			if (ret == EZFS_NOSPC)
1927 				return (ret);
1928 		}
1929 	}
1930 
1931 	return (EZFS_POOL_INVALARG);
1932 }
1933 
1934 /*
1935  * Get phys_path for a root pool config.
1936  * Return 0 on success; non-zero on failure.
1937  */
1938 static int
1939 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
1940 {
1941 	size_t rsz;
1942 	nvlist_t *vdev_root;
1943 	nvlist_t **child;
1944 	uint_t count;
1945 	char *type;
1946 
1947 	rsz = 0;
1948 
1949 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1950 	    &vdev_root) != 0)
1951 		return (EZFS_INVALCONFIG);
1952 
1953 	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
1954 	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
1955 	    &child, &count) != 0)
1956 		return (EZFS_INVALCONFIG);
1957 
1958 	/*
1959 	 * root pool can not have EFI labeled disks and can only have
1960 	 * a single top-level vdev.
1961 	 */
1962 	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
1963 	    pool_uses_efi(vdev_root))
1964 		return (EZFS_POOL_INVALARG);
1965 
1966 	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
1967 	    B_FALSE);
1968 
1969 	/* No online devices */
1970 	if (rsz == 0)
1971 		return (EZFS_NODEVICE);
1972 
1973 	return (0);
1974 }
1975 
1976 /*
1977  * Get phys_path for a root pool
1978  * Return 0 on success; non-zero on failure.
1979  */
1980 int
1981 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
1982 {
1983 	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
1984 	    phypath_size));
1985 }
1986 
1987 /*
1988  * If the device has being dynamically expanded then we need to relabel
1989  * the disk to use the new unallocated space.
1990  */
1991 static int
1992 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
1993 {
1994 	char path[MAXPATHLEN];
1995 	char errbuf[1024];
1996 	int fd, error;
1997 	int (*_efi_use_whole_disk)(int);
1998 
1999 	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2000 	    "efi_use_whole_disk")) == NULL)
2001 		return (-1);
2002 
2003 	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
2004 
2005 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2006 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2007 		    "relabel '%s': unable to open device"), name);
2008 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2009 	}
2010 
2011 	/*
2012 	 * It's possible that we might encounter an error if the device
2013 	 * does not have any unallocated space left. If so, we simply
2014 	 * ignore that error and continue on.
2015 	 */
2016 	error = _efi_use_whole_disk(fd);
2017 	(void) close(fd);
2018 	if (error && error != VT_ENOSPC) {
2019 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2020 		    "relabel '%s': unable to read disk capacity"), name);
2021 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2022 	}
2023 	return (0);
2024 }
2025 
2026 /*
2027  * Bring the specified vdev online.   The 'flags' parameter is a set of the
2028  * ZFS_ONLINE_* flags.
2029  */
2030 int
2031 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2032     vdev_state_t *newstate)
2033 {
2034 	zfs_cmd_t zc = { 0 };
2035 	char msg[1024];
2036 	nvlist_t *tgt;
2037 	boolean_t avail_spare, l2cache, islog;
2038 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2039 
2040 	if (flags & ZFS_ONLINE_EXPAND) {
2041 		(void) snprintf(msg, sizeof (msg),
2042 		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2043 	} else {
2044 		(void) snprintf(msg, sizeof (msg),
2045 		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2046 	}
2047 
2048 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2049 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2050 	    &islog)) == NULL)
2051 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2052 
2053 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2054 
2055 	if (avail_spare)
2056 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2057 
2058 	if (flags & ZFS_ONLINE_EXPAND ||
2059 	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2060 		char *pathname = NULL;
2061 		uint64_t wholedisk = 0;
2062 
2063 		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2064 		    &wholedisk);
2065 		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
2066 		    &pathname) == 0);
2067 
2068 		/*
2069 		 * XXX - L2ARC 1.0 devices can't support expansion.
2070 		 */
2071 		if (l2cache) {
2072 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2073 			    "cannot expand cache devices"));
2074 			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2075 		}
2076 
2077 		if (wholedisk) {
2078 			pathname += strlen(DISK_ROOT) + 1;
2079 			(void) zpool_relabel_disk(zhp->zpool_hdl, pathname);
2080 		}
2081 	}
2082 
2083 	zc.zc_cookie = VDEV_STATE_ONLINE;
2084 	zc.zc_obj = flags;
2085 
2086 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2087 		if (errno == EINVAL) {
2088 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2089 			    "from this pool into a new one.  Use '%s' "
2090 			    "instead"), "zpool detach");
2091 			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2092 		}
2093 		return (zpool_standard_error(hdl, errno, msg));
2094 	}
2095 
2096 	*newstate = zc.zc_cookie;
2097 	return (0);
2098 }
2099 
2100 /*
2101  * Take the specified vdev offline
2102  */
2103 int
2104 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2105 {
2106 	zfs_cmd_t zc = { 0 };
2107 	char msg[1024];
2108 	nvlist_t *tgt;
2109 	boolean_t avail_spare, l2cache;
2110 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2111 
2112 	(void) snprintf(msg, sizeof (msg),
2113 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2114 
2115 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2116 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2117 	    NULL)) == NULL)
2118 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2119 
2120 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2121 
2122 	if (avail_spare)
2123 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2124 
2125 	zc.zc_cookie = VDEV_STATE_OFFLINE;
2126 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2127 
2128 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2129 		return (0);
2130 
2131 	switch (errno) {
2132 	case EBUSY:
2133 
2134 		/*
2135 		 * There are no other replicas of this device.
2136 		 */
2137 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2138 
2139 	case EEXIST:
2140 		/*
2141 		 * The log device has unplayed logs
2142 		 */
2143 		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2144 
2145 	default:
2146 		return (zpool_standard_error(hdl, errno, msg));
2147 	}
2148 }
2149 
2150 /*
2151  * Mark the given vdev faulted.
2152  */
2153 int
2154 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2155 {
2156 	zfs_cmd_t zc = { 0 };
2157 	char msg[1024];
2158 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2159 
2160 	(void) snprintf(msg, sizeof (msg),
2161 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
2162 
2163 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2164 	zc.zc_guid = guid;
2165 	zc.zc_cookie = VDEV_STATE_FAULTED;
2166 	zc.zc_obj = aux;
2167 
2168 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2169 		return (0);
2170 
2171 	switch (errno) {
2172 	case EBUSY:
2173 
2174 		/*
2175 		 * There are no other replicas of this device.
2176 		 */
2177 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2178 
2179 	default:
2180 		return (zpool_standard_error(hdl, errno, msg));
2181 	}
2182 
2183 }
2184 
2185 /*
2186  * Mark the given vdev degraded.
2187  */
2188 int
2189 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2190 {
2191 	zfs_cmd_t zc = { 0 };
2192 	char msg[1024];
2193 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2194 
2195 	(void) snprintf(msg, sizeof (msg),
2196 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
2197 
2198 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2199 	zc.zc_guid = guid;
2200 	zc.zc_cookie = VDEV_STATE_DEGRADED;
2201 	zc.zc_obj = aux;
2202 
2203 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2204 		return (0);
2205 
2206 	return (zpool_standard_error(hdl, errno, msg));
2207 }
2208 
2209 /*
2210  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2211  * a hot spare.
2212  */
2213 static boolean_t
2214 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2215 {
2216 	nvlist_t **child;
2217 	uint_t c, children;
2218 	char *type;
2219 
2220 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2221 	    &children) == 0) {
2222 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2223 		    &type) == 0);
2224 
2225 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2226 		    children == 2 && child[which] == tgt)
2227 			return (B_TRUE);
2228 
2229 		for (c = 0; c < children; c++)
2230 			if (is_replacing_spare(child[c], tgt, which))
2231 				return (B_TRUE);
2232 	}
2233 
2234 	return (B_FALSE);
2235 }
2236 
2237 /*
2238  * Attach new_disk (fully described by nvroot) to old_disk.
2239  * If 'replacing' is specified, the new disk will replace the old one.
2240  */
2241 int
2242 zpool_vdev_attach(zpool_handle_t *zhp,
2243     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2244 {
2245 	zfs_cmd_t zc = { 0 };
2246 	char msg[1024];
2247 	int ret;
2248 	nvlist_t *tgt;
2249 	boolean_t avail_spare, l2cache, islog;
2250 	uint64_t val;
2251 	char *path, *newname;
2252 	nvlist_t **child;
2253 	uint_t children;
2254 	nvlist_t *config_root;
2255 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2256 	boolean_t rootpool = pool_is_bootable(zhp);
2257 
2258 	if (replacing)
2259 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2260 		    "cannot replace %s with %s"), old_disk, new_disk);
2261 	else
2262 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2263 		    "cannot attach %s to %s"), new_disk, old_disk);
2264 
2265 	/*
2266 	 * If this is a root pool, make sure that we're not attaching an
2267 	 * EFI labeled device.
2268 	 */
2269 	if (rootpool && pool_uses_efi(nvroot)) {
2270 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2271 		    "EFI labeled devices are not supported on root pools."));
2272 		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2273 	}
2274 
2275 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2276 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2277 	    &islog)) == 0)
2278 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2279 
2280 	if (avail_spare)
2281 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2282 
2283 	if (l2cache)
2284 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2285 
2286 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2287 	zc.zc_cookie = replacing;
2288 
2289 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2290 	    &child, &children) != 0 || children != 1) {
2291 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2292 		    "new device must be a single disk"));
2293 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2294 	}
2295 
2296 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2297 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2298 
2299 	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2300 		return (-1);
2301 
2302 	/*
2303 	 * If the target is a hot spare that has been swapped in, we can only
2304 	 * replace it with another hot spare.
2305 	 */
2306 	if (replacing &&
2307 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2308 	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2309 	    NULL) == NULL || !avail_spare) &&
2310 	    is_replacing_spare(config_root, tgt, 1)) {
2311 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2312 		    "can only be replaced by another hot spare"));
2313 		free(newname);
2314 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
2315 	}
2316 
2317 	/*
2318 	 * If we are attempting to replace a spare, it canot be applied to an
2319 	 * already spared device.
2320 	 */
2321 	if (replacing &&
2322 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
2323 	    zpool_find_vdev(zhp, newname, &avail_spare,
2324 	    &l2cache, NULL) != NULL && avail_spare &&
2325 	    is_replacing_spare(config_root, tgt, 0)) {
2326 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2327 		    "device has already been replaced with a spare"));
2328 		free(newname);
2329 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
2330 	}
2331 
2332 	free(newname);
2333 
2334 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2335 		return (-1);
2336 
2337 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2338 
2339 	zcmd_free_nvlists(&zc);
2340 
2341 	if (ret == 0) {
2342 		if (rootpool) {
2343 			/*
2344 			 * XXX - This should be removed once we can
2345 			 * automatically install the bootblocks on the
2346 			 * newly attached disk.
2347 			 */
2348 			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
2349 			    "be sure to invoke %s to make '%s' bootable.\n"),
2350 			    BOOTCMD, new_disk);
2351 
2352 			/*
2353 			 * XXX need a better way to prevent user from
2354 			 * booting up a half-baked vdev.
2355 			 */
2356 			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2357 			    "sure to wait until resilver is done "
2358 			    "before rebooting.\n"));
2359 		}
2360 		return (0);
2361 	}
2362 
2363 	switch (errno) {
2364 	case ENOTSUP:
2365 		/*
2366 		 * Can't attach to or replace this type of vdev.
2367 		 */
2368 		if (replacing) {
2369 			if (islog)
2370 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2371 				    "cannot replace a log with a spare"));
2372 			else
2373 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2374 				    "cannot replace a replacing device"));
2375 		} else {
2376 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2377 			    "can only attach to mirrors and top-level "
2378 			    "disks"));
2379 		}
2380 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
2381 		break;
2382 
2383 	case EINVAL:
2384 		/*
2385 		 * The new device must be a single disk.
2386 		 */
2387 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2388 		    "new device must be a single disk"));
2389 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2390 		break;
2391 
2392 	case EBUSY:
2393 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2394 		    new_disk);
2395 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2396 		break;
2397 
2398 	case EOVERFLOW:
2399 		/*
2400 		 * The new device is too small.
2401 		 */
2402 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2403 		    "device is too small"));
2404 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2405 		break;
2406 
2407 	case EDOM:
2408 		/*
2409 		 * The new device has a different alignment requirement.
2410 		 */
2411 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2412 		    "devices have different sector alignment"));
2413 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2414 		break;
2415 
2416 	case ENAMETOOLONG:
2417 		/*
2418 		 * The resulting top-level vdev spec won't fit in the label.
2419 		 */
2420 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2421 		break;
2422 
2423 	default:
2424 		(void) zpool_standard_error(hdl, errno, msg);
2425 	}
2426 
2427 	return (-1);
2428 }
2429 
2430 /*
2431  * Detach the specified device.
2432  */
2433 int
2434 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2435 {
2436 	zfs_cmd_t zc = { 0 };
2437 	char msg[1024];
2438 	nvlist_t *tgt;
2439 	boolean_t avail_spare, l2cache;
2440 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2441 
2442 	(void) snprintf(msg, sizeof (msg),
2443 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2444 
2445 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2446 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2447 	    NULL)) == 0)
2448 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2449 
2450 	if (avail_spare)
2451 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2452 
2453 	if (l2cache)
2454 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2455 
2456 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2457 
2458 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2459 		return (0);
2460 
2461 	switch (errno) {
2462 
2463 	case ENOTSUP:
2464 		/*
2465 		 * Can't detach from this type of vdev.
2466 		 */
2467 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2468 		    "applicable to mirror and replacing vdevs"));
2469 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
2470 		break;
2471 
2472 	case EBUSY:
2473 		/*
2474 		 * There are no other replicas of this device.
2475 		 */
2476 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2477 		break;
2478 
2479 	default:
2480 		(void) zpool_standard_error(hdl, errno, msg);
2481 	}
2482 
2483 	return (-1);
2484 }
2485 
2486 /*
2487  * Find a mirror vdev in the source nvlist.
2488  *
2489  * The mchild array contains a list of disks in one of the top-level mirrors
2490  * of the source pool.  The schild array contains a list of disks that the
2491  * user specified on the command line.  We loop over the mchild array to
2492  * see if any entry in the schild array matches.
2493  *
2494  * If a disk in the mchild array is found in the schild array, we return
2495  * the index of that entry.  Otherwise we return -1.
2496  */
2497 static int
2498 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2499     nvlist_t **schild, uint_t schildren)
2500 {
2501 	uint_t mc;
2502 
2503 	for (mc = 0; mc < mchildren; mc++) {
2504 		uint_t sc;
2505 		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2506 		    mchild[mc], B_FALSE);
2507 
2508 		for (sc = 0; sc < schildren; sc++) {
2509 			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2510 			    schild[sc], B_FALSE);
2511 			boolean_t result = (strcmp(mpath, spath) == 0);
2512 
2513 			free(spath);
2514 			if (result) {
2515 				free(mpath);
2516 				return (mc);
2517 			}
2518 		}
2519 
2520 		free(mpath);
2521 	}
2522 
2523 	return (-1);
2524 }
2525 
2526 /*
2527  * Split a mirror pool.  If newroot points to null, then a new nvlist
2528  * is generated and it is the responsibility of the caller to free it.
2529  */
2530 int
2531 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2532     nvlist_t *props, splitflags_t flags)
2533 {
2534 	zfs_cmd_t zc = { 0 };
2535 	char msg[1024];
2536 	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2537 	nvlist_t **varray = NULL, *zc_props = NULL;
2538 	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2539 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2540 	uint64_t vers;
2541 	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2542 	int retval = 0;
2543 
2544 	(void) snprintf(msg, sizeof (msg),
2545 	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2546 
2547 	if (!zpool_name_valid(hdl, B_FALSE, newname))
2548 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2549 
2550 	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2551 		(void) fprintf(stderr, gettext("Internal error: unable to "
2552 		    "retrieve pool configuration\n"));
2553 		return (-1);
2554 	}
2555 
2556 	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2557 	    == 0);
2558 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2559 
2560 	if (props) {
2561 		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2562 		    props, vers, B_TRUE, msg)) == NULL)
2563 			return (-1);
2564 	}
2565 
2566 	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2567 	    &children) != 0) {
2568 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2569 		    "Source pool is missing vdev tree"));
2570 		if (zc_props)
2571 			nvlist_free(zc_props);
2572 		return (-1);
2573 	}
2574 
2575 	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2576 	vcount = 0;
2577 
2578 	if (*newroot == NULL ||
2579 	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2580 	    &newchild, &newchildren) != 0)
2581 		newchildren = 0;
2582 
2583 	for (c = 0; c < children; c++) {
2584 		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2585 		char *type;
2586 		nvlist_t **mchild, *vdev;
2587 		uint_t mchildren;
2588 		int entry;
2589 
2590 		/*
2591 		 * Unlike cache & spares, slogs are stored in the
2592 		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
2593 		 */
2594 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2595 		    &is_log);
2596 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2597 		    &is_hole);
2598 		if (is_log || is_hole) {
2599 			/*
2600 			 * Create a hole vdev and put it in the config.
2601 			 */
2602 			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2603 				goto out;
2604 			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2605 			    VDEV_TYPE_HOLE) != 0)
2606 				goto out;
2607 			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2608 			    1) != 0)
2609 				goto out;
2610 			if (lastlog == 0)
2611 				lastlog = vcount;
2612 			varray[vcount++] = vdev;
2613 			continue;
2614 		}
2615 		lastlog = 0;
2616 		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2617 		    == 0);
2618 		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2619 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2620 			    "Source pool must be composed only of mirrors\n"));
2621 			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2622 			goto out;
2623 		}
2624 
2625 		verify(nvlist_lookup_nvlist_array(child[c],
2626 		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2627 
2628 		/* find or add an entry for this top-level vdev */
2629 		if (newchildren > 0 &&
2630 		    (entry = find_vdev_entry(zhp, mchild, mchildren,
2631 		    newchild, newchildren)) >= 0) {
2632 			/* We found a disk that the user specified. */
2633 			vdev = mchild[entry];
2634 			++found;
2635 		} else {
2636 			/* User didn't specify a disk for this vdev. */
2637 			vdev = mchild[mchildren - 1];
2638 		}
2639 
2640 		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2641 			goto out;
2642 	}
2643 
2644 	/* did we find every disk the user specified? */
2645 	if (found != newchildren) {
2646 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2647 		    "include at most one disk from each mirror"));
2648 		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2649 		goto out;
2650 	}
2651 
2652 	/* Prepare the nvlist for populating. */
2653 	if (*newroot == NULL) {
2654 		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2655 			goto out;
2656 		freelist = B_TRUE;
2657 		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2658 		    VDEV_TYPE_ROOT) != 0)
2659 			goto out;
2660 	} else {
2661 		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2662 	}
2663 
2664 	/* Add all the children we found */
2665 	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2666 	    lastlog == 0 ? vcount : lastlog) != 0)
2667 		goto out;
2668 
2669 	/*
2670 	 * If we're just doing a dry run, exit now with success.
2671 	 */
2672 	if (flags.dryrun) {
2673 		memory_err = B_FALSE;
2674 		freelist = B_FALSE;
2675 		goto out;
2676 	}
2677 
2678 	/* now build up the config list & call the ioctl */
2679 	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
2680 		goto out;
2681 
2682 	if (nvlist_add_nvlist(newconfig,
2683 	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
2684 	    nvlist_add_string(newconfig,
2685 	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
2686 	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
2687 		goto out;
2688 
2689 	/*
2690 	 * The new pool is automatically part of the namespace unless we
2691 	 * explicitly export it.
2692 	 */
2693 	if (!flags.import)
2694 		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
2695 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2696 	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
2697 	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
2698 		goto out;
2699 	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
2700 		goto out;
2701 
2702 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
2703 		retval = zpool_standard_error(hdl, errno, msg);
2704 		goto out;
2705 	}
2706 
2707 	freelist = B_FALSE;
2708 	memory_err = B_FALSE;
2709 
2710 out:
2711 	if (varray != NULL) {
2712 		int v;
2713 
2714 		for (v = 0; v < vcount; v++)
2715 			nvlist_free(varray[v]);
2716 		free(varray);
2717 	}
2718 	zcmd_free_nvlists(&zc);
2719 	if (zc_props)
2720 		nvlist_free(zc_props);
2721 	if (newconfig)
2722 		nvlist_free(newconfig);
2723 	if (freelist) {
2724 		nvlist_free(*newroot);
2725 		*newroot = NULL;
2726 	}
2727 
2728 	if (retval != 0)
2729 		return (retval);
2730 
2731 	if (memory_err)
2732 		return (no_memory(hdl));
2733 
2734 	return (0);
2735 }
2736 
2737 /*
2738  * Remove the given device.  Currently, this is supported only for hot spares
2739  * and level 2 cache devices.
2740  */
2741 int
2742 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2743 {
2744 	zfs_cmd_t zc = { 0 };
2745 	char msg[1024];
2746 	nvlist_t *tgt;
2747 	boolean_t avail_spare, l2cache, islog;
2748 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2749 	uint64_t version;
2750 
2751 	(void) snprintf(msg, sizeof (msg),
2752 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2753 
2754 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2755 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2756 	    &islog)) == 0)
2757 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2758 	/*
2759 	 * XXX - this should just go away.
2760 	 */
2761 	if (!avail_spare && !l2cache && !islog) {
2762 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2763 		    "only inactive hot spares, cache, top-level, "
2764 		    "or log devices can be removed"));
2765 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2766 	}
2767 
2768 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
2769 	if (islog && version < SPA_VERSION_HOLES) {
2770 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2771 		    "pool must be upgrade to support log removal"));
2772 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
2773 	}
2774 
2775 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2776 
2777 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2778 		return (0);
2779 
2780 	return (zpool_standard_error(hdl, errno, msg));
2781 }
2782 
2783 /*
2784  * Clear the errors for the pool, or the particular device if specified.
2785  */
2786 int
2787 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
2788 {
2789 	zfs_cmd_t zc = { 0 };
2790 	char msg[1024];
2791 	nvlist_t *tgt;
2792 	zpool_rewind_policy_t policy;
2793 	boolean_t avail_spare, l2cache;
2794 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2795 	nvlist_t *nvi = NULL;
2796 
2797 	if (path)
2798 		(void) snprintf(msg, sizeof (msg),
2799 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2800 		    path);
2801 	else
2802 		(void) snprintf(msg, sizeof (msg),
2803 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2804 		    zhp->zpool_name);
2805 
2806 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2807 	if (path) {
2808 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
2809 		    &l2cache, NULL)) == 0)
2810 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
2811 
2812 		/*
2813 		 * Don't allow error clearing for hot spares.  Do allow
2814 		 * error clearing for l2cache devices.
2815 		 */
2816 		if (avail_spare)
2817 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
2818 
2819 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2820 		    &zc.zc_guid) == 0);
2821 	}
2822 
2823 	zpool_get_rewind_policy(rewindnvl, &policy);
2824 	zc.zc_cookie = policy.zrp_request;
2825 
2826 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 8192) != 0)
2827 		return (-1);
2828 
2829 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, rewindnvl) != 0)
2830 		return (-1);
2831 
2832 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0 ||
2833 	    ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
2834 	    errno != EPERM && errno != EACCES)) {
2835 		if (policy.zrp_request &
2836 		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2837 			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
2838 			zpool_rewind_exclaim(hdl, zc.zc_name,
2839 			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
2840 			    nvi);
2841 			nvlist_free(nvi);
2842 		}
2843 		zcmd_free_nvlists(&zc);
2844 		return (0);
2845 	}
2846 
2847 	zcmd_free_nvlists(&zc);
2848 	return (zpool_standard_error(hdl, errno, msg));
2849 }
2850 
2851 /*
2852  * Similar to zpool_clear(), but takes a GUID (used by fmd).
2853  */
2854 int
2855 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2856 {
2857 	zfs_cmd_t zc = { 0 };
2858 	char msg[1024];
2859 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2860 
2861 	(void) snprintf(msg, sizeof (msg),
2862 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2863 	    guid);
2864 
2865 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2866 	zc.zc_guid = guid;
2867 
2868 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2869 		return (0);
2870 
2871 	return (zpool_standard_error(hdl, errno, msg));
2872 }
2873 
2874 /*
2875  * Convert from a devid string to a path.
2876  */
2877 static char *
2878 devid_to_path(char *devid_str)
2879 {
2880 	ddi_devid_t devid;
2881 	char *minor;
2882 	char *path;
2883 	devid_nmlist_t *list = NULL;
2884 	int ret;
2885 
2886 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
2887 		return (NULL);
2888 
2889 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2890 
2891 	devid_str_free(minor);
2892 	devid_free(devid);
2893 
2894 	if (ret != 0)
2895 		return (NULL);
2896 
2897 	if ((path = strdup(list[0].devname)) == NULL)
2898 		return (NULL);
2899 
2900 	devid_free_nmlist(list);
2901 
2902 	return (path);
2903 }
2904 
2905 /*
2906  * Convert from a path to a devid string.
2907  */
2908 static char *
2909 path_to_devid(const char *path)
2910 {
2911 	int fd;
2912 	ddi_devid_t devid;
2913 	char *minor, *ret;
2914 
2915 	if ((fd = open(path, O_RDONLY)) < 0)
2916 		return (NULL);
2917 
2918 	minor = NULL;
2919 	ret = NULL;
2920 	if (devid_get(fd, &devid) == 0) {
2921 		if (devid_get_minor_name(fd, &minor) == 0)
2922 			ret = devid_str_encode(devid, minor);
2923 		if (minor != NULL)
2924 			devid_str_free(minor);
2925 		devid_free(devid);
2926 	}
2927 	(void) close(fd);
2928 
2929 	return (ret);
2930 }
2931 
2932 /*
2933  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
2934  * ignore any failure here, since a common case is for an unprivileged user to
2935  * type 'zpool status', and we'll display the correct information anyway.
2936  */
2937 static void
2938 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2939 {
2940 	zfs_cmd_t zc = { 0 };
2941 
2942 	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2943 	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
2944 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2945 	    &zc.zc_guid) == 0);
2946 
2947 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2948 }
2949 
2950 /*
2951  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
2952  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2953  * We also check if this is a whole disk, in which case we strip off the
2954  * trailing 's0' slice name.
2955  *
2956  * This routine is also responsible for identifying when disks have been
2957  * reconfigured in a new location.  The kernel will have opened the device by
2958  * devid, but the path will still refer to the old location.  To catch this, we
2959  * first do a path -> devid translation (which is fast for the common case).  If
2960  * the devid matches, we're done.  If not, we do a reverse devid -> path
2961  * translation and issue the appropriate ioctl() to update the path of the vdev.
2962  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2963  * of these checks.
2964  */
2965 char *
2966 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
2967     boolean_t verbose)
2968 {
2969 	char *path, *devid;
2970 	uint64_t value;
2971 	char buf[64];
2972 	vdev_stat_t *vs;
2973 	uint_t vsc;
2974 
2975 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2976 	    &value) == 0) {
2977 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2978 		    &value) == 0);
2979 		(void) snprintf(buf, sizeof (buf), "%llu",
2980 		    (u_longlong_t)value);
2981 		path = buf;
2982 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2983 
2984 		/*
2985 		 * If the device is dead (faulted, offline, etc) then don't
2986 		 * bother opening it.  Otherwise we may be forcing the user to
2987 		 * open a misbehaving device, which can have undesirable
2988 		 * effects.
2989 		 */
2990 		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2991 		    (uint64_t **)&vs, &vsc) != 0 ||
2992 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
2993 		    zhp != NULL &&
2994 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2995 			/*
2996 			 * Determine if the current path is correct.
2997 			 */
2998 			char *newdevid = path_to_devid(path);
2999 
3000 			if (newdevid == NULL ||
3001 			    strcmp(devid, newdevid) != 0) {
3002 				char *newpath;
3003 
3004 				if ((newpath = devid_to_path(devid)) != NULL) {
3005 					/*
3006 					 * Update the path appropriately.
3007 					 */
3008 					set_path(zhp, nv, newpath);
3009 					if (nvlist_add_string(nv,
3010 					    ZPOOL_CONFIG_PATH, newpath) == 0)
3011 						verify(nvlist_lookup_string(nv,
3012 						    ZPOOL_CONFIG_PATH,
3013 						    &path) == 0);
3014 					free(newpath);
3015 				}
3016 			}
3017 
3018 			if (newdevid)
3019 				devid_str_free(newdevid);
3020 		}
3021 
3022 		if (strncmp(path, "/dev/dsk/", 9) == 0)
3023 			path += 9;
3024 
3025 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3026 		    &value) == 0 && value) {
3027 			char *tmp = zfs_strdup(hdl, path);
3028 			if (tmp == NULL)
3029 				return (NULL);
3030 			tmp[strlen(path) - 2] = '\0';
3031 			return (tmp);
3032 		}
3033 	} else {
3034 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3035 
3036 		/*
3037 		 * If it's a raidz device, we need to stick in the parity level.
3038 		 */
3039 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3040 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3041 			    &value) == 0);
3042 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
3043 			    (u_longlong_t)value);
3044 			path = buf;
3045 		}
3046 
3047 		/*
3048 		 * We identify each top-level vdev by using a <type-id>
3049 		 * naming convention.
3050 		 */
3051 		if (verbose) {
3052 			uint64_t id;
3053 
3054 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3055 			    &id) == 0);
3056 			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3057 			    (u_longlong_t)id);
3058 			path = buf;
3059 		}
3060 	}
3061 
3062 	return (zfs_strdup(hdl, path));
3063 }
3064 
3065 static int
3066 zbookmark_compare(const void *a, const void *b)
3067 {
3068 	return (memcmp(a, b, sizeof (zbookmark_t)));
3069 }
3070 
3071 /*
3072  * Retrieve the persistent error log, uniquify the members, and return to the
3073  * caller.
3074  */
3075 int
3076 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3077 {
3078 	zfs_cmd_t zc = { 0 };
3079 	uint64_t count;
3080 	zbookmark_t *zb = NULL;
3081 	int i;
3082 
3083 	/*
3084 	 * Retrieve the raw error list from the kernel.  If the number of errors
3085 	 * has increased, allocate more space and continue until we get the
3086 	 * entire list.
3087 	 */
3088 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3089 	    &count) == 0);
3090 	if (count == 0)
3091 		return (0);
3092 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3093 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3094 		return (-1);
3095 	zc.zc_nvlist_dst_size = count;
3096 	(void) strcpy(zc.zc_name, zhp->zpool_name);
3097 	for (;;) {
3098 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3099 		    &zc) != 0) {
3100 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
3101 			if (errno == ENOMEM) {
3102 				count = zc.zc_nvlist_dst_size;
3103 				if ((zc.zc_nvlist_dst = (uintptr_t)
3104 				    zfs_alloc(zhp->zpool_hdl, count *
3105 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
3106 					return (-1);
3107 			} else {
3108 				return (-1);
3109 			}
3110 		} else {
3111 			break;
3112 		}
3113 	}
3114 
3115 	/*
3116 	 * Sort the resulting bookmarks.  This is a little confusing due to the
3117 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
3118 	 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks
3119 	 * _not_ copied as part of the process.  So we point the start of our
3120 	 * array appropriate and decrement the total number of elements.
3121 	 */
3122 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3123 	    zc.zc_nvlist_dst_size;
3124 	count -= zc.zc_nvlist_dst_size;
3125 
3126 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3127 
3128 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3129 
3130 	/*
3131 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3132 	 */
3133 	for (i = 0; i < count; i++) {
3134 		nvlist_t *nv;
3135 
3136 		/* ignoring zb_blkid and zb_level for now */
3137 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3138 		    zb[i-1].zb_object == zb[i].zb_object)
3139 			continue;
3140 
3141 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3142 			goto nomem;
3143 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3144 		    zb[i].zb_objset) != 0) {
3145 			nvlist_free(nv);
3146 			goto nomem;
3147 		}
3148 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3149 		    zb[i].zb_object) != 0) {
3150 			nvlist_free(nv);
3151 			goto nomem;
3152 		}
3153 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3154 			nvlist_free(nv);
3155 			goto nomem;
3156 		}
3157 		nvlist_free(nv);
3158 	}
3159 
3160 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
3161 	return (0);
3162 
3163 nomem:
3164 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
3165 	return (no_memory(zhp->zpool_hdl));
3166 }
3167 
3168 /*
3169  * Upgrade a ZFS pool to the latest on-disk version.
3170  */
3171 int
3172 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3173 {
3174 	zfs_cmd_t zc = { 0 };
3175 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3176 
3177 	(void) strcpy(zc.zc_name, zhp->zpool_name);
3178 	zc.zc_cookie = new_version;
3179 
3180 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3181 		return (zpool_standard_error_fmt(hdl, errno,
3182 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3183 		    zhp->zpool_name));
3184 	return (0);
3185 }
3186 
3187 void
3188 zpool_set_history_str(const char *subcommand, int argc, char **argv,
3189     char *history_str)
3190 {
3191 	int i;
3192 
3193 	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
3194 	for (i = 1; i < argc; i++) {
3195 		if (strlen(history_str) + 1 + strlen(argv[i]) >
3196 		    HIS_MAX_RECORD_LEN)
3197 			break;
3198 		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
3199 		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
3200 	}
3201 }
3202 
3203 /*
3204  * Stage command history for logging.
3205  */
3206 int
3207 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
3208 {
3209 	if (history_str == NULL)
3210 		return (EINVAL);
3211 
3212 	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
3213 		return (EINVAL);
3214 
3215 	if (hdl->libzfs_log_str != NULL)
3216 		free(hdl->libzfs_log_str);
3217 
3218 	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
3219 		return (no_memory(hdl));
3220 
3221 	return (0);
3222 }
3223 
3224 /*
3225  * Perform ioctl to get some command history of a pool.
3226  *
3227  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
3228  * logical offset of the history buffer to start reading from.
3229  *
3230  * Upon return, 'off' is the next logical offset to read from and
3231  * 'len' is the actual amount of bytes read into 'buf'.
3232  */
3233 static int
3234 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3235 {
3236 	zfs_cmd_t zc = { 0 };
3237 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3238 
3239 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3240 
3241 	zc.zc_history = (uint64_t)(uintptr_t)buf;
3242 	zc.zc_history_len = *len;
3243 	zc.zc_history_offset = *off;
3244 
3245 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3246 		switch (errno) {
3247 		case EPERM:
3248 			return (zfs_error_fmt(hdl, EZFS_PERM,
3249 			    dgettext(TEXT_DOMAIN,
3250 			    "cannot show history for pool '%s'"),
3251 			    zhp->zpool_name));
3252 		case ENOENT:
3253 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3254 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
3255 			    "'%s'"), zhp->zpool_name));
3256 		case ENOTSUP:
3257 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3258 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
3259 			    "'%s', pool must be upgraded"), zhp->zpool_name));
3260 		default:
3261 			return (zpool_standard_error_fmt(hdl, errno,
3262 			    dgettext(TEXT_DOMAIN,
3263 			    "cannot get history for '%s'"), zhp->zpool_name));
3264 		}
3265 	}
3266 
3267 	*len = zc.zc_history_len;
3268 	*off = zc.zc_history_offset;
3269 
3270 	return (0);
3271 }
3272 
3273 /*
3274  * Process the buffer of nvlists, unpacking and storing each nvlist record
3275  * into 'records'.  'leftover' is set to the number of bytes that weren't
3276  * processed as there wasn't a complete record.
3277  */
3278 int
3279 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3280     nvlist_t ***records, uint_t *numrecords)
3281 {
3282 	uint64_t reclen;
3283 	nvlist_t *nv;
3284 	int i;
3285 
3286 	while (bytes_read > sizeof (reclen)) {
3287 
3288 		/* get length of packed record (stored as little endian) */
3289 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3290 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3291 
3292 		if (bytes_read < sizeof (reclen) + reclen)
3293 			break;
3294 
3295 		/* unpack record */
3296 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3297 			return (ENOMEM);
3298 		bytes_read -= sizeof (reclen) + reclen;
3299 		buf += sizeof (reclen) + reclen;
3300 
3301 		/* add record to nvlist array */
3302 		(*numrecords)++;
3303 		if (ISP2(*numrecords + 1)) {
3304 			*records = realloc(*records,
3305 			    *numrecords * 2 * sizeof (nvlist_t *));
3306 		}
3307 		(*records)[*numrecords - 1] = nv;
3308 	}
3309 
3310 	*leftover = bytes_read;
3311 	return (0);
3312 }
3313 
3314 #define	HIS_BUF_LEN	(128*1024)
3315 
3316 /*
3317  * Retrieve the command history of a pool.
3318  */
3319 int
3320 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3321 {
3322 	char buf[HIS_BUF_LEN];
3323 	uint64_t off = 0;
3324 	nvlist_t **records = NULL;
3325 	uint_t numrecords = 0;
3326 	int err, i;
3327 
3328 	do {
3329 		uint64_t bytes_read = sizeof (buf);
3330 		uint64_t leftover;
3331 
3332 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3333 			break;
3334 
3335 		/* if nothing else was read in, we're at EOF, just return */
3336 		if (!bytes_read)
3337 			break;
3338 
3339 		if ((err = zpool_history_unpack(buf, bytes_read,
3340 		    &leftover, &records, &numrecords)) != 0)
3341 			break;
3342 		off -= leftover;
3343 
3344 		/* CONSTCOND */
3345 	} while (1);
3346 
3347 	if (!err) {
3348 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3349 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3350 		    records, numrecords) == 0);
3351 	}
3352 	for (i = 0; i < numrecords; i++)
3353 		nvlist_free(records[i]);
3354 	free(records);
3355 
3356 	return (err);
3357 }
3358 
3359 void
3360 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3361     char *pathname, size_t len)
3362 {
3363 	zfs_cmd_t zc = { 0 };
3364 	boolean_t mounted = B_FALSE;
3365 	char *mntpnt = NULL;
3366 	char dsname[MAXNAMELEN];
3367 
3368 	if (dsobj == 0) {
3369 		/* special case for the MOS */
3370 		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
3371 		return;
3372 	}
3373 
3374 	/* get the dataset's name */
3375 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3376 	zc.zc_obj = dsobj;
3377 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
3378 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3379 		/* just write out a path of two object numbers */
3380 		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3381 		    dsobj, obj);
3382 		return;
3383 	}
3384 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3385 
3386 	/* find out if the dataset is mounted */
3387 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3388 
3389 	/* get the corrupted object's path */
3390 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3391 	zc.zc_obj = obj;
3392 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3393 	    &zc) == 0) {
3394 		if (mounted) {
3395 			(void) snprintf(pathname, len, "%s%s", mntpnt,
3396 			    zc.zc_value);
3397 		} else {
3398 			(void) snprintf(pathname, len, "%s:%s",
3399 			    dsname, zc.zc_value);
3400 		}
3401 	} else {
3402 		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
3403 	}
3404 	free(mntpnt);
3405 }
3406 
3407 /*
3408  * Read the EFI label from the config, if a label does not exist then
3409  * pass back the error to the caller. If the caller has passed a non-NULL
3410  * diskaddr argument then we set it to the starting address of the EFI
3411  * partition.
3412  */
3413 static int
3414 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3415 {
3416 	char *path;
3417 	int fd;
3418 	char diskname[MAXPATHLEN];
3419 	int err = -1;
3420 
3421 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3422 		return (err);
3423 
3424 	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3425 	    strrchr(path, '/'));
3426 	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3427 		struct dk_gpt *vtoc;
3428 
3429 		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3430 			if (sb != NULL)
3431 				*sb = vtoc->efi_parts[0].p_start;
3432 			efi_free(vtoc);
3433 		}
3434 		(void) close(fd);
3435 	}
3436 	return (err);
3437 }
3438 
3439 /*
3440  * determine where a partition starts on a disk in the current
3441  * configuration
3442  */
3443 static diskaddr_t
3444 find_start_block(nvlist_t *config)
3445 {
3446 	nvlist_t **child;
3447 	uint_t c, children;
3448 	diskaddr_t sb = MAXOFFSET_T;
3449 	uint64_t wholedisk;
3450 
3451 	if (nvlist_lookup_nvlist_array(config,
3452 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3453 		if (nvlist_lookup_uint64(config,
3454 		    ZPOOL_CONFIG_WHOLE_DISK,
3455 		    &wholedisk) != 0 || !wholedisk) {
3456 			return (MAXOFFSET_T);
3457 		}
3458 		if (read_efi_label(config, &sb) < 0)
3459 			sb = MAXOFFSET_T;
3460 		return (sb);
3461 	}
3462 
3463 	for (c = 0; c < children; c++) {
3464 		sb = find_start_block(child[c]);
3465 		if (sb != MAXOFFSET_T) {
3466 			return (sb);
3467 		}
3468 	}
3469 	return (MAXOFFSET_T);
3470 }
3471 
3472 /*
3473  * Label an individual disk.  The name provided is the short name,
3474  * stripped of any leading /dev path.
3475  */
3476 int
3477 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3478 {
3479 	char path[MAXPATHLEN];
3480 	struct dk_gpt *vtoc;
3481 	int fd;
3482 	size_t resv = EFI_MIN_RESV_SIZE;
3483 	uint64_t slice_size;
3484 	diskaddr_t start_block;
3485 	char errbuf[1024];
3486 
3487 	/* prepare an error message just in case */
3488 	(void) snprintf(errbuf, sizeof (errbuf),
3489 	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3490 
3491 	if (zhp) {
3492 		nvlist_t *nvroot;
3493 
3494 		if (pool_is_bootable(zhp)) {
3495 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3496 			    "EFI labeled devices are not supported on root "
3497 			    "pools."));
3498 			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3499 		}
3500 
3501 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
3502 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3503 
3504 		if (zhp->zpool_start_block == 0)
3505 			start_block = find_start_block(nvroot);
3506 		else
3507 			start_block = zhp->zpool_start_block;
3508 		zhp->zpool_start_block = start_block;
3509 	} else {
3510 		/* new pool */
3511 		start_block = NEW_START_BLOCK;
3512 	}
3513 
3514 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3515 	    BACKUP_SLICE);
3516 
3517 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3518 		/*
3519 		 * This shouldn't happen.  We've long since verified that this
3520 		 * is a valid device.
3521 		 */
3522 		zfs_error_aux(hdl,
3523 		    dgettext(TEXT_DOMAIN, "unable to open device"));
3524 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3525 	}
3526 
3527 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3528 		/*
3529 		 * The only way this can fail is if we run out of memory, or we
3530 		 * were unable to read the disk's capacity
3531 		 */
3532 		if (errno == ENOMEM)
3533 			(void) no_memory(hdl);
3534 
3535 		(void) close(fd);
3536 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3537 		    "unable to read disk capacity"), name);
3538 
3539 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3540 	}
3541 
3542 	slice_size = vtoc->efi_last_u_lba + 1;
3543 	slice_size -= EFI_MIN_RESV_SIZE;
3544 	if (start_block == MAXOFFSET_T)
3545 		start_block = NEW_START_BLOCK;
3546 	slice_size -= start_block;
3547 
3548 	vtoc->efi_parts[0].p_start = start_block;
3549 	vtoc->efi_parts[0].p_size = slice_size;
3550 
3551 	/*
3552 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
3553 	 * disposable by some EFI utilities (since EFI doesn't have a backup
3554 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
3555 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
3556 	 * etc. were all pretty specific.  V_USR is as close to reality as we
3557 	 * can get, in the absence of V_OTHER.
3558 	 */
3559 	vtoc->efi_parts[0].p_tag = V_USR;
3560 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3561 
3562 	vtoc->efi_parts[8].p_start = slice_size + start_block;
3563 	vtoc->efi_parts[8].p_size = resv;
3564 	vtoc->efi_parts[8].p_tag = V_RESERVED;
3565 
3566 	if (efi_write(fd, vtoc) != 0) {
3567 		/*
3568 		 * Some block drivers (like pcata) may not support EFI
3569 		 * GPT labels.  Print out a helpful error message dir-
3570 		 * ecting the user to manually label the disk and give
3571 		 * a specific slice.
3572 		 */
3573 		(void) close(fd);
3574 		efi_free(vtoc);
3575 
3576 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3577 		    "try using fdisk(1M) and then provide a specific slice"));
3578 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3579 	}
3580 
3581 	(void) close(fd);
3582 	efi_free(vtoc);
3583 	return (0);
3584 }
3585 
3586 static boolean_t
3587 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3588 {
3589 	char *type;
3590 	nvlist_t **child;
3591 	uint_t children, c;
3592 
3593 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3594 	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
3595 	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
3596 	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
3597 	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
3598 	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
3599 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3600 		    "vdev type '%s' is not supported"), type);
3601 		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3602 		return (B_FALSE);
3603 	}
3604 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3605 	    &child, &children) == 0) {
3606 		for (c = 0; c < children; c++) {
3607 			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3608 				return (B_FALSE);
3609 		}
3610 	}
3611 	return (B_TRUE);
3612 }
3613 
3614 /*
3615  * check if this zvol is allowable for use as a dump device; zero if
3616  * it is, > 0 if it isn't, < 0 if it isn't a zvol
3617  */
3618 int
3619 zvol_check_dump_config(char *arg)
3620 {
3621 	zpool_handle_t *zhp = NULL;
3622 	nvlist_t *config, *nvroot;
3623 	char *p, *volname;
3624 	nvlist_t **top;
3625 	uint_t toplevels;
3626 	libzfs_handle_t *hdl;
3627 	char errbuf[1024];
3628 	char poolname[ZPOOL_MAXNAMELEN];
3629 	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
3630 	int ret = 1;
3631 
3632 	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
3633 		return (-1);
3634 	}
3635 
3636 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3637 	    "dump is not supported on device '%s'"), arg);
3638 
3639 	if ((hdl = libzfs_init()) == NULL)
3640 		return (1);
3641 	libzfs_print_on_error(hdl, B_TRUE);
3642 
3643 	volname = arg + pathlen;
3644 
3645 	/* check the configuration of the pool */
3646 	if ((p = strchr(volname, '/')) == NULL) {
3647 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3648 		    "malformed dataset name"));
3649 		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
3650 		return (1);
3651 	} else if (p - volname >= ZFS_MAXNAMELEN) {
3652 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3653 		    "dataset name is too long"));
3654 		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
3655 		return (1);
3656 	} else {
3657 		(void) strncpy(poolname, volname, p - volname);
3658 		poolname[p - volname] = '\0';
3659 	}
3660 
3661 	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
3662 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3663 		    "could not open pool '%s'"), poolname);
3664 		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
3665 		goto out;
3666 	}
3667 	config = zpool_get_config(zhp, NULL);
3668 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3669 	    &nvroot) != 0) {
3670 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3671 		    "could not obtain vdev configuration for  '%s'"), poolname);
3672 		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
3673 		goto out;
3674 	}
3675 
3676 	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3677 	    &top, &toplevels) == 0);
3678 	if (toplevels != 1) {
3679 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3680 		    "'%s' has multiple top level vdevs"), poolname);
3681 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3682 		goto out;
3683 	}
3684 
3685 	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
3686 		goto out;
3687 	}
3688 	ret = 0;
3689 
3690 out:
3691 	if (zhp)
3692 		zpool_close(zhp);
3693 	libzfs_fini(hdl);
3694 	return (ret);
3695 }
3696