1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
24  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25  * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
26  * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
27  * Copyright (c) 2018 Datto Inc.
28  * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
29  * Copyright (c) 2017, Intel Corporation.
30  * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
31  * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
32  * Copyright (c) 2021, Klara Inc.
33  */
34 
35 #include <errno.h>
36 #include <libintl.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <strings.h>
40 #include <unistd.h>
41 #include <libgen.h>
42 #include <zone.h>
43 #include <sys/stat.h>
44 #include <sys/efi_partition.h>
45 #include <sys/systeminfo.h>
46 #include <sys/zfs_ioctl.h>
47 #include <sys/zfs_sysfs.h>
48 #include <sys/vdev_disk.h>
49 #include <sys/types.h>
50 #include <dlfcn.h>
51 #include <libzutil.h>
52 #include <fcntl.h>
53 
54 #include "zfs_namecheck.h"
55 #include "zfs_prop.h"
56 #include "libzfs_impl.h"
57 #include "zfs_comutil.h"
58 #include "zfeature_common.h"
59 
60 static boolean_t zpool_vdev_is_interior(const char *name);
61 
/*
 * Context flags for property validation (zpool_valid_proplist()): some
 * properties are only legal at creation time, at import time, or when
 * validating vdev (rather than pool) properties.
 */
typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
	int vdevprop:1;	/* Validate property as a VDEV property */
} prop_flags_t;
67 
68 /*
69  * ====================================================================
70  *   zpool property functions
71  * ====================================================================
72  */
73 
74 static int
75 zpool_get_all_props(zpool_handle_t *zhp)
76 {
77 	zfs_cmd_t zc = {"\0"};
78 	libzfs_handle_t *hdl = zhp->zpool_hdl;
79 
80 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
81 
82 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
83 		return (-1);
84 
85 	while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
86 		if (errno == ENOMEM) {
87 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
88 				zcmd_free_nvlists(&zc);
89 				return (-1);
90 			}
91 		} else {
92 			zcmd_free_nvlists(&zc);
93 			return (-1);
94 		}
95 	}
96 
97 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
98 		zcmd_free_nvlists(&zc);
99 		return (-1);
100 	}
101 
102 	zcmd_free_nvlists(&zc);
103 
104 	return (0);
105 }
106 
107 int
108 zpool_props_refresh(zpool_handle_t *zhp)
109 {
110 	nvlist_t *old_props;
111 
112 	old_props = zhp->zpool_props;
113 
114 	if (zpool_get_all_props(zhp) != 0)
115 		return (-1);
116 
117 	nvlist_free(old_props);
118 	return (0);
119 }
120 
121 static const char *
122 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
123     zprop_source_t *src)
124 {
125 	nvlist_t *nv, *nvl;
126 	uint64_t ival;
127 	char *value;
128 	zprop_source_t source;
129 
130 	nvl = zhp->zpool_props;
131 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
132 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
133 		source = ival;
134 		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
135 	} else {
136 		source = ZPROP_SRC_DEFAULT;
137 		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
138 			value = "-";
139 	}
140 
141 	if (src)
142 		*src = source;
143 
144 	return (value);
145 }
146 
147 uint64_t
148 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
149 {
150 	nvlist_t *nv, *nvl;
151 	uint64_t value;
152 	zprop_source_t source;
153 
154 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
155 		/*
156 		 * zpool_get_all_props() has most likely failed because
157 		 * the pool is faulted, but if all we need is the top level
158 		 * vdev's guid then get it from the zhp config nvlist.
159 		 */
160 		if ((prop == ZPOOL_PROP_GUID) &&
161 		    (nvlist_lookup_nvlist(zhp->zpool_config,
162 		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
163 		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
164 		    == 0)) {
165 			return (value);
166 		}
167 		return (zpool_prop_default_numeric(prop));
168 	}
169 
170 	nvl = zhp->zpool_props;
171 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
172 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
173 		source = value;
174 		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
175 	} else {
176 		source = ZPROP_SRC_DEFAULT;
177 		value = zpool_prop_default_numeric(prop);
178 	}
179 
180 	if (src)
181 		*src = source;
182 
183 	return (value);
184 }
185 
186 /*
187  * Map VDEV STATE to printed strings.
188  */
189 const char *
190 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
191 {
192 	switch (state) {
193 	case VDEV_STATE_CLOSED:
194 	case VDEV_STATE_OFFLINE:
195 		return (gettext("OFFLINE"));
196 	case VDEV_STATE_REMOVED:
197 		return (gettext("REMOVED"));
198 	case VDEV_STATE_CANT_OPEN:
199 		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
200 			return (gettext("FAULTED"));
201 		else if (aux == VDEV_AUX_SPLIT_POOL)
202 			return (gettext("SPLIT"));
203 		else
204 			return (gettext("UNAVAIL"));
205 	case VDEV_STATE_FAULTED:
206 		return (gettext("FAULTED"));
207 	case VDEV_STATE_DEGRADED:
208 		return (gettext("DEGRADED"));
209 	case VDEV_STATE_HEALTHY:
210 		return (gettext("ONLINE"));
211 
212 	default:
213 		break;
214 	}
215 
216 	return (gettext("UNKNOWN"));
217 }
218 
219 /*
220  * Map POOL STATE to printed strings.
221  */
222 const char *
223 zpool_pool_state_to_name(pool_state_t state)
224 {
225 	switch (state) {
226 	default:
227 		break;
228 	case POOL_STATE_ACTIVE:
229 		return (gettext("ACTIVE"));
230 	case POOL_STATE_EXPORTED:
231 		return (gettext("EXPORTED"));
232 	case POOL_STATE_DESTROYED:
233 		return (gettext("DESTROYED"));
234 	case POOL_STATE_SPARE:
235 		return (gettext("SPARE"));
236 	case POOL_STATE_L2CACHE:
237 		return (gettext("L2CACHE"));
238 	case POOL_STATE_UNINITIALIZED:
239 		return (gettext("UNINITIALIZED"));
240 	case POOL_STATE_UNAVAIL:
241 		return (gettext("UNAVAIL"));
242 	case POOL_STATE_POTENTIALLY_ACTIVE:
243 		return (gettext("POTENTIALLY_ACTIVE"));
244 	}
245 
246 	return (gettext("UNKNOWN"));
247 }
248 
249 /*
250  * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
251  * "SUSPENDED", etc).
252  */
253 const char *
254 zpool_get_state_str(zpool_handle_t *zhp)
255 {
256 	zpool_errata_t errata;
257 	zpool_status_t status;
258 	nvlist_t *nvroot;
259 	vdev_stat_t *vs;
260 	uint_t vsc;
261 	const char *str;
262 
263 	status = zpool_get_status(zhp, NULL, &errata);
264 
265 	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
266 		str = gettext("FAULTED");
267 	} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
268 	    status == ZPOOL_STATUS_IO_FAILURE_MMP) {
269 		str = gettext("SUSPENDED");
270 	} else {
271 		verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
272 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
273 		verify(nvlist_lookup_uint64_array(nvroot,
274 		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
275 		    == 0);
276 		str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
277 	}
278 	return (str);
279 }
280 
281 /*
282  * Get a zpool property value for 'prop' and return the value in
283  * a pre-allocated buffer.
284  */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;

	/*
	 * For an unavailable pool the property nvlist may not be
	 * retrievable, so only a limited set of properties is reported;
	 * everything else is rendered as "-".
	 */
	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
		case ZPOOL_PROP_COMPATIBILITY:
			/*
			 * These string properties may still be available
			 * (cached or freshly fetched); fall through to "-"
			 * only if they cannot be obtained.
			 */
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			zfs_fallthrough;
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	/* Lazily fetch the property nvlist; NAME needs no kernel data. */
	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		/* Byte quantities: raw when literal, humanized otherwise. */
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
		case ZPOOL_PROP_MAXBLOCKSIZE:
		case ZPOOL_PROP_MAXDNODESIZE:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		/* Zero means "none" for these; display as "-". */
		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_CHECKPOINT:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		/* UINT64_MAX marks fragmentation as unknown. */
		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		/* Stored as ratio * 100; printed with two decimals. */
		case ZPOOL_PROP_DEDUPRATIO:
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;
		case ZPOOL_PROP_VERSION:
			/* Feature-flag pools have no single version number. */
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			zfs_fallthrough;
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
435 
436 /*
437  * Check if the bootfs name has the same pool name as it is set to.
438  * Assuming bootfs is a valid dataset name.
439  */
440 static boolean_t
441 bootfs_name_valid(const char *pool, const char *bootfs)
442 {
443 	int len = strlen(pool);
444 	if (bootfs[0] == '\0')
445 		return (B_TRUE);
446 
447 	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
448 		return (B_FALSE);
449 
450 	if (strncmp(pool, bootfs, len) == 0 &&
451 	    (bootfs[len] == '/' || bootfs[len] == '\0'))
452 		return (B_TRUE);
453 
454 	return (B_FALSE);
455 }
456 
457 /*
458  * Given an nvlist of zpool properties to be set, validate that they are
459  * correct, and parse any numeric properties (index, boolean, etc) if they are
460  * specified as strings.
461  */
462 static nvlist_t *
463 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
464     nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
465 {
466 	nvpair_t *elem;
467 	nvlist_t *retprops;
468 	zpool_prop_t prop;
469 	char *strval;
470 	uint64_t intval;
471 	char *slash, *check;
472 	struct stat64 statbuf;
473 	zpool_handle_t *zhp;
474 	char report[1024];
475 
476 	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
477 		(void) no_memory(hdl);
478 		return (NULL);
479 	}
480 
481 	elem = NULL;
482 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
483 		const char *propname = nvpair_name(elem);
484 
485 		if (flags.vdevprop && zpool_prop_vdev(propname)) {
486 			vdev_prop_t vprop = vdev_name_to_prop(propname);
487 
488 			if (vdev_prop_readonly(vprop)) {
489 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
490 				    "is readonly"), propname);
491 				(void) zfs_error(hdl, EZFS_PROPREADONLY,
492 				    errbuf);
493 				goto error;
494 			}
495 
496 			if (zprop_parse_value(hdl, elem, vprop, ZFS_TYPE_VDEV,
497 			    retprops, &strval, &intval, errbuf) != 0)
498 				goto error;
499 
500 			continue;
501 		} else if (flags.vdevprop && vdev_prop_user(propname)) {
502 			if (nvlist_add_nvpair(retprops, elem) != 0) {
503 				(void) no_memory(hdl);
504 				goto error;
505 			}
506 			continue;
507 		} else if (flags.vdevprop) {
508 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
509 			    "invalid property: '%s'"), propname);
510 			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
511 			goto error;
512 		}
513 
514 		prop = zpool_name_to_prop(propname);
515 		if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
516 			int err;
517 			char *fname = strchr(propname, '@') + 1;
518 
519 			err = zfeature_lookup_name(fname, NULL);
520 			if (err != 0) {
521 				ASSERT3U(err, ==, ENOENT);
522 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
523 				    "feature '%s' unsupported by kernel"),
524 				    fname);
525 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
526 				goto error;
527 			}
528 
529 			if (nvpair_type(elem) != DATA_TYPE_STRING) {
530 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
531 				    "'%s' must be a string"), propname);
532 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
533 				goto error;
534 			}
535 
536 			(void) nvpair_value_string(elem, &strval);
537 			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
538 			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
539 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
540 				    "property '%s' can only be set to "
541 				    "'enabled' or 'disabled'"), propname);
542 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
543 				goto error;
544 			}
545 
546 			if (!flags.create &&
547 			    strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
548 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
549 				    "property '%s' can only be set to "
550 				    "'disabled' at creation time"), propname);
551 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
552 				goto error;
553 			}
554 
555 			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
556 				(void) no_memory(hdl);
557 				goto error;
558 			}
559 			continue;
560 		}
561 
562 		/*
563 		 * Make sure this property is valid and applies to this type.
564 		 */
565 		if (prop == ZPOOL_PROP_INVAL) {
566 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
567 			    "invalid property '%s'"), propname);
568 			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
569 			goto error;
570 		}
571 
572 		if (zpool_prop_readonly(prop)) {
573 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
574 			    "is readonly"), propname);
575 			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
576 			goto error;
577 		}
578 
579 		if (!flags.create && zpool_prop_setonce(prop)) {
580 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
581 			    "property '%s' can only be set at "
582 			    "creation time"), propname);
583 			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
584 			goto error;
585 		}
586 
587 		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
588 		    &strval, &intval, errbuf) != 0)
589 			goto error;
590 
591 		/*
592 		 * Perform additional checking for specific properties.
593 		 */
594 		switch (prop) {
595 		case ZPOOL_PROP_VERSION:
596 			if (intval < version ||
597 			    !SPA_VERSION_IS_SUPPORTED(intval)) {
598 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
599 				    "property '%s' number %llu is invalid."),
600 				    propname, (unsigned long long)intval);
601 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
602 				goto error;
603 			}
604 			break;
605 
606 		case ZPOOL_PROP_ASHIFT:
607 			if (intval != 0 &&
608 			    (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
609 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
610 				    "property '%s' number %llu is invalid, "
611 				    "only values between %" PRId32 " and %"
612 				    PRId32 " are allowed."),
613 				    propname, (unsigned long long)intval,
614 				    ASHIFT_MIN, ASHIFT_MAX);
615 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
616 				goto error;
617 			}
618 			break;
619 
620 		case ZPOOL_PROP_BOOTFS:
621 			if (flags.create || flags.import) {
622 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
623 				    "property '%s' cannot be set at creation "
624 				    "or import time"), propname);
625 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
626 				goto error;
627 			}
628 
629 			if (version < SPA_VERSION_BOOTFS) {
630 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
631 				    "pool must be upgraded to support "
632 				    "'%s' property"), propname);
633 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
634 				goto error;
635 			}
636 
637 			/*
638 			 * bootfs property value has to be a dataset name and
639 			 * the dataset has to be in the same pool as it sets to.
640 			 */
641 			if (!bootfs_name_valid(poolname, strval)) {
642 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
643 				    "is an invalid name"), strval);
644 				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
645 				goto error;
646 			}
647 
648 			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
649 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
650 				    "could not open pool '%s'"), poolname);
651 				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
652 				goto error;
653 			}
654 			zpool_close(zhp);
655 			break;
656 
657 		case ZPOOL_PROP_ALTROOT:
658 			if (!flags.create && !flags.import) {
659 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
660 				    "property '%s' can only be set during pool "
661 				    "creation or import"), propname);
662 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
663 				goto error;
664 			}
665 
666 			if (strval[0] != '/') {
667 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
668 				    "bad alternate root '%s'"), strval);
669 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
670 				goto error;
671 			}
672 			break;
673 
674 		case ZPOOL_PROP_CACHEFILE:
675 			if (strval[0] == '\0')
676 				break;
677 
678 			if (strcmp(strval, "none") == 0)
679 				break;
680 
681 			if (strval[0] != '/') {
682 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
683 				    "property '%s' must be empty, an "
684 				    "absolute path, or 'none'"), propname);
685 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
686 				goto error;
687 			}
688 
689 			slash = strrchr(strval, '/');
690 
691 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
692 			    strcmp(slash, "/..") == 0) {
693 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
694 				    "'%s' is not a valid file"), strval);
695 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
696 				goto error;
697 			}
698 
699 			*slash = '\0';
700 
701 			if (strval[0] != '\0' &&
702 			    (stat64(strval, &statbuf) != 0 ||
703 			    !S_ISDIR(statbuf.st_mode))) {
704 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
705 				    "'%s' is not a valid directory"),
706 				    strval);
707 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
708 				goto error;
709 			}
710 
711 			*slash = '/';
712 			break;
713 
714 		case ZPOOL_PROP_COMPATIBILITY:
715 			switch (zpool_load_compat(strval, NULL, report, 1024)) {
716 			case ZPOOL_COMPATIBILITY_OK:
717 			case ZPOOL_COMPATIBILITY_WARNTOKEN:
718 				break;
719 			case ZPOOL_COMPATIBILITY_BADFILE:
720 			case ZPOOL_COMPATIBILITY_BADTOKEN:
721 			case ZPOOL_COMPATIBILITY_NOFILES:
722 				zfs_error_aux(hdl, "%s", report);
723 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
724 				goto error;
725 			}
726 			break;
727 
728 		case ZPOOL_PROP_COMMENT:
729 			for (check = strval; *check != '\0'; check++) {
730 				if (!isprint(*check)) {
731 					zfs_error_aux(hdl,
732 					    dgettext(TEXT_DOMAIN,
733 					    "comment may only have printable "
734 					    "characters"));
735 					(void) zfs_error(hdl, EZFS_BADPROP,
736 					    errbuf);
737 					goto error;
738 				}
739 			}
740 			if (strlen(strval) > ZPROP_MAX_COMMENT) {
741 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
742 				    "comment must not exceed %d characters"),
743 				    ZPROP_MAX_COMMENT);
744 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
745 				goto error;
746 			}
747 			break;
748 		case ZPOOL_PROP_READONLY:
749 			if (!flags.import) {
750 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
751 				    "property '%s' can only be set at "
752 				    "import time"), propname);
753 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
754 				goto error;
755 			}
756 			break;
757 		case ZPOOL_PROP_MULTIHOST:
758 			if (get_system_hostid() == 0) {
759 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
760 				    "requires a non-zero system hostid"));
761 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
762 				goto error;
763 			}
764 			break;
765 		case ZPOOL_PROP_DEDUPDITTO:
766 			printf("Note: property '%s' no longer has "
767 			    "any effect\n", propname);
768 			break;
769 
770 		default:
771 			break;
772 		}
773 	}
774 
775 	return (retprops);
776 error:
777 	nvlist_free(retprops);
778 	return (NULL);
779 }
780 
781 /*
782  * Set zpool property : propname=propval.
783  */
784 int
785 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
786 {
787 	zfs_cmd_t zc = {"\0"};
788 	int ret = -1;
789 	char errbuf[1024];
790 	nvlist_t *nvl = NULL;
791 	nvlist_t *realprops;
792 	uint64_t version;
793 	prop_flags_t flags = { 0 };
794 
795 	(void) snprintf(errbuf, sizeof (errbuf),
796 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
797 	    zhp->zpool_name);
798 
799 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
800 		return (no_memory(zhp->zpool_hdl));
801 
802 	if (nvlist_add_string(nvl, propname, propval) != 0) {
803 		nvlist_free(nvl);
804 		return (no_memory(zhp->zpool_hdl));
805 	}
806 
807 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
808 	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
809 	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
810 		nvlist_free(nvl);
811 		return (-1);
812 	}
813 
814 	nvlist_free(nvl);
815 	nvl = realprops;
816 
817 	/*
818 	 * Execute the corresponding ioctl() to set this property.
819 	 */
820 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
821 
822 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
823 		nvlist_free(nvl);
824 		return (-1);
825 	}
826 
827 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
828 
829 	zcmd_free_nvlists(&zc);
830 	nvlist_free(nvl);
831 
832 	if (ret)
833 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
834 	else
835 		(void) zpool_props_refresh(zhp);
836 
837 	return (ret);
838 }
839 
/*
 * Expand the property list '*plp' for this pool: on the first expansion of
 * an "all properties" list, append one entry per known feature, then append
 * entries for any features active on the pool that this build does not
 * recognize ("unsupported@<guid>").  Finally, widen each entry's display
 * width to fit the pool's current value.  Returns 0 on success, -1 on error.
 */
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
    zfs_type_t type, boolean_t literal)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, type) != 0)
		return (-1);

	/* Vdev property lists get no feature entries. */
	if (type == ZFS_TYPE_VDEV)
		return (0);

	/* Find the tail so new entries are appended in order. */
	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	/* On first expansion, add a "feature@<name>" entry per feature. */
	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			/* NB: shadows the outer 'entry' deliberately. */
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	/* Widen fixed-width entries to fit this pool's actual values. */
	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_fixed && !literal)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, literal) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}
937 
938 int
939 vdev_expand_proplist(zpool_handle_t *zhp, const char *vdevname,
940     zprop_list_t **plp)
941 {
942 	zprop_list_t *entry;
943 	char buf[ZFS_MAXPROPLEN];
944 	char *strval = NULL;
945 	int err = 0;
946 	nvpair_t *elem = NULL;
947 	nvlist_t *vprops = NULL;
948 	nvlist_t *propval = NULL;
949 	const char *propname;
950 	vdev_prop_t prop;
951 	zprop_list_t **last;
952 
953 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
954 		if (entry->pl_fixed)
955 			continue;
956 
957 		if (zpool_get_vdev_prop(zhp, vdevname, entry->pl_prop,
958 		    entry->pl_user_prop, buf, sizeof (buf), NULL,
959 		    B_FALSE) == 0) {
960 			if (strlen(buf) > entry->pl_width)
961 				entry->pl_width = strlen(buf);
962 		}
963 		if (entry->pl_prop == VDEV_PROP_NAME &&
964 		    strlen(vdevname) > entry->pl_width)
965 			entry->pl_width = strlen(vdevname);
966 	}
967 
968 	/* Handle the all properties case */
969 	last = plp;
970 	if (*last != NULL && (*last)->pl_all == B_TRUE) {
971 		while (*last != NULL)
972 			last = &(*last)->pl_next;
973 
974 		err = zpool_get_all_vdev_props(zhp, vdevname, &vprops);
975 		if (err != 0)
976 			return (err);
977 
978 		while ((elem = nvlist_next_nvpair(vprops, elem)) != NULL) {
979 			propname = nvpair_name(elem);
980 
981 			/* Skip properties that are not user defined */
982 			if ((prop = vdev_name_to_prop(propname)) !=
983 			    VDEV_PROP_USER)
984 				continue;
985 
986 			if (nvpair_value_nvlist(elem, &propval) != 0)
987 				continue;
988 
989 			verify(nvlist_lookup_string(propval, ZPROP_VALUE,
990 			    &strval) == 0);
991 
992 			if ((entry = zfs_alloc(zhp->zpool_hdl,
993 			    sizeof (zprop_list_t))) == NULL)
994 				return (ENOMEM);
995 
996 			entry->pl_prop = prop;
997 			entry->pl_user_prop = zfs_strdup(zhp->zpool_hdl,
998 			    propname);
999 			entry->pl_width = strlen(strval);
1000 			entry->pl_all = B_TRUE;
1001 			*last = entry;
1002 			last = &entry->pl_next;
1003 		}
1004 	}
1005 
1006 	return (0);
1007 }
1008 
1009 /*
1010  * Get the state for the given feature on the given ZFS pool.
1011  */
1012 int
1013 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
1014     size_t len)
1015 {
1016 	uint64_t refcount;
1017 	boolean_t found = B_FALSE;
1018 	nvlist_t *features = zpool_get_features(zhp);
1019 	boolean_t supported;
1020 	const char *feature = strchr(propname, '@') + 1;
1021 
1022 	supported = zpool_prop_feature(propname);
1023 	ASSERT(supported || zpool_prop_unsupported(propname));
1024 
1025 	/*
1026 	 * Convert from feature name to feature guid. This conversion is
1027 	 * unnecessary for unsupported@... properties because they already
1028 	 * use guids.
1029 	 */
1030 	if (supported) {
1031 		int ret;
1032 		spa_feature_t fid;
1033 
1034 		ret = zfeature_lookup_name(feature, &fid);
1035 		if (ret != 0) {
1036 			(void) strlcpy(buf, "-", len);
1037 			return (ENOTSUP);
1038 		}
1039 		feature = spa_feature_table[fid].fi_guid;
1040 	}
1041 
1042 	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
1043 		found = B_TRUE;
1044 
1045 	if (supported) {
1046 		if (!found) {
1047 			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
1048 		} else  {
1049 			if (refcount == 0)
1050 				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
1051 			else
1052 				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
1053 		}
1054 	} else {
1055 		if (found) {
1056 			if (refcount == 0) {
1057 				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
1058 			} else {
1059 				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
1060 			}
1061 		} else {
1062 			(void) strlcpy(buf, "-", len);
1063 			return (ENOTSUP);
1064 		}
1065 	}
1066 
1067 	return (0);
1068 }
1069 
1070 /*
1071  * Validate the given pool name, optionally putting an extended error message in
1072  * 'buf'.
1073  */
/*
 * Validate the given pool name; on failure, an extended error message is
 * recorded on 'hdl' (when non-NULL).  'isopen' relaxes the reserved-name
 * check so that existing pools with now-reserved names can still be opened.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "draid", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	/* Map each namecheck failure to a human-readable explanation. */
	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
1168 
1169 /*
1170  * Open a handle to the given pool, even if the pool is currently in the FAULTED
1171  * state.
1172  */
1173 zpool_handle_t *
1174 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1175 {
1176 	zpool_handle_t *zhp;
1177 	boolean_t missing;
1178 
1179 	/*
1180 	 * Make sure the pool name is valid.
1181 	 */
1182 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1183 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1184 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1185 		    pool);
1186 		return (NULL);
1187 	}
1188 
1189 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1190 		return (NULL);
1191 
1192 	zhp->zpool_hdl = hdl;
1193 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1194 
1195 	if (zpool_refresh_stats(zhp, &missing) != 0) {
1196 		zpool_close(zhp);
1197 		return (NULL);
1198 	}
1199 
1200 	if (missing) {
1201 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1202 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
1203 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1204 		zpool_close(zhp);
1205 		return (NULL);
1206 	}
1207 
1208 	return (zhp);
1209 }
1210 
1211 /*
1212  * Like the above, but silent on error.  Used when iterating over pools (because
1213  * the configuration cache may be out of date).
1214  */
1215 int
1216 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1217 {
1218 	zpool_handle_t *zhp;
1219 	boolean_t missing;
1220 
1221 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1222 		return (-1);
1223 
1224 	zhp->zpool_hdl = hdl;
1225 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1226 
1227 	if (zpool_refresh_stats(zhp, &missing) != 0) {
1228 		zpool_close(zhp);
1229 		return (-1);
1230 	}
1231 
1232 	if (missing) {
1233 		zpool_close(zhp);
1234 		*ret = NULL;
1235 		return (0);
1236 	}
1237 
1238 	*ret = zhp;
1239 	return (0);
1240 }
1241 
1242 /*
1243  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1244  * state.
1245  */
1246 zpool_handle_t *
1247 zpool_open(libzfs_handle_t *hdl, const char *pool)
1248 {
1249 	zpool_handle_t *zhp;
1250 
1251 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1252 		return (NULL);
1253 
1254 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1255 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1256 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1257 		zpool_close(zhp);
1258 		return (NULL);
1259 	}
1260 
1261 	return (zhp);
1262 }
1263 
1264 /*
1265  * Close the handle.  Simply frees the memory associated with the handle.
1266  */
1267 void
1268 zpool_close(zpool_handle_t *zhp)
1269 {
1270 	nvlist_free(zhp->zpool_config);
1271 	nvlist_free(zhp->zpool_old_config);
1272 	nvlist_free(zhp->zpool_props);
1273 	free(zhp);
1274 }
1275 
1276 /*
1277  * Return the name of the pool.
1278  */
1279 const char *
1280 zpool_get_name(zpool_handle_t *zhp)
1281 {
1282 	return (zhp->zpool_name);
1283 }
1284 
1285 
1286 /*
1287  * Return the state of the pool (ACTIVE or UNAVAILABLE)
1288  */
1289 int
1290 zpool_get_state(zpool_handle_t *zhp)
1291 {
1292 	return (zhp->zpool_state);
1293 }
1294 
1295 /*
1296  * Check if vdev list contains a special vdev
1297  */
1298 static boolean_t
1299 zpool_has_special_vdev(nvlist_t *nvroot)
1300 {
1301 	nvlist_t **child;
1302 	uint_t children;
1303 
1304 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
1305 	    &children) == 0) {
1306 		for (uint_t c = 0; c < children; c++) {
1307 			char *bias;
1308 
1309 			if (nvlist_lookup_string(child[c],
1310 			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
1311 			    strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
1312 				return (B_TRUE);
1313 			}
1314 		}
1315 	}
1316 	return (B_FALSE);
1317 }
1318 
1319 /*
1320  * Check if vdev list contains a dRAID vdev
1321  */
1322 static boolean_t
1323 zpool_has_draid_vdev(nvlist_t *nvroot)
1324 {
1325 	nvlist_t **child;
1326 	uint_t children;
1327 
1328 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1329 	    &child, &children) == 0) {
1330 		for (uint_t c = 0; c < children; c++) {
1331 			char *type;
1332 
1333 			if (nvlist_lookup_string(child[c],
1334 			    ZPOOL_CONFIG_TYPE, &type) == 0 &&
1335 			    strcmp(type, VDEV_TYPE_DRAID) == 0) {
1336 				return (B_TRUE);
1337 			}
1338 		}
1339 	}
1340 	return (B_FALSE);
1341 }
1342 
1343 /*
1344  * Output a dRAID top-level vdev name in to the provided buffer.
1345  */
1346 static char *
1347 zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
1348     uint64_t spares, uint64_t children)
1349 {
1350 	snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
1351 	    VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
1352 	    (u_longlong_t)children, (u_longlong_t)spares);
1353 
1354 	return (name);
1355 }
1356 
1357 /*
1358  * Return B_TRUE if the provided name is a dRAID spare name.
1359  */
1360 boolean_t
1361 zpool_is_draid_spare(const char *name)
1362 {
1363 	uint64_t spare_id, parity, vdev_id;
1364 
1365 	if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
1366 	    (u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
1367 	    (u_longlong_t *)&spare_id) == 3) {
1368 		return (B_TRUE);
1369 	}
1370 
1371 	return (B_FALSE);
1372 }
1373 
1374 /*
1375  * Create the named pool, using the provided vdev list.  It is assumed
1376  * that the consumer has already validated the contents of the nvlist, so we
1377  * don't have to worry about error semantics.
1378  */
1379 int
1380 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1381     nvlist_t *props, nvlist_t *fsprops)
1382 {
1383 	zfs_cmd_t zc = {"\0"};
1384 	nvlist_t *zc_fsprops = NULL;
1385 	nvlist_t *zc_props = NULL;
1386 	nvlist_t *hidden_args = NULL;
1387 	uint8_t *wkeydata = NULL;
1388 	uint_t wkeylen = 0;
1389 	char msg[1024];
1390 	int ret = -1;
1391 
1392 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1393 	    "cannot create '%s'"), pool);
1394 
1395 	if (!zpool_name_valid(hdl, B_FALSE, pool))
1396 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1397 
1398 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1399 		return (-1);
1400 
1401 	if (props) {
1402 		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1403 
1404 		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1405 		    SPA_VERSION_1, flags, msg)) == NULL) {
1406 			goto create_failed;
1407 		}
1408 	}
1409 
1410 	if (fsprops) {
1411 		uint64_t zoned;
1412 		char *zonestr;
1413 
1414 		zoned = ((nvlist_lookup_string(fsprops,
1415 		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1416 		    strcmp(zonestr, "on") == 0);
1417 
1418 		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1419 		    fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
1420 			goto create_failed;
1421 		}
1422 
1423 		if (nvlist_exists(zc_fsprops,
1424 		    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
1425 		    !zpool_has_special_vdev(nvroot)) {
1426 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1427 			    "%s property requires a special vdev"),
1428 			    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
1429 			(void) zfs_error(hdl, EZFS_BADPROP, msg);
1430 			goto create_failed;
1431 		}
1432 
1433 		if (!zc_props &&
1434 		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1435 			goto create_failed;
1436 		}
1437 		if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
1438 		    &wkeydata, &wkeylen) != 0) {
1439 			zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
1440 			goto create_failed;
1441 		}
1442 		if (nvlist_add_nvlist(zc_props,
1443 		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1444 			goto create_failed;
1445 		}
1446 		if (wkeydata != NULL) {
1447 			if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
1448 				goto create_failed;
1449 
1450 			if (nvlist_add_uint8_array(hidden_args, "wkeydata",
1451 			    wkeydata, wkeylen) != 0)
1452 				goto create_failed;
1453 
1454 			if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
1455 			    hidden_args) != 0)
1456 				goto create_failed;
1457 		}
1458 	}
1459 
1460 	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1461 		goto create_failed;
1462 
1463 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1464 
1465 	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1466 
1467 		zcmd_free_nvlists(&zc);
1468 		nvlist_free(zc_props);
1469 		nvlist_free(zc_fsprops);
1470 		nvlist_free(hidden_args);
1471 		if (wkeydata != NULL)
1472 			free(wkeydata);
1473 
1474 		switch (errno) {
1475 		case EBUSY:
1476 			/*
1477 			 * This can happen if the user has specified the same
1478 			 * device multiple times.  We can't reliably detect this
1479 			 * until we try to add it and see we already have a
1480 			 * label.  This can also happen under if the device is
1481 			 * part of an active md or lvm device.
1482 			 */
1483 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1484 			    "one or more vdevs refer to the same device, or "
1485 			    "one of\nthe devices is part of an active md or "
1486 			    "lvm device"));
1487 			return (zfs_error(hdl, EZFS_BADDEV, msg));
1488 
1489 		case ERANGE:
1490 			/*
1491 			 * This happens if the record size is smaller or larger
1492 			 * than the allowed size range, or not a power of 2.
1493 			 *
1494 			 * NOTE: although zfs_valid_proplist is called earlier,
1495 			 * this case may have slipped through since the
1496 			 * pool does not exist yet and it is therefore
1497 			 * impossible to read properties e.g. max blocksize
1498 			 * from the pool.
1499 			 */
1500 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1501 			    "record size invalid"));
1502 			return (zfs_error(hdl, EZFS_BADPROP, msg));
1503 
1504 		case EOVERFLOW:
1505 			/*
1506 			 * This occurs when one of the devices is below
1507 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
1508 			 * device was the problem device since there's no
1509 			 * reliable way to determine device size from userland.
1510 			 */
1511 			{
1512 				char buf[64];
1513 
1514 				zfs_nicebytes(SPA_MINDEVSIZE, buf,
1515 				    sizeof (buf));
1516 
1517 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1518 				    "one or more devices is less than the "
1519 				    "minimum size (%s)"), buf);
1520 			}
1521 			return (zfs_error(hdl, EZFS_BADDEV, msg));
1522 
1523 		case ENOSPC:
1524 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1525 			    "one or more devices is out of space"));
1526 			return (zfs_error(hdl, EZFS_BADDEV, msg));
1527 
1528 		case EINVAL:
1529 			if (zpool_has_draid_vdev(nvroot) &&
1530 			    zfeature_lookup_name("draid", NULL) != 0) {
1531 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1532 				    "dRAID vdevs are unsupported by the "
1533 				    "kernel"));
1534 				return (zfs_error(hdl, EZFS_BADDEV, msg));
1535 			} else {
1536 				return (zpool_standard_error(hdl, errno, msg));
1537 			}
1538 
1539 		default:
1540 			return (zpool_standard_error(hdl, errno, msg));
1541 		}
1542 	}
1543 
1544 create_failed:
1545 	zcmd_free_nvlists(&zc);
1546 	nvlist_free(zc_props);
1547 	nvlist_free(zc_fsprops);
1548 	nvlist_free(hidden_args);
1549 	if (wkeydata != NULL)
1550 		free(wkeydata);
1551 	return (ret);
1552 }
1553 
1554 /*
1555  * Destroy the given pool.  It is up to the caller to ensure that there are no
1556  * datasets left in the pool.
1557  */
1558 int
1559 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1560 {
1561 	zfs_cmd_t zc = {"\0"};
1562 	zfs_handle_t *zfp = NULL;
1563 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1564 	char msg[1024];
1565 
1566 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1567 	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1568 		return (-1);
1569 
1570 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1571 	zc.zc_history = (uint64_t)(uintptr_t)log_str;
1572 
1573 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1574 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1575 		    "cannot destroy '%s'"), zhp->zpool_name);
1576 
1577 		if (errno == EROFS) {
1578 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1579 			    "one or more devices is read only"));
1580 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1581 		} else {
1582 			(void) zpool_standard_error(hdl, errno, msg);
1583 		}
1584 
1585 		if (zfp)
1586 			zfs_close(zfp);
1587 		return (-1);
1588 	}
1589 
1590 	if (zfp) {
1591 		remove_mountpoint(zfp);
1592 		zfs_close(zfp);
1593 	}
1594 
1595 	return (0);
1596 }
1597 
1598 /*
1599  * Create a checkpoint in the given pool.
1600  */
1601 int
1602 zpool_checkpoint(zpool_handle_t *zhp)
1603 {
1604 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1605 	char msg[1024];
1606 	int error;
1607 
1608 	error = lzc_pool_checkpoint(zhp->zpool_name);
1609 	if (error != 0) {
1610 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1611 		    "cannot checkpoint '%s'"), zhp->zpool_name);
1612 		(void) zpool_standard_error(hdl, error, msg);
1613 		return (-1);
1614 	}
1615 
1616 	return (0);
1617 }
1618 
1619 /*
1620  * Discard the checkpoint from the given pool.
1621  */
1622 int
1623 zpool_discard_checkpoint(zpool_handle_t *zhp)
1624 {
1625 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1626 	char msg[1024];
1627 	int error;
1628 
1629 	error = lzc_pool_checkpoint_discard(zhp->zpool_name);
1630 	if (error != 0) {
1631 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1632 		    "cannot discard checkpoint in '%s'"), zhp->zpool_name);
1633 		(void) zpool_standard_error(hdl, error, msg);
1634 		return (-1);
1635 	}
1636 
1637 	return (0);
1638 }
1639 
1640 /*
1641  * Add the given vdevs to the pool.  The caller must have already performed the
1642  * necessary verification to ensure that the vdev specification is well-formed.
1643  */
1644 int
1645 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1646 {
1647 	zfs_cmd_t zc = {"\0"};
1648 	int ret;
1649 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1650 	char msg[1024];
1651 	nvlist_t **spares, **l2cache;
1652 	uint_t nspares, nl2cache;
1653 
1654 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1655 	    "cannot add to '%s'"), zhp->zpool_name);
1656 
1657 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1658 	    SPA_VERSION_SPARES &&
1659 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1660 	    &spares, &nspares) == 0) {
1661 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1662 		    "upgraded to add hot spares"));
1663 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1664 	}
1665 
1666 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1667 	    SPA_VERSION_L2CACHE &&
1668 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1669 	    &l2cache, &nl2cache) == 0) {
1670 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1671 		    "upgraded to add cache devices"));
1672 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1673 	}
1674 
1675 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1676 		return (-1);
1677 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1678 
1679 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1680 		switch (errno) {
1681 		case EBUSY:
1682 			/*
1683 			 * This can happen if the user has specified the same
1684 			 * device multiple times.  We can't reliably detect this
1685 			 * until we try to add it and see we already have a
1686 			 * label.
1687 			 */
1688 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1689 			    "one or more vdevs refer to the same device"));
1690 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1691 			break;
1692 
1693 		case EINVAL:
1694 
1695 			if (zpool_has_draid_vdev(nvroot) &&
1696 			    zfeature_lookup_name("draid", NULL) != 0) {
1697 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1698 				    "dRAID vdevs are unsupported by the "
1699 				    "kernel"));
1700 			} else {
1701 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1702 				    "invalid config; a pool with removing/"
1703 				    "removed vdevs does not support adding "
1704 				    "raidz or dRAID vdevs"));
1705 			}
1706 
1707 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1708 			break;
1709 
1710 		case EOVERFLOW:
1711 			/*
1712 			 * This occurs when one of the devices is below
1713 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
1714 			 * device was the problem device since there's no
1715 			 * reliable way to determine device size from userland.
1716 			 */
1717 			{
1718 				char buf[64];
1719 
1720 				zfs_nicebytes(SPA_MINDEVSIZE, buf,
1721 				    sizeof (buf));
1722 
1723 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1724 				    "device is less than the minimum "
1725 				    "size (%s)"), buf);
1726 			}
1727 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1728 			break;
1729 
1730 		case ENOTSUP:
1731 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1732 			    "pool must be upgraded to add these vdevs"));
1733 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
1734 			break;
1735 
1736 		default:
1737 			(void) zpool_standard_error(hdl, errno, msg);
1738 		}
1739 
1740 		ret = -1;
1741 	} else {
1742 		ret = 0;
1743 	}
1744 
1745 	zcmd_free_nvlists(&zc);
1746 
1747 	return (ret);
1748 }
1749 
1750 /*
1751  * Exports the pool from the system.  The caller must ensure that there are no
1752  * mounted datasets in the pool.
1753  */
1754 static int
1755 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1756     const char *log_str)
1757 {
1758 	zfs_cmd_t zc = {"\0"};
1759 
1760 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1761 	zc.zc_cookie = force;
1762 	zc.zc_guid = hardforce;
1763 	zc.zc_history = (uint64_t)(uintptr_t)log_str;
1764 
1765 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1766 		switch (errno) {
1767 		case EXDEV:
1768 			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1769 			    "use '-f' to override the following errors:\n"
1770 			    "'%s' has an active shared spare which could be"
1771 			    " used by other pools once '%s' is exported."),
1772 			    zhp->zpool_name, zhp->zpool_name);
1773 			return (zfs_error_fmt(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1774 			    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
1775 			    zhp->zpool_name));
1776 		default:
1777 			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1778 			    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
1779 			    zhp->zpool_name));
1780 		}
1781 	}
1782 
1783 	return (0);
1784 }
1785 
/* Export the pool without hardforce; see zpool_export_common(). */
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}
1791 
/* Export the pool with both force and hardforce set. */
int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
1797 
/*
 * Print a human-readable summary of a (possible) pool rewind: which txg
 * time the pool was (or would be) rewound to, and roughly how much
 * transaction history is discarded.  'dryrun' selects "would" phrasing.
 * Silent if error printing is disabled or no rewind info is present.
 */
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;	/* seconds of discarded transactions; -1 = unknown */
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	/* Rewind details live under load-info/rewind-info in the config. */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		/* Report loss in minutes past two minutes, else seconds. */
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}
1849 
/*
 * Print recovery advice for a pool that failed to load: whether a rewind
 * could recover it, how much data would be lost, and the 'zpool clear -F' /
 * 'zpool import -F' command to attempt it.  'reason' >= 0 prefixes the
 * output with "action: " (status-report style).  Silent if error printing
 * is disabled on the handle.
 */
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;	/* seconds of lost transactions; -1 = unknown */
	uint64_t edata = UINT64_MAX;	/* persistent data errors after rewind */
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem.  "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	/* Report loss in minutes past two minutes, else seconds. */
	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly.  "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly.  "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain.  "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain.  "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}
1929 
1930 /*
1931  * zpool_import() is a contracted interface. Should be kept the same
1932  * if possible.
1933  *
1934  * Applications should use zpool_import_props() to import a pool with
1935  * new properties value to be set.
1936  */
1937 int
1938 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1939     char *altroot)
1940 {
1941 	nvlist_t *props = NULL;
1942 	int ret;
1943 
1944 	if (altroot != NULL) {
1945 		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1946 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1947 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1948 			    newname));
1949 		}
1950 
1951 		if (nvlist_add_string(props,
1952 		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1953 		    nvlist_add_string(props,
1954 		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1955 			nvlist_free(props);
1956 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1957 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1958 			    newname));
1959 		}
1960 	}
1961 
1962 	ret = zpool_import_props(hdl, config, newname, props,
1963 	    ZFS_IMPORT_NORMAL);
1964 	nvlist_free(props);
1965 	return (ret);
1966 }
1967 
1968 static void
1969 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1970     int indent)
1971 {
1972 	nvlist_t **child;
1973 	uint_t c, children;
1974 	char *vname;
1975 	uint64_t is_log = 0;
1976 
1977 	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1978 	    &is_log);
1979 
1980 	if (name != NULL)
1981 		(void) printf("\t%*s%s%s\n", indent, "", name,
1982 		    is_log ? " [log]" : "");
1983 
1984 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1985 	    &child, &children) != 0)
1986 		return;
1987 
1988 	for (c = 0; c < children; c++) {
1989 		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
1990 		print_vdev_tree(hdl, vname, child[c], indent + 2);
1991 		free(vname);
1992 	}
1993 }
1994 
1995 void
1996 zpool_print_unsup_feat(nvlist_t *config)
1997 {
1998 	nvlist_t *nvinfo, *unsup_feat;
1999 	nvpair_t *nvp;
2000 
2001 	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
2002 	    0);
2003 	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
2004 	    &unsup_feat) == 0);
2005 
2006 	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
2007 	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
2008 		char *desc;
2009 
2010 		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
2011 		verify(nvpair_value_string(nvp, &desc) == 0);
2012 
2013 		if (strlen(desc) > 0)
2014 			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
2015 		else
2016 			(void) printf("\t%s\n", nvpair_name(nvp));
2017 	}
2018 }
2019 
2020 /*
2021  * Import the given pool using the known configuration and a list of
2022  * properties to be set. The configuration should have come from
2023  * zpool_find_import(). The 'newname' parameters control whether the pool
2024  * is imported with a different name.
2025  */
2026 int
2027 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
2028     nvlist_t *props, int flags)
2029 {
2030 	zfs_cmd_t zc = {"\0"};
2031 	zpool_load_policy_t policy;
2032 	nvlist_t *nv = NULL;
2033 	nvlist_t *nvinfo = NULL;
2034 	nvlist_t *missing = NULL;
2035 	const char *thename;
2036 	char *origname;
2037 	int ret;
2038 	int error = 0;
2039 	char errbuf[1024];
2040 
2041 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
2042 	    &origname) == 0);
2043 
2044 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2045 	    "cannot import pool '%s'"), origname);
2046 
2047 	if (newname != NULL) {
2048 		if (!zpool_name_valid(hdl, B_FALSE, newname))
2049 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
2050 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2051 			    newname));
2052 		thename = newname;
2053 	} else {
2054 		thename = origname;
2055 	}
2056 
2057 	if (props != NULL) {
2058 		uint64_t version;
2059 		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2060 
2061 		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
2062 		    &version) == 0);
2063 
2064 		if ((props = zpool_valid_proplist(hdl, origname,
2065 		    props, version, flags, errbuf)) == NULL)
2066 			return (-1);
2067 		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
2068 			nvlist_free(props);
2069 			return (-1);
2070 		}
2071 		nvlist_free(props);
2072 	}
2073 
2074 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
2075 
2076 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
2077 	    &zc.zc_guid) == 0);
2078 
2079 	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
2080 		zcmd_free_nvlists(&zc);
2081 		return (-1);
2082 	}
2083 	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
2084 		zcmd_free_nvlists(&zc);
2085 		return (-1);
2086 	}
2087 
2088 	zc.zc_cookie = flags;
2089 	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
2090 	    errno == ENOMEM) {
2091 		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
2092 			zcmd_free_nvlists(&zc);
2093 			return (-1);
2094 		}
2095 	}
2096 	if (ret != 0)
2097 		error = errno;
2098 
2099 	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
2100 
2101 	zcmd_free_nvlists(&zc);
2102 
2103 	zpool_get_load_policy(config, &policy);
2104 
2105 	if (error) {
2106 		char desc[1024];
2107 		char aux[256];
2108 
2109 		/*
2110 		 * Dry-run failed, but we print out what success
2111 		 * looks like if we found a best txg
2112 		 */
2113 		if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
2114 			zpool_rewind_exclaim(hdl, newname ? origname : thename,
2115 			    B_TRUE, nv);
2116 			nvlist_free(nv);
2117 			return (-1);
2118 		}
2119 
2120 		if (newname == NULL)
2121 			(void) snprintf(desc, sizeof (desc),
2122 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
2123 			    thename);
2124 		else
2125 			(void) snprintf(desc, sizeof (desc),
2126 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
2127 			    origname, thename);
2128 
2129 		switch (error) {
2130 		case ENOTSUP:
2131 			if (nv != NULL && nvlist_lookup_nvlist(nv,
2132 			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2133 			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
2134 				(void) printf(dgettext(TEXT_DOMAIN, "This "
2135 				    "pool uses the following feature(s) not "
2136 				    "supported by this system:\n"));
2137 				zpool_print_unsup_feat(nv);
2138 				if (nvlist_exists(nvinfo,
2139 				    ZPOOL_CONFIG_CAN_RDONLY)) {
2140 					(void) printf(dgettext(TEXT_DOMAIN,
2141 					    "All unsupported features are only "
2142 					    "required for writing to the pool."
2143 					    "\nThe pool can be imported using "
2144 					    "'-o readonly=on'.\n"));
2145 				}
2146 			}
2147 			/*
2148 			 * Unsupported version.
2149 			 */
2150 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
2151 			break;
2152 
2153 		case EREMOTEIO:
2154 			if (nv != NULL && nvlist_lookup_nvlist(nv,
2155 			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
2156 				char *hostname = "<unknown>";
2157 				uint64_t hostid = 0;
2158 				mmp_state_t mmp_state;
2159 
2160 				mmp_state = fnvlist_lookup_uint64(nvinfo,
2161 				    ZPOOL_CONFIG_MMP_STATE);
2162 
2163 				if (nvlist_exists(nvinfo,
2164 				    ZPOOL_CONFIG_MMP_HOSTNAME))
2165 					hostname = fnvlist_lookup_string(nvinfo,
2166 					    ZPOOL_CONFIG_MMP_HOSTNAME);
2167 
2168 				if (nvlist_exists(nvinfo,
2169 				    ZPOOL_CONFIG_MMP_HOSTID))
2170 					hostid = fnvlist_lookup_uint64(nvinfo,
2171 					    ZPOOL_CONFIG_MMP_HOSTID);
2172 
2173 				if (mmp_state == MMP_STATE_ACTIVE) {
2174 					(void) snprintf(aux, sizeof (aux),
2175 					    dgettext(TEXT_DOMAIN, "pool is imp"
2176 					    "orted on host '%s' (hostid=%lx).\n"
2177 					    "Export the pool on the other "
2178 					    "system, then run 'zpool import'."),
2179 					    hostname, (unsigned long) hostid);
2180 				} else if (mmp_state == MMP_STATE_NO_HOSTID) {
2181 					(void) snprintf(aux, sizeof (aux),
2182 					    dgettext(TEXT_DOMAIN, "pool has "
2183 					    "the multihost property on and "
2184 					    "the\nsystem's hostid is not set. "
2185 					    "Set a unique system hostid with "
2186 					    "the zgenhostid(8) command.\n"));
2187 				}
2188 
2189 				(void) zfs_error_aux(hdl, "%s", aux);
2190 			}
2191 			(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
2192 			break;
2193 
2194 		case EINVAL:
2195 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
2196 			break;
2197 
2198 		case EROFS:
2199 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2200 			    "one or more devices is read only"));
2201 			(void) zfs_error(hdl, EZFS_BADDEV, desc);
2202 			break;
2203 
2204 		case ENXIO:
2205 			if (nv && nvlist_lookup_nvlist(nv,
2206 			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2207 			    nvlist_lookup_nvlist(nvinfo,
2208 			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
2209 				(void) printf(dgettext(TEXT_DOMAIN,
2210 				    "The devices below are missing or "
2211 				    "corrupted, use '-m' to import the pool "
2212 				    "anyway:\n"));
2213 				print_vdev_tree(hdl, NULL, missing, 2);
2214 				(void) printf("\n");
2215 			}
2216 			(void) zpool_standard_error(hdl, error, desc);
2217 			break;
2218 
2219 		case EEXIST:
2220 			(void) zpool_standard_error(hdl, error, desc);
2221 			break;
2222 
2223 		case EBUSY:
2224 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2225 			    "one or more devices are already in use\n"));
2226 			(void) zfs_error(hdl, EZFS_BADDEV, desc);
2227 			break;
2228 		case ENAMETOOLONG:
2229 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2230 			    "new name of at least one dataset is longer than "
2231 			    "the maximum allowable length"));
2232 			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
2233 			break;
2234 		default:
2235 			(void) zpool_standard_error(hdl, error, desc);
2236 			zpool_explain_recover(hdl,
2237 			    newname ? origname : thename, -error, nv);
2238 			break;
2239 		}
2240 
2241 		nvlist_free(nv);
2242 		ret = -1;
2243 	} else {
2244 		zpool_handle_t *zhp;
2245 
2246 		/*
2247 		 * This should never fail, but play it safe anyway.
2248 		 */
2249 		if (zpool_open_silent(hdl, thename, &zhp) != 0)
2250 			ret = -1;
2251 		else if (zhp != NULL)
2252 			zpool_close(zhp);
2253 		if (policy.zlp_rewind &
2254 		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2255 			zpool_rewind_exclaim(hdl, newname ? origname : thename,
2256 			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
2257 		}
2258 		nvlist_free(nv);
2259 		return (0);
2260 	}
2261 
2262 	return (ret);
2263 }
2264 
2265 /*
2266  * Translate vdev names to guids.  If a vdev_path is determined to be
2267  * unsuitable then a vd_errlist is allocated and the vdev path and errno
2268  * are added to it.
2269  */
2270 static int
2271 zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
2272     nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
2273 {
2274 	nvlist_t *errlist = NULL;
2275 	int error = 0;
2276 
2277 	for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
2278 	    elem = nvlist_next_nvpair(vds, elem)) {
2279 		boolean_t spare, cache;
2280 
2281 		char *vd_path = nvpair_name(elem);
2282 		nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
2283 		    NULL);
2284 
2285 		if ((tgt == NULL) || cache || spare) {
2286 			if (errlist == NULL) {
2287 				errlist = fnvlist_alloc();
2288 				error = EINVAL;
2289 			}
2290 
2291 			uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
2292 			    (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
2293 			fnvlist_add_int64(errlist, vd_path, err);
2294 			continue;
2295 		}
2296 
2297 		uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
2298 		fnvlist_add_uint64(vdev_guids, vd_path, guid);
2299 
2300 		char msg[MAXNAMELEN];
2301 		(void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
2302 		fnvlist_add_string(guids_to_paths, msg, vd_path);
2303 	}
2304 
2305 	if (error != 0) {
2306 		verify(errlist != NULL);
2307 		if (vd_errlist != NULL)
2308 			*vd_errlist = errlist;
2309 		else
2310 			fnvlist_free(errlist);
2311 	}
2312 
2313 	return (error);
2314 }
2315 
2316 static int
2317 xlate_init_err(int err)
2318 {
2319 	switch (err) {
2320 	case ENODEV:
2321 		return (EZFS_NODEVICE);
2322 	case EINVAL:
2323 	case EROFS:
2324 		return (EZFS_BADDEV);
2325 	case EBUSY:
2326 		return (EZFS_INITIALIZING);
2327 	case ESRCH:
2328 		return (EZFS_NO_INITIALIZE);
2329 	}
2330 	return (err);
2331 }
2332 
2333 /*
2334  * Begin, suspend, or cancel the initialization (initializing of all free
2335  * blocks) for the given vdevs in the given pool.
2336  */
2337 static int
2338 zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2339     nvlist_t *vds, boolean_t wait)
2340 {
2341 	int err;
2342 
2343 	nvlist_t *vdev_guids = fnvlist_alloc();
2344 	nvlist_t *guids_to_paths = fnvlist_alloc();
2345 	nvlist_t *vd_errlist = NULL;
2346 	nvlist_t *errlist;
2347 	nvpair_t *elem;
2348 
2349 	err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
2350 	    guids_to_paths, &vd_errlist);
2351 
2352 	if (err != 0) {
2353 		verify(vd_errlist != NULL);
2354 		goto list_errors;
2355 	}
2356 
2357 	err = lzc_initialize(zhp->zpool_name, cmd_type,
2358 	    vdev_guids, &errlist);
2359 
2360 	if (err != 0) {
2361 		if (errlist != NULL) {
2362 			vd_errlist = fnvlist_lookup_nvlist(errlist,
2363 			    ZPOOL_INITIALIZE_VDEVS);
2364 			goto list_errors;
2365 		}
2366 		(void) zpool_standard_error(zhp->zpool_hdl, err,
2367 		    dgettext(TEXT_DOMAIN, "operation failed"));
2368 		goto out;
2369 	}
2370 
2371 	if (wait) {
2372 		for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2373 		    elem = nvlist_next_nvpair(vdev_guids, elem)) {
2374 
2375 			uint64_t guid = fnvpair_value_uint64(elem);
2376 
2377 			err = lzc_wait_tag(zhp->zpool_name,
2378 			    ZPOOL_WAIT_INITIALIZE, guid, NULL);
2379 			if (err != 0) {
2380 				(void) zpool_standard_error_fmt(zhp->zpool_hdl,
2381 				    err, dgettext(TEXT_DOMAIN, "error "
2382 				    "waiting for '%s' to initialize"),
2383 				    nvpair_name(elem));
2384 
2385 				goto out;
2386 			}
2387 		}
2388 	}
2389 	goto out;
2390 
2391 list_errors:
2392 	for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
2393 	    elem = nvlist_next_nvpair(vd_errlist, elem)) {
2394 		int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
2395 		char *path;
2396 
2397 		if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2398 		    &path) != 0)
2399 			path = nvpair_name(elem);
2400 
2401 		(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2402 		    "cannot initialize '%s'", path);
2403 	}
2404 
2405 out:
2406 	fnvlist_free(vdev_guids);
2407 	fnvlist_free(guids_to_paths);
2408 
2409 	if (vd_errlist != NULL)
2410 		fnvlist_free(vd_errlist);
2411 
2412 	return (err == 0 ? 0 : -1);
2413 }
2414 
/*
 * Start, suspend, or cancel initialization of the named vdevs without
 * waiting for completion.  Returns 0 on success, -1 on failure.
 */
int
zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
    nvlist_t *vds)
{
	return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
}
2421 
/*
 * Same as zpool_initialize(), but block until the operation completes
 * on every requested vdev.  Returns 0 on success, -1 on failure.
 */
int
zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
    nvlist_t *vds)
{
	return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
}
2428 
2429 static int
2430 xlate_trim_err(int err)
2431 {
2432 	switch (err) {
2433 	case ENODEV:
2434 		return (EZFS_NODEVICE);
2435 	case EINVAL:
2436 	case EROFS:
2437 		return (EZFS_BADDEV);
2438 	case EBUSY:
2439 		return (EZFS_TRIMMING);
2440 	case ESRCH:
2441 		return (EZFS_NO_TRIM);
2442 	case EOPNOTSUPP:
2443 		return (EZFS_TRIM_NOTSUP);
2444 	}
2445 	return (err);
2446 }
2447 
2448 static int
2449 zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
2450 {
2451 	int err;
2452 	nvpair_t *elem;
2453 
2454 	for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2455 	    elem = nvlist_next_nvpair(vdev_guids, elem)) {
2456 
2457 		uint64_t guid = fnvpair_value_uint64(elem);
2458 
2459 		err = lzc_wait_tag(zhp->zpool_name,
2460 		    ZPOOL_WAIT_TRIM, guid, NULL);
2461 		if (err != 0) {
2462 			(void) zpool_standard_error_fmt(zhp->zpool_hdl,
2463 			    err, dgettext(TEXT_DOMAIN, "error "
2464 			    "waiting to trim '%s'"), nvpair_name(elem));
2465 
2466 			return (err);
2467 		}
2468 	}
2469 	return (0);
2470 }
2471 
2472 /*
2473  * Check errlist and report any errors, omitting ones which should be
2474  * suppressed. Returns B_TRUE if any errors were reported.
2475  */
2476 static boolean_t
2477 check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
2478     nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
2479 {
2480 	nvpair_t *elem;
2481 	boolean_t reported_errs = B_FALSE;
2482 	int num_vds = 0;
2483 	int num_suppressed_errs = 0;
2484 
2485 	for (elem = nvlist_next_nvpair(vds, NULL);
2486 	    elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {
2487 		num_vds++;
2488 	}
2489 
2490 	for (elem = nvlist_next_nvpair(errlist, NULL);
2491 	    elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
2492 		int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
2493 		char *path;
2494 
2495 		/*
2496 		 * If only the pool was specified, and it was not a secure
2497 		 * trim then suppress warnings for individual vdevs which
2498 		 * do not support trimming.
2499 		 */
2500 		if (vd_error == EZFS_TRIM_NOTSUP &&
2501 		    trim_flags->fullpool &&
2502 		    !trim_flags->secure) {
2503 			num_suppressed_errs++;
2504 			continue;
2505 		}
2506 
2507 		reported_errs = B_TRUE;
2508 		if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2509 		    &path) != 0)
2510 			path = nvpair_name(elem);
2511 
2512 		(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2513 		    "cannot trim '%s'", path);
2514 	}
2515 
2516 	if (num_suppressed_errs == num_vds) {
2517 		(void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
2518 		    "no devices in pool support trim operations"));
2519 		(void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
2520 		    dgettext(TEXT_DOMAIN, "cannot trim")));
2521 		reported_errs = B_TRUE;
2522 	}
2523 
2524 	return (reported_errs);
2525 }
2526 
2527 /*
2528  * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
2529  * the given vdevs in the given pool.
2530  */
2531 int
2532 zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
2533     trimflags_t *trim_flags)
2534 {
2535 	int err;
2536 	int retval = 0;
2537 
2538 	nvlist_t *vdev_guids = fnvlist_alloc();
2539 	nvlist_t *guids_to_paths = fnvlist_alloc();
2540 	nvlist_t *errlist = NULL;
2541 
2542 	err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
2543 	    guids_to_paths, &errlist);
2544 	if (err != 0) {
2545 		check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
2546 		retval = -1;
2547 		goto out;
2548 	}
2549 
2550 	err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
2551 	    trim_flags->secure, vdev_guids, &errlist);
2552 	if (err != 0) {
2553 		nvlist_t *vd_errlist;
2554 		if (errlist != NULL && nvlist_lookup_nvlist(errlist,
2555 		    ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
2556 			if (check_trim_errs(zhp, trim_flags, guids_to_paths,
2557 			    vds, vd_errlist)) {
2558 				retval = -1;
2559 				goto out;
2560 			}
2561 		} else {
2562 			char msg[1024];
2563 
2564 			(void) snprintf(msg, sizeof (msg),
2565 			    dgettext(TEXT_DOMAIN, "operation failed"));
2566 			zpool_standard_error(zhp->zpool_hdl, err, msg);
2567 			retval = -1;
2568 			goto out;
2569 		}
2570 	}
2571 
2572 
2573 	if (trim_flags->wait)
2574 		retval = zpool_trim_wait(zhp, vdev_guids);
2575 
2576 out:
2577 	if (errlist != NULL)
2578 		fnvlist_free(errlist);
2579 	fnvlist_free(vdev_guids);
2580 	fnvlist_free(guids_to_paths);
2581 	return (retval);
2582 }
2583 
2584 /*
2585  * Scan the pool.
2586  */
2587 int
2588 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
2589 {
2590 	zfs_cmd_t zc = {"\0"};
2591 	char msg[1024];
2592 	int err;
2593 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2594 
2595 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2596 	zc.zc_cookie = func;
2597 	zc.zc_flags = cmd;
2598 
2599 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
2600 		return (0);
2601 
2602 	err = errno;
2603 
2604 	/* ECANCELED on a scrub means we resumed a paused scrub */
2605 	if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
2606 	    cmd == POOL_SCRUB_NORMAL)
2607 		return (0);
2608 
2609 	if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
2610 		return (0);
2611 
2612 	if (func == POOL_SCAN_SCRUB) {
2613 		if (cmd == POOL_SCRUB_PAUSE) {
2614 			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2615 			    "cannot pause scrubbing %s"), zc.zc_name);
2616 		} else {
2617 			assert(cmd == POOL_SCRUB_NORMAL);
2618 			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2619 			    "cannot scrub %s"), zc.zc_name);
2620 		}
2621 	} else if (func == POOL_SCAN_RESILVER) {
2622 		assert(cmd == POOL_SCRUB_NORMAL);
2623 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2624 		    "cannot restart resilver on %s"), zc.zc_name);
2625 	} else if (func == POOL_SCAN_NONE) {
2626 		(void) snprintf(msg, sizeof (msg),
2627 		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
2628 		    zc.zc_name);
2629 	} else {
2630 		assert(!"unexpected result");
2631 	}
2632 
2633 	if (err == EBUSY) {
2634 		nvlist_t *nvroot;
2635 		pool_scan_stat_t *ps = NULL;
2636 		uint_t psc;
2637 
2638 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
2639 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2640 		(void) nvlist_lookup_uint64_array(nvroot,
2641 		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
2642 		if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
2643 		    ps->pss_state == DSS_SCANNING) {
2644 			if (cmd == POOL_SCRUB_PAUSE)
2645 				return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
2646 			else
2647 				return (zfs_error(hdl, EZFS_SCRUBBING, msg));
2648 		} else {
2649 			return (zfs_error(hdl, EZFS_RESILVERING, msg));
2650 		}
2651 	} else if (err == ENOENT) {
2652 		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
2653 	} else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
2654 		return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, msg));
2655 	} else {
2656 		return (zpool_standard_error(hdl, err, msg));
2657 	}
2658 }
2659 
2660 /*
2661  * Find a vdev that matches the search criteria specified. We use the
2662  * the nvpair name to determine how we should look for the device.
2663  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
2664  * spare; but FALSE if its an INUSE spare.
2665  */
2666 static nvlist_t *
2667 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
2668     boolean_t *l2cache, boolean_t *log)
2669 {
2670 	uint_t c, children;
2671 	nvlist_t **child;
2672 	nvlist_t *ret;
2673 	uint64_t is_log;
2674 	char *srchkey;
2675 	nvpair_t *pair = nvlist_next_nvpair(search, NULL);
2676 
2677 	/* Nothing to look for */
2678 	if (search == NULL || pair == NULL)
2679 		return (NULL);
2680 
2681 	/* Obtain the key we will use to search */
2682 	srchkey = nvpair_name(pair);
2683 
2684 	switch (nvpair_type(pair)) {
2685 	case DATA_TYPE_UINT64:
2686 		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
2687 			uint64_t srchval, theguid;
2688 
2689 			verify(nvpair_value_uint64(pair, &srchval) == 0);
2690 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2691 			    &theguid) == 0);
2692 			if (theguid == srchval)
2693 				return (nv);
2694 		}
2695 		break;
2696 
2697 	case DATA_TYPE_STRING: {
2698 		char *srchval, *val;
2699 
2700 		verify(nvpair_value_string(pair, &srchval) == 0);
2701 		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
2702 			break;
2703 
2704 		/*
2705 		 * Search for the requested value. Special cases:
2706 		 *
2707 		 * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
2708 		 *   "-part1", or "p1".  The suffix is hidden from the user,
2709 		 *   but included in the string, so this matches around it.
2710 		 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
2711 		 *   is used to check all possible expanded paths.
2712 		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2713 		 *
2714 		 * Otherwise, all other searches are simple string compares.
2715 		 */
2716 		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2717 			uint64_t wholedisk = 0;
2718 
2719 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2720 			    &wholedisk);
2721 			if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2722 				return (nv);
2723 
2724 		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2725 			char *type, *idx, *end, *p;
2726 			uint64_t id, vdev_id;
2727 
2728 			/*
2729 			 * Determine our vdev type, keeping in mind
2730 			 * that the srchval is composed of a type and
2731 			 * vdev id pair (i.e. mirror-4).
2732 			 */
2733 			if ((type = strdup(srchval)) == NULL)
2734 				return (NULL);
2735 
2736 			if ((p = strrchr(type, '-')) == NULL) {
2737 				free(type);
2738 				break;
2739 			}
2740 			idx = p + 1;
2741 			*p = '\0';
2742 
2743 			/*
2744 			 * If the types don't match then keep looking.
2745 			 */
2746 			if (strncmp(val, type, strlen(val)) != 0) {
2747 				free(type);
2748 				break;
2749 			}
2750 
2751 			verify(zpool_vdev_is_interior(type));
2752 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2753 			    &id) == 0);
2754 
2755 			errno = 0;
2756 			vdev_id = strtoull(idx, &end, 10);
2757 
2758 			/*
2759 			 * If we are looking for a raidz and a parity is
2760 			 * specified, make sure it matches.
2761 			 */
2762 			int rzlen = strlen(VDEV_TYPE_RAIDZ);
2763 			assert(rzlen == strlen(VDEV_TYPE_DRAID));
2764 			int typlen = strlen(type);
2765 			if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 ||
2766 			    strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) &&
2767 			    typlen != rzlen) {
2768 				uint64_t vdev_parity;
2769 				int parity = *(type + rzlen) - '0';
2770 
2771 				if (parity <= 0 || parity > 3 ||
2772 				    (typlen - rzlen) != 1) {
2773 					/*
2774 					 * Nonsense parity specified, can
2775 					 * never match
2776 					 */
2777 					free(type);
2778 					return (NULL);
2779 				}
2780 				verify(nvlist_lookup_uint64(nv,
2781 				    ZPOOL_CONFIG_NPARITY, &vdev_parity) == 0);
2782 				if ((int)vdev_parity != parity) {
2783 					free(type);
2784 					break;
2785 				}
2786 			}
2787 
2788 			free(type);
2789 			if (errno != 0)
2790 				return (NULL);
2791 
2792 			/*
2793 			 * Now verify that we have the correct vdev id.
2794 			 */
2795 			if (vdev_id == id)
2796 				return (nv);
2797 		}
2798 
2799 		/*
2800 		 * Common case
2801 		 */
2802 		if (strcmp(srchval, val) == 0)
2803 			return (nv);
2804 		break;
2805 	}
2806 
2807 	default:
2808 		break;
2809 	}
2810 
2811 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2812 	    &child, &children) != 0)
2813 		return (NULL);
2814 
2815 	for (c = 0; c < children; c++) {
2816 		if ((ret = vdev_to_nvlist_iter(child[c], search,
2817 		    avail_spare, l2cache, NULL)) != NULL) {
2818 			/*
2819 			 * The 'is_log' value is only set for the toplevel
2820 			 * vdev, not the leaf vdevs.  So we always lookup the
2821 			 * log device from the root of the vdev tree (where
2822 			 * 'log' is non-NULL).
2823 			 */
2824 			if (log != NULL &&
2825 			    nvlist_lookup_uint64(child[c],
2826 			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2827 			    is_log) {
2828 				*log = B_TRUE;
2829 			}
2830 			return (ret);
2831 		}
2832 	}
2833 
2834 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2835 	    &child, &children) == 0) {
2836 		for (c = 0; c < children; c++) {
2837 			if ((ret = vdev_to_nvlist_iter(child[c], search,
2838 			    avail_spare, l2cache, NULL)) != NULL) {
2839 				*avail_spare = B_TRUE;
2840 				return (ret);
2841 			}
2842 		}
2843 	}
2844 
2845 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2846 	    &child, &children) == 0) {
2847 		for (c = 0; c < children; c++) {
2848 			if ((ret = vdev_to_nvlist_iter(child[c], search,
2849 			    avail_spare, l2cache, NULL)) != NULL) {
2850 				*l2cache = B_TRUE;
2851 				return (ret);
2852 			}
2853 		}
2854 	}
2855 
2856 	return (NULL);
2857 }
2858 
2859 /*
2860  * Given a physical path or guid, find the associated vdev.
2861  */
2862 nvlist_t *
2863 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2864     boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2865 {
2866 	nvlist_t *search, *nvroot, *ret;
2867 	uint64_t guid;
2868 	char *end;
2869 
2870 	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2871 
2872 	guid = strtoull(ppath, &end, 0);
2873 	if (guid != 0 && *end == '\0') {
2874 		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2875 	} else {
2876 		verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH,
2877 		    ppath) == 0);
2878 	}
2879 
2880 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2881 	    &nvroot) == 0);
2882 
2883 	*avail_spare = B_FALSE;
2884 	*l2cache = B_FALSE;
2885 	if (log != NULL)
2886 		*log = B_FALSE;
2887 	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2888 	nvlist_free(search);
2889 
2890 	return (ret);
2891 }
2892 
2893 /*
2894  * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
2895  */
2896 static boolean_t
2897 zpool_vdev_is_interior(const char *name)
2898 {
2899 	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2900 	    strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
2901 	    strncmp(name,
2902 	    VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
2903 	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2904 		return (B_TRUE);
2905 
2906 	if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&
2907 	    !zpool_is_draid_spare(name))
2908 		return (B_TRUE);
2909 
2910 	return (B_FALSE);
2911 }
2912 
2913 nvlist_t *
2914 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2915     boolean_t *l2cache, boolean_t *log)
2916 {
2917 	char *end;
2918 	nvlist_t *nvroot, *search, *ret;
2919 	uint64_t guid;
2920 
2921 	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2922 
2923 	guid = strtoull(path, &end, 0);
2924 	if (guid != 0 && *end == '\0') {
2925 		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2926 	} else if (zpool_vdev_is_interior(path)) {
2927 		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2928 	} else {
2929 		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2930 	}
2931 
2932 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2933 	    &nvroot) == 0);
2934 
2935 	*avail_spare = B_FALSE;
2936 	*l2cache = B_FALSE;
2937 	if (log != NULL)
2938 		*log = B_FALSE;
2939 	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2940 	nvlist_free(search);
2941 
2942 	return (ret);
2943 }
2944 
2945 static int
2946 vdev_is_online(nvlist_t *nv)
2947 {
2948 	uint64_t ival;
2949 
2950 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2951 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2952 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2953 		return (0);
2954 
2955 	return (1);
2956 }
2957 
2958 /*
2959  * Helper function for zpool_get_physpaths().
2960  */
2961 static int
2962 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2963     size_t *bytes_written)
2964 {
2965 	size_t bytes_left, pos, rsz;
2966 	char *tmppath;
2967 	const char *format;
2968 
2969 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2970 	    &tmppath) != 0)
2971 		return (EZFS_NODEVICE);
2972 
2973 	pos = *bytes_written;
2974 	bytes_left = physpath_size - pos;
2975 	format = (pos == 0) ? "%s" : " %s";
2976 
2977 	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2978 	*bytes_written += rsz;
2979 
2980 	if (rsz >= bytes_left) {
2981 		/* if physpath was not copied properly, clear it */
2982 		if (bytes_left != 0) {
2983 			physpath[pos] = 0;
2984 		}
2985 		return (EZFS_NOSPC);
2986 	}
2987 	return (0);
2988 }
2989 
/*
 * Recursively collect physical paths for all online disks under 'nv',
 * appending them to 'physpath' via vdev_get_one_physpath().
 *
 * NOTE(review): a successful leaf append still falls through to the
 * EZFS_POOL_INVALARG return at the bottom; callers appear to rely on
 * *rsz rather than the return value (except EZFS_NOSPC) -- confirm
 * before changing.
 */
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		/* Only online disks contribute a bootable physical path. */
		if (vdev_is_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    /* Assignment is intentional: children of a spare group are
	     * recursed into with is_spare set. */
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		/* Only buffer exhaustion aborts the walk early. */
		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}
3041 
3042 /*
3043  * Get phys_path for a root pool config.
3044  * Return 0 on success; non-zero on failure.
3045  */
3046 static int
3047 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
3048 {
3049 	size_t rsz;
3050 	nvlist_t *vdev_root;
3051 	nvlist_t **child;
3052 	uint_t count;
3053 	char *type;
3054 
3055 	rsz = 0;
3056 
3057 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3058 	    &vdev_root) != 0)
3059 		return (EZFS_INVALCONFIG);
3060 
3061 	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
3062 	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
3063 	    &child, &count) != 0)
3064 		return (EZFS_INVALCONFIG);
3065 
3066 	/*
3067 	 * root pool can only have a single top-level vdev.
3068 	 */
3069 	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
3070 		return (EZFS_POOL_INVALARG);
3071 
3072 	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
3073 	    B_FALSE);
3074 
3075 	/* No online devices */
3076 	if (rsz == 0)
3077 		return (EZFS_NODEVICE);
3078 
3079 	return (0);
3080 }
3081 
3082 /*
3083  * Get phys_path for a root pool
3084  * Return 0 on success; non-zero on failure.
3085  */
3086 int
3087 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
3088 {
3089 	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
3090 	    phypath_size));
3091 }
3092 
3093 /*
3094  * Convert a vdev path to a GUID.  Returns GUID or 0 on error.
3095  *
3096  * If is_spare, is_l2cache, or is_log is non-NULL, then store within it
3097  * if the VDEV is a spare, l2cache, or log device.  If they're NULL then
3098  * ignore them.
3099  */
3100 static uint64_t
3101 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
3102     boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
3103 {
3104 	uint64_t guid;
3105 	boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
3106 	nvlist_t *tgt;
3107 
3108 	if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
3109 	    &log)) == NULL)
3110 		return (0);
3111 
3112 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
3113 	if (is_spare != NULL)
3114 		*is_spare = spare;
3115 	if (is_l2cache != NULL)
3116 		*is_l2cache = l2cache;
3117 	if (is_log != NULL)
3118 		*is_log = log;
3119 
3120 	return (guid);
3121 }
3122 
/* Convert a vdev path to a GUID.  Returns GUID or 0 on error. */
uint64_t
zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
{
	/* Callers that don't need the spare/l2cache/log flags use this. */
	return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
}
3129 
3130 /*
3131  * Bring the specified vdev online.   The 'flags' parameter is a set of the
3132  * ZFS_ONLINE_* flags.
3133  */
3134 int
3135 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
3136     vdev_state_t *newstate)
3137 {
3138 	zfs_cmd_t zc = {"\0"};
3139 	char msg[1024];
3140 	char *pathname;
3141 	nvlist_t *tgt;
3142 	boolean_t avail_spare, l2cache, islog;
3143 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3144 	int error;
3145 
3146 	if (flags & ZFS_ONLINE_EXPAND) {
3147 		(void) snprintf(msg, sizeof (msg),
3148 		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
3149 	} else {
3150 		(void) snprintf(msg, sizeof (msg),
3151 		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
3152 	}
3153 
3154 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3155 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3156 	    &islog)) == NULL)
3157 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
3158 
3159 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3160 
3161 	if (avail_spare)
3162 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
3163 
3164 	if ((flags & ZFS_ONLINE_EXPAND ||
3165 	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
3166 	    nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
3167 		uint64_t wholedisk = 0;
3168 
3169 		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
3170 		    &wholedisk);
3171 
3172 		/*
3173 		 * XXX - L2ARC 1.0 devices can't support expansion.
3174 		 */
3175 		if (l2cache) {
3176 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3177 			    "cannot expand cache devices"));
3178 			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
3179 		}
3180 
3181 		if (wholedisk) {
3182 			const char *fullpath = path;
3183 			char buf[MAXPATHLEN];
3184 
3185 			if (path[0] != '/') {
3186 				error = zfs_resolve_shortname(path, buf,
3187 				    sizeof (buf));
3188 				if (error != 0)
3189 					return (zfs_error(hdl, EZFS_NODEVICE,
3190 					    msg));
3191 
3192 				fullpath = buf;
3193 			}
3194 
3195 			error = zpool_relabel_disk(hdl, fullpath, msg);
3196 			if (error != 0)
3197 				return (error);
3198 		}
3199 	}
3200 
3201 	zc.zc_cookie = VDEV_STATE_ONLINE;
3202 	zc.zc_obj = flags;
3203 
3204 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
3205 		if (errno == EINVAL) {
3206 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
3207 			    "from this pool into a new one.  Use '%s' "
3208 			    "instead"), "zpool detach");
3209 			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
3210 		}
3211 		return (zpool_standard_error(hdl, errno, msg));
3212 	}
3213 
3214 	*newstate = zc.zc_cookie;
3215 	return (0);
3216 }
3217 
3218 /*
3219  * Take the specified vdev offline
3220  */
3221 int
3222 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
3223 {
3224 	zfs_cmd_t zc = {"\0"};
3225 	char msg[1024];
3226 	nvlist_t *tgt;
3227 	boolean_t avail_spare, l2cache;
3228 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3229 
3230 	(void) snprintf(msg, sizeof (msg),
3231 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
3232 
3233 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3234 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3235 	    NULL)) == NULL)
3236 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
3237 
3238 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3239 
3240 	if (avail_spare)
3241 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
3242 
3243 	zc.zc_cookie = VDEV_STATE_OFFLINE;
3244 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
3245 
3246 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3247 		return (0);
3248 
3249 	switch (errno) {
3250 	case EBUSY:
3251 
3252 		/*
3253 		 * There are no other replicas of this device.
3254 		 */
3255 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
3256 
3257 	case EEXIST:
3258 		/*
3259 		 * The log device has unplayed logs
3260 		 */
3261 		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
3262 
3263 	default:
3264 		return (zpool_standard_error(hdl, errno, msg));
3265 	}
3266 }
3267 
3268 /*
3269  * Mark the given vdev faulted.
3270  */
3271 int
3272 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3273 {
3274 	zfs_cmd_t zc = {"\0"};
3275 	char msg[1024];
3276 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3277 
3278 	(void) snprintf(msg, sizeof (msg),
3279 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
3280 
3281 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3282 	zc.zc_guid = guid;
3283 	zc.zc_cookie = VDEV_STATE_FAULTED;
3284 	zc.zc_obj = aux;
3285 
3286 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3287 		return (0);
3288 
3289 	switch (errno) {
3290 	case EBUSY:
3291 
3292 		/*
3293 		 * There are no other replicas of this device.
3294 		 */
3295 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
3296 
3297 	default:
3298 		return (zpool_standard_error(hdl, errno, msg));
3299 	}
3300 
3301 }
3302 
3303 /*
3304  * Mark the given vdev degraded.
3305  */
3306 int
3307 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3308 {
3309 	zfs_cmd_t zc = {"\0"};
3310 	char msg[1024];
3311 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3312 
3313 	(void) snprintf(msg, sizeof (msg),
3314 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
3315 
3316 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3317 	zc.zc_guid = guid;
3318 	zc.zc_cookie = VDEV_STATE_DEGRADED;
3319 	zc.zc_obj = aux;
3320 
3321 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3322 		return (0);
3323 
3324 	return (zpool_standard_error(hdl, errno, msg));
3325 }
3326 
3327 /*
3328  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
3329  * a hot spare.
3330  */
3331 static boolean_t
3332 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
3333 {
3334 	nvlist_t **child;
3335 	uint_t c, children;
3336 	char *type;
3337 
3338 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
3339 	    &children) == 0) {
3340 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
3341 		    &type) == 0);
3342 
3343 		if ((strcmp(type, VDEV_TYPE_SPARE) == 0 ||
3344 		    strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&
3345 		    children == 2 && child[which] == tgt)
3346 			return (B_TRUE);
3347 
3348 		for (c = 0; c < children; c++)
3349 			if (is_replacing_spare(child[c], tgt, which))
3350 				return (B_TRUE);
3351 	}
3352 
3353 	return (B_FALSE);
3354 }
3355 
3356 /*
3357  * Attach new_disk (fully described by nvroot) to old_disk.
3358  * If 'replacing' is specified, the new disk will replace the old one.
3359  */
3360 int
3361 zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
3362     const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)
3363 {
3364 	zfs_cmd_t zc = {"\0"};
3365 	char msg[1024];
3366 	int ret;
3367 	nvlist_t *tgt;
3368 	boolean_t avail_spare, l2cache, islog;
3369 	uint64_t val;
3370 	char *newname;
3371 	nvlist_t **child;
3372 	uint_t children;
3373 	nvlist_t *config_root;
3374 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3375 
3376 	if (replacing)
3377 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3378 		    "cannot replace %s with %s"), old_disk, new_disk);
3379 	else
3380 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3381 		    "cannot attach %s to %s"), new_disk, old_disk);
3382 
3383 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3384 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
3385 	    &islog)) == NULL)
3386 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
3387 
3388 	if (avail_spare)
3389 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
3390 
3391 	if (l2cache)
3392 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
3393 
3394 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3395 	zc.zc_cookie = replacing;
3396 	zc.zc_simple = rebuild;
3397 
3398 	if (rebuild &&
3399 	    zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {
3400 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3401 		    "the loaded zfs module doesn't support device rebuilds"));
3402 		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
3403 	}
3404 
3405 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3406 	    &child, &children) != 0 || children != 1) {
3407 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3408 		    "new device must be a single disk"));
3409 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
3410 	}
3411 
3412 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
3413 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
3414 
3415 	if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
3416 		return (-1);
3417 
3418 	/*
3419 	 * If the target is a hot spare that has been swapped in, we can only
3420 	 * replace it with another hot spare.
3421 	 */
3422 	if (replacing &&
3423 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
3424 	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
3425 	    NULL) == NULL || !avail_spare) &&
3426 	    is_replacing_spare(config_root, tgt, 1)) {
3427 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3428 		    "can only be replaced by another hot spare"));
3429 		free(newname);
3430 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
3431 	}
3432 
3433 	free(newname);
3434 
3435 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
3436 		return (-1);
3437 
3438 	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
3439 
3440 	zcmd_free_nvlists(&zc);
3441 
3442 	if (ret == 0)
3443 		return (0);
3444 
3445 	switch (errno) {
3446 	case ENOTSUP:
3447 		/*
3448 		 * Can't attach to or replace this type of vdev.
3449 		 */
3450 		if (replacing) {
3451 			uint64_t version = zpool_get_prop_int(zhp,
3452 			    ZPOOL_PROP_VERSION, NULL);
3453 
3454 			if (islog) {
3455 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3456 				    "cannot replace a log with a spare"));
3457 			} else if (rebuild) {
3458 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3459 				    "only mirror and dRAID vdevs support "
3460 				    "sequential reconstruction"));
3461 			} else if (zpool_is_draid_spare(new_disk)) {
3462 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3463 				    "dRAID spares can only replace child "
3464 				    "devices in their parent's dRAID vdev"));
3465 			} else if (version >= SPA_VERSION_MULTI_REPLACE) {
3466 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3467 				    "already in replacing/spare config; wait "
3468 				    "for completion or use 'zpool detach'"));
3469 			} else {
3470 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3471 				    "cannot replace a replacing device"));
3472 			}
3473 		} else {
3474 			char status[64] = {0};
3475 			zpool_prop_get_feature(zhp,
3476 			    "feature@device_rebuild", status, 63);
3477 			if (rebuild &&
3478 			    strncmp(status, ZFS_FEATURE_DISABLED, 64) == 0) {
3479 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3480 				    "device_rebuild feature must be enabled "
3481 				    "in order to use sequential "
3482 				    "reconstruction"));
3483 			} else {
3484 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3485 				    "can only attach to mirrors and top-level "
3486 				    "disks"));
3487 			}
3488 		}
3489 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
3490 		break;
3491 
3492 	case EINVAL:
3493 		/*
3494 		 * The new device must be a single disk.
3495 		 */
3496 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3497 		    "new device must be a single disk"));
3498 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
3499 		break;
3500 
3501 	case EBUSY:
3502 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
3503 		    "or device removal is in progress"),
3504 		    new_disk);
3505 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
3506 		break;
3507 
3508 	case EOVERFLOW:
3509 		/*
3510 		 * The new device is too small.
3511 		 */
3512 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3513 		    "device is too small"));
3514 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
3515 		break;
3516 
3517 	case EDOM:
3518 		/*
3519 		 * The new device has a different optimal sector size.
3520 		 */
3521 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3522 		    "new device has a different optimal sector size; use the "
3523 		    "option '-o ashift=N' to override the optimal size"));
3524 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
3525 		break;
3526 
3527 	case ENAMETOOLONG:
3528 		/*
3529 		 * The resulting top-level vdev spec won't fit in the label.
3530 		 */
3531 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
3532 		break;
3533 
3534 	default:
3535 		(void) zpool_standard_error(hdl, errno, msg);
3536 	}
3537 
3538 	return (-1);
3539 }
3540 
3541 /*
3542  * Detach the specified device.
3543  */
3544 int
3545 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
3546 {
3547 	zfs_cmd_t zc = {"\0"};
3548 	char msg[1024];
3549 	nvlist_t *tgt;
3550 	boolean_t avail_spare, l2cache;
3551 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3552 
3553 	(void) snprintf(msg, sizeof (msg),
3554 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
3555 
3556 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3557 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3558 	    NULL)) == NULL)
3559 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
3560 
3561 	if (avail_spare)
3562 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
3563 
3564 	if (l2cache)
3565 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
3566 
3567 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3568 
3569 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
3570 		return (0);
3571 
3572 	switch (errno) {
3573 
3574 	case ENOTSUP:
3575 		/*
3576 		 * Can't detach from this type of vdev.
3577 		 */
3578 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
3579 		    "applicable to mirror and replacing vdevs"));
3580 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
3581 		break;
3582 
3583 	case EBUSY:
3584 		/*
3585 		 * There are no other replicas of this device.
3586 		 */
3587 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
3588 		break;
3589 
3590 	default:
3591 		(void) zpool_standard_error(hdl, errno, msg);
3592 	}
3593 
3594 	return (-1);
3595 }
3596 
3597 /*
3598  * Find a mirror vdev in the source nvlist.
3599  *
3600  * The mchild array contains a list of disks in one of the top-level mirrors
3601  * of the source pool.  The schild array contains a list of disks that the
3602  * user specified on the command line.  We loop over the mchild array to
3603  * see if any entry in the schild array matches.
3604  *
3605  * If a disk in the mchild array is found in the schild array, we return
3606  * the index of that entry.  Otherwise we return -1.
3607  */
3608 static int
3609 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
3610     nvlist_t **schild, uint_t schildren)
3611 {
3612 	uint_t mc;
3613 
3614 	for (mc = 0; mc < mchildren; mc++) {
3615 		uint_t sc;
3616 		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3617 		    mchild[mc], 0);
3618 
3619 		for (sc = 0; sc < schildren; sc++) {
3620 			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3621 			    schild[sc], 0);
3622 			boolean_t result = (strcmp(mpath, spath) == 0);
3623 
3624 			free(spath);
3625 			if (result) {
3626 				free(mpath);
3627 				return (mc);
3628 			}
3629 		}
3630 
3631 		free(mpath);
3632 	}
3633 
3634 	return (-1);
3635 }
3636 
3637 /*
3638  * Split a mirror pool.  If newroot points to null, then a new nvlist
3639  * is generated and it is the responsibility of the caller to free it.
3640  */
3641 int
3642 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
3643     nvlist_t *props, splitflags_t flags)
3644 {
3645 	zfs_cmd_t zc = {"\0"};
3646 	char msg[1024], *bias;
3647 	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
3648 	nvlist_t **varray = NULL, *zc_props = NULL;
3649 	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
3650 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3651 	uint64_t vers, readonly = B_FALSE;
3652 	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
3653 	int retval = 0;
3654 
3655 	(void) snprintf(msg, sizeof (msg),
3656 	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
3657 
3658 	if (!zpool_name_valid(hdl, B_FALSE, newname))
3659 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
3660 
3661 	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
3662 		(void) fprintf(stderr, gettext("Internal error: unable to "
3663 		    "retrieve pool configuration\n"));
3664 		return (-1);
3665 	}
3666 
3667 	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
3668 	    == 0);
3669 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
3670 
3671 	if (props) {
3672 		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
3673 		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
3674 		    props, vers, flags, msg)) == NULL)
3675 			return (-1);
3676 		(void) nvlist_lookup_uint64(zc_props,
3677 		    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
3678 		if (readonly) {
3679 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3680 			    "property %s can only be set at import time"),
3681 			    zpool_prop_to_name(ZPOOL_PROP_READONLY));
3682 			return (-1);
3683 		}
3684 	}
3685 
3686 	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
3687 	    &children) != 0) {
3688 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3689 		    "Source pool is missing vdev tree"));
3690 		nvlist_free(zc_props);
3691 		return (-1);
3692 	}
3693 
3694 	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
3695 	vcount = 0;
3696 
3697 	if (*newroot == NULL ||
3698 	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
3699 	    &newchild, &newchildren) != 0)
3700 		newchildren = 0;
3701 
3702 	for (c = 0; c < children; c++) {
3703 		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
3704 		boolean_t is_special = B_FALSE, is_dedup = B_FALSE;
3705 		char *type;
3706 		nvlist_t **mchild, *vdev;
3707 		uint_t mchildren;
3708 		int entry;
3709 
3710 		/*
3711 		 * Unlike cache & spares, slogs are stored in the
3712 		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
3713 		 */
3714 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3715 		    &is_log);
3716 		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3717 		    &is_hole);
3718 		if (is_log || is_hole) {
3719 			/*
3720 			 * Create a hole vdev and put it in the config.
3721 			 */
3722 			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
3723 				goto out;
3724 			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
3725 			    VDEV_TYPE_HOLE) != 0)
3726 				goto out;
3727 			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
3728 			    1) != 0)
3729 				goto out;
3730 			if (lastlog == 0)
3731 				lastlog = vcount;
3732 			varray[vcount++] = vdev;
3733 			continue;
3734 		}
3735 		lastlog = 0;
3736 		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
3737 		    == 0);
3738 
3739 		if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
3740 			vdev = child[c];
3741 			if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3742 				goto out;
3743 			continue;
3744 		} else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
3745 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3746 			    "Source pool must be composed only of mirrors\n"));
3747 			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3748 			goto out;
3749 		}
3750 
3751 		if (nvlist_lookup_string(child[c],
3752 		    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {
3753 			if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
3754 				is_special = B_TRUE;
3755 			else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
3756 				is_dedup = B_TRUE;
3757 		}
3758 		verify(nvlist_lookup_nvlist_array(child[c],
3759 		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
3760 
3761 		/* find or add an entry for this top-level vdev */
3762 		if (newchildren > 0 &&
3763 		    (entry = find_vdev_entry(zhp, mchild, mchildren,
3764 		    newchild, newchildren)) >= 0) {
3765 			/* We found a disk that the user specified. */
3766 			vdev = mchild[entry];
3767 			++found;
3768 		} else {
3769 			/* User didn't specify a disk for this vdev. */
3770 			vdev = mchild[mchildren - 1];
3771 		}
3772 
3773 		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3774 			goto out;
3775 
3776 		if (flags.dryrun != 0) {
3777 			if (is_dedup == B_TRUE) {
3778 				if (nvlist_add_string(varray[vcount - 1],
3779 				    ZPOOL_CONFIG_ALLOCATION_BIAS,
3780 				    VDEV_ALLOC_BIAS_DEDUP) != 0)
3781 					goto out;
3782 			} else if (is_special == B_TRUE) {
3783 				if (nvlist_add_string(varray[vcount - 1],
3784 				    ZPOOL_CONFIG_ALLOCATION_BIAS,
3785 				    VDEV_ALLOC_BIAS_SPECIAL) != 0)
3786 					goto out;
3787 			}
3788 		}
3789 	}
3790 
3791 	/* did we find every disk the user specified? */
3792 	if (found != newchildren) {
3793 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3794 		    "include at most one disk from each mirror"));
3795 		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3796 		goto out;
3797 	}
3798 
3799 	/* Prepare the nvlist for populating. */
3800 	if (*newroot == NULL) {
3801 		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3802 			goto out;
3803 		freelist = B_TRUE;
3804 		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3805 		    VDEV_TYPE_ROOT) != 0)
3806 			goto out;
3807 	} else {
3808 		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3809 	}
3810 
3811 	/* Add all the children we found */
3812 	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
3813 	    (const nvlist_t **)varray, lastlog == 0 ? vcount : lastlog) != 0)
3814 		goto out;
3815 
3816 	/*
3817 	 * If we're just doing a dry run, exit now with success.
3818 	 */
3819 	if (flags.dryrun) {
3820 		memory_err = B_FALSE;
3821 		freelist = B_FALSE;
3822 		goto out;
3823 	}
3824 
3825 	/* now build up the config list & call the ioctl */
3826 	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3827 		goto out;
3828 
3829 	if (nvlist_add_nvlist(newconfig,
3830 	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3831 	    nvlist_add_string(newconfig,
3832 	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3833 	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3834 		goto out;
3835 
3836 	/*
3837 	 * The new pool is automatically part of the namespace unless we
3838 	 * explicitly export it.
3839 	 */
3840 	if (!flags.import)
3841 		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3842 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3843 	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3844 	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3845 		goto out;
3846 	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3847 		goto out;
3848 
3849 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3850 		retval = zpool_standard_error(hdl, errno, msg);
3851 		goto out;
3852 	}
3853 
3854 	freelist = B_FALSE;
3855 	memory_err = B_FALSE;
3856 
3857 out:
3858 	if (varray != NULL) {
3859 		int v;
3860 
3861 		for (v = 0; v < vcount; v++)
3862 			nvlist_free(varray[v]);
3863 		free(varray);
3864 	}
3865 	zcmd_free_nvlists(&zc);
3866 	nvlist_free(zc_props);
3867 	nvlist_free(newconfig);
3868 	if (freelist) {
3869 		nvlist_free(*newroot);
3870 		*newroot = NULL;
3871 	}
3872 
3873 	if (retval != 0)
3874 		return (retval);
3875 
3876 	if (memory_err)
3877 		return (no_memory(hdl));
3878 
3879 	return (0);
3880 }
3881 
3882 /*
3883  * Remove the given device.
3884  */
3885 int
3886 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3887 {
3888 	zfs_cmd_t zc = {"\0"};
3889 	char msg[1024];
3890 	nvlist_t *tgt;
3891 	boolean_t avail_spare, l2cache, islog;
3892 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3893 	uint64_t version;
3894 
3895 	(void) snprintf(msg, sizeof (msg),
3896 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3897 
3898 	if (zpool_is_draid_spare(path)) {
3899 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3900 		    "dRAID spares cannot be removed"));
3901 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
3902 	}
3903 
3904 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3905 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3906 	    &islog)) == NULL)
3907 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
3908 
3909 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3910 	if (islog && version < SPA_VERSION_HOLES) {
3911 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3912 		    "pool must be upgraded to support log removal"));
3913 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
3914 	}
3915 
3916 	zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3917 
3918 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3919 		return (0);
3920 
3921 	switch (errno) {
3922 
3923 	case EINVAL:
3924 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3925 		    "invalid config; all top-level vdevs must "
3926 		    "have the same sector size and not be raidz."));
3927 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
3928 		break;
3929 
3930 	case EBUSY:
3931 		if (islog) {
3932 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3933 			    "Mount encrypted datasets to replay logs."));
3934 		} else {
3935 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3936 			    "Pool busy; removal may already be in progress"));
3937 		}
3938 		(void) zfs_error(hdl, EZFS_BUSY, msg);
3939 		break;
3940 
3941 	case EACCES:
3942 		if (islog) {
3943 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3944 			    "Mount encrypted datasets to replay logs."));
3945 			(void) zfs_error(hdl, EZFS_BUSY, msg);
3946 		} else {
3947 			(void) zpool_standard_error(hdl, errno, msg);
3948 		}
3949 		break;
3950 
3951 	default:
3952 		(void) zpool_standard_error(hdl, errno, msg);
3953 	}
3954 	return (-1);
3955 }
3956 
3957 int
3958 zpool_vdev_remove_cancel(zpool_handle_t *zhp)
3959 {
3960 	zfs_cmd_t zc;
3961 	char msg[1024];
3962 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3963 
3964 	(void) snprintf(msg, sizeof (msg),
3965 	    dgettext(TEXT_DOMAIN, "cannot cancel removal"));
3966 
3967 	bzero(&zc, sizeof (zc));
3968 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3969 	zc.zc_cookie = 1;
3970 
3971 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3972 		return (0);
3973 
3974 	return (zpool_standard_error(hdl, errno, msg));
3975 }
3976 
3977 int
3978 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
3979     uint64_t *sizep)
3980 {
3981 	char msg[1024];
3982 	nvlist_t *tgt;
3983 	boolean_t avail_spare, l2cache, islog;
3984 	libzfs_handle_t *hdl = zhp->zpool_hdl;
3985 
3986 	(void) snprintf(msg, sizeof (msg),
3987 	    dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
3988 	    path);
3989 
3990 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3991 	    &islog)) == NULL)
3992 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
3993 
3994 	if (avail_spare || l2cache || islog) {
3995 		*sizep = 0;
3996 		return (0);
3997 	}
3998 
3999 	if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
4000 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4001 		    "indirect size not available"));
4002 		return (zfs_error(hdl, EINVAL, msg));
4003 	}
4004 	return (0);
4005 }
4006 
4007 /*
4008  * Clear the errors for the pool, or the particular device if specified.
4009  */
4010 int
4011 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
4012 {
4013 	zfs_cmd_t zc = {"\0"};
4014 	char msg[1024];
4015 	nvlist_t *tgt;
4016 	zpool_load_policy_t policy;
4017 	boolean_t avail_spare, l2cache;
4018 	libzfs_handle_t *hdl = zhp->zpool_hdl;
4019 	nvlist_t *nvi = NULL;
4020 	int error;
4021 
4022 	if (path)
4023 		(void) snprintf(msg, sizeof (msg),
4024 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
4025 		    path);
4026 	else
4027 		(void) snprintf(msg, sizeof (msg),
4028 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
4029 		    zhp->zpool_name);
4030 
4031 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4032 	if (path) {
4033 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
4034 		    &l2cache, NULL)) == NULL)
4035 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
4036 
4037 		/*
4038 		 * Don't allow error clearing for hot spares.  Do allow
4039 		 * error clearing for l2cache devices.
4040 		 */
4041 		if (avail_spare)
4042 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
4043 
4044 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
4045 		    &zc.zc_guid) == 0);
4046 	}
4047 
4048 	zpool_get_load_policy(rewindnvl, &policy);
4049 	zc.zc_cookie = policy.zlp_rewind;
4050 
4051 	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
4052 		return (-1);
4053 
4054 	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
4055 		return (-1);
4056 
4057 	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
4058 	    errno == ENOMEM) {
4059 		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
4060 			zcmd_free_nvlists(&zc);
4061 			return (-1);
4062 		}
4063 	}
4064 
4065 	if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
4066 	    errno != EPERM && errno != EACCES)) {
4067 		if (policy.zlp_rewind &
4068 		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
4069 			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
4070 			zpool_rewind_exclaim(hdl, zc.zc_name,
4071 			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
4072 			    nvi);
4073 			nvlist_free(nvi);
4074 		}
4075 		zcmd_free_nvlists(&zc);
4076 		return (0);
4077 	}
4078 
4079 	zcmd_free_nvlists(&zc);
4080 	return (zpool_standard_error(hdl, errno, msg));
4081 }
4082 
4083 /*
4084  * Similar to zpool_clear(), but takes a GUID (used by fmd).
4085  */
4086 int
4087 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
4088 {
4089 	zfs_cmd_t zc = {"\0"};
4090 	char msg[1024];
4091 	libzfs_handle_t *hdl = zhp->zpool_hdl;
4092 
4093 	(void) snprintf(msg, sizeof (msg),
4094 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
4095 	    (u_longlong_t)guid);
4096 
4097 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4098 	zc.zc_guid = guid;
4099 	zc.zc_cookie = ZPOOL_NO_REWIND;
4100 
4101 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
4102 		return (0);
4103 
4104 	return (zpool_standard_error(hdl, errno, msg));
4105 }
4106 
4107 /*
4108  * Change the GUID for a pool.
4109  */
4110 int
4111 zpool_reguid(zpool_handle_t *zhp)
4112 {
4113 	char msg[1024];
4114 	libzfs_handle_t *hdl = zhp->zpool_hdl;
4115 	zfs_cmd_t zc = {"\0"};
4116 
4117 	(void) snprintf(msg, sizeof (msg),
4118 	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
4119 
4120 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4121 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
4122 		return (0);
4123 
4124 	return (zpool_standard_error(hdl, errno, msg));
4125 }
4126 
4127 /*
4128  * Reopen the pool.
4129  */
4130 int
4131 zpool_reopen_one(zpool_handle_t *zhp, void *data)
4132 {
4133 	libzfs_handle_t *hdl = zpool_get_handle(zhp);
4134 	const char *pool_name = zpool_get_name(zhp);
4135 	boolean_t *scrub_restart = data;
4136 	int error;
4137 
4138 	error = lzc_reopen(pool_name, *scrub_restart);
4139 	if (error) {
4140 		return (zpool_standard_error_fmt(hdl, error,
4141 		    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
4142 	}
4143 
4144 	return (0);
4145 }
4146 
4147 /* call into libzfs_core to execute the sync IOCTL per pool */
4148 int
4149 zpool_sync_one(zpool_handle_t *zhp, void *data)
4150 {
4151 	int ret;
4152 	libzfs_handle_t *hdl = zpool_get_handle(zhp);
4153 	const char *pool_name = zpool_get_name(zhp);
4154 	boolean_t *force = data;
4155 	nvlist_t *innvl = fnvlist_alloc();
4156 
4157 	fnvlist_add_boolean_value(innvl, "force", *force);
4158 	if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
4159 		nvlist_free(innvl);
4160 		return (zpool_standard_error_fmt(hdl, ret,
4161 		    dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
4162 	}
4163 	nvlist_free(innvl);
4164 
4165 	return (0);
4166 }
4167 
4168 #define	PATH_BUF_LEN	64
4169 
4170 /*
4171  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
4172  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
4173  * We also check if this is a whole disk, in which case we strip off the
4174  * trailing 's0' slice name.
4175  *
4176  * This routine is also responsible for identifying when disks have been
4177  * reconfigured in a new location.  The kernel will have opened the device by
4178  * devid, but the path will still refer to the old location.  To catch this, we
4179  * first do a path -> devid translation (which is fast for the common case).  If
4180  * the devid matches, we're done.  If not, we do a reverse devid -> path
4181  * translation and issue the appropriate ioctl() to update the path of the vdev.
4182  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
4183  * of these checks.
4184  */
4185 char *
4186 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
4187     int name_flags)
4188 {
4189 	char *path, *type, *env;
4190 	uint64_t value;
4191 	char buf[PATH_BUF_LEN];
4192 	char tmpbuf[PATH_BUF_LEN];
4193 
4194 	/*
4195 	 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
4196 	 * zpool name that will be displayed to the user.
4197 	 */
4198 	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
4199 	if (zhp != NULL && strcmp(type, "root") == 0)
4200 		return (zfs_strdup(hdl, zpool_get_name(zhp)));
4201 
4202 	env = getenv("ZPOOL_VDEV_NAME_PATH");
4203 	if (env && (strtoul(env, NULL, 0) > 0 ||
4204 	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
4205 		name_flags |= VDEV_NAME_PATH;
4206 
4207 	env = getenv("ZPOOL_VDEV_NAME_GUID");
4208 	if (env && (strtoul(env, NULL, 0) > 0 ||
4209 	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
4210 		name_flags |= VDEV_NAME_GUID;
4211 
4212 	env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
4213 	if (env && (strtoul(env, NULL, 0) > 0 ||
4214 	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
4215 		name_flags |= VDEV_NAME_FOLLOW_LINKS;
4216 
4217 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
4218 	    name_flags & VDEV_NAME_GUID) {
4219 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
4220 		(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
4221 		path = buf;
4222 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
4223 		if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
4224 			char *rp = realpath(path, NULL);
4225 			if (rp) {
4226 				strlcpy(buf, rp, sizeof (buf));
4227 				path = buf;
4228 				free(rp);
4229 			}
4230 		}
4231 
4232 		/*
4233 		 * For a block device only use the name.
4234 		 */
4235 		if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
4236 		    !(name_flags & VDEV_NAME_PATH)) {
4237 			path = zfs_strip_path(path);
4238 		}
4239 
4240 		/*
4241 		 * Remove the partition from the path if this is a whole disk.
4242 		 */
4243 		if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&
4244 		    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
4245 		    == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
4246 			return (zfs_strip_partition(path));
4247 		}
4248 	} else {
4249 		path = type;
4250 
4251 		/*
4252 		 * If it's a raidz device, we need to stick in the parity level.
4253 		 */
4254 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
4255 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
4256 			    &value) == 0);
4257 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
4258 			    (u_longlong_t)value);
4259 			path = buf;
4260 		}
4261 
4262 		/*
4263 		 * If it's a dRAID device, we add parity, groups, and spares.
4264 		 */
4265 		if (strcmp(path, VDEV_TYPE_DRAID) == 0) {
4266 			uint64_t ndata, nparity, nspares;
4267 			nvlist_t **child;
4268 			uint_t children;
4269 
4270 			verify(nvlist_lookup_nvlist_array(nv,
4271 			    ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
4272 			verify(nvlist_lookup_uint64(nv,
4273 			    ZPOOL_CONFIG_NPARITY, &nparity) == 0);
4274 			verify(nvlist_lookup_uint64(nv,
4275 			    ZPOOL_CONFIG_DRAID_NDATA, &ndata) == 0);
4276 			verify(nvlist_lookup_uint64(nv,
4277 			    ZPOOL_CONFIG_DRAID_NSPARES, &nspares) == 0);
4278 
4279 			path = zpool_draid_name(buf, sizeof (buf), ndata,
4280 			    nparity, nspares, children);
4281 		}
4282 
4283 		/*
4284 		 * We identify each top-level vdev by using a <type-id>
4285 		 * naming convention.
4286 		 */
4287 		if (name_flags & VDEV_NAME_TYPE_ID) {
4288 			uint64_t id;
4289 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
4290 			    &id) == 0);
4291 			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
4292 			    path, (u_longlong_t)id);
4293 			path = tmpbuf;
4294 		}
4295 	}
4296 
4297 	return (zfs_strdup(hdl, path));
4298 }
4299 
4300 static int
4301 zbookmark_mem_compare(const void *a, const void *b)
4302 {
4303 	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
4304 }
4305 
4306 /*
4307  * Retrieve the persistent error log, uniquify the members, and return to the
4308  * caller.
4309  */
4310 int
4311 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
4312 {
4313 	zfs_cmd_t zc = {"\0"};
4314 	libzfs_handle_t *hdl = zhp->zpool_hdl;
4315 	uint64_t count;
4316 	zbookmark_phys_t *zb = NULL;
4317 	int i;
4318 
4319 	/*
4320 	 * Retrieve the raw error list from the kernel.  If the number of errors
4321 	 * has increased, allocate more space and continue until we get the
4322 	 * entire list.
4323 	 */
4324 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
4325 	    &count) == 0);
4326 	if (count == 0)
4327 		return (0);
4328 	zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
4329 	    count * sizeof (zbookmark_phys_t));
4330 	zc.zc_nvlist_dst_size = count;
4331 	(void) strcpy(zc.zc_name, zhp->zpool_name);
4332 	for (;;) {
4333 		if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
4334 		    &zc) != 0) {
4335 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
4336 			if (errno == ENOMEM) {
4337 				void *dst;
4338 
4339 				count = zc.zc_nvlist_dst_size;
4340 				dst = zfs_alloc(zhp->zpool_hdl, count *
4341 				    sizeof (zbookmark_phys_t));
4342 				zc.zc_nvlist_dst = (uintptr_t)dst;
4343 			} else {
4344 				return (zpool_standard_error_fmt(hdl, errno,
4345 				    dgettext(TEXT_DOMAIN, "errors: List of "
4346 				    "errors unavailable")));
4347 			}
4348 		} else {
4349 			break;
4350 		}
4351 	}
4352 
4353 	/*
4354 	 * Sort the resulting bookmarks.  This is a little confusing due to the
4355 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
4356 	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
4357 	 * _not_ copied as part of the process.  So we point the start of our
4358 	 * array appropriate and decrement the total number of elements.
4359 	 */
4360 	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
4361 	    zc.zc_nvlist_dst_size;
4362 	count -= zc.zc_nvlist_dst_size;
4363 
4364 	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
4365 
4366 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
4367 
4368 	/*
4369 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
4370 	 */
4371 	for (i = 0; i < count; i++) {
4372 		nvlist_t *nv;
4373 
4374 		/* ignoring zb_blkid and zb_level for now */
4375 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
4376 		    zb[i-1].zb_object == zb[i].zb_object)
4377 			continue;
4378 
4379 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
4380 			goto nomem;
4381 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
4382 		    zb[i].zb_objset) != 0) {
4383 			nvlist_free(nv);
4384 			goto nomem;
4385 		}
4386 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
4387 		    zb[i].zb_object) != 0) {
4388 			nvlist_free(nv);
4389 			goto nomem;
4390 		}
4391 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
4392 			nvlist_free(nv);
4393 			goto nomem;
4394 		}
4395 		nvlist_free(nv);
4396 	}
4397 
4398 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
4399 	return (0);
4400 
4401 nomem:
4402 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
4403 	return (no_memory(zhp->zpool_hdl));
4404 }
4405 
4406 /*
4407  * Upgrade a ZFS pool to the latest on-disk version.
4408  */
4409 int
4410 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
4411 {
4412 	zfs_cmd_t zc = {"\0"};
4413 	libzfs_handle_t *hdl = zhp->zpool_hdl;
4414 
4415 	(void) strcpy(zc.zc_name, zhp->zpool_name);
4416 	zc.zc_cookie = new_version;
4417 
4418 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
4419 		return (zpool_standard_error_fmt(hdl, errno,
4420 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
4421 		    zhp->zpool_name));
4422 	return (0);
4423 }
4424 
/*
 * Flatten argv[] into a single space-separated string in 'string'
 * (at most 'len' bytes), starting with the basename of argv[0].
 * Overflow is silently truncated by strlcpy/strlcat.
 */
void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	(void) strlcpy(string, zfs_basename(argv[0]), len);
	for (int i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}
4436 
4437 int
4438 zpool_log_history(libzfs_handle_t *hdl, const char *message)
4439 {
4440 	zfs_cmd_t zc = {"\0"};
4441 	nvlist_t *args;
4442 	int err;
4443 
4444 	args = fnvlist_alloc();
4445 	fnvlist_add_string(args, "message", message);
4446 	err = zcmd_write_src_nvlist(hdl, &zc, args);
4447 	if (err == 0)
4448 		err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
4449 	nvlist_free(args);
4450 	zcmd_free_nvlists(&zc);
4451 	return (err);
4452 }
4453 
4454 /*
4455  * Perform ioctl to get some command history of a pool.
4456  *
4457  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
4458  * logical offset of the history buffer to start reading from.
4459  *
4460  * Upon return, 'off' is the next logical offset to read from and
4461  * 'len' is the actual amount of bytes read into 'buf'.
4462  */
4463 static int
4464 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
4465 {
4466 	zfs_cmd_t zc = {"\0"};
4467 	libzfs_handle_t *hdl = zhp->zpool_hdl;
4468 
4469 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4470 
4471 	zc.zc_history = (uint64_t)(uintptr_t)buf;
4472 	zc.zc_history_len = *len;
4473 	zc.zc_history_offset = *off;
4474 
4475 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
4476 		switch (errno) {
4477 		case EPERM:
4478 			return (zfs_error_fmt(hdl, EZFS_PERM,
4479 			    dgettext(TEXT_DOMAIN,
4480 			    "cannot show history for pool '%s'"),
4481 			    zhp->zpool_name));
4482 		case ENOENT:
4483 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
4484 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
4485 			    "'%s'"), zhp->zpool_name));
4486 		case ENOTSUP:
4487 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
4488 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
4489 			    "'%s', pool must be upgraded"), zhp->zpool_name));
4490 		default:
4491 			return (zpool_standard_error_fmt(hdl, errno,
4492 			    dgettext(TEXT_DOMAIN,
4493 			    "cannot get history for '%s'"), zhp->zpool_name));
4494 		}
4495 	}
4496 
4497 	*len = zc.zc_history_len;
4498 	*off = zc.zc_history_offset;
4499 
4500 	return (0);
4501 }
4502 
4503 /*
4504  * Retrieve the command history of a pool.
4505  */
4506 int
4507 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
4508     boolean_t *eof)
4509 {
4510 	char *buf;
4511 	int buflen = 128 * 1024;
4512 	nvlist_t **records = NULL;
4513 	uint_t numrecords = 0;
4514 	int err, i;
4515 	uint64_t start = *off;
4516 
4517 	buf = malloc(buflen);
4518 	if (buf == NULL)
4519 		return (ENOMEM);
4520 	/* process about 1MB a time */
4521 	while (*off - start < 1024 * 1024) {
4522 		uint64_t bytes_read = buflen;
4523 		uint64_t leftover;
4524 
4525 		if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
4526 			break;
4527 
4528 		/* if nothing else was read in, we're at EOF, just return */
4529 		if (!bytes_read) {
4530 			*eof = B_TRUE;
4531 			break;
4532 		}
4533 
4534 		if ((err = zpool_history_unpack(buf, bytes_read,
4535 		    &leftover, &records, &numrecords)) != 0)
4536 			break;
4537 		*off -= leftover;
4538 		if (leftover == bytes_read) {
4539 			/*
4540 			 * no progress made, because buffer is not big enough
4541 			 * to hold this record; resize and retry.
4542 			 */
4543 			buflen *= 2;
4544 			free(buf);
4545 			buf = malloc(buflen);
4546 			if (buf == NULL)
4547 				return (ENOMEM);
4548 		}
4549 	}
4550 
4551 	free(buf);
4552 
4553 	if (!err) {
4554 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
4555 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
4556 		    (const nvlist_t **)records, numrecords) == 0);
4557 	}
4558 	for (i = 0; i < numrecords; i++)
4559 		nvlist_free(records[i]);
4560 	free(records);
4561 
4562 	return (err);
4563 }
4564 
4565 /*
4566  * Retrieve the next event given the passed 'zevent_fd' file descriptor.
4567  * If there is a new event available 'nvp' will contain a newly allocated
4568  * nvlist and 'dropped' will be set to the number of missed events since
4569  * the last call to this function.  When 'nvp' is set to NULL it indicates
4570  * no new events are available.  In either case the function returns 0 and
4571  * it is up to the caller to free 'nvp'.  In the case of a fatal error the
4572  * function will return a non-zero value.  When the function is called in
4573  * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
4574  * it will not return until a new event is available.
4575  */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, unsigned flags, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = zevent_fd;

	/* zc_guid doubles as the non-blocking flag for this ioctl */
	if (flags & ZEVENT_NONBLOCK)
		zc.zc_guid = ZEVENT_NONBLOCK;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
		return (-1);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (!(flags & ZEVENT_NONBLOCK))
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			/* in non-blocking mode ENOENT = no event; return 0 */
			goto out;
		case ENOMEM:
			/* destination nvlist buffer too small; grow + retry */
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				error = zfs_error_fmt(hdl, EZFS_NOMEM,
				    dgettext(TEXT_DOMAIN, "cannot get event"));
				goto out;
			} else {
				goto retry;
			}
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	/* the kernel reports the number of missed events in zc_cookie */
	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}
4632 
4633 /*
4634  * Clear all events.
4635  */
4636 int
4637 zpool_events_clear(libzfs_handle_t *hdl, int *count)
4638 {
4639 	zfs_cmd_t zc = {"\0"};
4640 
4641 	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
4642 		return (zpool_standard_error(hdl, errno,
4643 		    dgettext(TEXT_DOMAIN, "cannot clear events")));
4644 
4645 	if (count != NULL)
4646 		*count = (int)zc.zc_cookie; /* # of events cleared */
4647 
4648 	return (0);
4649 }
4650 
4651 /*
4652  * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
4653  * the passed zevent_fd file handle.  On success zero is returned,
4654  * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
4655  */
4656 int
4657 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
4658 {
4659 	zfs_cmd_t zc = {"\0"};
4660 	int error = 0;
4661 
4662 	zc.zc_guid = eid;
4663 	zc.zc_cleanup_fd = zevent_fd;
4664 
4665 	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
4666 		switch (errno) {
4667 		case ENOENT:
4668 			error = zfs_error_fmt(hdl, EZFS_NOENT,
4669 			    dgettext(TEXT_DOMAIN, "cannot get event"));
4670 			break;
4671 
4672 		case ENOMEM:
4673 			error = zfs_error_fmt(hdl, EZFS_NOMEM,
4674 			    dgettext(TEXT_DOMAIN, "cannot get event"));
4675 			break;
4676 
4677 		default:
4678 			error = zpool_standard_error_fmt(hdl, errno,
4679 			    dgettext(TEXT_DOMAIN, "cannot get event"));
4680 			break;
4681 		}
4682 	}
4683 
4684 	return (error);
4685 }
4686 
/*
 * Render a human-readable location for object 'obj' in dataset 'dsobj'
 * into 'pathname' (at most 'len' bytes).  Output forms, best to worst:
 *   <mountpoint>/<path>   dataset mounted and path resolved
 *   <dsname>:<path>       path resolved but dataset not mounted (or
 *                         'always_unmounted' requested)
 *   <dsname>:<0xobj>      dataset name known, object path not
 *   <0xdsobj>:<0xobj>     nothing resolvable
 *   <metadata>:<0xobj>    dsobj == 0 (the MOS)
 */
static void
zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len, boolean_t always_unmounted)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (zfs_ioctl(zhp->zpool_hdl,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	/* save the name before zc is reused for the next ioctl */
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
	    &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	/* mntpnt was allocated by is_mounted(); free(NULL) is a no-op */
	free(mntpnt);
}
4737 
/*
 * Translate (dsobj, obj) into a pathname, using the dataset's
 * mountpoint when it is mounted.
 */
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
}
4744 
/*
 * Translate (dsobj, obj) into a pathname, always in "dataset:path"
 * form regardless of whether the dataset is mounted.
 */
void
zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
}
4751 /*
4752  * Wait while the specified activity is in progress in the pool.
4753  */
4754 int
4755 zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
4756 {
4757 	boolean_t missing;
4758 
4759 	int error = zpool_wait_status(zhp, activity, &missing, NULL);
4760 
4761 	if (missing) {
4762 		(void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
4763 		    dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
4764 		    zhp->zpool_name);
4765 		return (ENOENT);
4766 	} else {
4767 		return (error);
4768 	}
4769 }
4770 
4771 /*
4772  * Wait for the given activity and return the status of the wait (whether or not
4773  * any waiting was done) in the 'waited' parameter. Non-existent pools are
4774  * reported via the 'missing' parameter, rather than by printing an error
4775  * message. This is convenient when this function is called in a loop over a
4776  * long period of time (as it is, for example, by zpool's wait cmd). In that
4777  * scenario, a pool being exported or destroyed should be considered a normal
4778  * event, so we don't want to print an error when we find that the pool doesn't
4779  * exist.
4780  */
4781 int
4782 zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
4783     boolean_t *missing, boolean_t *waited)
4784 {
4785 	int error = lzc_wait(zhp->zpool_name, activity, waited);
4786 	*missing = (error == ENOENT);
4787 	if (*missing)
4788 		return (0);
4789 
4790 	if (error != 0) {
4791 		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4792 		    dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
4793 		    zhp->zpool_name);
4794 	}
4795 
4796 	return (error);
4797 }
4798 
4799 int
4800 zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
4801 {
4802 	int error = lzc_set_bootenv(zhp->zpool_name, envmap);
4803 	if (error != 0) {
4804 		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4805 		    dgettext(TEXT_DOMAIN,
4806 		    "error setting bootenv in pool '%s'"), zhp->zpool_name);
4807 	}
4808 
4809 	return (error);
4810 }
4811 
4812 int
4813 zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
4814 {
4815 	nvlist_t *nvl;
4816 	int error;
4817 
4818 	nvl = NULL;
4819 	error = lzc_get_bootenv(zhp->zpool_name, &nvl);
4820 	if (error != 0) {
4821 		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4822 		    dgettext(TEXT_DOMAIN,
4823 		    "error getting bootenv in pool '%s'"), zhp->zpool_name);
4824 	} else {
4825 		*nvlp = nvl;
4826 	}
4827 
4828 	return (error);
4829 }
4830 
4831 /*
4832  * Attempt to read and parse feature file(s) (from "compatibility" property).
4833  * Files contain zpool feature names, comma or whitespace-separated.
4834  * Comments (# character to next newline) are discarded.
4835  *
4836  * Arguments:
4837  *  compatibility : string containing feature filenames
4838  *  features : either NULL or pointer to array of boolean
4839  *  report : either NULL or pointer to string buffer
4840  *  rlen : length of "report" buffer
4841  *
4842  * compatibility is NULL (unset), "", "off", "legacy", or list of
4843  * comma-separated filenames. filenames should either be absolute,
4844  * or relative to:
4845  *   1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or
4846  *   2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d).
4847  * (Unset), "" or "off" => enable all features
4848  * "legacy" => disable all features
4849  *
4850  * Any feature names read from files which match unames in spa_feature_table
4851  * will have the corresponding boolean set in the features array (if non-NULL).
4852  * If more than one feature set specified, only features present in *all* of
4853  * them will be set.
4854  *
4855  * "report" if not NULL will be populated with a suitable status message.
4856  *
4857  * Return values:
4858  *   ZPOOL_COMPATIBILITY_OK : files read and parsed ok
4859  *   ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file
4860  *   ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name
4861  *   ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name
4862  *   ZPOOL_COMPATIBILITY_NOFILES : no feature files found
4863  */
4864 zpool_compat_status_t
4865 zpool_load_compat(const char *compat, boolean_t *features, char *report,
4866     size_t rlen)
4867 {
4868 	int sdirfd, ddirfd, featfd;
4869 	struct stat fs;
4870 	char *fc;
4871 	char *ps, *ls, *ws;
4872 	char *file, *line, *word;
4873 
4874 	char l_compat[ZFS_MAXPROPLEN];
4875 
4876 	boolean_t ret_nofiles = B_TRUE;
4877 	boolean_t ret_badfile = B_FALSE;
4878 	boolean_t ret_badtoken = B_FALSE;
4879 	boolean_t ret_warntoken = B_FALSE;
4880 
4881 	/* special cases (unset), "" and "off" => enable all features */
4882 	if (compat == NULL || compat[0] == '\0' ||
4883 	    strcmp(compat, ZPOOL_COMPAT_OFF) == 0) {
4884 		if (features != NULL)
4885 			for (uint_t i = 0; i < SPA_FEATURES; i++)
4886 				features[i] = B_TRUE;
4887 		if (report != NULL)
4888 			strlcpy(report, gettext("all features enabled"), rlen);
4889 		return (ZPOOL_COMPATIBILITY_OK);
4890 	}
4891 
4892 	/* Final special case "legacy" => disable all features */
4893 	if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
4894 		if (features != NULL)
4895 			for (uint_t i = 0; i < SPA_FEATURES; i++)
4896 				features[i] = B_FALSE;
4897 		if (report != NULL)
4898 			strlcpy(report, gettext("all features disabled"), rlen);
4899 		return (ZPOOL_COMPATIBILITY_OK);
4900 	}
4901 
4902 	/*
4903 	 * Start with all true; will be ANDed with results from each file
4904 	 */
4905 	if (features != NULL)
4906 		for (uint_t i = 0; i < SPA_FEATURES; i++)
4907 			features[i] = B_TRUE;
4908 
4909 	char err_badfile[1024] = "";
4910 	char err_badtoken[1024] = "";
4911 
4912 	/*
4913 	 * We ignore errors from the directory open()
4914 	 * as they're only needed if the filename is relative
4915 	 * which will be checked during the openat().
4916 	 */
4917 
4918 /* O_PATH safer than O_RDONLY if system allows it */
4919 #if defined(O_PATH)
4920 #define	ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH)
4921 #else
4922 #define	ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY)
4923 #endif
4924 
4925 	sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS);
4926 	ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS);
4927 
4928 	(void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN);
4929 
4930 	for (file = strtok_r(l_compat, ",", &ps);
4931 	    file != NULL;
4932 	    file = strtok_r(NULL, ",", &ps)) {
4933 
4934 		boolean_t l_features[SPA_FEATURES];
4935 
4936 		enum { Z_SYSCONF, Z_DATA } source;
4937 
4938 		/* try sysconfdir first, then datadir */
4939 		source = Z_SYSCONF;
4940 		if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) {
4941 			featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC);
4942 			source = Z_DATA;
4943 		}
4944 
4945 		/* File readable and correct size? */
4946 		if (featfd < 0 ||
4947 		    fstat(featfd, &fs) < 0 ||
4948 		    fs.st_size < 1 ||
4949 		    fs.st_size > ZPOOL_COMPAT_MAXSIZE) {
4950 			(void) close(featfd);
4951 			strlcat(err_badfile, file, ZFS_MAXPROPLEN);
4952 			strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
4953 			ret_badfile = B_TRUE;
4954 			continue;
4955 		}
4956 
4957 /* Prefault the file if system allows */
4958 #if defined(MAP_POPULATE)
4959 #define	ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE)
4960 #elif defined(MAP_PREFAULT_READ)
4961 #define	ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ)
4962 #else
4963 #define	ZC_MMAP_FLAGS (MAP_PRIVATE)
4964 #endif
4965 
4966 		/* private mmap() so we can strtok safely */
4967 		fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE,
4968 		    ZC_MMAP_FLAGS, featfd, 0);
4969 		(void) close(featfd);
4970 
4971 		/* map ok, and last character == newline? */
4972 		if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') {
4973 			(void) munmap((void *) fc, fs.st_size);
4974 			strlcat(err_badfile, file, ZFS_MAXPROPLEN);
4975 			strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
4976 			ret_badfile = B_TRUE;
4977 			continue;
4978 		}
4979 
4980 		ret_nofiles = B_FALSE;
4981 
4982 		for (uint_t i = 0; i < SPA_FEATURES; i++)
4983 			l_features[i] = B_FALSE;
4984 
4985 		/* replace final newline with NULL to ensure string ends */
4986 		fc[fs.st_size - 1] = '\0';
4987 
4988 		for (line = strtok_r(fc, "\n", &ls);
4989 		    line != NULL;
4990 		    line = strtok_r(NULL, "\n", &ls)) {
4991 			/* discard comments */
4992 			char *r = strchr(line, '#');
4993 			if (r != NULL)
4994 				*r = '\0';
4995 
4996 			for (word = strtok_r(line, ", \t", &ws);
4997 			    word != NULL;
4998 			    word = strtok_r(NULL, ", \t", &ws)) {
4999 				/* Find matching feature name */
5000 				uint_t f;
5001 				for (f = 0; f < SPA_FEATURES; f++) {
5002 					zfeature_info_t *fi =
5003 					    &spa_feature_table[f];
5004 					if (strcmp(word, fi->fi_uname) == 0) {
5005 						l_features[f] = B_TRUE;
5006 						break;
5007 					}
5008 				}
5009 				if (f < SPA_FEATURES)
5010 					continue;
5011 
5012 				/* found an unrecognized word */
5013 				/* lightly sanitize it */
5014 				if (strlen(word) > 32)
5015 					word[32] = '\0';
5016 				for (char *c = word; *c != '\0'; c++)
5017 					if (!isprint(*c))
5018 						*c = '?';
5019 
5020 				strlcat(err_badtoken, word, ZFS_MAXPROPLEN);
5021 				strlcat(err_badtoken, " ", ZFS_MAXPROPLEN);
5022 				if (source == Z_SYSCONF)
5023 					ret_badtoken = B_TRUE;
5024 				else
5025 					ret_warntoken = B_TRUE;
5026 			}
5027 		}
5028 		(void) munmap((void *) fc, fs.st_size);
5029 
5030 		if (features != NULL)
5031 			for (uint_t i = 0; i < SPA_FEATURES; i++)
5032 				features[i] &= l_features[i];
5033 	}
5034 	(void) close(sdirfd);
5035 	(void) close(ddirfd);
5036 
5037 	/* Return the most serious error */
5038 	if (ret_badfile) {
5039 		if (report != NULL)
5040 			snprintf(report, rlen, gettext("could not read/"
5041 			    "parse feature file(s): %s"), err_badfile);
5042 		return (ZPOOL_COMPATIBILITY_BADFILE);
5043 	}
5044 	if (ret_nofiles) {
5045 		if (report != NULL)
5046 			strlcpy(report,
5047 			    gettext("no valid compatibility files specified"),
5048 			    rlen);
5049 		return (ZPOOL_COMPATIBILITY_NOFILES);
5050 	}
5051 	if (ret_badtoken) {
5052 		if (report != NULL)
5053 			snprintf(report, rlen, gettext("invalid feature "
5054 			    "name(s) in local compatibility files: %s"),
5055 			    err_badtoken);
5056 		return (ZPOOL_COMPATIBILITY_BADTOKEN);
5057 	}
5058 	if (ret_warntoken) {
5059 		if (report != NULL)
5060 			snprintf(report, rlen, gettext("unrecognized feature "
5061 			    "name(s) in distribution compatibility files: %s"),
5062 			    err_badtoken);
5063 		return (ZPOOL_COMPATIBILITY_WARNTOKEN);
5064 	}
5065 	if (report != NULL)
5066 		strlcpy(report, gettext("compatibility set ok"), rlen);
5067 	return (ZPOOL_COMPATIBILITY_OK);
5068 }
5069 
5070 static int
5071 zpool_vdev_guid(zpool_handle_t *zhp, const char *vdevname, uint64_t *vdev_guid)
5072 {
5073 	nvlist_t *tgt;
5074 	boolean_t avail_spare, l2cache;
5075 
5076 	verify(zhp != NULL);
5077 	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
5078 		char errbuf[1024];
5079 		(void) snprintf(errbuf, sizeof (errbuf),
5080 		    dgettext(TEXT_DOMAIN, "pool is in an unavailable state"));
5081 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLUNAVAIL, errbuf));
5082 	}
5083 
5084 	if ((tgt = zpool_find_vdev(zhp, vdevname, &avail_spare, &l2cache,
5085 	    NULL)) == NULL) {
5086 		char errbuf[1024];
5087 		(void) snprintf(errbuf, sizeof (errbuf),
5088 		    dgettext(TEXT_DOMAIN, "can not find %s in %s"),
5089 		    vdevname, zhp->zpool_name);
5090 		return (zfs_error(zhp->zpool_hdl, EZFS_NODEVICE, errbuf));
5091 	}
5092 
5093 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, vdev_guid) == 0);
5094 	return (0);
5095 }
5096 
5097 /*
5098  * Get a vdev property value for 'prop' and return the value in
5099  * a pre-allocated buffer.
5100  */
int
zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
    char *buf, size_t len, zprop_source_t *srctype, boolean_t literal)
{
	nvlist_t *nv;
	uint64_t intval;
	char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;

	if (prop == VDEV_PROP_USER) {
		/* user property, prop_name must contain the property name */
		assert(prop_name != NULL);
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			/* each prop nvlist carries (ZPROP_SOURCE, ZPROP_VALUE) */
			verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE,
			    &intval) == 0);
			src = intval;
			verify(nvlist_lookup_string(nv, ZPROP_VALUE,
			    &strval) == 0);
		} else {
			/* user prop not found */
			return (-1);
		}
		(void) strlcpy(buf, strval, len);
		if (srctype)
			*srctype = src;
		return (0);
	}

	/* native property: derive the nvlist key from the prop id */
	if (prop_name == NULL)
		prop_name = (char *)vdev_prop_to_name(prop);

	switch (vdev_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE,
			    &intval) == 0);
			src = intval;
			verify(nvlist_lookup_string(nv, ZPROP_VALUE,
			    &strval) == 0);
		} else {
			/* not present: fall back to default, or "-" if none */
			src = ZPROP_SRC_DEFAULT;
			if ((strval = (char *)vdev_prop_default_string(prop))
			    == NULL)
				strval = "-";
		}
		(void) strlcpy(buf, strval, len);
		break;

	case PROP_TYPE_NUMBER:
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE,
			    &intval) == 0);
			src = intval;
			verify(nvlist_lookup_uint64(nv, ZPROP_VALUE,
			    &intval) == 0);
		} else {
			src = ZPROP_SRC_DEFAULT;
			intval = vdev_prop_default_numeric(prop);
		}

		/* per-property formatting ('literal' = raw number) */
		switch (prop) {
		case VDEV_PROP_ASIZE:
		case VDEV_PROP_PSIZE:
		case VDEV_PROP_SIZE:
		case VDEV_PROP_BOOTSIZE:
		case VDEV_PROP_ALLOCATED:
		case VDEV_PROP_FREE:
		case VDEV_PROP_READ_ERRORS:
		case VDEV_PROP_WRITE_ERRORS:
		case VDEV_PROP_CHECKSUM_ERRORS:
		case VDEV_PROP_INITIALIZE_ERRORS:
		case VDEV_PROP_OPS_NULL:
		case VDEV_PROP_OPS_READ:
		case VDEV_PROP_OPS_WRITE:
		case VDEV_PROP_OPS_FREE:
		case VDEV_PROP_OPS_CLAIM:
		case VDEV_PROP_OPS_TRIM:
		case VDEV_PROP_BYTES_NULL:
		case VDEV_PROP_BYTES_READ:
		case VDEV_PROP_BYTES_WRITE:
		case VDEV_PROP_BYTES_FREE:
		case VDEV_PROP_BYTES_CLAIM:
		case VDEV_PROP_BYTES_TRIM:
			/* sizes and counters: humanize unless literal */
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case VDEV_PROP_EXPANDSZ:
			/* zero means no expansion room; shown as "-" */
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case VDEV_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case VDEV_PROP_FRAGMENTATION:
			/* UINT64_MAX marks "not computed"; shown as "-" */
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case VDEV_PROP_STATE:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) strlcpy(buf, zpool_state_to_name(intval,
				    VDEV_AUX_NONE), len);
			}
			break;
		default:
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		if (nvlist_lookup_nvlist(nvprop, prop_name, &nv) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE,
			    &intval) == 0);
			src = intval;
			verify(nvlist_lookup_uint64(nv, ZPROP_VALUE,
			    &intval) == 0);
		} else {
			src = ZPROP_SRC_DEFAULT;
			intval = vdev_prop_default_numeric(prop);
		}
		/* map the index back to its symbolic string */
		if (vdev_prop_index_to_string(prop, intval,
		    (const char **)&strval) != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
5259 
5260 /*
5261  * Get a vdev property value for 'prop_name' and return the value in
5262  * a pre-allocated buffer.
5263  */
5264 int
5265 zpool_get_vdev_prop(zpool_handle_t *zhp, const char *vdevname, vdev_prop_t prop,
5266     char *prop_name, char *buf, size_t len, zprop_source_t *srctype,
5267     boolean_t literal)
5268 {
5269 	nvlist_t *reqnvl, *reqprops;
5270 	nvlist_t *retprops = NULL;
5271 	uint64_t vdev_guid;
5272 	int ret;
5273 
5274 	if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
5275 		return (ret);
5276 
5277 	if (nvlist_alloc(&reqnvl, NV_UNIQUE_NAME, 0) != 0)
5278 		return (no_memory(zhp->zpool_hdl));
5279 	if (nvlist_alloc(&reqprops, NV_UNIQUE_NAME, 0) != 0)
5280 		return (no_memory(zhp->zpool_hdl));
5281 
5282 	fnvlist_add_uint64(reqnvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
5283 
5284 	if (prop != VDEV_PROP_USER) {
5285 		/* prop_name overrides prop value */
5286 		if (prop_name != NULL)
5287 			prop = vdev_name_to_prop(prop_name);
5288 		else
5289 			prop_name = (char *)vdev_prop_to_name(prop);
5290 		assert(prop < VDEV_NUM_PROPS);
5291 	}
5292 
5293 	assert(prop_name != NULL);
5294 	if (nvlist_add_uint64(reqprops, prop_name, prop) != 0) {
5295 		nvlist_free(reqnvl);
5296 		nvlist_free(reqprops);
5297 		return (no_memory(zhp->zpool_hdl));
5298 	}
5299 
5300 	fnvlist_add_nvlist(reqnvl, ZPOOL_VDEV_PROPS_GET_PROPS, reqprops);
5301 
5302 	ret = lzc_get_vdev_prop(zhp->zpool_name, reqnvl, &retprops);
5303 
5304 	if (ret == 0) {
5305 		ret = zpool_get_vdev_prop_value(retprops, prop, prop_name, buf,
5306 		    len, srctype, literal);
5307 	} else {
5308 		char errbuf[1024];
5309 		(void) snprintf(errbuf, sizeof (errbuf),
5310 		    dgettext(TEXT_DOMAIN, "cannot get vdev property %s from"
5311 		    " %s in %s"), prop_name, vdevname, zhp->zpool_name);
5312 		(void) zpool_standard_error(zhp->zpool_hdl, ret, errbuf);
5313 	}
5314 
5315 	nvlist_free(reqnvl);
5316 	nvlist_free(reqprops);
5317 	nvlist_free(retprops);
5318 
5319 	return (ret);
5320 }
5321 
5322 /*
5323  * Get all vdev properties
5324  */
5325 int
5326 zpool_get_all_vdev_props(zpool_handle_t *zhp, const char *vdevname,
5327     nvlist_t **outnvl)
5328 {
5329 	nvlist_t *nvl = NULL;
5330 	uint64_t vdev_guid;
5331 	int ret;
5332 
5333 	if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
5334 		return (ret);
5335 
5336 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
5337 		return (no_memory(zhp->zpool_hdl));
5338 
5339 	fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_GET_VDEV, vdev_guid);
5340 
5341 	ret = lzc_get_vdev_prop(zhp->zpool_name, nvl, outnvl);
5342 
5343 	nvlist_free(nvl);
5344 
5345 	if (ret) {
5346 		char errbuf[1024];
5347 		(void) snprintf(errbuf, sizeof (errbuf),
5348 		    dgettext(TEXT_DOMAIN, "cannot get vdev properties for"
5349 		    " %s in %s"), vdevname, zhp->zpool_name);
5350 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
5351 	}
5352 
5353 	return (ret);
5354 }
5355 
5356 /*
5357  * Set vdev property
5358  */
5359 int
5360 zpool_set_vdev_prop(zpool_handle_t *zhp, const char *vdevname,
5361     const char *propname, const char *propval)
5362 {
5363 	int ret;
5364 	nvlist_t *nvl = NULL;
5365 	nvlist_t *outnvl = NULL;
5366 	nvlist_t *props;
5367 	nvlist_t *realprops;
5368 	prop_flags_t flags = { 0 };
5369 	uint64_t version;
5370 	uint64_t vdev_guid;
5371 
5372 	if ((ret = zpool_vdev_guid(zhp, vdevname, &vdev_guid)) != 0)
5373 		return (ret);
5374 
5375 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
5376 		return (no_memory(zhp->zpool_hdl));
5377 	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
5378 		return (no_memory(zhp->zpool_hdl));
5379 
5380 	fnvlist_add_uint64(nvl, ZPOOL_VDEV_PROPS_SET_VDEV, vdev_guid);
5381 
5382 	if (nvlist_add_string(props, propname, propval) != 0) {
5383 		nvlist_free(props);
5384 		return (no_memory(zhp->zpool_hdl));
5385 	}
5386 
5387 	char errbuf[1024];
5388 	(void) snprintf(errbuf, sizeof (errbuf),
5389 	    dgettext(TEXT_DOMAIN, "cannot set property %s for %s on %s"),
5390 	    propname, vdevname, zhp->zpool_name);
5391 
5392 	flags.vdevprop = 1;
5393 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
5394 	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
5395 	    zhp->zpool_name, props, version, flags, errbuf)) == NULL) {
5396 		nvlist_free(props);
5397 		nvlist_free(nvl);
5398 		return (-1);
5399 	}
5400 
5401 	nvlist_free(props);
5402 	props = realprops;
5403 
5404 	fnvlist_add_nvlist(nvl, ZPOOL_VDEV_PROPS_SET_PROPS, props);
5405 
5406 	ret = lzc_set_vdev_prop(zhp->zpool_name, nvl, &outnvl);
5407 
5408 	nvlist_free(props);
5409 	nvlist_free(nvl);
5410 	nvlist_free(outnvl);
5411 
5412 	if (ret)
5413 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
5414 
5415 	return (ret);
5416 }
5417