1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <ctype.h>
28 #include <errno.h>
29 #include <devid.h>
30 #include <fcntl.h>
31 #include <libintl.h>
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <strings.h>
35 #include <unistd.h>
36 #include <sys/efi_partition.h>
37 #include <sys/vtoc.h>
38 #include <sys/zfs_ioctl.h>
39 #include <dlfcn.h>
40 
41 #include "zfs_namecheck.h"
42 #include "zfs_prop.h"
43 #include "libzfs_impl.h"
44 
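/*
 * Table mapping internal history log event types to the human-readable
 * strings used when formatting pool history records.
 */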
45 const char *hist_event_table[LOG_END] = {
46 	"invalid event",
47 	"pool create",
48 	"vdev add",
49 	"pool remove",
50 	"pool destroy",
51 	"pool export",
52 	"pool import",
53 	"vdev attach",
54 	"vdev replace",
55 	"vdev detach",
56 	"vdev online",
57 	"vdev offline",
58 	"vdev upgrade",
59 	"pool clear",
60 	"pool scrub",
61 	"pool property set",
62 	"create",
63 	"clone",
64 	"destroy",
65 	"destroy_begin_sync",
66 	"inherit",
67 	"property set",
68 	"quota set",
69 	"permission update",
70 	"permission remove",
71 	"permission who remove",
72 	"promote",
73 	"receive",
74 	"rename",
75 	"reservation set",
76 	"replay_inc_sync",
77 	"replay_full_sync",
78 	"rollback",
79 	"snapshot",
80 	"filesystem version upgrade",
81 	"refquota set",
82 	"refreservation set",
83 	"pool scrub done",
84 	"user hold",
85 	"user release",
86 };
87 
88 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
89 
90 #if defined(__i386) || defined(__amd64)
91 #define	BOOTCMD	"installgrub(1M)"
92 #else
93 #define	BOOTCMD	"installboot(1M)"
94 #endif
95 
96 #define	DISK_ROOT	"/dev/dsk"
97 #define	RDISK_ROOT	"/dev/rdsk"
98 #define	BACKUP_SLICE	"s2"
99 
100 /*
101  * ====================================================================
102  *   zpool property functions
103  * ====================================================================
104  */
105 
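/*
 * Fetch the full set of pool properties from the kernel and cache them in
 * zhp->zpool_props, growing the destination nvlist buffer as needed when
 * the ioctl reports ENOMEM.
 */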
106 static int
107 zpool_get_all_props(zpool_handle_t *zhp)
108 {
109 	zfs_cmd_t zc = { 0 };
110 	libzfs_handle_t *hdl = zhp->zpool_hdl;
111 
112 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
113 
114 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
115 		return (-1);
116 
117 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
118 		if (errno == ENOMEM) {
119 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
120 				zcmd_free_nvlists(&zc);
121 				return (-1);
122 			}
123 		} else {
124 			zcmd_free_nvlists(&zc);
125 			return (-1);
126 		}
127 	}
128 
129 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
130 		zcmd_free_nvlists(&zc);
131 		return (-1);
132 	}
133 
134 	zcmd_free_nvlists(&zc);
135 
136 	return (0);
137 }
138 
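/*
 * Refresh the cached property list, freeing the previous copy only once the
 * new one has been successfully fetched.
 */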
139 static int
140 zpool_props_refresh(zpool_handle_t *zhp)
141 {
142 	nvlist_t *old_props;
143 
144 	old_props = zhp->zpool_props;
145 
146 	if (zpool_get_all_props(zhp) != 0)
147 		return (-1);
148 
149 	nvlist_free(old_props);
150 	return (0);
151 }
152 
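/*
 * Retrieve the string value of the given property from the cached property
 * list, falling back to the property's default value (or "-") when it has
 * not been explicitly set.  The property source is returned through 'src'
 * when requested.
 */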
153 static char *
154 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
155     zprop_source_t *src)
156 {
157 	nvlist_t *nv, *nvl;
158 	uint64_t ival;
159 	char *value;
160 	zprop_source_t source;
161 
162 	nvl = zhp->zpool_props;
163 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
164 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
165 		source = ival;
166 		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
167 	} else {
168 		source = ZPROP_SRC_DEFAULT;
169 		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
170 			value = "-";
171 	}
172 
173 	if (src)
174 		*src = source;
175 
176 	return (value);
177 }
178 
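/*
 * Retrieve the numeric value of the given property.  If the property list
 * cannot be fetched (e.g. the pool is faulted), fall back to the pool GUID
 * from the cached config or to the property's default value.
 */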
179 uint64_t
180 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
181 {
182 	nvlist_t *nv, *nvl;
183 	uint64_t value;
184 	zprop_source_t source;
185 
186 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
187 		/*
188 		 * zpool_get_all_props() has most likely failed because
189 		 * the pool is faulted, but if all we need is the top level
190 		 * vdev's guid then get it from the zhp config nvlist.
191 		 */
192 		if ((prop == ZPOOL_PROP_GUID) &&
193 		    (nvlist_lookup_nvlist(zhp->zpool_config,
194 		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
195 		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
196 		    == 0)) {
197 			return (value);
198 		}
199 		return (zpool_prop_default_numeric(prop));
200 	}
201 
202 	nvl = zhp->zpool_props;
203 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
204 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
205 		source = value;
206 		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
207 	} else {
208 		source = ZPROP_SRC_DEFAULT;
209 		value = zpool_prop_default_numeric(prop);
210 	}
211 
212 	if (src)
213 		*src = source;
214 
215 	return (value);
216 }
217 
/*
 * Map a vdev state/aux pair to the string printed for it.
 */
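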
221 char *
222 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
223 {
224 	switch (state) {
225 	case VDEV_STATE_CLOSED:
226 	case VDEV_STATE_OFFLINE:
227 		return (gettext("OFFLINE"));
228 	case VDEV_STATE_REMOVED:
229 		return (gettext("REMOVED"));
230 	case VDEV_STATE_CANT_OPEN:
231 		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
232 			return (gettext("FAULTED"));
233 		else
234 			return (gettext("UNAVAIL"));
235 	case VDEV_STATE_FAULTED:
236 		return (gettext("FAULTED"));
237 	case VDEV_STATE_DEGRADED:
238 		return (gettext("DEGRADED"));
239 	case VDEV_STATE_HEALTHY:
240 		return (gettext("ONLINE"));
241 	}
242 
243 	return (gettext("UNKNOWN"));
244 }
245 
246 /*
247  * Get a zpool property value for 'prop' and return the value in
248  * a pre-allocated buffer.
249  */
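/*
 * A minimal usage sketch (illustrative only; assumes the caller already
 * holds an open zpool_handle_t named "zhp"):
 *
 *	char health[32];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
 *	    sizeof (health), &src) == 0)
 *		(void) printf("health: %s\n", health);
 */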
250 int
251 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
252     zprop_source_t *srctype)
253 {
254 	uint64_t intval;
255 	const char *strval;
256 	zprop_source_t src = ZPROP_SRC_NONE;
257 	nvlist_t *nvroot;
258 	vdev_stat_t *vs;
259 	uint_t vsc;
260 
261 	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
262 		switch (prop) {
263 		case ZPOOL_PROP_NAME:
264 			(void) strlcpy(buf, zpool_get_name(zhp), len);
265 			break;
266 
267 		case ZPOOL_PROP_HEALTH:
268 			(void) strlcpy(buf, "FAULTED", len);
269 			break;
270 
271 		case ZPOOL_PROP_GUID:
272 			intval = zpool_get_prop_int(zhp, prop, &src);
273 			(void) snprintf(buf, len, "%llu", intval);
274 			break;
275 
276 		case ZPOOL_PROP_ALTROOT:
277 		case ZPOOL_PROP_CACHEFILE:
278 			if (zhp->zpool_props != NULL ||
279 			    zpool_get_all_props(zhp) == 0) {
280 				(void) strlcpy(buf,
281 				    zpool_get_prop_string(zhp, prop, &src),
282 				    len);
283 				if (srctype != NULL)
284 					*srctype = src;
285 				return (0);
286 			}
287 			/* FALLTHROUGH */
288 		default:
289 			(void) strlcpy(buf, "-", len);
290 			break;
291 		}
292 
293 		if (srctype != NULL)
294 			*srctype = src;
295 		return (0);
296 	}
297 
298 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
299 	    prop != ZPOOL_PROP_NAME)
300 		return (-1);
301 
302 	switch (zpool_prop_get_type(prop)) {
303 	case PROP_TYPE_STRING:
304 		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
305 		    len);
306 		break;
307 
308 	case PROP_TYPE_NUMBER:
309 		intval = zpool_get_prop_int(zhp, prop, &src);
310 
311 		switch (prop) {
312 		case ZPOOL_PROP_SIZE:
313 		case ZPOOL_PROP_USED:
314 		case ZPOOL_PROP_AVAILABLE:
315 			(void) zfs_nicenum(intval, buf, len);
316 			break;
317 
318 		case ZPOOL_PROP_CAPACITY:
319 			(void) snprintf(buf, len, "%llu%%",
320 			    (u_longlong_t)intval);
321 			break;
322 
323 		case ZPOOL_PROP_HEALTH:
324 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
325 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
326 			verify(nvlist_lookup_uint64_array(nvroot,
327 			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
328 
329 			(void) strlcpy(buf, zpool_state_to_name(intval,
330 			    vs->vs_aux), len);
331 			break;
332 		default:
333 			(void) snprintf(buf, len, "%llu", intval);
334 		}
335 		break;
336 
337 	case PROP_TYPE_INDEX:
338 		intval = zpool_get_prop_int(zhp, prop, &src);
339 		if (zpool_prop_index_to_string(prop, intval, &strval)
340 		    != 0)
341 			return (-1);
342 		(void) strlcpy(buf, strval, len);
343 		break;
344 
345 	default:
346 		abort();
347 	}
348 
349 	if (srctype)
350 		*srctype = src;
351 
352 	return (0);
353 }
354 
/*
 * Check that the bootfs name belongs to the same pool on which it is being
 * set.  Assumes bootfs is a valid dataset name.
 */
359 static boolean_t
360 bootfs_name_valid(const char *pool, char *bootfs)
361 {
362 	int len = strlen(pool);
363 
364 	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
365 		return (B_FALSE);
366 
367 	if (strncmp(pool, bootfs, len) == 0 &&
368 	    (bootfs[len] == '/' || bootfs[len] == '\0'))
369 		return (B_TRUE);
370 
371 	return (B_FALSE);
372 }
373 
374 /*
375  * Inspect the configuration to determine if any of the devices contain
376  * an EFI label.
377  */
378 static boolean_t
379 pool_uses_efi(nvlist_t *config)
380 {
381 	nvlist_t **child;
382 	uint_t c, children;
383 
384 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
385 	    &child, &children) != 0)
386 		return (read_efi_label(config, NULL) >= 0);
387 
388 	for (c = 0; c < children; c++) {
389 		if (pool_uses_efi(child[c]))
390 			return (B_TRUE);
391 	}
392 	return (B_FALSE);
393 }
394 
395 static boolean_t
396 pool_is_bootable(zpool_handle_t *zhp)
397 {
398 	char bootfs[ZPOOL_MAXNAMELEN];
399 
400 	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
401 	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
402 	    sizeof (bootfs)) != 0);
403 }
404 
405 
406 /*
407  * Given an nvlist of zpool properties to be set, validate that they are
408  * correct, and parse any numeric properties (index, boolean, etc) if they are
409  * specified as strings.
410  */
411 static nvlist_t *
412 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
413     nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
414 {
415 	nvpair_t *elem;
416 	nvlist_t *retprops;
417 	zpool_prop_t prop;
418 	char *strval;
419 	uint64_t intval;
420 	char *slash;
421 	struct stat64 statbuf;
422 	zpool_handle_t *zhp;
423 	nvlist_t *nvroot;
424 
425 	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
426 		(void) no_memory(hdl);
427 		return (NULL);
428 	}
429 
430 	elem = NULL;
431 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
432 		const char *propname = nvpair_name(elem);
433 
434 		/*
435 		 * Make sure this property is valid and applies to this type.
436 		 */
437 		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
438 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
439 			    "invalid property '%s'"), propname);
440 			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
441 			goto error;
442 		}
443 
444 		if (zpool_prop_readonly(prop)) {
445 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
446 			    "is readonly"), propname);
447 			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
448 			goto error;
449 		}
450 
451 		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
452 		    &strval, &intval, errbuf) != 0)
453 			goto error;
454 
455 		/*
456 		 * Perform additional checking for specific properties.
457 		 */
458 		switch (prop) {
459 		case ZPOOL_PROP_VERSION:
460 			if (intval < version || intval > SPA_VERSION) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
464 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
465 				goto error;
466 			}
467 			break;
468 
469 		case ZPOOL_PROP_BOOTFS:
470 			if (create_or_import) {
471 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
472 				    "property '%s' cannot be set at creation "
473 				    "or import time"), propname);
474 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
475 				goto error;
476 			}
477 
478 			if (version < SPA_VERSION_BOOTFS) {
479 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
480 				    "pool must be upgraded to support "
481 				    "'%s' property"), propname);
482 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
483 				goto error;
484 			}
485 
			/*
			 * The bootfs property value has to be a dataset name,
			 * and the dataset must reside in the pool it is being
			 * set on.
			 */
490 			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
491 			    strval)) {
492 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
493 				    "is an invalid name"), strval);
494 				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
495 				goto error;
496 			}
497 
498 			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
499 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
500 				    "could not open pool '%s'"), poolname);
501 				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
502 				goto error;
503 			}
504 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
505 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
506 
			/*
			 * The bootfs property cannot be set on a pool that
			 * contains an EFI-labeled disk.
			 */
511 			if (pool_uses_efi(nvroot)) {
512 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
513 				    "property '%s' not supported on "
514 				    "EFI labeled devices"), propname);
515 				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
516 				zpool_close(zhp);
517 				goto error;
518 			}
519 			zpool_close(zhp);
520 			break;
521 
522 		case ZPOOL_PROP_ALTROOT:
523 			if (!create_or_import) {
524 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
525 				    "property '%s' can only be set during pool "
526 				    "creation or import"), propname);
527 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
528 				goto error;
529 			}
530 
531 			if (strval[0] != '/') {
532 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
533 				    "bad alternate root '%s'"), strval);
534 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
535 				goto error;
536 			}
537 			break;
538 
539 		case ZPOOL_PROP_CACHEFILE:
540 			if (strval[0] == '\0')
541 				break;
542 
543 			if (strcmp(strval, "none") == 0)
544 				break;
545 
546 			if (strval[0] != '/') {
547 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
548 				    "property '%s' must be empty, an "
549 				    "absolute path, or 'none'"), propname);
550 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
551 				goto error;
552 			}
553 
554 			slash = strrchr(strval, '/');
555 
556 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
557 			    strcmp(slash, "/..") == 0) {
558 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
559 				    "'%s' is not a valid file"), strval);
560 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
561 				goto error;
562 			}
563 
564 			*slash = '\0';
565 
566 			if (strval[0] != '\0' &&
567 			    (stat64(strval, &statbuf) != 0 ||
568 			    !S_ISDIR(statbuf.st_mode))) {
569 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
570 				    "'%s' is not a valid directory"),
571 				    strval);
572 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
573 				goto error;
574 			}
575 
576 			*slash = '/';
577 			break;
578 		}
579 	}
580 
581 	return (retprops);
582 error:
583 	nvlist_free(retprops);
584 	return (NULL);
585 }
586 
587 /*
588  * Set zpool property : propname=propval.
589  */
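/*
 * A minimal usage sketch (illustrative only; assumes an open zpool handle
 * named "zhp"):
 *
 *	if (zpool_set_prop(zhp, "autoreplace", "on") != 0)
 *		(void) fprintf(stderr, "failed to set autoreplace\n");
 */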
590 int
591 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
592 {
593 	zfs_cmd_t zc = { 0 };
594 	int ret = -1;
595 	char errbuf[1024];
596 	nvlist_t *nvl = NULL;
597 	nvlist_t *realprops;
598 	uint64_t version;
599 
600 	(void) snprintf(errbuf, sizeof (errbuf),
601 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
602 	    zhp->zpool_name);
603 
604 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
605 		return (no_memory(zhp->zpool_hdl));
606 
607 	if (nvlist_add_string(nvl, propname, propval) != 0) {
608 		nvlist_free(nvl);
609 		return (no_memory(zhp->zpool_hdl));
610 	}
611 
612 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
613 	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
614 	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
615 		nvlist_free(nvl);
616 		return (-1);
617 	}
618 
619 	nvlist_free(nvl);
620 	nvl = realprops;
621 
622 	/*
623 	 * Execute the corresponding ioctl() to set this property.
624 	 */
625 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
626 
627 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
628 		nvlist_free(nvl);
629 		return (-1);
630 	}
631 
632 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
633 
634 	zcmd_free_nvlists(&zc);
635 	nvlist_free(nvl);
636 
637 	if (ret)
638 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
639 	else
640 		(void) zpool_props_refresh(zhp);
641 
642 	return (ret);
643 }
644 
645 int
646 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
647 {
648 	libzfs_handle_t *hdl = zhp->zpool_hdl;
649 	zprop_list_t *entry;
650 	char buf[ZFS_MAXPROPLEN];
651 
652 	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
653 		return (-1);
654 
655 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
656 
657 		if (entry->pl_fixed)
658 			continue;
659 
660 		if (entry->pl_prop != ZPROP_INVAL &&
661 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
662 		    NULL) == 0) {
663 			if (strlen(buf) > entry->pl_width)
664 				entry->pl_width = strlen(buf);
665 		}
666 	}
667 
668 	return (0);
669 }
670 
671 
672 /*
673  * Don't start the slice at the default block of 34; many storage
674  * devices will use a stripe width of 128k, so start there instead.
675  */
676 #define	NEW_START_BLOCK	256
677 
/*
 * Validate the given pool name.  On failure, an extended error message is
 * recorded through the libzfs handle (if one is provided).
 */
682 boolean_t
683 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
684 {
685 	namecheck_err_t why;
686 	char what;
687 	int ret;
688 
689 	ret = pool_namecheck(pool, &why, &what);
690 
691 	/*
692 	 * The rules for reserved pool names were extended at a later point.
693 	 * But we need to support users with existing pools that may now be
694 	 * invalid.  So we only check for this expanded set of names during a
695 	 * create (or import), and only in userland.
696 	 */
697 	if (ret == 0 && !isopen &&
698 	    (strncmp(pool, "mirror", 6) == 0 ||
699 	    strncmp(pool, "raidz", 5) == 0 ||
700 	    strncmp(pool, "spare", 5) == 0 ||
701 	    strcmp(pool, "log") == 0)) {
702 		if (hdl != NULL)
703 			zfs_error_aux(hdl,
704 			    dgettext(TEXT_DOMAIN, "name is reserved"));
705 		return (B_FALSE);
706 	}
707 
708 
709 	if (ret != 0) {
710 		if (hdl != NULL) {
711 			switch (why) {
712 			case NAME_ERR_TOOLONG:
713 				zfs_error_aux(hdl,
714 				    dgettext(TEXT_DOMAIN, "name is too long"));
715 				break;
716 
717 			case NAME_ERR_INVALCHAR:
718 				zfs_error_aux(hdl,
719 				    dgettext(TEXT_DOMAIN, "invalid character "
720 				    "'%c' in pool name"), what);
721 				break;
722 
723 			case NAME_ERR_NOLETTER:
724 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
725 				    "name must begin with a letter"));
726 				break;
727 
728 			case NAME_ERR_RESERVED:
729 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
730 				    "name is reserved"));
731 				break;
732 
733 			case NAME_ERR_DISKLIKE:
734 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
735 				    "pool name is reserved"));
736 				break;
737 
738 			case NAME_ERR_LEADING_SLASH:
739 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
740 				    "leading slash in name"));
741 				break;
742 
743 			case NAME_ERR_EMPTY_COMPONENT:
744 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
745 				    "empty component in name"));
746 				break;
747 
748 			case NAME_ERR_TRAILING_SLASH:
749 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
750 				    "trailing slash in name"));
751 				break;
752 
753 			case NAME_ERR_MULTIPLE_AT:
754 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
755 				    "multiple '@' delimiters in name"));
756 				break;
757 
758 			}
759 		}
760 		return (B_FALSE);
761 	}
762 
763 	return (B_TRUE);
764 }
765 
766 /*
767  * Open a handle to the given pool, even if the pool is currently in the FAULTED
768  * state.
769  */
770 zpool_handle_t *
771 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
772 {
773 	zpool_handle_t *zhp;
774 	boolean_t missing;
775 
776 	/*
777 	 * Make sure the pool name is valid.
778 	 */
779 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
780 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
781 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
782 		    pool);
783 		return (NULL);
784 	}
785 
786 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
787 		return (NULL);
788 
789 	zhp->zpool_hdl = hdl;
790 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
791 
792 	if (zpool_refresh_stats(zhp, &missing) != 0) {
793 		zpool_close(zhp);
794 		return (NULL);
795 	}
796 
797 	if (missing) {
798 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
799 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
800 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
801 		zpool_close(zhp);
802 		return (NULL);
803 	}
804 
805 	return (zhp);
806 }
807 
808 /*
809  * Like the above, but silent on error.  Used when iterating over pools (because
810  * the configuration cache may be out of date).
811  */
812 int
813 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
814 {
815 	zpool_handle_t *zhp;
816 	boolean_t missing;
817 
818 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
819 		return (-1);
820 
821 	zhp->zpool_hdl = hdl;
822 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
823 
824 	if (zpool_refresh_stats(zhp, &missing) != 0) {
825 		zpool_close(zhp);
826 		return (-1);
827 	}
828 
829 	if (missing) {
830 		zpool_close(zhp);
831 		*ret = NULL;
832 		return (0);
833 	}
834 
835 	*ret = zhp;
836 	return (0);
837 }
838 
839 /*
840  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
841  * state.
842  */
843 zpool_handle_t *
844 zpool_open(libzfs_handle_t *hdl, const char *pool)
845 {
846 	zpool_handle_t *zhp;
847 
848 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
849 		return (NULL);
850 
851 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
852 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
853 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
854 		zpool_close(zhp);
855 		return (NULL);
856 	}
857 
858 	return (zhp);
859 }
860 
861 /*
862  * Close the handle.  Simply frees the memory associated with the handle.
863  */
864 void
865 zpool_close(zpool_handle_t *zhp)
866 {
867 	if (zhp->zpool_config)
868 		nvlist_free(zhp->zpool_config);
869 	if (zhp->zpool_old_config)
870 		nvlist_free(zhp->zpool_old_config);
871 	if (zhp->zpool_props)
872 		nvlist_free(zhp->zpool_props);
873 	free(zhp);
874 }
875 
876 /*
877  * Return the name of the pool.
878  */
879 const char *
880 zpool_get_name(zpool_handle_t *zhp)
881 {
882 	return (zhp->zpool_name);
883 }
884 
885 
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
889 int
890 zpool_get_state(zpool_handle_t *zhp)
891 {
892 	return (zhp->zpool_state);
893 }
894 
895 /*
896  * Create the named pool, using the provided vdev list.  It is assumed
897  * that the consumer has already validated the contents of the nvlist, so we
898  * don't have to worry about error semantics.
899  */
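/*
 * A rough usage sketch (illustrative only; assumes an initialized
 * libzfs_handle_t "hdl".  Building a valid 'nvroot' vdev tree is
 * non-trivial and is normally done by the zpool(1M) command's vdev-parsing
 * code, so only the call itself is shown):
 *
 *	nvlist_t *nvroot = ...;		(root of the validated vdev tree)
 *
 *	if (zpool_create(hdl, "tank", nvroot, NULL, NULL) != 0)
 *		(void) fprintf(stderr, "pool creation failed\n");
 */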
900 int
901 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
902     nvlist_t *props, nvlist_t *fsprops)
903 {
904 	zfs_cmd_t zc = { 0 };
905 	nvlist_t *zc_fsprops = NULL;
906 	nvlist_t *zc_props = NULL;
907 	char msg[1024];
908 	char *altroot;
909 	int ret = -1;
910 
911 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
912 	    "cannot create '%s'"), pool);
913 
914 	if (!zpool_name_valid(hdl, B_FALSE, pool))
915 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
916 
917 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
918 		return (-1);
919 
920 	if (props) {
921 		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
922 		    SPA_VERSION_1, B_TRUE, msg)) == NULL) {
923 			goto create_failed;
924 		}
925 	}
926 
927 	if (fsprops) {
928 		uint64_t zoned;
929 		char *zonestr;
930 
931 		zoned = ((nvlist_lookup_string(fsprops,
932 		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
933 		    strcmp(zonestr, "on") == 0);
934 
935 		if ((zc_fsprops = zfs_valid_proplist(hdl,
936 		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
937 			goto create_failed;
938 		}
939 		if (!zc_props &&
940 		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
941 			goto create_failed;
942 		}
943 		if (nvlist_add_nvlist(zc_props,
944 		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
945 			goto create_failed;
946 		}
947 	}
948 
949 	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
950 		goto create_failed;
951 
952 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
953 
954 	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
955 
956 		zcmd_free_nvlists(&zc);
957 		nvlist_free(zc_props);
958 		nvlist_free(zc_fsprops);
959 
960 		switch (errno) {
961 		case EBUSY:
962 			/*
963 			 * This can happen if the user has specified the same
964 			 * device multiple times.  We can't reliably detect this
965 			 * until we try to add it and see we already have a
966 			 * label.
967 			 */
968 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
969 			    "one or more vdevs refer to the same device"));
970 			return (zfs_error(hdl, EZFS_BADDEV, msg));
971 
972 		case EOVERFLOW:
973 			/*
974 			 * This occurs when one of the devices is below
975 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
976 			 * device was the problem device since there's no
977 			 * reliable way to determine device size from userland.
978 			 */
979 			{
980 				char buf[64];
981 
982 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
983 
984 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
985 				    "one or more devices is less than the "
986 				    "minimum size (%s)"), buf);
987 			}
988 			return (zfs_error(hdl, EZFS_BADDEV, msg));
989 
990 		case ENOSPC:
991 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
992 			    "one or more devices is out of space"));
993 			return (zfs_error(hdl, EZFS_BADDEV, msg));
994 
995 		case ENOTBLK:
996 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
997 			    "cache device must be a disk or disk slice"));
998 			return (zfs_error(hdl, EZFS_BADDEV, msg));
999 
1000 		default:
1001 			return (zpool_standard_error(hdl, errno, msg));
1002 		}
1003 	}
1004 
1005 	/*
1006 	 * If this is an alternate root pool, then we automatically set the
1007 	 * mountpoint of the root dataset to be '/'.
1008 	 */
1009 	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
1010 	    &altroot) == 0) {
1011 		zfs_handle_t *zhp;
1012 
1013 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
1014 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
1015 		    "/") == 0);
1016 
1017 		zfs_close(zhp);
1018 	}
1019 
1020 create_failed:
1021 	zcmd_free_nvlists(&zc);
1022 	nvlist_free(zc_props);
1023 	nvlist_free(zc_fsprops);
1024 	return (ret);
1025 }
1026 
1027 /*
1028  * Destroy the given pool.  It is up to the caller to ensure that there are no
1029  * datasets left in the pool.
1030  */
1031 int
1032 zpool_destroy(zpool_handle_t *zhp)
1033 {
1034 	zfs_cmd_t zc = { 0 };
1035 	zfs_handle_t *zfp = NULL;
1036 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1037 	char msg[1024];
1038 
1039 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1040 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
1041 	    ZFS_TYPE_FILESYSTEM)) == NULL)
1042 		return (-1);
1043 
1044 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1045 
1046 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1047 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1048 		    "cannot destroy '%s'"), zhp->zpool_name);
1049 
1050 		if (errno == EROFS) {
1051 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1052 			    "one or more devices is read only"));
1053 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1054 		} else {
1055 			(void) zpool_standard_error(hdl, errno, msg);
1056 		}
1057 
1058 		if (zfp)
1059 			zfs_close(zfp);
1060 		return (-1);
1061 	}
1062 
1063 	if (zfp) {
1064 		remove_mountpoint(zfp);
1065 		zfs_close(zfp);
1066 	}
1067 
1068 	return (0);
1069 }
1070 
1071 /*
1072  * Add the given vdevs to the pool.  The caller must have already performed the
1073  * necessary verification to ensure that the vdev specification is well-formed.
1074  */
1075 int
1076 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1077 {
1078 	zfs_cmd_t zc = { 0 };
1079 	int ret;
1080 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1081 	char msg[1024];
1082 	nvlist_t **spares, **l2cache;
1083 	uint_t nspares, nl2cache;
1084 
1085 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1086 	    "cannot add to '%s'"), zhp->zpool_name);
1087 
1088 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1089 	    SPA_VERSION_SPARES &&
1090 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1091 	    &spares, &nspares) == 0) {
1092 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1093 		    "upgraded to add hot spares"));
1094 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1095 	}
1096 
1097 	if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1098 	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1099 		uint64_t s;
1100 
1101 		for (s = 0; s < nspares; s++) {
1102 			char *path;
1103 
1104 			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1105 			    &path) == 0 && pool_uses_efi(spares[s])) {
1106 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1107 				    "device '%s' contains an EFI label and "
1108 				    "cannot be used on root pools."),
1109 				    zpool_vdev_name(hdl, NULL, spares[s],
1110 				    B_FALSE));
1111 				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1112 			}
1113 		}
1114 	}
1115 
1116 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1117 	    SPA_VERSION_L2CACHE &&
1118 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1119 	    &l2cache, &nl2cache) == 0) {
1120 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1121 		    "upgraded to add cache devices"));
1122 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
1123 	}
1124 
1125 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1126 		return (-1);
1127 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1128 
1129 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1130 		switch (errno) {
1131 		case EBUSY:
1132 			/*
1133 			 * This can happen if the user has specified the same
1134 			 * device multiple times.  We can't reliably detect this
1135 			 * until we try to add it and see we already have a
1136 			 * label.
1137 			 */
1138 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1139 			    "one or more vdevs refer to the same device"));
1140 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1141 			break;
1142 
1143 		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
1150 			{
1151 				char buf[64];
1152 
1153 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1154 
1155 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1156 				    "device is less than the minimum "
1157 				    "size (%s)"), buf);
1158 			}
1159 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1160 			break;
1161 
1162 		case ENOTSUP:
1163 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1164 			    "pool must be upgraded to add these vdevs"));
1165 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
1166 			break;
1167 
1168 		case EDOM:
1169 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1170 			    "root pool can not have multiple vdevs"
1171 			    " or separate logs"));
1172 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1173 			break;
1174 
1175 		case ENOTBLK:
1176 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1177 			    "cache device must be a disk or disk slice"));
1178 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1179 			break;
1180 
1181 		default:
1182 			(void) zpool_standard_error(hdl, errno, msg);
1183 		}
1184 
1185 		ret = -1;
1186 	} else {
1187 		ret = 0;
1188 	}
1189 
1190 	zcmd_free_nvlists(&zc);
1191 
1192 	return (ret);
1193 }
1194 
1195 /*
1196  * Exports the pool from the system.  The caller must ensure that there are no
1197  * mounted datasets in the pool.
1198  */
1199 int
1200 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
1201 {
1202 	zfs_cmd_t zc = { 0 };
1203 	char msg[1024];
1204 
1205 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1206 	    "cannot export '%s'"), zhp->zpool_name);
1207 
1208 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1209 	zc.zc_cookie = force;
1210 	zc.zc_guid = hardforce;
1211 
1212 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1213 		switch (errno) {
1214 		case EXDEV:
1215 			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1216 			    "use '-f' to override the following errors:\n"
1217 			    "'%s' has an active shared spare which could be"
1218 			    " used by other pools once '%s' is exported."),
1219 			    zhp->zpool_name, zhp->zpool_name);
1220 			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1221 			    msg));
1222 		default:
1223 			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1224 			    msg));
1225 		}
1226 	}
1227 
1228 	return (0);
1229 }
1230 
1231 int
1232 zpool_export(zpool_handle_t *zhp, boolean_t force)
1233 {
1234 	return (zpool_export_common(zhp, force, B_FALSE));
1235 }
1236 
1237 int
1238 zpool_export_force(zpool_handle_t *zhp)
1239 {
1240 	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
1241 }
1242 
/*
 * zpool_import() is a contracted interface.  It should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool and set
 * new property values at the same time.
 */
1250 int
1251 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1252     char *altroot)
1253 {
1254 	nvlist_t *props = NULL;
1255 	int ret;
1256 
1257 	if (altroot != NULL) {
1258 		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1259 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1260 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1261 			    newname));
1262 		}
1263 
1264 		if (nvlist_add_string(props,
1265 		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1266 		    nvlist_add_string(props,
1267 		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1268 			nvlist_free(props);
1269 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1270 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1271 			    newname));
1272 		}
1273 	}
1274 
1275 	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
1276 	if (props)
1277 		nvlist_free(props);
1278 	return (ret);
1279 }
1280 
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
1287 int
1288 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1289     nvlist_t *props, boolean_t importfaulted)
1290 {
1291 	zfs_cmd_t zc = { 0 };
1292 	char *thename;
1293 	char *origname;
1294 	int ret;
1295 	char errbuf[1024];
1296 
1297 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1298 	    &origname) == 0);
1299 
1300 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1301 	    "cannot import pool '%s'"), origname);
1302 
1303 	if (newname != NULL) {
1304 		if (!zpool_name_valid(hdl, B_FALSE, newname))
1305 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1306 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1307 			    newname));
1308 		thename = (char *)newname;
1309 	} else {
1310 		thename = origname;
1311 	}
1312 
1313 	if (props) {
1314 		uint64_t version;
1315 
1316 		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1317 		    &version) == 0);
1318 
1319 		if ((props = zpool_valid_proplist(hdl, origname,
1320 		    props, version, B_TRUE, errbuf)) == NULL) {
1321 			return (-1);
1322 		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1323 			nvlist_free(props);
1324 			return (-1);
1325 		}
1326 	}
1327 
1328 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1329 
1330 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1331 	    &zc.zc_guid) == 0);
1332 
1333 	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1334 		nvlist_free(props);
1335 		return (-1);
1336 	}
1337 
1338 	zc.zc_cookie = (uint64_t)importfaulted;
1339 	ret = 0;
1340 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
1341 		char desc[1024];
1342 		if (newname == NULL)
1343 			(void) snprintf(desc, sizeof (desc),
1344 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1345 			    thename);
1346 		else
1347 			(void) snprintf(desc, sizeof (desc),
1348 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1349 			    origname, thename);
1350 
1351 		switch (errno) {
1352 		case ENOTSUP:
1353 			/*
1354 			 * Unsupported version.
1355 			 */
1356 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
1357 			break;
1358 
1359 		case EINVAL:
1360 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1361 			break;
1362 
1363 		default:
1364 			(void) zpool_standard_error(hdl, errno, desc);
1365 		}
1366 
1367 		ret = -1;
1368 	} else {
1369 		zpool_handle_t *zhp;
1370 
1371 		/*
1372 		 * This should never fail, but play it safe anyway.
1373 		 */
1374 		if (zpool_open_silent(hdl, thename, &zhp) != 0)
1375 			ret = -1;
1376 		else if (zhp != NULL)
1377 			zpool_close(zhp);
1378 	}
1379 
1380 	zcmd_free_nvlists(&zc);
1381 	nvlist_free(props);
1382 
1383 	return (ret);
1384 }
1385 
1386 /*
1387  * Scrub the pool.
1388  */
1389 int
1390 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
1391 {
1392 	zfs_cmd_t zc = { 0 };
1393 	char msg[1024];
1394 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1395 
1396 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1397 	zc.zc_cookie = type;
1398 
1399 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
1400 		return (0);
1401 
1402 	(void) snprintf(msg, sizeof (msg),
1403 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1404 
1405 	if (errno == EBUSY)
1406 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
1407 	else
1408 		return (zpool_standard_error(hdl, errno, msg));
1409 }
1410 
/*
 * Find a vdev that matches the search criteria specified.  We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare, and to FALSE if it is an INUSE spare.
 */
1417 static nvlist_t *
1418 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1419     boolean_t *l2cache, boolean_t *log)
1420 {
1421 	uint_t c, children;
1422 	nvlist_t **child;
1423 	nvlist_t *ret;
1424 	uint64_t is_log;
1425 	char *srchkey;
1426 	nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1427 
1428 	/* Nothing to look for */
1429 	if (search == NULL || pair == NULL)
1430 		return (NULL);
1431 
1432 	/* Obtain the key we will use to search */
1433 	srchkey = nvpair_name(pair);
1434 
1435 	switch (nvpair_type(pair)) {
1436 	case DATA_TYPE_UINT64: {
1437 		uint64_t srchval, theguid, present;
1438 
1439 		verify(nvpair_value_uint64(pair, &srchval) == 0);
1440 		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1441 			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1442 			    &present) == 0) {
1443 				/*
1444 				 * If the device has never been present since
1445 				 * import, the only reliable way to match the
1446 				 * vdev is by GUID.
1447 				 */
1448 				verify(nvlist_lookup_uint64(nv,
1449 				    ZPOOL_CONFIG_GUID, &theguid) == 0);
1450 				if (theguid == srchval)
1451 					return (nv);
1452 			}
1453 		}
1454 		break;
1455 	}
1456 
1457 	case DATA_TYPE_STRING: {
1458 		char *srchval, *val;
1459 
1460 		verify(nvpair_value_string(pair, &srchval) == 0);
1461 		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1462 			break;
1463 
		/*
		 * Search for the requested value.  We special-case the search
		 * for ZPOOL_CONFIG_PATH when it refers to a whole disk, and
		 * when looking for a top-level vdev name (i.e.
		 * ZPOOL_CONFIG_TYPE).  Otherwise, all other searches are
		 * simple string compares.
		 */
1470 		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && val) {
1471 			uint64_t wholedisk = 0;
1472 
1473 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1474 			    &wholedisk);
1475 			if (wholedisk) {
1476 				/*
1477 				 * For whole disks, the internal path has 's0',
1478 				 * but the path passed in by the user doesn't.
1479 				 */
1480 				if (strlen(srchval) == strlen(val) - 2 &&
1481 				    strncmp(srchval, val, strlen(srchval)) == 0)
1482 					return (nv);
1483 				break;
1484 			}
1485 		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
1486 			char *type, *idx, *end, *p;
1487 			uint64_t id, vdev_id;
1488 
1489 			/*
1490 			 * Determine our vdev type, keeping in mind
1491 			 * that the srchval is composed of a type and
1492 			 * vdev id pair (i.e. mirror-4).
1493 			 */
1494 			if ((type = strdup(srchval)) == NULL)
1495 				return (NULL);
1496 
1497 			if ((p = strrchr(type, '-')) == NULL) {
1498 				free(type);
1499 				break;
1500 			}
1501 			idx = p + 1;
1502 			*p = '\0';
1503 
1504 			/*
1505 			 * If the types don't match then keep looking.
1506 			 */
1507 			if (strncmp(val, type, strlen(val)) != 0) {
1508 				free(type);
1509 				break;
1510 			}
1511 
1512 			verify(strncmp(type, VDEV_TYPE_RAIDZ,
1513 			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1514 			    strncmp(type, VDEV_TYPE_MIRROR,
1515 			    strlen(VDEV_TYPE_MIRROR)) == 0);
1516 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
1517 			    &id) == 0);
1518 
1519 			errno = 0;
1520 			vdev_id = strtoull(idx, &end, 10);
1521 
1522 			free(type);
1523 			if (errno != 0)
1524 				return (NULL);
1525 
1526 			/*
1527 			 * Now verify that we have the correct vdev id.
1528 			 */
1529 			if (vdev_id == id)
1530 				return (nv);
1531 		}
1532 
1533 		/*
1534 		 * Common case
1535 		 */
1536 		if (strcmp(srchval, val) == 0)
1537 			return (nv);
1538 		break;
1539 	}
1540 
1541 	default:
1542 		break;
1543 	}
1544 
1545 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1546 	    &child, &children) != 0)
1547 		return (NULL);
1548 
1549 	for (c = 0; c < children; c++) {
1550 		if ((ret = vdev_to_nvlist_iter(child[c], search,
1551 		    avail_spare, l2cache, NULL)) != NULL) {
1552 			/*
1553 			 * The 'is_log' value is only set for the toplevel
1554 			 * vdev, not the leaf vdevs.  So we always lookup the
1555 			 * log device from the root of the vdev tree (where
1556 			 * 'log' is non-NULL).
1557 			 */
1558 			if (log != NULL &&
1559 			    nvlist_lookup_uint64(child[c],
1560 			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
1561 			    is_log) {
1562 				*log = B_TRUE;
1563 			}
1564 			return (ret);
1565 		}
1566 	}
1567 
1568 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1569 	    &child, &children) == 0) {
1570 		for (c = 0; c < children; c++) {
1571 			if ((ret = vdev_to_nvlist_iter(child[c], search,
1572 			    avail_spare, l2cache, NULL)) != NULL) {
1573 				*avail_spare = B_TRUE;
1574 				return (ret);
1575 			}
1576 		}
1577 	}
1578 
1579 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1580 	    &child, &children) == 0) {
1581 		for (c = 0; c < children; c++) {
1582 			if ((ret = vdev_to_nvlist_iter(child[c], search,
1583 			    avail_spare, l2cache, NULL)) != NULL) {
1584 				*l2cache = B_TRUE;
1585 				return (ret);
1586 			}
1587 		}
1588 	}
1589 
1590 	return (NULL);
1591 }
1592 
1593 /*
1594  * Given a physical path (minus the "/devices" prefix), find the
1595  * associated vdev.
1596  */
1597 nvlist_t *
1598 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
1599     boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
1600 {
1601 	nvlist_t *search, *nvroot, *ret;
1602 
1603 	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1604 	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
1605 
1606 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1607 	    &nvroot) == 0);
1608 
1609 	*avail_spare = B_FALSE;
1610 	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1611 	nvlist_free(search);
1612 
1613 	return (ret);
1614 }
1615 
/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
1619 boolean_t
1620 zpool_vdev_is_interior(const char *name)
1621 {
1622 	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
1623 	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
1624 		return (B_TRUE);
1625 	return (B_FALSE);
1626 }
1627 
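/*
 * Locate the vdev named by 'path' in the pool's configuration.  The path may
 * be a vdev GUID, an interior vdev name (e.g. "mirror-1"), a device name
 * relative to /dev/dsk, or an absolute path.  'avail_spare', 'l2cache', and
 * 'log' are set to reflect where in the configuration the vdev was found.
 */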
1628 nvlist_t *
1629 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1630     boolean_t *l2cache, boolean_t *log)
1631 {
1632 	char buf[MAXPATHLEN];
1633 	char *end;
1634 	nvlist_t *nvroot, *search, *ret;
1635 	uint64_t guid;
1636 
1637 	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1638 
1639 	guid = strtoull(path, &end, 10);
1640 	if (guid != 0 && *end == '\0') {
1641 		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
1642 	} else if (zpool_vdev_is_interior(path)) {
1643 		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
1644 	} else if (path[0] != '/') {
1645 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
1646 		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
1647 	} else {
1648 		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
1649 	}
1650 
1651 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1652 	    &nvroot) == 0);
1653 
1654 	*avail_spare = B_FALSE;
1655 	*l2cache = B_FALSE;
1656 	if (log != NULL)
1657 		*log = B_FALSE;
1658 	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
1659 	nvlist_free(search);
1660 
1661 	return (ret);
1662 }
1663 
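/*
 * Return non-zero if the given vdev is currently usable, i.e. it is not
 * marked offline, faulted, or removed.
 */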
1664 static int
1665 vdev_online(nvlist_t *nv)
1666 {
1667 	uint64_t ival;
1668 
1669 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
1670 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
1671 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
1672 		return (0);
1673 
1674 	return (1);
1675 }
1676 
/*
 * Helper function for zpool_get_physpath().
 */
1680 static int
1681 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
1682     size_t *bytes_written)
1683 {
1684 	size_t bytes_left, pos, rsz;
1685 	char *tmppath;
1686 	const char *format;
1687 
1688 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
1689 	    &tmppath) != 0)
1690 		return (EZFS_NODEVICE);
1691 
1692 	pos = *bytes_written;
1693 	bytes_left = physpath_size - pos;
1694 	format = (pos == 0) ? "%s" : " %s";
1695 
1696 	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
1697 	*bytes_written += rsz;
1698 
1699 	if (rsz >= bytes_left) {
1700 		/* if physpath was not copied properly, clear it */
1701 		if (bytes_left != 0) {
1702 			physpath[pos] = 0;
1703 		}
1704 		return (EZFS_NOSPC);
1705 	}
1706 	return (0);
1707 }
1708 
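/*
 * Recursively walk the vdev tree rooted at 'nv', appending the physical path
 * of every online disk (separated by spaces) to 'physpath'.  Only disk,
 * mirror, replacing, and active-spare vdevs are considered.
 */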
1709 static int
1710 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
1711     size_t *rsz, boolean_t is_spare)
1712 {
1713 	char *type;
1714 	int ret;
1715 
1716 	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
1717 		return (EZFS_INVALCONFIG);
1718 
1719 	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
1720 		/*
1721 		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
1722 		 * For a spare vdev, we only want to boot from the active
1723 		 * spare device.
1724 		 */
1725 		if (is_spare) {
1726 			uint64_t spare = 0;
1727 			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
1728 			    &spare);
1729 			if (!spare)
1730 				return (EZFS_INVALCONFIG);
1731 		}
1732 
1733 		if (vdev_online(nv)) {
1734 			if ((ret = vdev_get_one_physpath(nv, physpath,
1735 			    phypath_size, rsz)) != 0)
1736 				return (ret);
1737 		}
1738 	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
1739 	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
1740 	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
1741 		nvlist_t **child;
1742 		uint_t count;
1743 		int i, ret;
1744 
1745 		if (nvlist_lookup_nvlist_array(nv,
1746 		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
1747 			return (EZFS_INVALCONFIG);
1748 
1749 		for (i = 0; i < count; i++) {
1750 			ret = vdev_get_physpaths(child[i], physpath,
1751 			    phypath_size, rsz, is_spare);
1752 			if (ret == EZFS_NOSPC)
1753 				return (ret);
1754 		}
1755 	}
1756 
1757 	return (EZFS_POOL_INVALARG);
1758 }
1759 
1760 /*
1761  * Get phys_path for a root pool config.
1762  * Return 0 on success; non-zero on failure.
1763  */
1764 static int
1765 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
1766 {
1767 	size_t rsz;
1768 	nvlist_t *vdev_root;
1769 	nvlist_t **child;
1770 	uint_t count;
1771 	char *type;
1772 
1773 	rsz = 0;
1774 
1775 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
1776 	    &vdev_root) != 0)
1777 		return (EZFS_INVALCONFIG);
1778 
1779 	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
1780 	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
1781 	    &child, &count) != 0)
1782 		return (EZFS_INVALCONFIG);
1783 
	/*
	 * A root pool cannot have EFI-labeled disks and can only have
	 * a single top-level vdev.
	 */
1788 	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
1789 	    pool_uses_efi(vdev_root))
1790 		return (EZFS_POOL_INVALARG);
1791 
1792 	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
1793 	    B_FALSE);
1794 
1795 	/* No online devices */
1796 	if (rsz == 0)
1797 		return (EZFS_NODEVICE);
1798 
1799 	return (0);
1800 }
1801 
/*
 * Get phys_path for a root pool.
 * Return 0 on success; non-zero on failure.
 */
1806 int
1807 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
1808 {
1809 	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
1810 	    phypath_size));
1811 }
1812 
1813 /*
1814  * Returns TRUE if the given guid corresponds to the given type.
1815  * This is used to check for hot spares (INUSE or not), and level 2 cache
1816  * devices.
1817  */
1818 static boolean_t
1819 is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
1820 {
1821 	uint64_t target_guid;
1822 	nvlist_t *nvroot;
1823 	nvlist_t **list;
1824 	uint_t count;
1825 	int i;
1826 
1827 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1828 	    &nvroot) == 0);
1829 	if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
1830 		for (i = 0; i < count; i++) {
1831 			verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
1832 			    &target_guid) == 0);
1833 			if (guid == target_guid)
1834 				return (B_TRUE);
1835 		}
1836 	}
1837 
1838 	return (B_FALSE);
1839 }
1840 
/*
 * If the device has been dynamically expanded, then we need to relabel
 * the disk to use the new unallocated space.
 */
1845 static int
1846 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
1847 {
1848 	char path[MAXPATHLEN];
1849 	char errbuf[1024];
1850 	int fd, error;
1851 	int (*_efi_use_whole_disk)(int);
1852 
1853 	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
1854 	    "efi_use_whole_disk")) == NULL)
1855 		return (-1);
1856 
1857 	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
1858 
1859 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
1860 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
1861 		    "relabel '%s': unable to open device"), name);
1862 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
1863 	}
1864 
1865 	/*
1866 	 * It's possible that we might encounter an error if the device
1867 	 * does not have any unallocated space left. If so, we simply
1868 	 * ignore that error and continue on.
1869 	 */
1870 	error = _efi_use_whole_disk(fd);
1871 	(void) close(fd);
1872 	if (error && error != VT_ENOSPC) {
1873 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
1874 		    "relabel '%s': unable to read disk capacity"), name);
1875 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
1876 	}
1877 	return (0);
1878 }
1879 
1880 /*
1881  * Bring the specified vdev online.   The 'flags' parameter is a set of the
1882  * ZFS_ONLINE_* flags.
1883  */
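/*
 * A minimal usage sketch (illustrative only; "c0t0d0" is a placeholder
 * device name):
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "c0t0d0", 0, &newstate) == 0 &&
 *	    newstate == VDEV_STATE_HEALTHY)
 *		(void) printf("device is online\n");
 */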
1884 int
1885 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
1886     vdev_state_t *newstate)
1887 {
1888 	zfs_cmd_t zc = { 0 };
1889 	char msg[1024];
1890 	nvlist_t *tgt;
1891 	boolean_t avail_spare, l2cache, islog;
1892 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1893 
1894 	if (flags & ZFS_ONLINE_EXPAND) {
1895 		(void) snprintf(msg, sizeof (msg),
1896 		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
1897 	} else {
1898 		(void) snprintf(msg, sizeof (msg),
1899 		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
1900 	}
1901 
1902 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1903 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1904 	    &islog)) == NULL)
1905 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1906 
1907 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1908 
1909 	if (avail_spare ||
1910 	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1911 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1912 
1913 	if (flags & ZFS_ONLINE_EXPAND ||
1914 	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
1915 		char *pathname = NULL;
1916 		uint64_t wholedisk = 0;
1917 
1918 		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
1919 		    &wholedisk);
1920 		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
1921 		    &pathname) == 0);
1922 
1923 		/*
1924 		 * XXX - L2ARC 1.0 devices can't support expansion.
1925 		 */
1926 		if (l2cache) {
1927 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1928 			    "cannot expand cache devices"));
1929 			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
1930 		}
1931 
1932 		if (wholedisk) {
1933 			pathname += strlen(DISK_ROOT) + 1;
1934 			(void) zpool_relabel_disk(zhp->zpool_hdl, pathname);
1935 		}
1936 	}
1937 
1938 	zc.zc_cookie = VDEV_STATE_ONLINE;
1939 	zc.zc_obj = flags;
1940 
1941 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
1942 		return (zpool_standard_error(hdl, errno, msg));
1943 
1944 	*newstate = zc.zc_cookie;
1945 	return (0);
1946 }
1947 
/*
 * Take the specified vdev offline.
 */
1951 int
1952 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
1953 {
1954 	zfs_cmd_t zc = { 0 };
1955 	char msg[1024];
1956 	nvlist_t *tgt;
1957 	boolean_t avail_spare, l2cache;
1958 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1959 
1960 	(void) snprintf(msg, sizeof (msg),
1961 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
1962 
1963 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1964 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
1965 	    NULL)) == NULL)
1966 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1967 
1968 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1969 
1970 	if (avail_spare ||
1971 	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1972 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1973 
1974 	zc.zc_cookie = VDEV_STATE_OFFLINE;
1975 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
1976 
1977 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1978 		return (0);
1979 
1980 	switch (errno) {
1981 	case EBUSY:
1982 
1983 		/*
1984 		 * There are no other replicas of this device.
1985 		 */
1986 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1987 
1988 	case EEXIST:
1989 		/*
1990 		 * The log device has unplayed logs
1991 		 */
1992 		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
1993 
1994 	default:
1995 		return (zpool_standard_error(hdl, errno, msg));
1996 	}
1997 }
1998 
1999 /*
2000  * Mark the given vdev faulted.
2001  */
2002 int
2003 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
2004 {
2005 	zfs_cmd_t zc = { 0 };
2006 	char msg[1024];
2007 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2008 
2009 	(void) snprintf(msg, sizeof (msg),
2010 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2011 
2012 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2013 	zc.zc_guid = guid;
2014 	zc.zc_cookie = VDEV_STATE_FAULTED;
2015 
2016 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2017 		return (0);
2018 
2019 	switch (errno) {
2020 	case EBUSY:
2021 
2022 		/*
2023 		 * There are no other replicas of this device.
2024 		 */
2025 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2026 
2027 	default:
2028 		return (zpool_standard_error(hdl, errno, msg));
2029 	}
2031 }
2032 
2033 /*
2034  * Mark the given vdev degraded.
2035  */
2036 int
2037 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
2038 {
2039 	zfs_cmd_t zc = { 0 };
2040 	char msg[1024];
2041 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2042 
2043 	(void) snprintf(msg, sizeof (msg),
2044 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2045 
2046 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2047 	zc.zc_guid = guid;
2048 	zc.zc_cookie = VDEV_STATE_DEGRADED;
2049 
2050 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2051 		return (0);
2052 
2053 	return (zpool_standard_error(hdl, errno, msg));
2054 }
2055 
2056 /*
2057  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2058  * a hot spare.
2059  */
2060 static boolean_t
2061 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2062 {
2063 	nvlist_t **child;
2064 	uint_t c, children;
2065 	char *type;
2066 
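	/*
	 * A spare vdev always has exactly two children: the original
	 * device at index 0 and the hot spare at index 1.  'which'
	 * selects which of those slots 'tgt' must occupy for a match;
	 * interior vdevs are searched recursively.
	 */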
2067 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2068 	    &children) == 0) {
2069 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2070 		    &type) == 0);
2071 
2072 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2073 		    children == 2 && child[which] == tgt)
2074 			return (B_TRUE);
2075 
2076 		for (c = 0; c < children; c++)
2077 			if (is_replacing_spare(child[c], tgt, which))
2078 				return (B_TRUE);
2079 	}
2080 
2081 	return (B_FALSE);
2082 }
2083 
2084 /*
2085  * Attach new_disk (fully described by nvroot) to old_disk.
2086  * If 'replacing' is specified, the new disk will replace the old one.
2087  */
2088 int
2089 zpool_vdev_attach(zpool_handle_t *zhp,
2090     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2091 {
2092 	zfs_cmd_t zc = { 0 };
2093 	char msg[1024];
2094 	int ret;
2095 	nvlist_t *tgt;
2096 	boolean_t avail_spare, l2cache, islog;
2097 	uint64_t val;
2098 	char *path, *newname;
2099 	nvlist_t **child;
2100 	uint_t children;
2101 	nvlist_t *config_root;
2102 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2103 	boolean_t rootpool = pool_is_bootable(zhp);
2104 
2105 	if (replacing)
2106 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2107 		    "cannot replace %s with %s"), old_disk, new_disk);
2108 	else
2109 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2110 		    "cannot attach %s to %s"), new_disk, old_disk);
2111 
2112 	/*
2113 	 * If this is a root pool, make sure that we're not attaching an
2114 	 * EFI labeled device.
2115 	 */
2116 	if (rootpool && pool_uses_efi(nvroot)) {
2117 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2118 		    "EFI labeled devices are not supported on root pools."));
2119 		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2120 	}
2121 
2122 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2123 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2124 	    &islog)) == 0)
2125 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2126 
2127 	if (avail_spare)
2128 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2129 
2130 	if (l2cache)
2131 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2132 
2133 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2134 	zc.zc_cookie = replacing;
2135 
2136 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2137 	    &child, &children) != 0 || children != 1) {
2138 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2139 		    "new device must be a single disk"));
2140 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2141 	}
2142 
2143 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2144 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2145 
2146 	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2147 		return (-1);
2148 
2149 	/*
2150 	 * If the target is a hot spare that has been swapped in, we can only
2151 	 * replace it with another hot spare.
2152 	 */
2153 	if (replacing &&
2154 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2155 	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2156 	    NULL) == NULL || !avail_spare) &&
2157 	    is_replacing_spare(config_root, tgt, 1)) {
2158 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2159 		    "can only be replaced by another hot spare"));
2160 		free(newname);
2161 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
2162 	}
2163 
2164 	/*
2165 	 * If we are attempting to replace a spare, it cannot be applied to an
2166 	 * already spared device.
2167 	 */
2168 	if (replacing &&
2169 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
2170 	    zpool_find_vdev(zhp, newname, &avail_spare,
2171 	    &l2cache, NULL) != NULL && avail_spare &&
2172 	    is_replacing_spare(config_root, tgt, 0)) {
2173 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2174 		    "device has already been replaced with a spare"));
2175 		free(newname);
2176 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
2177 	}
2178 
2179 	free(newname);
2180 
2181 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2182 		return (-1);
2183 
2184 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2185 
2186 	zcmd_free_nvlists(&zc);
2187 
2188 	if (ret == 0) {
2189 		if (rootpool) {
2190 			/*
2191 			 * XXX - This should be removed once we can
2192 			 * automatically install the bootblocks on the
2193 			 * newly attached disk.
2194 			 */
2195 			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please "
2196 			    "be sure to invoke %s to make '%s' bootable.\n"),
2197 			    BOOTCMD, new_disk);
2198 
2199 			/*
2200 			 * XXX need a better way to prevent user from
2201 			 * booting up a half-baked vdev.
2202 			 */
2203 			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2204 			    "sure to wait until resilver is done "
2205 			    "before rebooting.\n"));
2206 		}
2207 		return (0);
2208 	}
2209 
2210 	switch (errno) {
2211 	case ENOTSUP:
2212 		/*
2213 		 * Can't attach to or replace this type of vdev.
2214 		 */
2215 		if (replacing) {
2216 			if (islog)
2217 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2218 				    "cannot replace a log with a spare"));
2219 			else
2220 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2221 				    "cannot replace a replacing device"));
2222 		} else {
2223 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2224 			    "can only attach to mirrors and top-level "
2225 			    "disks"));
2226 		}
2227 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
2228 		break;
2229 
2230 	case EINVAL:
2231 		/*
2232 		 * The new device must be a single disk.
2233 		 */
2234 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2235 		    "new device must be a single disk"));
2236 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2237 		break;
2238 
2239 	case EBUSY:
2240 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2241 		    new_disk);
2242 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2243 		break;
2244 
2245 	case EOVERFLOW:
2246 		/*
2247 		 * The new device is too small.
2248 		 */
2249 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2250 		    "device is too small"));
2251 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2252 		break;
2253 
2254 	case EDOM:
2255 		/*
2256 		 * The new device has a different alignment requirement.
2257 		 */
2258 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2259 		    "devices have different sector alignment"));
2260 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2261 		break;
2262 
2263 	case ENAMETOOLONG:
2264 		/*
2265 		 * The resulting top-level vdev spec won't fit in the label.
2266 		 */
2267 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2268 		break;
2269 
2270 	default:
2271 		(void) zpool_standard_error(hdl, errno, msg);
2272 	}
2273 
2274 	return (-1);
2275 }
2276 
2277 /*
2278  * Detach the specified device.
2279  */
2280 int
2281 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2282 {
2283 	zfs_cmd_t zc = { 0 };
2284 	char msg[1024];
2285 	nvlist_t *tgt;
2286 	boolean_t avail_spare, l2cache;
2287 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2288 
2289 	(void) snprintf(msg, sizeof (msg),
2290 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2291 
2292 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2293 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2294 	    NULL)) == 0)
2295 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2296 
2297 	if (avail_spare)
2298 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2299 
2300 	if (l2cache)
2301 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2302 
2303 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2304 
2305 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2306 		return (0);
2307 
2308 	switch (errno) {
2309 
2310 	case ENOTSUP:
2311 		/*
2312 		 * Can't detach from this type of vdev.
2313 		 */
2314 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2315 		    "applicable to mirror and replacing vdevs"));
2316 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
2317 		break;
2318 
2319 	case EBUSY:
2320 		/*
2321 		 * There are no other replicas of this device.
2322 		 */
2323 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2324 		break;
2325 
2326 	default:
2327 		(void) zpool_standard_error(hdl, errno, msg);
2328 	}
2329 
2330 	return (-1);
2331 }
2332 
2333 /*
2334  * Remove the given device.  Currently, this is supported only for hot spares,
2335  * level 2 cache devices, and log devices.
2336  */
2337 int
2338 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
2339 {
2340 	zfs_cmd_t zc = { 0 };
2341 	char msg[1024];
2342 	nvlist_t *tgt;
2343 	boolean_t avail_spare, l2cache, islog;
2344 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2345 	uint64_t version;
2346 
2347 	(void) snprintf(msg, sizeof (msg),
2348 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
2349 
2350 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2351 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2352 	    &islog)) == 0)
2353 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2354 	/*
2355 	 * XXX - this should just go away.
2356 	 */
2357 	if (!avail_spare && !l2cache && !islog) {
2358 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2359 		    "only inactive hot spares, cache, top-level, "
2360 		    "or log devices can be removed"));
2361 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2362 	}
2363 
2364 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
2365 	if (islog && version < SPA_VERSION_HOLES) {
2366 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2367 		    "pool must be upgraded to support log removal"));
2368 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
2369 	}
2370 
2371 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2372 
2373 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
2374 		return (0);
2375 
2376 	return (zpool_standard_error(hdl, errno, msg));
2377 }
2378 
2379 /*
2380  * Clear the errors for the pool, or the particular device if specified.
2381  */
2382 int
2383 zpool_clear(zpool_handle_t *zhp, const char *path)
2384 {
2385 	zfs_cmd_t zc = { 0 };
2386 	char msg[1024];
2387 	nvlist_t *tgt;
2388 	boolean_t avail_spare, l2cache;
2389 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2390 
2391 	if (path)
2392 		(void) snprintf(msg, sizeof (msg),
2393 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2394 		    path);
2395 	else
2396 		(void) snprintf(msg, sizeof (msg),
2397 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
2398 		    zhp->zpool_name);
2399 
2400 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2401 	if (path) {
2402 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
2403 		    &l2cache, NULL)) == 0)
2404 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
2405 
2406 		/*
2407 		 * Don't allow error clearing for hot spares.  Do allow
2408 		 * error clearing for l2cache devices.
2409 		 */
2410 		if (avail_spare)
2411 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
2412 
2413 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
2414 		    &zc.zc_guid) == 0);
2415 	}
2416 
2417 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
2418 		return (0);
2419 
2420 	return (zpool_standard_error(hdl, errno, msg));
2421 }
2422 
2423 /*
2424  * Similar to zpool_clear(), but takes a GUID (used by fmd).
2425  */
2426 int
2427 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
2428 {
2429 	zfs_cmd_t zc = { 0 };
2430 	char msg[1024];
2431 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2432 
2433 	(void) snprintf(msg, sizeof (msg),
2434 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
2435 	    (u_longlong_t)guid);
2436 
2437 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2438 	zc.zc_guid = guid;
2439 
2440 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
2441 		return (0);
2442 
2443 	return (zpool_standard_error(hdl, errno, msg));
2444 }
2445 
2446 /*
2447  * Convert from a devid string to a path.
2448  */
2449 static char *
2450 devid_to_path(char *devid_str)
2451 {
2452 	ddi_devid_t devid;
2453 	char *minor;
2454 	char *path;
2455 	devid_nmlist_t *list = NULL;
2456 	int ret;
2457 
2458 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
2459 		return (NULL);
2460 
2461 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2462 
2463 	devid_str_free(minor);
2464 	devid_free(devid);
2465 
2466 	if (ret != 0)
2467 		return (NULL);
2468 
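	/*
	 * Any of the returned /dev names refers to the same device, so
	 * the first entry in the list is as good as any other.
	 */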
2469 	path = strdup(list[0].devname);
2470 	devid_free_nmlist(list);
2471 
2472 	return (path);
2475 }
2476 
2477 /*
2478  * Convert from a path to a devid string.
2479  */
2480 static char *
2481 path_to_devid(const char *path)
2482 {
2483 	int fd;
2484 	ddi_devid_t devid;
2485 	char *minor, *ret;
2486 
2487 	if ((fd = open(path, O_RDONLY)) < 0)
2488 		return (NULL);
2489 
2490 	minor = NULL;
2491 	ret = NULL;
2492 	if (devid_get(fd, &devid) == 0) {
2493 		if (devid_get_minor_name(fd, &minor) == 0)
2494 			ret = devid_str_encode(devid, minor);
2495 		if (minor != NULL)
2496 			devid_str_free(minor);
2497 		devid_free(devid);
2498 	}
2499 	(void) close(fd);
2500 
2501 	return (ret);
2502 }
2503 
2504 /*
2505  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
2506  * ignore any failure here, since a common case is for an unprivileged user to
2507  * type 'zpool status', and we'll display the correct information anyway.
2508  */
2509 static void
2510 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2511 {
2512 	zfs_cmd_t zc = { 0 };
2513 
2514 	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2515 	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
2516 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2517 	    &zc.zc_guid) == 0);
2518 
2519 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2520 }
2521 
2522 /*
2523  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
2524  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2525  * We also check if this is a whole disk, in which case we strip off the
2526  * trailing 's0' slice name.
2527  *
2528  * This routine is also responsible for identifying when disks have been
2529  * reconfigured in a new location.  The kernel will have opened the device by
2530  * devid, but the path will still refer to the old location.  To catch this, we
2531  * first do a path -> devid translation (which is fast for the common case).  If
2532  * the devid matches, we're done.  If not, we do a reverse devid -> path
2533  * translation and issue the appropriate ioctl() to update the path of the vdev.
2534  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2535  * of these checks.
2536  */
2537 char *
2538 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
2539     boolean_t verbose)
2540 {
2541 	char *path, *devid;
2542 	uint64_t value;
2543 	char buf[64];
2544 	vdev_stat_t *vs;
2545 	uint_t vsc;
2546 
2547 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2548 	    &value) == 0) {
2549 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2550 		    &value) == 0);
2551 		(void) snprintf(buf, sizeof (buf), "%llu",
2552 		    (u_longlong_t)value);
2553 		path = buf;
2554 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2555 
2556 		/*
2557 		 * If the device is dead (faulted, offline, etc) then don't
2558 		 * bother opening it.  Otherwise we may be forcing the user to
2559 		 * open a misbehaving device, which can have undesirable
2560 		 * effects.
2561 		 */
2562 		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2563 		    (uint64_t **)&vs, &vsc) != 0 ||
2564 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
2565 		    zhp != NULL &&
2566 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2567 			/*
2568 			 * Determine if the current path is correct.
2569 			 */
2570 			char *newdevid = path_to_devid(path);
2571 
2572 			if (newdevid == NULL ||
2573 			    strcmp(devid, newdevid) != 0) {
2574 				char *newpath;
2575 
2576 				if ((newpath = devid_to_path(devid)) != NULL) {
2577 					/*
2578 					 * Update the path appropriately.
2579 					 */
2580 					set_path(zhp, nv, newpath);
2581 					if (nvlist_add_string(nv,
2582 					    ZPOOL_CONFIG_PATH, newpath) == 0)
2583 						verify(nvlist_lookup_string(nv,
2584 						    ZPOOL_CONFIG_PATH,
2585 						    &path) == 0);
2586 					free(newpath);
2587 				}
2588 			}
2589 
2590 			if (newdevid)
2591 				devid_str_free(newdevid);
2592 		}
2593 
2594 		if (strncmp(path, "/dev/dsk/", 9) == 0)
2595 			path += 9;
2596 
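		/*
		 * Whole disks are stored with a trailing "s0" slice name;
		 * strip the last two characters so only the disk name is
		 * displayed.
		 */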
2597 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2598 		    &value) == 0 && value) {
2599 			char *tmp = zfs_strdup(hdl, path);
2600 			if (tmp == NULL)
2601 				return (NULL);
2602 			tmp[strlen(path) - 2] = '\0';
2603 			return (tmp);
2604 		}
2605 	} else {
2606 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
2607 
2608 		/*
2609 		 * If it's a raidz device, we need to stick in the parity level.
2610 		 */
2611 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
2612 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
2613 			    &value) == 0);
2614 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
2615 			    (u_longlong_t)value);
2616 			path = buf;
2617 		}
2618 
2619 		/*
2620 		 * We identify each top-level vdev by using a <type-id>
2621 		 * naming convention.
2622 		 */
2623 		if (verbose) {
2624 			uint64_t id;
2625 
2626 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2627 			    &id) == 0);
2628 			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
2629 			    (u_longlong_t)id);
2630 			path = buf;
2631 		}
2632 	}
2633 
2634 	return (zfs_strdup(hdl, path));
2635 }
2636 
2637 static int
2638 zbookmark_compare(const void *a, const void *b)
2639 {
2640 	return (memcmp(a, b, sizeof (zbookmark_t)));
2641 }
2642 
2643 /*
2644  * Retrieve the persistent error log, uniquify the members, and return to the
2645  * caller.
2646  */
2647 int
2648 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
2649 {
2650 	zfs_cmd_t zc = { 0 };
2651 	uint64_t count;
2652 	zbookmark_t *zb = NULL;
2653 	int i;
2654 
2655 	/*
2656 	 * Retrieve the raw error list from the kernel.  If the number of errors
2657 	 * has increased, allocate more space and continue until we get the
2658 	 * entire list.
2659 	 */
2660 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
2661 	    &count) == 0);
2662 	if (count == 0)
2663 		return (0);
2664 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
2665 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
2666 		return (-1);
2667 	zc.zc_nvlist_dst_size = count;
2668 	(void) strcpy(zc.zc_name, zhp->zpool_name);
2669 	for (;;) {
2670 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
2671 		    &zc) != 0) {
2672 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
2673 			if (errno == ENOMEM) {
2674 				count = zc.zc_nvlist_dst_size;
2675 				if ((zc.zc_nvlist_dst = (uintptr_t)
2676 				    zfs_alloc(zhp->zpool_hdl, count *
2677 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
2678 					return (-1);
2679 			} else {
2680 				return (-1);
2681 			}
2682 		} else {
2683 			break;
2684 		}
2685 	}
2686 
2687 	/*
2688 	 * Sort the resulting bookmarks.  This is a little confusing due to the
2689 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
2690 	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
2691 	 * _not_ copied as part of the process.  So we point the start of our
2692 	 * array appropriately and decrement the total number of elements.
2693 	 */
2694 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
2695 	    zc.zc_nvlist_dst_size;
2696 	count -= zc.zc_nvlist_dst_size;
2697 
2698 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
2699 
2700 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
2701 
2702 	/*
2703 	 * Fill in the nverrlistp with nvlists of dataset and object numbers.
2704 	 */
2705 	for (i = 0; i < count; i++) {
2706 		nvlist_t *nv;
2707 
2708 		/* ignoring zb_blkid and zb_level for now */
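		/*
		 * The bookmarks are sorted, so duplicate <dataset, object>
		 * pairs are adjacent and a single look-back suffices.
		 */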
2709 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
2710 		    zb[i-1].zb_object == zb[i].zb_object)
2711 			continue;
2712 
2713 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
2714 			goto nomem;
2715 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
2716 		    zb[i].zb_objset) != 0) {
2717 			nvlist_free(nv);
2718 			goto nomem;
2719 		}
2720 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
2721 		    zb[i].zb_object) != 0) {
2722 			nvlist_free(nv);
2723 			goto nomem;
2724 		}
2725 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
2726 			nvlist_free(nv);
2727 			goto nomem;
2728 		}
2729 		nvlist_free(nv);
2730 	}
2731 
2732 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2733 	return (0);
2734 
2735 nomem:
2736 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2737 	return (no_memory(zhp->zpool_hdl));
2738 }
2739 
2740 /*
2741  * Upgrade a ZFS pool to the latest on-disk version.
2742  */
2743 int
2744 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
2745 {
2746 	zfs_cmd_t zc = { 0 };
2747 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2748 
2749 	(void) strcpy(zc.zc_name, zhp->zpool_name);
2750 	zc.zc_cookie = new_version;
2751 
2752 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2753 		return (zpool_standard_error_fmt(hdl, errno,
2754 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2755 		    zhp->zpool_name));
2756 	return (0);
2757 }
2758 
2759 void
2760 zpool_set_history_str(const char *subcommand, int argc, char **argv,
2761     char *history_str)
2762 {
2763 	int i;
2764 
2765 	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2766 	for (i = 1; i < argc; i++) {
2767 		if (strlen(history_str) + 1 + strlen(argv[i]) >
2768 		    HIS_MAX_RECORD_LEN)
2769 			break;
2770 		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2771 		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
2772 	}
2773 }
2774 
2775 /*
2776  * Stage command history for logging.
2777  */
2778 int
2779 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2780 {
2781 	if (history_str == NULL)
2782 		return (EINVAL);
2783 
2784 	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2785 		return (EINVAL);
2786 
2787 	if (hdl->libzfs_log_str != NULL)
2788 		free(hdl->libzfs_log_str);
2789 
2790 	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2791 		return (no_memory(hdl));
2792 
2793 	return (0);
2794 }
2795 
2796 /*
2797  * Perform ioctl to get some command history of a pool.
2798  *
2799  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
2800  * logical offset of the history buffer to start reading from.
2801  *
2802  * Upon return, 'off' is the next logical offset to read from and
2803  * 'len' is the actual number of bytes read into 'buf'.
2804  */
2805 static int
2806 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
2807 {
2808 	zfs_cmd_t zc = { 0 };
2809 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2810 
2811 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2812 
2813 	zc.zc_history = (uint64_t)(uintptr_t)buf;
2814 	zc.zc_history_len = *len;
2815 	zc.zc_history_offset = *off;
2816 
2817 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
2818 		switch (errno) {
2819 		case EPERM:
2820 			return (zfs_error_fmt(hdl, EZFS_PERM,
2821 			    dgettext(TEXT_DOMAIN,
2822 			    "cannot show history for pool '%s'"),
2823 			    zhp->zpool_name));
2824 		case ENOENT:
2825 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
2826 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2827 			    "'%s'"), zhp->zpool_name));
2828 		case ENOTSUP:
2829 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
2830 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2831 			    "'%s', pool must be upgraded"), zhp->zpool_name));
2832 		default:
2833 			return (zpool_standard_error_fmt(hdl, errno,
2834 			    dgettext(TEXT_DOMAIN,
2835 			    "cannot get history for '%s'"), zhp->zpool_name));
2836 		}
2837 	}
2838 
2839 	*len = zc.zc_history_len;
2840 	*off = zc.zc_history_offset;
2841 
2842 	return (0);
2843 }
2844 
2845 /*
2846  * Process the buffer of nvlists, unpacking and storing each nvlist record
2847  * into 'records'.  'leftover' is set to the number of bytes that weren't
2848  * processed as there wasn't a complete record.
2849  */
2850 int
2851 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
2852     nvlist_t ***records, uint_t *numrecords)
2853 {
2854 	uint64_t reclen;
2855 	nvlist_t *nv;
2856 	int i;
2857 
2858 	while (bytes_read > sizeof (reclen)) {
2859 
2860 		/* get length of packed record (stored as little endian) */
2861 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
2862 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
2863 
2864 		if (bytes_read < sizeof (reclen) + reclen)
2865 			break;
2866 
2867 		/* unpack record */
2868 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
2869 			return (ENOMEM);
2870 		bytes_read -= sizeof (reclen) + reclen;
2871 		buf += sizeof (reclen) + reclen;
2872 
2873 		/* add record to nvlist array */
2874 		(*numrecords)++;
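		/*
		 * Grow the array geometrically: whenever the new record
		 * count is one less than a power of two, double the
		 * allocation.
		 */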
2875 		if (ISP2(*numrecords + 1)) {
2876 			*records = realloc(*records,
2877 			    *numrecords * 2 * sizeof (nvlist_t *));
2878 		}
2879 		(*records)[*numrecords - 1] = nv;
2880 	}
2881 
2882 	*leftover = bytes_read;
2883 	return (0);
2884 }
2885 
2886 #define	HIS_BUF_LEN	(128*1024)
2887 
2888 /*
2889  * Retrieve the command history of a pool.
2890  */
2891 int
2892 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
2893 {
2894 	char buf[HIS_BUF_LEN];
2895 	uint64_t off = 0;
2896 	nvlist_t **records = NULL;
2897 	uint_t numrecords = 0;
2898 	int err, i;
2899 
2900 	do {
2901 		uint64_t bytes_read = sizeof (buf);
2902 		uint64_t leftover;
2903 
2904 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
2905 			break;
2906 
2907 		/* if nothing else was read in, we're at EOF, just return */
2908 		if (!bytes_read)
2909 			break;
2910 
2911 		if ((err = zpool_history_unpack(buf, bytes_read,
2912 		    &leftover, &records, &numrecords)) != 0)
2913 			break;
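		/*
		 * Back up over any partial record left at the end of the
		 * buffer so the next ioctl() re-reads it in full.
		 */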
2914 		off -= leftover;
2915 
2916 		/* CONSTCOND */
2917 	} while (1);
2918 
2919 	if (!err) {
2920 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
2921 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
2922 		    records, numrecords) == 0);
2923 	}
2924 	for (i = 0; i < numrecords; i++)
2925 		nvlist_free(records[i]);
2926 	free(records);
2927 
2928 	return (err);
2929 }
2930 
2931 void
2932 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
2933     char *pathname, size_t len)
2934 {
2935 	zfs_cmd_t zc = { 0 };
2936 	boolean_t mounted = B_FALSE;
2937 	char *mntpnt = NULL;
2938 	char dsname[MAXNAMELEN];
2939 
2940 	if (dsobj == 0) {
2941 		/* special case for the MOS */
2942 		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
2943 		return;
2944 	}
2945 
2946 	/* get the dataset's name */
2947 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2948 	zc.zc_obj = dsobj;
2949 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
2950 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
2951 		/* just write out a path of two object numbers */
2952 		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
2953 		    dsobj, obj);
2954 		return;
2955 	}
2956 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
2957 
2958 	/* find out if the dataset is mounted */
2959 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
2960 
2961 	/* get the corrupted object's path */
2962 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
2963 	zc.zc_obj = obj;
2964 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
2965 	    &zc) == 0) {
2966 		if (mounted) {
2967 			(void) snprintf(pathname, len, "%s%s", mntpnt,
2968 			    zc.zc_value);
2969 		} else {
2970 			(void) snprintf(pathname, len, "%s:%s",
2971 			    dsname, zc.zc_value);
2972 		}
2973 	} else {
2974 		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
2975 	}
2976 	free(mntpnt);
2977 }
2978 
2979 /*
2980  * Read the EFI label from the config, if a label does not exist then
2981  * pass back the error to the caller. If the caller has passed a non-NULL
2982  * diskaddr argument then we set it to the starting address of the EFI
2983  * partition.
2984  */
2985 static int
2986 read_efi_label(nvlist_t *config, diskaddr_t *sb)
2987 {
2988 	char *path;
2989 	int fd;
2990 	char diskname[MAXPATHLEN];
2991 	int err = -1;
2992 
2993 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
2994 		return (err);
2995 
2996 	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
2997 	    strrchr(path, '/'));
2998 	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2999 		struct dk_gpt *vtoc;
3000 
3001 		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
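			/* slice 0 holds the ZFS data on a whole-disk vdev */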
3002 			if (sb != NULL)
3003 				*sb = vtoc->efi_parts[0].p_start;
3004 			efi_free(vtoc);
3005 		}
3006 		(void) close(fd);
3007 	}
3008 	return (err);
3009 }
3010 
3011 /*
3012  * determine where a partition starts on a disk in the current
3013  * configuration
3014  */
3015 static diskaddr_t
3016 find_start_block(nvlist_t *config)
3017 {
3018 	nvlist_t **child;
3019 	uint_t c, children;
3020 	diskaddr_t sb = MAXOFFSET_T;
3021 	uint64_t wholedisk;
3022 
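	/*
	 * A leaf vdev has no children; only whole-disk leaves are
	 * consulted here, since they carry the EFI label that records
	 * where the data partition starts.
	 */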
3023 	if (nvlist_lookup_nvlist_array(config,
3024 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3025 		if (nvlist_lookup_uint64(config,
3026 		    ZPOOL_CONFIG_WHOLE_DISK,
3027 		    &wholedisk) != 0 || !wholedisk) {
3028 			return (MAXOFFSET_T);
3029 		}
3030 		if (read_efi_label(config, &sb) < 0)
3031 			sb = MAXOFFSET_T;
3032 		return (sb);
3033 	}
3034 
3035 	for (c = 0; c < children; c++) {
3036 		sb = find_start_block(child[c]);
3037 		if (sb != MAXOFFSET_T) {
3038 			return (sb);
3039 		}
3040 	}
3041 	return (MAXOFFSET_T);
3042 }
3043 
3044 /*
3045  * Label an individual disk.  The name provided is the short name,
3046  * stripped of any leading /dev path.
3047  */
3048 int
3049 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3050 {
3051 	char path[MAXPATHLEN];
3052 	struct dk_gpt *vtoc;
3053 	int fd;
3054 	size_t resv = EFI_MIN_RESV_SIZE;
3055 	uint64_t slice_size;
3056 	diskaddr_t start_block;
3057 	char errbuf[1024];
3058 
3059 	/* prepare an error message just in case */
3060 	(void) snprintf(errbuf, sizeof (errbuf),
3061 	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3062 
3063 	if (zhp) {
3064 		nvlist_t *nvroot;
3065 
3066 		if (pool_is_bootable(zhp)) {
3067 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3068 			    "EFI labeled devices are not supported on root "
3069 			    "pools."));
3070 			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3071 		}
3072 
3073 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
3074 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3075 
3076 		if (zhp->zpool_start_block == 0)
3077 			start_block = find_start_block(nvroot);
3078 		else
3079 			start_block = zhp->zpool_start_block;
3080 		zhp->zpool_start_block = start_block;
3081 	} else {
3082 		/* new pool */
3083 		start_block = NEW_START_BLOCK;
3084 	}
3085 
3086 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3087 	    BACKUP_SLICE);
3088 
3089 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3090 		/*
3091 		 * This shouldn't happen.  We've long since verified that this
3092 		 * is a valid device.
3093 		 */
3094 		zfs_error_aux(hdl,
3095 		    dgettext(TEXT_DOMAIN, "unable to open device"));
3096 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3097 	}
3098 
3099 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3100 		/*
3101 		 * The only way this can fail is if we run out of memory, or we
3102 		 * were unable to read the disk's capacity
3103 		 */
3104 		if (errno == ENOMEM)
3105 			(void) no_memory(hdl);
3106 
3107 		(void) close(fd);
3108 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3109 		    "unable to read disk capacity"));
3110 
3111 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3112 	}
3113 
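	/*
	 * Slice 0 spans from the start block to the last usable LBA,
	 * less the reserved area placed at the end of the disk.
	 */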
3114 	slice_size = vtoc->efi_last_u_lba + 1;
3115 	slice_size -= EFI_MIN_RESV_SIZE;
3116 	if (start_block == MAXOFFSET_T)
3117 		start_block = NEW_START_BLOCK;
3118 	slice_size -= start_block;
3119 
3120 	vtoc->efi_parts[0].p_start = start_block;
3121 	vtoc->efi_parts[0].p_size = slice_size;
3122 
3123 	/*
3124 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
3125 	 * disposable by some EFI utilities (since EFI doesn't have a backup
3126 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
3127 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
3128 	 * etc. were all pretty specific.  V_USR is as close to reality as we
3129 	 * can get, in the absence of V_OTHER.
3130 	 */
3131 	vtoc->efi_parts[0].p_tag = V_USR;
3132 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3133 
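	/* slice 8 is the traditional EFI reserved partition at the end */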
3134 	vtoc->efi_parts[8].p_start = slice_size + start_block;
3135 	vtoc->efi_parts[8].p_size = resv;
3136 	vtoc->efi_parts[8].p_tag = V_RESERVED;
3137 
3138 	if (efi_write(fd, vtoc) != 0) {
3139 		/*
3140 		 * Some block drivers (like pcata) may not support EFI
3141 		 * GPT labels.  Print out a helpful error message directing
3142 		 * the user to manually label the disk and give a specific
3143 		 * slice.
3144 		 */
3145 		(void) close(fd);
3146 		efi_free(vtoc);
3147 
3148 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3149 		    "try using fdisk(1M) and then provide a specific slice"));
3150 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3151 	}
3152 
3153 	(void) close(fd);
3154 	efi_free(vtoc);
3155 	return (0);
3156 }
3157 
3158 static boolean_t
3159 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3160 {
3161 	char *type;
3162 	nvlist_t **child;
3163 	uint_t children, c;
3164 
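	/*
	 * Reject vdev types that cannot back a dump device (raidz, file,
	 * log, hole, and missing), then recurse into any children.
	 */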
3165 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3166 	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
3167 	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
3168 	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
3169 	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
3170 	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
3171 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3172 		    "vdev type '%s' is not supported"), type);
3173 		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3174 		return (B_FALSE);
3175 	}
3176 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3177 	    &child, &children) == 0) {
3178 		for (c = 0; c < children; c++) {
3179 			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3180 				return (B_FALSE);
3181 		}
3182 	}
3183 	return (B_TRUE);
3184 }
3185 
3186 /*
3187  * check if this zvol is allowable for use as a dump device; zero if
3188  * it is, > 0 if it isn't, < 0 if it isn't a zvol
3189  */
3190 int
3191 zvol_check_dump_config(char *arg)
3192 {
3193 	zpool_handle_t *zhp = NULL;
3194 	nvlist_t *config, *nvroot;
3195 	char *p, *volname;
3196 	nvlist_t **top;
3197 	uint_t toplevels;
3198 	libzfs_handle_t *hdl;
3199 	char errbuf[1024];
3200 	char poolname[ZPOOL_MAXNAMELEN];
3201 	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
3202 	int ret = 1;
3203 
3204 	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
3205 		return (-1);
3206 	}
3207 
3208 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3209 	    "dump is not supported on device '%s'"), arg);
3210 
3211 	if ((hdl = libzfs_init()) == NULL)
3212 		return (1);
3213 	libzfs_print_on_error(hdl, B_TRUE);
3214 
3215 	volname = arg + pathlen;
3216 
3217 	/* check the configuration of the pool */
3218 	if ((p = strchr(volname, '/')) == NULL) {
3219 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3220 		    "malformed dataset name"));
3221 		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
3222 		return (1);
3223 	} else if (p - volname >= ZFS_MAXNAMELEN) {
3224 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3225 		    "dataset name is too long"));
3226 		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
3227 		return (1);
3228 	} else {
3229 		(void) strncpy(poolname, volname, p - volname);
3230 		poolname[p - volname] = '\0';
3231 	}
3232 
3233 	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
3234 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3235 		    "could not open pool '%s'"), poolname);
3236 		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
3237 		goto out;
3238 	}
3239 	config = zpool_get_config(zhp, NULL);
3240 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
3241 	    &nvroot) != 0) {
3242 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3243 		    "could not obtain vdev configuration for '%s'"), poolname);
3244 		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
3245 		goto out;
3246 	}
3247 
3248 	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3249 	    &top, &toplevels) == 0);
3250 	if (toplevels != 1) {
3251 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3252 		    "'%s' has multiple top level vdevs"), poolname);
3253 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
3254 		goto out;
3255 	}
3256 
3257 	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
3258 		goto out;
3259 	}
3260 	ret = 0;
3261 
3262 out:
3263 	if (zhp)
3264 		zpool_close(zhp);
3265 	libzfs_fini(hdl);
3266 	return (ret);
3267 }
3268