1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <alloca.h>
30 #include <assert.h>
31 #include <ctype.h>
32 #include <errno.h>
33 #include <devid.h>
34 #include <dirent.h>
35 #include <fcntl.h>
36 #include <libintl.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <strings.h>
40 #include <unistd.h>
41 #include <sys/zfs_ioctl.h>
42 #include <sys/zio.h>
44 
45 #include "zfs_namecheck.h"
46 #include "zfs_prop.h"
47 #include "libzfs_impl.h"
48 
49 /*
 * Validate the given pool name, optionally setting an extended error message
 * via zfs_error_aux() on the provided libzfs handle.
52  */
53 static boolean_t
54 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
55 {
56 	namecheck_err_t why;
57 	char what;
58 	int ret;
59 
60 	ret = pool_namecheck(pool, &why, &what);
61 
62 	/*
63 	 * The rules for reserved pool names were extended at a later point.
64 	 * But we need to support users with existing pools that may now be
65 	 * invalid.  So we only check for this expanded set of names during a
66 	 * create (or import), and only in userland.
67 	 */
68 	if (ret == 0 && !isopen &&
69 	    (strncmp(pool, "mirror", 6) == 0 ||
70 	    strncmp(pool, "raidz", 5) == 0 ||
71 	    strncmp(pool, "spare", 5) == 0)) {
72 		zfs_error_aux(hdl,
73 		    dgettext(TEXT_DOMAIN, "name is reserved"));
74 		return (B_FALSE);
75 	}
76 
77 
78 	if (ret != 0) {
79 		if (hdl != NULL) {
80 			switch (why) {
81 			case NAME_ERR_TOOLONG:
82 				zfs_error_aux(hdl,
83 				    dgettext(TEXT_DOMAIN, "name is too long"));
84 				break;
85 
86 			case NAME_ERR_INVALCHAR:
87 				zfs_error_aux(hdl,
88 				    dgettext(TEXT_DOMAIN, "invalid character "
89 				    "'%c' in pool name"), what);
90 				break;
91 
92 			case NAME_ERR_NOLETTER:
93 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
94 				    "name must begin with a letter"));
95 				break;
96 
97 			case NAME_ERR_RESERVED:
98 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
99 				    "name is reserved"));
100 				break;
101 
102 			case NAME_ERR_DISKLIKE:
103 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
104 				    "pool name is reserved"));
105 				break;
106 
107 			case NAME_ERR_LEADING_SLASH:
108 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
109 				    "leading slash in name"));
110 				break;
111 
112 			case NAME_ERR_EMPTY_COMPONENT:
113 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
114 				    "empty component in name"));
115 				break;
116 
117 			case NAME_ERR_TRAILING_SLASH:
118 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
119 				    "trailing slash in name"));
120 				break;
121 
122 			case NAME_ERR_MULTIPLE_AT:
123 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
124 				    "multiple '@' delimiters in name"));
125 				break;
126 
127 			}
128 		}
129 		return (B_FALSE);
130 	}
131 
132 	return (B_TRUE);
133 }
134 
135 static int
136 zpool_get_all_props(zpool_handle_t *zhp)
137 {
138 	zfs_cmd_t zc = { 0 };
139 	libzfs_handle_t *hdl = zhp->zpool_hdl;
140 
141 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
142 
143 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
144 		return (-1);
145 
146 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
147 		if (errno == ENOMEM) {
148 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
149 				zcmd_free_nvlists(&zc);
150 				return (-1);
151 			}
152 		} else {
153 			zcmd_free_nvlists(&zc);
154 			return (-1);
155 		}
156 	}
157 
158 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
159 		zcmd_free_nvlists(&zc);
160 		return (-1);
161 	}
162 
163 	zcmd_free_nvlists(&zc);
164 
165 	return (0);
166 }
167 
168 /*
169  * Open a handle to the given pool, even if the pool is currently in the FAULTED
170  * state.
171  */
172 zpool_handle_t *
173 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
174 {
175 	zpool_handle_t *zhp;
176 	boolean_t missing;
177 
178 	/*
179 	 * Make sure the pool name is valid.
180 	 */
181 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
182 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
183 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
184 		    pool);
185 		return (NULL);
186 	}
187 
188 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
189 		return (NULL);
190 
191 	zhp->zpool_hdl = hdl;
192 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
193 
194 	if (zpool_refresh_stats(zhp, &missing) != 0) {
195 		zpool_close(zhp);
196 		return (NULL);
197 	}
198 
199 	if (missing) {
200 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
201 		    "no such pool"));
202 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
203 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
204 		    pool);
205 		zpool_close(zhp);
206 		return (NULL);
207 	}
208 
209 	return (zhp);
210 }
211 
212 /*
213  * Like the above, but silent on error.  Used when iterating over pools (because
214  * the configuration cache may be out of date).
215  */
216 int
217 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
218 {
219 	zpool_handle_t *zhp;
220 	boolean_t missing;
221 
222 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
223 		return (-1);
224 
225 	zhp->zpool_hdl = hdl;
226 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
227 
228 	if (zpool_refresh_stats(zhp, &missing) != 0) {
229 		zpool_close(zhp);
230 		return (-1);
231 	}
232 
233 	if (missing) {
234 		zpool_close(zhp);
235 		*ret = NULL;
236 		return (0);
237 	}
238 
239 	*ret = zhp;
240 	return (0);
241 }
242 
243 /*
244  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
245  * state.
246  */
247 zpool_handle_t *
248 zpool_open(libzfs_handle_t *hdl, const char *pool)
249 {
250 	zpool_handle_t *zhp;
251 
252 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
253 		return (NULL);
254 
255 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
256 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
257 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
258 		zpool_close(zhp);
259 		return (NULL);
260 	}
261 
262 	return (zhp);
263 }
264 
265 /*
266  * Close the handle.  Simply frees the memory associated with the handle.
267  */
268 void
269 zpool_close(zpool_handle_t *zhp)
270 {
271 	if (zhp->zpool_config)
272 		nvlist_free(zhp->zpool_config);
273 	if (zhp->zpool_old_config)
274 		nvlist_free(zhp->zpool_old_config);
275 	if (zhp->zpool_props)
276 		nvlist_free(zhp->zpool_props);
277 	free(zhp);
278 }
279 
280 /*
281  * Return the name of the pool.
282  */
283 const char *
284 zpool_get_name(zpool_handle_t *zhp)
285 {
286 	return (zhp->zpool_name);
287 }
288 
289 /*
290  * Return the GUID of the pool.
291  */
292 uint64_t
293 zpool_get_guid(zpool_handle_t *zhp)
294 {
295 	uint64_t guid;
296 
297 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
298 	    &guid) == 0);
299 	return (guid);
300 }
301 
302 /*
303  * Return the version of the pool.
304  */
305 uint64_t
306 zpool_get_version(zpool_handle_t *zhp)
307 {
308 	uint64_t version;
309 
310 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
311 	    &version) == 0);
312 
313 	return (version);
314 }
315 
316 /*
317  * Return the amount of space currently consumed by the pool.
318  */
319 uint64_t
320 zpool_get_space_used(zpool_handle_t *zhp)
321 {
322 	nvlist_t *nvroot;
323 	vdev_stat_t *vs;
324 	uint_t vsc;
325 
326 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
327 	    &nvroot) == 0);
328 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
329 	    (uint64_t **)&vs, &vsc) == 0);
330 
331 	return (vs->vs_alloc);
332 }
333 
334 /*
335  * Return the total space in the pool.
336  */
337 uint64_t
338 zpool_get_space_total(zpool_handle_t *zhp)
339 {
340 	nvlist_t *nvroot;
341 	vdev_stat_t *vs;
342 	uint_t vsc;
343 
344 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
345 	    &nvroot) == 0);
346 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
347 	    (uint64_t **)&vs, &vsc) == 0);
348 
349 	return (vs->vs_space);
350 }
351 
352 /*
353  * Return the alternate root for this pool, if any.
354  */
355 int
356 zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
357 {
358 	zfs_cmd_t zc = { 0 };
359 
360 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
361 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
362 	    zc.zc_value[0] == '\0')
363 		return (-1);
364 
365 	(void) strlcpy(buf, zc.zc_value, buflen);
366 
367 	return (0);
368 }
369 
370 /*
371  * Return the state of the pool (ACTIVE or UNAVAILABLE)
372  */
373 int
374 zpool_get_state(zpool_handle_t *zhp)
375 {
376 	return (zhp->zpool_state);
377 }
378 
379 /*
380  * Create the named pool, using the provided vdev list.  It is assumed
381  * that the consumer has already validated the contents of the nvlist, so we
382  * don't have to worry about error semantics.
383  */
384 int
385 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
386     const char *altroot)
387 {
388 	zfs_cmd_t zc = { 0 };
389 	char msg[1024];
390 
391 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
392 	    "cannot create '%s'"), pool);
393 
394 	if (!zpool_name_valid(hdl, B_FALSE, pool))
395 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
396 
397 	if (altroot != NULL && altroot[0] != '/')
398 		return (zfs_error_fmt(hdl, EZFS_BADPATH,
399 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));
400 
401 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
402 		return (-1);
403 
404 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
405 
406 	if (altroot != NULL)
407 		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
408 
409 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
410 		zcmd_free_nvlists(&zc);
411 
412 		switch (errno) {
413 		case EBUSY:
414 			/*
415 			 * This can happen if the user has specified the same
416 			 * device multiple times.  We can't reliably detect this
417 			 * until we try to add it and see we already have a
418 			 * label.
419 			 */
420 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
421 			    "one or more vdevs refer to the same device"));
422 			return (zfs_error(hdl, EZFS_BADDEV, msg));
423 
424 		case EOVERFLOW:
425 			/*
426 			 * This occurs when one of the devices is below
427 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
428 			 * device was the problem device since there's no
429 			 * reliable way to determine device size from userland.
430 			 */
431 			{
432 				char buf[64];
433 
434 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
435 
436 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
437 				    "one or more devices is less than the "
438 				    "minimum size (%s)"), buf);
439 			}
440 			return (zfs_error(hdl, EZFS_BADDEV, msg));
441 
442 		case ENOSPC:
443 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
444 			    "one or more devices is out of space"));
445 			return (zfs_error(hdl, EZFS_BADDEV, msg));
446 
447 		default:
448 			return (zpool_standard_error(hdl, errno, msg));
449 		}
450 	}
451 
452 	zcmd_free_nvlists(&zc);
453 
454 	/*
455 	 * If this is an alternate root pool, then we automatically set the
456 	 * mountpoint of the root dataset to be '/'.
457 	 */
458 	if (altroot != NULL) {
459 		zfs_handle_t *zhp;
460 
461 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
462 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
463 		    "/") == 0);
464 
465 		zfs_close(zhp);
466 	}
467 
468 	return (0);
469 }
470 
471 /*
472  * Destroy the given pool.  It is up to the caller to ensure that there are no
473  * datasets left in the pool.
474  */
475 int
476 zpool_destroy(zpool_handle_t *zhp)
477 {
478 	zfs_cmd_t zc = { 0 };
479 	zfs_handle_t *zfp = NULL;
480 	libzfs_handle_t *hdl = zhp->zpool_hdl;
481 	char msg[1024];
482 
483 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
484 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
485 	    ZFS_TYPE_FILESYSTEM)) == NULL)
486 		return (-1);
487 
488 	if (zpool_remove_zvol_links(zhp) != 0)
489 		return (-1);
490 
491 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
492 
493 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
494 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
495 		    "cannot destroy '%s'"), zhp->zpool_name);
496 
497 		if (errno == EROFS) {
498 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
499 			    "one or more devices is read only"));
500 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
501 		} else {
502 			(void) zpool_standard_error(hdl, errno, msg);
503 		}
504 
505 		if (zfp)
506 			zfs_close(zfp);
507 		return (-1);
508 	}
509 
510 	if (zfp) {
511 		remove_mountpoint(zfp);
512 		zfs_close(zfp);
513 	}
514 
515 	return (0);
516 }
517 
518 /*
519  * Add the given vdevs to the pool.  The caller must have already performed the
520  * necessary verification to ensure that the vdev specification is well-formed.
521  */
522 int
523 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
524 {
525 	zfs_cmd_t zc = { 0 };
526 	int ret;
527 	libzfs_handle_t *hdl = zhp->zpool_hdl;
528 	char msg[1024];
529 	nvlist_t **spares;
530 	uint_t nspares;
531 
532 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
533 	    "cannot add to '%s'"), zhp->zpool_name);
534 
535 	if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
536 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
537 	    &spares, &nspares) == 0) {
538 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
539 		    "upgraded to add hot spares"));
540 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
541 	}
542 
543 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
544 		return (-1);
545 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
546 
547 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
548 		switch (errno) {
549 		case EBUSY:
550 			/*
551 			 * This can happen if the user has specified the same
552 			 * device multiple times.  We can't reliably detect this
553 			 * until we try to add it and see we already have a
554 			 * label.
555 			 */
556 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
557 			    "one or more vdevs refer to the same device"));
558 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
559 			break;
560 
561 		case EOVERFLOW:
562 			/*
			 * This occurs when one of the devices is below
564 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
565 			 * device was the problem device since there's no
566 			 * reliable way to determine device size from userland.
567 			 */
568 			{
569 				char buf[64];
570 
571 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
572 
573 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
574 				    "device is less than the minimum "
575 				    "size (%s)"), buf);
576 			}
577 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
578 			break;
579 
580 		case ENOTSUP:
581 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
582 			    "pool must be upgraded to add raidz2 vdevs"));
583 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
584 			break;
585 
586 		case EDOM:
587 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
588 			    "root pool can not have concatenated devices"));
589 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
590 			break;
591 
592 		default:
593 			(void) zpool_standard_error(hdl, errno, msg);
594 		}
595 
596 		ret = -1;
597 	} else {
598 		ret = 0;
599 	}
600 
601 	zcmd_free_nvlists(&zc);
602 
603 	return (ret);
604 }
605 
606 /*
607  * Exports the pool from the system.  The caller must ensure that there are no
608  * mounted datasets in the pool.
609  */
610 int
611 zpool_export(zpool_handle_t *zhp)
612 {
613 	zfs_cmd_t zc = { 0 };
614 
615 	if (zpool_remove_zvol_links(zhp) != 0)
616 		return (-1);
617 
618 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
619 
620 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
621 		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
622 		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
623 		    zhp->zpool_name));
624 	return (0);
625 }
626 
627 /*
628  * Import the given pool using the known configuration.  The configuration
629  * should have come from zpool_find_import().  The 'newname' and 'altroot'
630  * parameters control whether the pool is imported with a different name or with
631  * an alternate root, respectively.
632  */
633 int
634 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
635     const char *altroot)
636 {
637 	zfs_cmd_t zc = { 0 };
638 	char *thename;
639 	char *origname;
640 	int ret;
641 
642 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
643 	    &origname) == 0);
644 
645 	if (newname != NULL) {
646 		if (!zpool_name_valid(hdl, B_FALSE, newname))
647 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
648 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
649 			    newname));
650 		thename = (char *)newname;
651 	} else {
652 		thename = origname;
653 	}
654 
655 	if (altroot != NULL && altroot[0] != '/')
656 		return (zfs_error_fmt(hdl, EZFS_BADPATH,
657 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
658 		    altroot));
659 
660 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
661 
662 	if (altroot != NULL)
663 		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
664 	else
665 		zc.zc_value[0] = '\0';
666 
667 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
668 	    &zc.zc_guid) == 0);
669 
670 	if (zcmd_write_src_nvlist(hdl, &zc, config, NULL) != 0)
671 		return (-1);
672 
673 	ret = 0;
674 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
675 		char desc[1024];
676 		if (newname == NULL)
677 			(void) snprintf(desc, sizeof (desc),
678 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
679 			    thename);
680 		else
681 			(void) snprintf(desc, sizeof (desc),
682 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
683 			    origname, thename);
684 
685 		switch (errno) {
686 		case ENOTSUP:
687 			/*
688 			 * Unsupported version.
689 			 */
690 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
691 			break;
692 
693 		case EINVAL:
694 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
695 			break;
696 
697 		default:
698 			(void) zpool_standard_error(hdl, errno, desc);
699 		}
700 
701 		ret = -1;
702 	} else {
703 		zpool_handle_t *zhp;
704 		/*
705 		 * This should never fail, but play it safe anyway.
706 		 */
707 		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
708 			ret = -1;
709 		} else if (zhp != NULL) {
710 			ret = zpool_create_zvol_links(zhp);
711 			zpool_close(zhp);
712 		}
713 	}
714 
715 	zcmd_free_nvlists(&zc);
716 	return (ret);
717 }
718 
719 /*
720  * Scrub the pool.
721  */
722 int
723 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
724 {
725 	zfs_cmd_t zc = { 0 };
726 	char msg[1024];
727 	libzfs_handle_t *hdl = zhp->zpool_hdl;
728 
729 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
730 	zc.zc_cookie = type;
731 
732 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
733 		return (0);
734 
735 	(void) snprintf(msg, sizeof (msg),
736 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
737 
738 	if (errno == EBUSY)
739 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
740 	else
741 		return (zpool_standard_error(hdl, errno, msg));
742 }
743 
744 /*
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare, and FALSE if it's an INUSE spare.
747  */
748 static nvlist_t *
749 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
750     boolean_t *avail_spare)
751 {
752 	uint_t c, children;
753 	nvlist_t **child;
754 	uint64_t theguid, present;
755 	char *path;
756 	uint64_t wholedisk = 0;
757 	nvlist_t *ret;
758 
759 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
760 
761 	if (search == NULL &&
762 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
763 		/*
764 		 * If the device has never been present since import, the only
765 		 * reliable way to match the vdev is by GUID.
766 		 */
767 		if (theguid == guid)
768 			return (nv);
769 	} else if (search != NULL &&
770 	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
771 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
772 		    &wholedisk);
773 		if (wholedisk) {
774 			/*
775 			 * For whole disks, the internal path has 's0', but the
776 			 * path passed in by the user doesn't.
777 			 */
778 			if (strlen(search) == strlen(path) - 2 &&
779 			    strncmp(search, path, strlen(search)) == 0)
780 				return (nv);
781 		} else if (strcmp(search, path) == 0) {
782 			return (nv);
783 		}
784 	}
785 
786 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
787 	    &child, &children) != 0)
788 		return (NULL);
789 
790 	for (c = 0; c < children; c++)
791 		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
792 		    avail_spare)) != NULL)
793 			return (ret);
794 
795 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
796 	    &child, &children) == 0) {
797 		for (c = 0; c < children; c++) {
798 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
799 			    avail_spare)) != NULL) {
800 				*avail_spare = B_TRUE;
801 				return (ret);
802 			}
803 		}
804 	}
805 
806 	return (NULL);
807 }
808 
809 nvlist_t *
810 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
811 {
812 	char buf[MAXPATHLEN];
813 	const char *search;
814 	char *end;
815 	nvlist_t *nvroot;
816 	uint64_t guid;
817 
818 	guid = strtoull(path, &end, 10);
819 	if (guid != 0 && *end == '\0') {
820 		search = NULL;
821 	} else if (path[0] != '/') {
822 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
823 		search = buf;
824 	} else {
825 		search = path;
826 	}
827 
828 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
829 	    &nvroot) == 0);
830 
831 	*avail_spare = B_FALSE;
832 	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
833 }
834 
835 /*
836  * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
837  */
838 static boolean_t
839 is_spare(zpool_handle_t *zhp, uint64_t guid)
840 {
841 	uint64_t spare_guid;
842 	nvlist_t *nvroot;
843 	nvlist_t **spares;
844 	uint_t nspares;
845 	int i;
846 
847 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
848 	    &nvroot) == 0);
849 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
850 	    &spares, &nspares) == 0) {
851 		for (i = 0; i < nspares; i++) {
852 			verify(nvlist_lookup_uint64(spares[i],
853 			    ZPOOL_CONFIG_GUID, &spare_guid) == 0);
854 			if (guid == spare_guid)
855 				return (B_TRUE);
856 		}
857 	}
858 
859 	return (B_FALSE);
860 }
861 
862 /*
863  * Bring the specified vdev online
864  */
865 int
866 zpool_vdev_online(zpool_handle_t *zhp, const char *path)
867 {
868 	zfs_cmd_t zc = { 0 };
869 	char msg[1024];
870 	nvlist_t *tgt;
871 	boolean_t avail_spare;
872 	libzfs_handle_t *hdl = zhp->zpool_hdl;
873 
874 	(void) snprintf(msg, sizeof (msg),
875 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
876 
877 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
878 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
879 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
880 
881 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
882 
883 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
884 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
885 
886 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
887 		return (0);
888 
889 	return (zpool_standard_error(hdl, errno, msg));
890 }
891 
892 /*
893  * Take the specified vdev offline
894  */
895 int
896 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
897 {
898 	zfs_cmd_t zc = { 0 };
899 	char msg[1024];
900 	nvlist_t *tgt;
901 	boolean_t avail_spare;
902 	libzfs_handle_t *hdl = zhp->zpool_hdl;
903 
904 	(void) snprintf(msg, sizeof (msg),
905 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
906 
907 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
908 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
909 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
910 
911 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
912 
913 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
914 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
915 
916 	zc.zc_cookie = istmp;
917 
918 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
919 		return (0);
920 
921 	switch (errno) {
	case EBUSY:
		/*
925 		 * There are no other replicas of this device.
926 		 */
927 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
928 
929 	default:
930 		return (zpool_standard_error(hdl, errno, msg));
931 	}
932 }
933 
934 /*
 * Returns TRUE if the given vdev (tgt) appears at position 'which' within a
 * 'spare' vdev, i.e. it is part of an active hot spare replacement.
937  */
938 static boolean_t
939 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
940 {
941 	nvlist_t **child;
942 	uint_t c, children;
943 	char *type;
944 
945 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
946 	    &children) == 0) {
947 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
948 		    &type) == 0);
949 
950 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
951 		    children == 2 && child[which] == tgt)
952 			return (B_TRUE);
953 
954 		for (c = 0; c < children; c++)
955 			if (is_replacing_spare(child[c], tgt, which))
956 				return (B_TRUE);
957 	}
958 
959 	return (B_FALSE);
960 }
961 
962 /*
963  * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
965  */
966 int
967 zpool_vdev_attach(zpool_handle_t *zhp,
968     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
969 {
970 	zfs_cmd_t zc = { 0 };
971 	char msg[1024];
972 	int ret;
973 	nvlist_t *tgt;
974 	boolean_t avail_spare;
975 	uint64_t val;
976 	char *path;
977 	nvlist_t **child;
978 	uint_t children;
979 	nvlist_t *config_root;
980 	libzfs_handle_t *hdl = zhp->zpool_hdl;
981 
982 	if (replacing)
983 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
984 		    "cannot replace %s with %s"), old_disk, new_disk);
985 	else
986 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
987 		    "cannot attach %s to %s"), new_disk, old_disk);
988 
989 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == NULL)
991 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
992 
993 	if (avail_spare)
994 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
995 
996 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
997 	zc.zc_cookie = replacing;
998 
999 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1000 	    &child, &children) != 0 || children != 1) {
1001 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1002 		    "new device must be a single disk"));
1003 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1004 	}
1005 
1006 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1007 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1008 
1009 	/*
1010 	 * If the target is a hot spare that has been swapped in, we can only
1011 	 * replace it with another hot spare.
1012 	 */
1013 	if (replacing &&
1014 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1015 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1016 	    (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
1017 	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
1018 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1019 		    "can only be replaced by another hot spare"));
1020 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1021 	}
1022 
1023 	/*
	 * If we are attempting to replace a device with a spare, the spare
	 * cannot be applied to an already spared device.
1026 	 */
1027 	if (replacing &&
1028 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1029 	    zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
1030 	    is_replacing_spare(config_root, tgt, 0)) {
1031 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1032 		    "device has already been replaced with a spare"));
1033 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1034 	}
1035 
1036 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
1037 		return (-1);
1038 
1039 	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);
1040 
1041 	zcmd_free_nvlists(&zc);
1042 
1043 	if (ret == 0)
1044 		return (0);
1045 
1046 	switch (errno) {
1047 	case ENOTSUP:
1048 		/*
1049 		 * Can't attach to or replace this type of vdev.
1050 		 */
1051 		if (replacing)
1052 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1053 			    "cannot replace a replacing device"));
1054 		else
1055 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1056 			    "can only attach to mirrors and top-level "
1057 			    "disks"));
1058 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1059 		break;
1060 
1061 	case EINVAL:
1062 		/*
1063 		 * The new device must be a single disk.
1064 		 */
1065 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1066 		    "new device must be a single disk"));
1067 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1068 		break;
1069 
1070 	case EBUSY:
1071 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1072 		    new_disk);
1073 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1074 		break;
1075 
1076 	case EOVERFLOW:
1077 		/*
1078 		 * The new device is too small.
1079 		 */
1080 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1081 		    "device is too small"));
1082 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1083 		break;
1084 
1085 	case EDOM:
1086 		/*
1087 		 * The new device has a different alignment requirement.
1088 		 */
1089 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1090 		    "devices have different sector alignment"));
1091 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1092 		break;
1093 
1094 	case ENAMETOOLONG:
1095 		/*
1096 		 * The resulting top-level vdev spec won't fit in the label.
1097 		 */
1098 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1099 		break;
1100 
1101 	default:
1102 		(void) zpool_standard_error(hdl, errno, msg);
1103 	}
1104 
1105 	return (-1);
1106 }
1107 
1108 /*
1109  * Detach the specified device.
1110  */
1111 int
1112 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1113 {
1114 	zfs_cmd_t zc = { 0 };
1115 	char msg[1024];
1116 	nvlist_t *tgt;
1117 	boolean_t avail_spare;
1118 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1119 
1120 	(void) snprintf(msg, sizeof (msg),
1121 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1122 
1123 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1125 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1126 
1127 	if (avail_spare)
1128 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1129 
1130 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1131 
1132 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1133 		return (0);
1134 
	switch (errno) {
	case ENOTSUP:
1138 		/*
1139 		 * Can't detach from this type of vdev.
1140 		 */
1141 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1142 		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1144 		break;
1145 
1146 	case EBUSY:
1147 		/*
1148 		 * There are no other replicas of this device.
1149 		 */
1150 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1151 		break;
1152 
1153 	default:
1154 		(void) zpool_standard_error(hdl, errno, msg);
1155 	}
1156 
1157 	return (-1);
1158 }
1159 
1160 /*
1161  * Remove the given device.  Currently, this is supported only for hot spares.
1162  */
1163 int
1164 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1165 {
1166 	zfs_cmd_t zc = { 0 };
1167 	char msg[1024];
1168 	nvlist_t *tgt;
1169 	boolean_t avail_spare;
1170 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1171 
1172 	(void) snprintf(msg, sizeof (msg),
1173 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1174 
1175 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1177 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1178 
1179 	if (!avail_spare) {
1180 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1181 		    "only inactive hot spares can be removed"));
1182 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1183 	}
1184 
1185 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1186 
1187 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1188 		return (0);
1189 
1190 	return (zpool_standard_error(hdl, errno, msg));
1191 }
1192 
1193 /*
1194  * Clear the errors for the pool, or the particular device if specified.
1195  */
1196 int
1197 zpool_clear(zpool_handle_t *zhp, const char *path)
1198 {
1199 	zfs_cmd_t zc = { 0 };
1200 	char msg[1024];
1201 	nvlist_t *tgt;
1202 	boolean_t avail_spare;
1203 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1204 
1205 	if (path)
1206 		(void) snprintf(msg, sizeof (msg),
1207 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1208 		    path);
1209 	else
1210 		(void) snprintf(msg, sizeof (msg),
1211 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1212 		    zhp->zpool_name);
1213 
1214 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1215 	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1217 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1218 
1219 		if (avail_spare)
1220 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1221 
1222 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1223 		    &zc.zc_guid) == 0);
1224 	}
1225 
1226 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1227 		return (0);
1228 
1229 	return (zpool_standard_error(hdl, errno, msg));
1230 }
1231 
1232 /*
1233  * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
1234  * hierarchy.
1235  */
1236 int
1237 zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
1238     void *data)
1239 {
1240 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1241 	char (*paths)[MAXPATHLEN];
1242 	size_t size = 4;
1243 	int curr, fd, base, ret = 0;
1244 	DIR *dirp;
1245 	struct dirent *dp;
1246 	struct stat st;
1247 
1248 	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
1249 		return (errno == ENOENT ? 0 : -1);
1250 
1251 	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
1252 		int err = errno;
1253 		(void) close(base);
1254 		return (err == ENOENT ? 0 : -1);
1255 	}
1256 
1257 	/*
1258 	 * Oddly this wasn't a directory -- ignore that failure since we
	 * know there are no links lower in the (non-existent) hierarchy.
1260 	 */
1261 	if (!S_ISDIR(st.st_mode)) {
1262 		(void) close(base);
1263 		return (0);
1264 	}
1265 
1266 	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
1267 		(void) close(base);
1268 		return (-1);
1269 	}
1270 
1271 	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
1272 	curr = 0;
1273 
1274 	while (curr >= 0) {
1275 		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
1276 			goto err;
1277 
1278 		if (S_ISDIR(st.st_mode)) {
1279 			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
1280 				goto err;
1281 
1282 			if ((dirp = fdopendir(fd)) == NULL) {
1283 				(void) close(fd);
1284 				goto err;
1285 			}
1286 
1287 			while ((dp = readdir(dirp)) != NULL) {
1288 				if (dp->d_name[0] == '.')
1289 					continue;
1290 
1291 				if (curr + 1 == size) {
1292 					paths = zfs_realloc(hdl, paths,
1293 					    size * sizeof (paths[0]),
1294 					    size * 2 * sizeof (paths[0]));
1295 					if (paths == NULL) {
1296 						(void) closedir(dirp);
1297 						(void) close(fd);
1298 						goto err;
1299 					}
1300 
1301 					size *= 2;
1302 				}
1303 
1304 				(void) strlcpy(paths[curr + 1], paths[curr],
1305 				    sizeof (paths[curr + 1]));
1306 				(void) strlcat(paths[curr], "/",
1307 				    sizeof (paths[curr]));
1308 				(void) strlcat(paths[curr], dp->d_name,
1309 				    sizeof (paths[curr]));
1310 				curr++;
1311 			}
1312 
1313 			(void) closedir(dirp);
1314 
1315 		} else {
1316 			if ((ret = cb(paths[curr], data)) != 0)
1317 				break;
1318 		}
1319 
1320 		curr--;
1321 	}
1322 
1323 	free(paths);
1324 	(void) close(base);
1325 
1326 	return (ret);
1327 
1328 err:
1329 	free(paths);
1330 	(void) close(base);
1331 	return (-1);
1332 }
1333 
1334 typedef struct zvol_cb {
1335 	zpool_handle_t *zcb_pool;
1336 	boolean_t zcb_create;
1337 } zvol_cb_t;
1338 
1339 /*ARGSUSED*/
1340 static int
1341 do_zvol_create(zfs_handle_t *zhp, void *data)
1342 {
1343 	int ret;
1344 
1345 	if (ZFS_IS_VOLUME(zhp))
1346 		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
1347 
1348 	ret = zfs_iter_children(zhp, do_zvol_create, NULL);
1349 
1350 	zfs_close(zhp);
1351 
1352 	return (ret);
1353 }
1354 
1355 /*
1356  * Iterate over all zvols in the pool and make any necessary minor nodes.
1357  */
1358 int
1359 zpool_create_zvol_links(zpool_handle_t *zhp)
1360 {
1361 	zfs_handle_t *zfp;
1362 	int ret;
1363 
1364 	/*
1365 	 * If the pool is unavailable, just return success.
1366 	 */
1367 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1368 	    zhp->zpool_name)) == NULL)
1369 		return (0);
1370 
1371 	ret = zfs_iter_children(zfp, do_zvol_create, NULL);
1372 
1373 	zfs_close(zfp);
1374 	return (ret);
1375 }
1376 
1377 static int
1378 do_zvol_remove(const char *dataset, void *data)
1379 {
1380 	zpool_handle_t *zhp = data;
1381 
1382 	return (zvol_remove_link(zhp->zpool_hdl, dataset));
1383 }
1384 
1385 /*
1386  * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
1387  * by examining the /dev links so that a corrupted pool doesn't impede this
1388  * operation.
1389  */
1390 int
1391 zpool_remove_zvol_links(zpool_handle_t *zhp)
1392 {
1393 	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
1394 }
1395 
1396 /*
1397  * Convert from a devid string to a path.
1398  */
1399 static char *
1400 devid_to_path(char *devid_str)
1401 {
1402 	ddi_devid_t devid;
1403 	char *minor;
1404 	char *path;
1405 	devid_nmlist_t *list = NULL;
1406 	int ret;
1407 
1408 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
1409 		return (NULL);
1410 
1411 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
1412 
1413 	devid_str_free(minor);
1414 	devid_free(devid);
1415 
1416 	if (ret != 0)
1417 		return (NULL);
1418 
1419 	if ((path = strdup(list[0].devname)) == NULL)
1420 		return (NULL);
1421 
1422 	devid_free_nmlist(list);
1423 
1424 	return (path);
1425 }
1426 
1427 /*
1428  * Convert from a path to a devid string.
1429  */
1430 static char *
1431 path_to_devid(const char *path)
1432 {
1433 	int fd;
1434 	ddi_devid_t devid;
1435 	char *minor, *ret;
1436 
1437 	if ((fd = open(path, O_RDONLY)) < 0)
1438 		return (NULL);
1439 
1440 	minor = NULL;
1441 	ret = NULL;
1442 	if (devid_get(fd, &devid) == 0) {
1443 		if (devid_get_minor_name(fd, &minor) == 0)
1444 			ret = devid_str_encode(devid, minor);
1445 		if (minor != NULL)
1446 			devid_str_free(minor);
1447 		devid_free(devid);
1448 	}
1449 	(void) close(fd);
1450 
1451 	return (ret);
1452 }
1453 
1454 /*
1455  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
1456  * ignore any failure here, since a common case is for an unprivileged user to
1457  * type 'zpool status', and we'll display the correct information anyway.
1458  */
1459 static void
1460 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
1461 {
1462 	zfs_cmd_t zc = { 0 };
1463 
1464 	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1465 	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
1466 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1467 	    &zc.zc_guid) == 0);
1468 
1469 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
1470 }
1471 
1472 /*
1473  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
1474  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
1475  * We also check if this is a whole disk, in which case we strip off the
1476  * trailing 's0' slice name.
1477  *
1478  * This routine is also responsible for identifying when disks have been
1479  * reconfigured in a new location.  The kernel will have opened the device by
1480  * devid, but the path will still refer to the old location.  To catch this, we
1481  * first do a path -> devid translation (which is fast for the common case).  If
1482  * the devid matches, we're done.  If not, we do a reverse devid -> path
1483  * translation and issue the appropriate ioctl() to update the path of the vdev.
1484  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
1485  * of these checks.
1486  */
1487 char *
1488 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
1489 {
1490 	char *path, *devid;
1491 	uint64_t value;
1492 	char buf[64];
1493 
1494 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1495 	    &value) == 0) {
1496 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1497 		    &value) == 0);
1498 		(void) snprintf(buf, sizeof (buf), "%llu",
1499 		    (u_longlong_t)value);
1500 		path = buf;
1501 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1502 
1503 		if (zhp != NULL &&
1504 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
1505 			/*
1506 			 * Determine if the current path is correct.
1507 			 */
1508 			char *newdevid = path_to_devid(path);
1509 
1510 			if (newdevid == NULL ||
1511 			    strcmp(devid, newdevid) != 0) {
1512 				char *newpath;
1513 
1514 				if ((newpath = devid_to_path(devid)) != NULL) {
1515 					/*
1516 					 * Update the path appropriately.
1517 					 */
1518 					set_path(zhp, nv, newpath);
1519 					if (nvlist_add_string(nv,
1520 					    ZPOOL_CONFIG_PATH, newpath) == 0)
1521 						verify(nvlist_lookup_string(nv,
1522 						    ZPOOL_CONFIG_PATH,
1523 						    &path) == 0);
1524 					free(newpath);
1525 				}
1526 			}
1527 
1528 			if (newdevid)
1529 				devid_str_free(newdevid);
1530 		}
1531 
1532 		if (strncmp(path, "/dev/dsk/", 9) == 0)
1533 			path += 9;
1534 
1535 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1536 		    &value) == 0 && value) {
1537 			char *tmp = zfs_strdup(hdl, path);
1538 			if (tmp == NULL)
1539 				return (NULL);
1540 			tmp[strlen(path) - 2] = '\0';
1541 			return (tmp);
1542 		}
1543 	} else {
1544 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
1545 
1546 		/*
1547 		 * If it's a raidz device, we need to stick in the parity level.
1548 		 */
1549 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
1550 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
1551 			    &value) == 0);
1552 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
1553 			    (u_longlong_t)value);
1554 			path = buf;
1555 		}
1556 	}
1557 
1558 	return (zfs_strdup(hdl, path));
1559 }
1560 
1561 static int
1562 zbookmark_compare(const void *a, const void *b)
1563 {
1564 	return (memcmp(a, b, sizeof (zbookmark_t)));
1565 }
1566 
1567 /*
1568  * Retrieve the persistent error log, uniquify the members, and return to the
1569  * caller.
1570  */
1571 int
1572 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
1573 {
1574 	zfs_cmd_t zc = { 0 };
1575 	uint64_t count;
1576 	zbookmark_t *zb = NULL;
1577 	int i;
1578 
1579 	/*
1580 	 * Retrieve the raw error list from the kernel.  If the number of errors
1581 	 * has increased, allocate more space and continue until we get the
1582 	 * entire list.
1583 	 */
1584 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
1585 	    &count) == 0);
1586 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
1587 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
1588 		return (-1);
1589 	zc.zc_nvlist_dst_size = count;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1591 	for (;;) {
1592 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
1593 		    &zc) != 0) {
1594 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
1595 			if (errno == ENOMEM) {
1596 				count = zc.zc_nvlist_dst_size;
1597 				if ((zc.zc_nvlist_dst = (uintptr_t)
1598 				    zfs_alloc(zhp->zpool_hdl, count *
1599 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
1600 					return (-1);
1601 			} else {
1602 				return (-1);
1603 			}
1604 		} else {
1605 			break;
1606 		}
1607 	}
1608 
1609 	/*
1610 	 * Sort the resulting bookmarks.  This is a little confusing due to the
1611 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
1615 	 */
1616 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
1617 	    zc.zc_nvlist_dst_size;
1618 	count -= zc.zc_nvlist_dst_size;
1619 
1620 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
1621 
1622 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
1623 
1624 	/*
1625 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
1626 	 */
1627 	for (i = 0; i < count; i++) {
1628 		nvlist_t *nv;
1629 
1630 		/* ignoring zb_blkid and zb_level for now */
1631 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
1632 		    zb[i-1].zb_object == zb[i].zb_object)
1633 			continue;
1634 
1635 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
1636 			goto nomem;
1637 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
1638 		    zb[i].zb_objset) != 0) {
1639 			nvlist_free(nv);
1640 			goto nomem;
1641 		}
1642 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
1643 		    zb[i].zb_object) != 0) {
1644 			nvlist_free(nv);
1645 			goto nomem;
1646 		}
1647 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
1648 			nvlist_free(nv);
1649 			goto nomem;
1650 		}
1651 		nvlist_free(nv);
1652 	}
1653 
1654 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
1655 	return (0);
1656 
1657 nomem:
1658 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
1659 	return (no_memory(zhp->zpool_hdl));
1660 }
1661 
1662 /*
1663  * Upgrade a ZFS pool to the latest on-disk version.
1664  */
1665 int
1666 zpool_upgrade(zpool_handle_t *zhp)
1667 {
1668 	zfs_cmd_t zc = { 0 };
1669 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1670 
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1672 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
1673 		return (zpool_standard_error_fmt(hdl, errno,
1674 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
1675 		    zhp->zpool_name));
1676 
1677 	return (0);
1678 }
1679 
1680 /*
1681  * Log command history.
1682  *
1683  * 'pool' is B_TRUE if we are logging a command for 'zpool'; B_FALSE
1684  * otherwise ('zfs').  'pool_create' is B_TRUE if we are logging the creation
 * of the pool; B_FALSE otherwise.  'path' is the pathname containing the
1686  * poolname.  'argc' and 'argv' are used to construct the command string.
1687  */
1688 void
1689 zpool_log_history(libzfs_handle_t *hdl, int argc, char **argv, const char *path,
1690 	boolean_t pool, boolean_t pool_create)
1691 {
1692 	char cmd_buf[HIS_MAX_RECORD_LEN];
1693 	char *dspath;
1694 	zfs_cmd_t zc = { 0 };
1695 	int i;
1696 
1697 	/* construct the command string */
1698 	(void) strcpy(cmd_buf, pool ? "zpool" : "zfs");
1699 	for (i = 0; i < argc; i++) {
		if (strlen(cmd_buf) + 1 + strlen(argv[i]) >=
		    HIS_MAX_RECORD_LEN)
			break;
1702 		(void) strcat(cmd_buf, " ");
1703 		(void) strcat(cmd_buf, argv[i]);
1704 	}
1705 
1706 	/* figure out the poolname */
1707 	dspath = strpbrk(path, "/@");
1708 	if (dspath == NULL) {
		(void) strlcpy(zc.zc_name, path, sizeof (zc.zc_name));
1710 	} else {
1711 		(void) strncpy(zc.zc_name, path, dspath - path);
1712 		zc.zc_name[dspath-path] = '\0';
1713 	}
1714 
1715 	zc.zc_history = (uint64_t)(uintptr_t)cmd_buf;
1716 	zc.zc_history_len = strlen(cmd_buf);
1717 
1718 	/* overloading zc_history_offset */
1719 	zc.zc_history_offset = pool_create;
1720 
1721 	(void) ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_LOG_HISTORY, &zc);
1722 }
1723 
1724 /*
1725  * Perform ioctl to get some command history of a pool.
1726  *
1727  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
1728  * logical offset of the history buffer to start reading from.
1729  *
1730  * Upon return, 'off' is the next logical offset to read from and
1731  * 'len' is the actual amount of bytes read into 'buf'.
1732  */
1733 static int
1734 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
1735 {
1736 	zfs_cmd_t zc = { 0 };
1737 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1738 
1739 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1740 
1741 	zc.zc_history = (uint64_t)(uintptr_t)buf;
1742 	zc.zc_history_len = *len;
1743 	zc.zc_history_offset = *off;
1744 
1745 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
1746 		switch (errno) {
1747 		case EPERM:
1748 			return (zfs_error_fmt(hdl, EZFS_PERM,
1749 			    dgettext(TEXT_DOMAIN,
1750 			    "cannot show history for pool '%s'"),
1751 			    zhp->zpool_name));
1752 		case ENOENT:
1753 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
1754 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
1755 			    "'%s'"), zhp->zpool_name));
1756 		case ENOTSUP:
1757 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
1758 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
1759 			    "'%s', pool must be upgraded"), zhp->zpool_name));
1760 		default:
1761 			return (zpool_standard_error_fmt(hdl, errno,
1762 			    dgettext(TEXT_DOMAIN,
1763 			    "cannot get history for '%s'"), zhp->zpool_name));
1764 		}
1765 	}
1766 
1767 	*len = zc.zc_history_len;
1768 	*off = zc.zc_history_offset;
1769 
1770 	return (0);
1771 }
1772 
1773 /*
1774  * Process the buffer of nvlists, unpacking and storing each nvlist record
1775  * into 'records'.  'leftover' is set to the number of bytes that weren't
1776  * processed as there wasn't a complete record.
1777  */
1778 static int
1779 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
1780     nvlist_t ***records, uint_t *numrecords)
1781 {
1782 	uint64_t reclen;
1783 	nvlist_t *nv;
1784 	int i;
1785 
1786 	while (bytes_read > sizeof (reclen)) {
1787 
1788 		/* get length of packed record (stored as little endian) */
1789 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
1790 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
1791 
1792 		if (bytes_read < sizeof (reclen) + reclen)
1793 			break;
1794 
1795 		/* unpack record */
1796 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
1797 			return (ENOMEM);
1798 		bytes_read -= sizeof (reclen) + reclen;
1799 		buf += sizeof (reclen) + reclen;
1800 
1801 		/* add record to nvlist array */
1802 		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			nvlist_t **tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
			if (tmp == NULL) {
				nvlist_free(nv);
				(*numrecords)--;
				return (ENOMEM);
			}
			*records = tmp;
		}
1807 		(*records)[*numrecords - 1] = nv;
1808 	}
1809 
1810 	*leftover = bytes_read;
1811 	return (0);
1812 }
1813 
1814 #define	HIS_BUF_LEN	(128*1024)
1815 
1816 /*
1817  * Retrieve the command history of a pool.
1818  */
1819 int
1820 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
1821 {
1822 	char buf[HIS_BUF_LEN];
1823 	uint64_t off = 0;
1824 	nvlist_t **records = NULL;
1825 	uint_t numrecords = 0;
1826 	int err, i;
1827 
1828 	do {
1829 		uint64_t bytes_read = sizeof (buf);
1830 		uint64_t leftover;
1831 
1832 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
1833 			break;
1834 
1835 		/* if nothing else was read in, we're at EOF, just return */
1836 		if (!bytes_read)
1837 			break;
1838 
1839 		if ((err = zpool_history_unpack(buf, bytes_read,
1840 		    &leftover, &records, &numrecords)) != 0)
1841 			break;
1842 		off -= leftover;
1843 
1844 		/* CONSTCOND */
1845 	} while (1);
1846 
1847 	if (!err) {
1848 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
1849 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
1850 		    records, numrecords) == 0);
1851 	}
1852 	for (i = 0; i < numrecords; i++)
1853 		nvlist_free(records[i]);
1854 	free(records);
1855 
1856 	return (err);
1857 }
1858 
1859 void
1860 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
1861     char *pathname, size_t len)
1862 {
1863 	zfs_cmd_t zc = { 0 };
1864 	boolean_t mounted = B_FALSE;
1865 	char *mntpnt = NULL;
1866 	char dsname[MAXNAMELEN];
1867 
1868 	if (dsobj == 0) {
1869 		/* special case for the MOS */
1870 		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
1871 		return;
1872 	}
1873 
1874 	/* get the dataset's name */
1875 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1876 	zc.zc_obj = dsobj;
1877 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
1878 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
1879 		/* just write out a path of two object numbers */
1880 		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
1881 		    dsobj, obj);
1882 		return;
1883 	}
1884 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
1885 
1886 	/* find out if the dataset is mounted */
1887 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
1888 
1889 	/* get the corrupted object's path */
1890 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
1891 	zc.zc_obj = obj;
1892 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
1893 	    &zc) == 0) {
1894 		if (mounted) {
1895 			(void) snprintf(pathname, len, "%s%s", mntpnt,
1896 			    zc.zc_value);
1897 		} else {
1898 			(void) snprintf(pathname, len, "%s:%s",
1899 			    dsname, zc.zc_value);
1900 		}
1901 	} else {
1902 		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
1903 	}
1904 	free(mntpnt);
1905 }
1906 
1907 int
1908 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
1909 {
1910 	zfs_cmd_t zc = { 0 };
1911 	int ret = -1;
1912 	char errbuf[1024];
1913 	nvlist_t *nvl = NULL;
1914 	nvlist_t *realprops;
1915 
1916 	(void) snprintf(errbuf, sizeof (errbuf),
1917 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
1918 	    zhp->zpool_name);
1919 
1920 	if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
1921 		zfs_error_aux(zhp->zpool_hdl,
1922 		    dgettext(TEXT_DOMAIN, "pool must be "
1923 		    "upgraded to support pool properties"));
1924 		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, errbuf));
1925 	}
1926 
1927 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
1928 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
1929 
1930 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
1931 	    nvlist_add_string(nvl, propname, propval) != 0) {
1932 		return (no_memory(zhp->zpool_hdl));
1933 	}
1934 
1935 	if ((realprops = zfs_validate_properties(zhp->zpool_hdl, ZFS_TYPE_POOL,
1936 	    zhp->zpool_name, nvl, 0, NULL, errbuf)) == NULL) {
1937 		nvlist_free(nvl);
1938 		return (-1);
1939 	}
1940 
1941 	nvlist_free(nvl);
1942 	nvl = realprops;
1943 
1944 	/*
1945 	 * Execute the corresponding ioctl() to set this property.
1946 	 */
1947 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1948 
1949 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl, NULL) != 0)
1950 		return (-1);
1951 
1952 	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SET_PROPS, &zc);
1953 	zcmd_free_nvlists(&zc);
1954 
1955 	if (ret)
1956 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
1957 
1958 	return (ret);
1959 }
1960 
1961 int
1962 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *propbuf,
1963     size_t proplen, zfs_source_t *srctype)
1964 {
1965 	uint64_t value;
1966 	char msg[1024], *strvalue;
1967 	nvlist_t *nvp;
1968 	zfs_source_t src = ZFS_SRC_NONE;
1969 
1970 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1971 	    "cannot get property '%s'"), zpool_prop_to_name(prop));
1972 
1973 	if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
1974 		zfs_error_aux(zhp->zpool_hdl,
1975 		    dgettext(TEXT_DOMAIN, "pool must be "
1976 		    "upgraded to support pool properties"));
1977 		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, msg));
1978 	}
1979 
1980 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
1981 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, msg));
1982 
1983 	/*
1984 	 * the "name" property is special cased
1985 	 */
1986 	if (!zfs_prop_valid_for_type(prop, ZFS_TYPE_POOL) &&
1987 	    prop != ZFS_PROP_NAME)
1988 		return (-1);
1989 
1990 	switch (prop) {
1991 	case ZFS_PROP_NAME:
1992 		(void) strlcpy(propbuf, zhp->zpool_name, proplen);
1993 		break;
1994 
1995 	case ZFS_PROP_BOOTFS:
1996 		if (nvlist_lookup_nvlist(zhp->zpool_props,
1997 		    zpool_prop_to_name(prop), &nvp) != 0) {
1998 			strvalue = (char *)zfs_prop_default_string(prop);
1999 			if (strvalue == NULL)
2000 				strvalue = "-";
2001 			src = ZFS_SRC_DEFAULT;
2002 		} else {
2003 			VERIFY(nvlist_lookup_uint64(nvp,
2004 			    ZFS_PROP_SOURCE, &value) == 0);
2005 			src = value;
2006 			VERIFY(nvlist_lookup_string(nvp, ZFS_PROP_VALUE,
2007 			    &strvalue) == 0);
2008 			if (strlen(strvalue) >= proplen)
2009 				return (-1);
2010 		}
	(void) strlcpy(propbuf, strvalue, proplen);
2012 		break;
2013 
2014 	default:
2015 		return (-1);
2016 	}
2017 	if (srctype)
2018 		*srctype = src;
2019 	return (0);
2020 }
2021 
2022 int
2023 zpool_get_proplist(libzfs_handle_t *hdl, char *fields, zpool_proplist_t **listp)
2024 {
2025 	return (zfs_get_proplist_common(hdl, fields, listp, ZFS_TYPE_POOL));
2026 }
2027 
2028 
2029 int
2030 zpool_expand_proplist(zpool_handle_t *zhp, zpool_proplist_t **plp)
2031 {
2032 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2033 	zpool_proplist_t *entry;
2034 	char buf[ZFS_MAXPROPLEN];
2035 
2036 	if (zfs_expand_proplist_common(hdl, plp, ZFS_TYPE_POOL) != 0)
2037 		return (-1);
2038 
2039 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
2040 
2041 		if (entry->pl_fixed)
2042 			continue;
2043 
2044 		if (entry->pl_prop != ZFS_PROP_INVAL &&
2045 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
2046 		    NULL) == 0) {
2047 			if (strlen(buf) > entry->pl_width)
2048 				entry->pl_width = strlen(buf);
2049 		}
2050 	}
2051 
2052 	return (0);
2053 }
2054