1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
25  * Copyright 2015 RackTop Systems.
26  * Copyright (c) 2016, Intel Corporation.
27  */
28 
29 #include <errno.h>
30 #include <libintl.h>
31 #include <libgen.h>
32 #include <stddef.h>
33 #include <stdlib.h>
34 #include <string.h>
35 #include <sys/stat.h>
36 #include <unistd.h>
37 #include <sys/vdev_impl.h>
38 #include <libzfs.h>
39 #include "libzfs_impl.h"
40 #include <libzutil.h>
41 #include <sys/arc_impl.h>
42 
43 /*
44  * Returns true if the named pool matches the given GUID.
45  */
46 static int
pool_active(libzfs_handle_t * hdl,const char * name,uint64_t guid,boolean_t * isactive)47 pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
48     boolean_t *isactive)
49 {
50 	zpool_handle_t *zhp;
51 
52 	if (zpool_open_silent(hdl, name, &zhp) != 0)
53 		return (-1);
54 
55 	if (zhp == NULL) {
56 		*isactive = B_FALSE;
57 		return (0);
58 	}
59 
60 	uint64_t theguid = fnvlist_lookup_uint64(zhp->zpool_config,
61 	    ZPOOL_CONFIG_POOL_GUID);
62 
63 	zpool_close(zhp);
64 
65 	*isactive = (theguid == guid);
66 	return (0);
67 }
68 
69 static nvlist_t *
refresh_config(libzfs_handle_t * hdl,nvlist_t * config)70 refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
71 {
72 	nvlist_t *nvl;
73 	zfs_cmd_t zc = {"\0"};
74 	int err, dstbuf_size;
75 
76 	zcmd_write_conf_nvlist(hdl, &zc, config);
77 
78 	dstbuf_size = MAX(CONFIG_BUF_MINSIZE, zc.zc_nvlist_conf_size * 32);
79 
80 	zcmd_alloc_dst_nvlist(hdl, &zc, dstbuf_size);
81 
82 	while ((err = zfs_ioctl(hdl, ZFS_IOC_POOL_TRYIMPORT,
83 	    &zc)) != 0 && errno == ENOMEM)
84 		zcmd_expand_dst_nvlist(hdl, &zc);
85 
86 	if (err) {
87 		zcmd_free_nvlists(&zc);
88 		return (NULL);
89 	}
90 
91 	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
92 		zcmd_free_nvlists(&zc);
93 		return (NULL);
94 	}
95 
96 	zcmd_free_nvlists(&zc);
97 	return (nvl);
98 }
99 
100 static nvlist_t *
refresh_config_libzfs(void * handle,nvlist_t * tryconfig)101 refresh_config_libzfs(void *handle, nvlist_t *tryconfig)
102 {
103 	return (refresh_config((libzfs_handle_t *)handle, tryconfig));
104 }
105 
106 static int
pool_active_libzfs(void * handle,const char * name,uint64_t guid,boolean_t * isactive)107 pool_active_libzfs(void *handle, const char *name, uint64_t guid,
108     boolean_t *isactive)
109 {
110 	return (pool_active((libzfs_handle_t *)handle, name, guid, isactive));
111 }
112 
/*
 * Callback table handed to libzutil's pool-import code so it can refresh
 * pool configs and test pool activity through libzfs.
 */
const pool_config_ops_t libzfs_config_ops = {
	.pco_refresh_config = refresh_config_libzfs,
	.pco_pool_active = pool_active_libzfs,
};
117 
118 /*
119  * Return the offset of the given label.
120  */
121 static uint64_t
label_offset(uint64_t size,int l)122 label_offset(uint64_t size, int l)
123 {
124 	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
125 	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
126 	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
127 }
128 
129 /*
130  * Given a file descriptor, clear (zero) the label information.  This function
131  * is used in the appliance stack as part of the ZFS sysevent module and
132  * to implement the "zpool labelclear" command.
133  */
134 int
zpool_clear_label(int fd)135 zpool_clear_label(int fd)
136 {
137 	struct stat64 statbuf;
138 	int l;
139 	vdev_label_t *label;
140 	uint64_t size;
141 	boolean_t labels_cleared = B_FALSE, clear_l2arc_header = B_FALSE,
142 	    header_cleared = B_FALSE;
143 
144 	if (fstat64_blk(fd, &statbuf) == -1)
145 		return (0);
146 
147 	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
148 
149 	if ((label = calloc(1, sizeof (vdev_label_t))) == NULL)
150 		return (-1);
151 
152 	for (l = 0; l < VDEV_LABELS; l++) {
153 		uint64_t state, guid, l2cache;
154 		nvlist_t *config;
155 
156 		if (pread64(fd, label, sizeof (vdev_label_t),
157 		    label_offset(size, l)) != sizeof (vdev_label_t)) {
158 			continue;
159 		}
160 
161 		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
162 		    sizeof (label->vl_vdev_phys.vp_nvlist), &config, 0) != 0) {
163 			continue;
164 		}
165 
166 		/* Skip labels which do not have a valid guid. */
167 		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
168 		    &guid) != 0 || guid == 0) {
169 			nvlist_free(config);
170 			continue;
171 		}
172 
173 		/* Skip labels which are not in a known valid state. */
174 		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
175 		    &state) != 0 || state > POOL_STATE_L2CACHE) {
176 			nvlist_free(config);
177 			continue;
178 		}
179 
180 		/* If the device is a cache device clear the header. */
181 		if (!clear_l2arc_header) {
182 			if (nvlist_lookup_uint64(config,
183 			    ZPOOL_CONFIG_POOL_STATE, &l2cache) == 0 &&
184 			    l2cache == POOL_STATE_L2CACHE) {
185 				clear_l2arc_header = B_TRUE;
186 			}
187 		}
188 
189 		nvlist_free(config);
190 
191 		/*
192 		 * A valid label was found, overwrite this label's nvlist
193 		 * and uberblocks with zeros on disk.  This is done to prevent
194 		 * system utilities, like blkid, from incorrectly detecting a
195 		 * partial label.  The leading pad space is left untouched.
196 		 */
197 		memset(label, 0, sizeof (vdev_label_t));
198 		size_t label_size = sizeof (vdev_label_t) - (2 * VDEV_PAD_SIZE);
199 
200 		if (pwrite64(fd, label, label_size, label_offset(size, l) +
201 		    (2 * VDEV_PAD_SIZE)) == label_size)
202 			labels_cleared = B_TRUE;
203 	}
204 
205 	if (clear_l2arc_header) {
206 		_Static_assert(sizeof (*label) >= sizeof (l2arc_dev_hdr_phys_t),
207 		    "label < l2arc_dev_hdr_phys_t");
208 		memset(label, 0, sizeof (l2arc_dev_hdr_phys_t));
209 		if (pwrite64(fd, label, sizeof (l2arc_dev_hdr_phys_t),
210 		    VDEV_LABEL_START_SIZE) == sizeof (l2arc_dev_hdr_phys_t))
211 			header_cleared = B_TRUE;
212 	}
213 
214 	free(label);
215 
216 	if (!labels_cleared || (clear_l2arc_header && !header_cleared))
217 		return (-1);
218 
219 	return (0);
220 }
221 
222 static boolean_t
find_guid(nvlist_t * nv,uint64_t guid)223 find_guid(nvlist_t *nv, uint64_t guid)
224 {
225 	nvlist_t **child;
226 	uint_t c, children;
227 
228 	if (fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID) == guid)
229 		return (B_TRUE);
230 
231 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
232 	    &child, &children) == 0) {
233 		for (c = 0; c < children; c++)
234 			if (find_guid(child[c], guid))
235 				return (B_TRUE);
236 	}
237 
238 	return (B_FALSE);
239 }
240 
/*
 * State passed to find_aux() when scanning all imported pools for an
 * auxiliary (spare or l2cache) vdev with a particular guid.
 */
typedef struct aux_cbdata {
	const char	*cb_type;	/* ZPOOL_CONFIG_SPARES or _L2CACHE */
	uint64_t	cb_guid;	/* vdev guid to search for */
	zpool_handle_t	*cb_zhp;	/* matching pool handle, if found */
} aux_cbdata_t;
246 
247 static int
find_aux(zpool_handle_t * zhp,void * data)248 find_aux(zpool_handle_t *zhp, void *data)
249 {
250 	aux_cbdata_t *cbp = data;
251 	nvlist_t **list;
252 	uint_t count;
253 
254 	nvlist_t *nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
255 	    ZPOOL_CONFIG_VDEV_TREE);
256 
257 	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
258 	    &list, &count) == 0) {
259 		for (uint_t i = 0; i < count; i++) {
260 			uint64_t guid = fnvlist_lookup_uint64(list[i],
261 			    ZPOOL_CONFIG_GUID);
262 			if (guid == cbp->cb_guid) {
263 				cbp->cb_zhp = zhp;
264 				return (1);
265 			}
266 		}
267 	}
268 
269 	zpool_close(zhp);
270 	return (0);
271 }
272 
/*
 * Determines if the pool is in use.  If so, it returns true and the state of
 * the pool as well as the name of the pool.  Name string is allocated and
 * must be freed by the caller.
 *
 * Returns 0 on success (with *inuse set) and -1 on error.  *namestr and
 * *state are written only when *inuse is set to B_TRUE.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	const char *name = NULL;
	boolean_t ret;
	uint64_t guid = 0, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config, NULL) != 0)
		return (-1);

	/* No label at all means the device is not in use. */
	if (config == NULL)
		return (0);

	stateval = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
	vdev_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID);

	/* Spare and l2cache labels carry no pool name or pool guid. */
	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
		guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot = fnvlist_lookup_nvlist(
				    pool_config, ZPOOL_CONFIG_VDEV_TREE);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that currently
			 * in use within another pool.  Since we return B_TRUE,
			 * libdiskmgt will continue to prevent generic consumers
			 * from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		/* find_aux() leaves the matching handle open in cb.cb_zhp. */
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		/* find_aux() leaves the matching handle open in cb.cb_zhp. */
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}


	if (ret) {
		/* Caller owns the returned name string. */
		*namestr = zfs_strdup(hdl, name);
		*state = (pool_state_t)stateval;
	}

	/* Close the handle left open by a successful find_aux() search. */
	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
438