/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 */

/*
 * This file contains the functions which analyze the status of a pool.  This
 * includes both the status of an active pool and the status of exported
 * pools.  Returns one of the ZPOOL_STATUS_* defines describing the status of
 * the pool.  This status is independent (to a certain degree) from the state
 * of the pool.  A pool's state describes only whether or not it is capable of
 * providing the necessary fault tolerance for data.  The status describes the
 * overall status of devices.  A pool that is online can still have a device
 * that is experiencing errors.
 *
 * Only a subset of the possible faults can be detected using 'zpool status',
 * and not all possible errors correspond to an FMA message ID.  The
 * explanation is left up to the caller, depending on whether it is a live
 * pool or an import.
 */
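
/*
 * Illustrative usage sketch (not part of the original file; the handle
 * name "zhp" and the error handling are assumptions):
 *
 *	char *msgid;
 *	zpool_errata_t errata;
 *	zpool_status_t status = zpool_get_status(zhp, &msgid, &errata);
 *
 *	if (status != ZPOOL_STATUS_OK && msgid != NULL)
 *		(void) printf("see the FMA knowledge article %s\n", msgid);
 */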

#include <libzfs.h>
#include <libzutil.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/systeminfo.h>
#include "libzfs_impl.h"
#include "zfeature_common.h"

/*
 * Message ID table.  This must be kept in sync with the ZPOOL_STATUS_* defines
 * in include/libzfs.h.  Note that there are some status results which go past
 * the end of this table, and hence have no associated message ID.
 */
static char *zfs_msgid_table[] = {
	"ZFS-8000-14", /* ZPOOL_STATUS_CORRUPT_CACHE */
	"ZFS-8000-2Q", /* ZPOOL_STATUS_MISSING_DEV_R */
	"ZFS-8000-3C", /* ZPOOL_STATUS_MISSING_DEV_NR */
	"ZFS-8000-4J", /* ZPOOL_STATUS_CORRUPT_LABEL_R */
	"ZFS-8000-5E", /* ZPOOL_STATUS_CORRUPT_LABEL_NR */
	"ZFS-8000-6X", /* ZPOOL_STATUS_BAD_GUID_SUM */
	"ZFS-8000-72", /* ZPOOL_STATUS_CORRUPT_POOL */
	"ZFS-8000-8A", /* ZPOOL_STATUS_CORRUPT_DATA */
	"ZFS-8000-9P", /* ZPOOL_STATUS_FAILING_DEV */
	"ZFS-8000-A5", /* ZPOOL_STATUS_VERSION_NEWER */
	"ZFS-8000-EY", /* ZPOOL_STATUS_HOSTID_MISMATCH */
	"ZFS-8000-EY", /* ZPOOL_STATUS_HOSTID_ACTIVE */
	"ZFS-8000-EY", /* ZPOOL_STATUS_HOSTID_REQUIRED */
	"ZFS-8000-HC", /* ZPOOL_STATUS_IO_FAILURE_WAIT */
	"ZFS-8000-JQ", /* ZPOOL_STATUS_IO_FAILURE_CONTINUE */
	"ZFS-8000-MM", /* ZPOOL_STATUS_IO_FAILURE_MMP */
	"ZFS-8000-K4", /* ZPOOL_STATUS_BAD_LOG */
	"ZFS-8000-ER", /* ZPOOL_STATUS_ERRATA */
	/*
	 * The following results have no message ID.
	 *	ZPOOL_STATUS_UNSUP_FEAT_READ
	 *	ZPOOL_STATUS_UNSUP_FEAT_WRITE
	 *	ZPOOL_STATUS_FAULTED_DEV_R
	 *	ZPOOL_STATUS_FAULTED_DEV_NR
	 *	ZPOOL_STATUS_VERSION_OLDER
	 *	ZPOOL_STATUS_FEAT_DISABLED
	 *	ZPOOL_STATUS_RESILVERING
	 *	ZPOOL_STATUS_OFFLINE_DEV
	 *	ZPOOL_STATUS_REMOVED_DEV
	 *	ZPOOL_STATUS_REBUILDING
	 *	ZPOOL_STATUS_REBUILD_SCRUB
	 *	ZPOOL_STATUS_NON_NATIVE_ASHIFT
	 *	ZPOOL_STATUS_OK
	 */
};

#define	NMSGID	(sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
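
/*
 * Intended lookup pattern (a sketch; mirrored by the exported functions
 * at the bottom of this file): indexes at or past NMSGID have no
 * knowledge article.
 *
 *	const char *msgid = (status < NMSGID) ?
 *	    zfs_msgid_table[status] : NULL;
 */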

/* ARGSUSED */
static int
vdev_missing(vdev_stat_t *vs, uint_t vsc)
{
	return (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_OPEN_FAILED);
}

/* ARGSUSED */
static int
vdev_faulted(vdev_stat_t *vs, uint_t vsc)
{
	return (vs->vs_state == VDEV_STATE_FAULTED);
}

/* ARGSUSED */
static int
vdev_errors(vdev_stat_t *vs, uint_t vsc)
{
	return (vs->vs_state == VDEV_STATE_DEGRADED ||
	    vs->vs_read_errors != 0 || vs->vs_write_errors != 0 ||
	    vs->vs_checksum_errors != 0);
}

/* ARGSUSED */
static int
vdev_broken(vdev_stat_t *vs, uint_t vsc)
{
	return (vs->vs_state == VDEV_STATE_CANT_OPEN);
}

/* ARGSUSED */
static int
vdev_offlined(vdev_stat_t *vs, uint_t vsc)
{
	return (vs->vs_state == VDEV_STATE_OFFLINE);
}

/* ARGSUSED */
static int
vdev_removed(vdev_stat_t *vs, uint_t vsc)
{
	return (vs->vs_state == VDEV_STATE_REMOVED);
}

static int
vdev_non_native_ashift(vdev_stat_t *vs, uint_t vsc)
{
	if (getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") != NULL)
		return (0);

	return (VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
	    vs->vs_configured_ashift < vs->vs_physical_ashift);
}
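
/*
 * Worked example for the check above (values assumed for illustration):
 * ashift is a log2 sector size, so a drive reporting 4096-byte physical
 * sectors has vs_physical_ashift == 12 (2^12 == 4096).  If the vdev was
 * created with 512-byte logical sectors (vs_configured_ashift == 9),
 * then 9 < 12 and the predicate fires: the pool works, but writes are
 * smaller than the native sector and may suffer read-modify-write
 * amplification.
 */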

/*
 * Detect whether any leaf devices have seen errors or could not be opened.
 */
static boolean_t
find_vdev_problem(nvlist_t *vdev, int (*func)(vdev_stat_t *, uint_t),
    boolean_t ignore_replacing)
{
	nvlist_t **child;
	vdev_stat_t *vs;
	uint_t c, vsc, children;

	/*
	 * Ignore problems within a 'replacing' vdev, since we're presumably in
	 * the process of repairing any such errors, and don't want to call them
	 * out again.  We'll pick up the fact that a resilver is happening
	 * later.
	 */
	if (ignore_replacing == B_TRUE) {
		char *type;

		verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE,
		    &type) == 0);
		if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
			return (B_FALSE);
	}

	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func, ignore_replacing))
				return (B_TRUE);
	} else {
		verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) == 0);

		if (func(vs, vsc) != 0)
			return (B_TRUE);
	}

	/*
	 * Check any L2 cache devs
	 */
	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_L2CACHE, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func, ignore_replacing))
				return (B_TRUE);
	}

	return (B_FALSE);
}
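
/*
 * Usage sketch (mirroring check_status() below): callers pair this
 * walker with one of the leaf predicates above, e.g.
 *
 *	if (find_vdev_problem(nvroot, vdev_missing, B_TRUE))
 *		return (ZPOOL_STATUS_MISSING_DEV_NR);
 *
 * ignore_replacing is B_TRUE for fault checks, since a 'replacing' vdev
 * is already being repaired, and B_FALSE for the ashift check, which
 * stays relevant even while a replace is in progress.
 */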

/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find.  In order of importance, we do the
 * following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any faulted or missing devices in a non-replicated config
 *	- Check for any data errors
 *	- Check for any faulted or missing devices in a replicated config
 *	- Look for any devices showing errors
 *	- Check for any resilvering or rebuilding devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
 */
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport, zpool_errata_t *erratap)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	pool_scan_stat_t *ps = NULL;
	uint_t vsc, psc;
	uint64_t nerr;
	uint64_t version;
	uint64_t stateval;
	uint64_t suspended;
	uint64_t hostid = 0;
	uint64_t errata = 0;
	unsigned long system_hostid = get_system_hostid();

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);

	/*
	 * Currently resilvering a vdev
	 */
	(void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &psc);
	if (ps != NULL && ps->pss_func == POOL_SCAN_RESILVER &&
	    ps->pss_state == DSS_SCANNING)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * Currently rebuilding a vdev, check top-level vdevs.
	 */
	vdev_rebuild_stat_t *vrs = NULL;
	nvlist_t **child;
	uint_t c, i, children;
	uint64_t rebuild_end_time = 0;
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((nvlist_lookup_uint64_array(child[c],
			    ZPOOL_CONFIG_REBUILD_STATS,
			    (uint64_t **)&vrs, &i) == 0) && (vrs != NULL)) {
				uint64_t state = vrs->vrs_state;

				if (state == VDEV_REBUILD_ACTIVE) {
					return (ZPOOL_STATUS_REBUILDING);
				} else if (state == VDEV_REBUILD_COMPLETE &&
				    vrs->vrs_end_time > rebuild_end_time) {
					rebuild_end_time = vrs->vrs_end_time;
				}
			}
		}

		/*
		 * If we can determine when the last scrub was run, and it
		 * was before the last rebuild completed, then recommend
		 * that the pool be scrubbed to verify all checksums.  When
		 * ps is NULL we can infer the pool has never been scrubbed.
		 */
		if (rebuild_end_time > 0) {
			if (ps != NULL) {
				if ((ps->pss_state == DSS_FINISHED &&
				    ps->pss_func == POOL_SCAN_SCRUB &&
				    rebuild_end_time > ps->pss_end_time) ||
				    ps->pss_state == DSS_NONE)
					return (ZPOOL_STATUS_REBUILD_SCRUB);
			} else {
				return (ZPOOL_STATUS_REBUILD_SCRUB);
			}
		}
	}

	/*
	 * The multihost property is set and the pool may be active.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_ACTIVE) {
		mmp_state_t mmp_state;
		nvlist_t *nvinfo;

		nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
		mmp_state = fnvlist_lookup_uint64(nvinfo,
		    ZPOOL_CONFIG_MMP_STATE);

		if (mmp_state == MMP_STATE_ACTIVE)
			return (ZPOOL_STATUS_HOSTID_ACTIVE);
		else if (mmp_state == MMP_STATE_NO_HOSTID)
			return (ZPOOL_STATUS_HOSTID_REQUIRED);
		else
			return (ZPOOL_STATUS_HOSTID_MISMATCH);
	}

	/*
	 * Pool last accessed by another system.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
	if (hostid != 0 && (unsigned long)hostid != system_hostid &&
	    stateval == POOL_STATE_ACTIVE)
		return (ZPOOL_STATUS_HOSTID_MISMATCH);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Unsupported feature(s).
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_UNSUP_FEAT) {
		nvlist_t *nvinfo;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
		    &nvinfo) == 0);
		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY))
			return (ZPOOL_STATUS_UNSUP_FEAT_WRITE);
		return (ZPOOL_STATUS_UNSUP_FEAT_READ);
	}

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Check whether the pool has been suspended.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
	    &suspended) == 0) {
		uint64_t reason;

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED_REASON,
		    &reason) == 0 && reason == ZIO_SUSPEND_MMP)
			return (ZPOOL_STATUS_IO_FAILURE_MMP);

		if (suspended == ZIO_FAILURE_MODE_CONTINUE)
			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
		return (ZPOOL_STATUS_IO_FAILURE_WAIT);
	}

	/*
	 * Could not read a log.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
		return (ZPOOL_STATUS_BAD_LOG);
	}

	/*
	 * Bad devices in non-replicated config.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_faulted, B_TRUE))
		return (ZPOOL_STATUS_FAULTED_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing, B_TRUE))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken, B_TRUE))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}

	/*
	 * Faulted or missing devices in a replicated config.
	 */
	if (find_vdev_problem(nvroot, vdev_faulted, B_TRUE))
		return (ZPOOL_STATUS_FAULTED_DEV_R);
	if (find_vdev_problem(nvroot, vdev_missing, B_TRUE))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken, B_TRUE))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors, B_TRUE))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined, B_TRUE))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Removed device
	 */
	if (find_vdev_problem(nvroot, vdev_removed, B_TRUE))
		return (ZPOOL_STATUS_REMOVED_DEV);

	/*
	 * Suboptimal, but usable, ashift configuration.
	 */
	if (find_vdev_problem(nvroot, vdev_non_native_ashift, B_FALSE))
		return (ZPOOL_STATUS_NON_NATIVE_ASHIFT);

	/*
	 * Informational errata available.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRATA, &errata);
	if (errata) {
		*erratap = errata;
		return (ZPOOL_STATUS_ERRATA);
	}

	/*
	 * Outdated, but usable, version
	 */
	if (SPA_VERSION_IS_SUPPORTED(version) && version != SPA_VERSION)
		return (ZPOOL_STATUS_VERSION_OLDER);

	/*
	 * Usable pool with disabled features
	 */
	if (version >= SPA_VERSION_FEATURES) {
		int i;
		nvlist_t *feat;

		if (isimport) {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_LOAD_INFO);
			if (nvlist_exists(feat, ZPOOL_CONFIG_ENABLED_FEAT))
				feat = fnvlist_lookup_nvlist(feat,
				    ZPOOL_CONFIG_ENABLED_FEAT);
		} else {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_FEATURE_STATS);
		}

		for (i = 0; i < SPA_FEATURES; i++) {
			zfeature_info_t *fi = &spa_feature_table[i];
			if (!nvlist_exists(feat, fi->fi_guid))
				return (ZPOOL_STATUS_FEAT_DISABLED);
		}
	}

	return (ZPOOL_STATUS_OK);
}

zpool_status_t
zpool_get_status(zpool_handle_t *zhp, char **msgid, zpool_errata_t *errata)
{
	zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE, errata);
	if (msgid != NULL) {
		if (ret >= NMSGID)
			*msgid = NULL;
		else
			*msgid = zfs_msgid_table[ret];
	}
	return (ret);
}

zpool_status_t
zpool_import_status(nvlist_t *config, char **msgid, zpool_errata_t *errata)
{
	zpool_status_t ret = check_status(config, B_TRUE, errata);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}
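
/*
 * Illustrative caller sketch (an assumption, not part of this file; a
 * real caller would obtain "config" from discovery, e.g. a
 * zpool_search_import()-style lookup in libzutil):
 *
 *	char *msgid;
 *	zpool_errata_t errata;
 *	zpool_status_t status = zpool_import_status(config, &msgid, &errata);
 *
 *	if (status == ZPOOL_STATUS_ERRATA)
 *		(void) printf("errata #%d applies\n", (int)errata);
 *	else if (msgid != NULL)
 *		(void) printf("import issue, see article %s\n", msgid);
 */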