1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Postmortem type identification
31  * ------------------------------
32  *
33  * When debugging kernel memory corruption problems, one often determines that
34  * the corrupted buffer has been erroneously written to by a user of an
35  * adjacent buffer -- determining the specifics of the adjacent buffer can
36  * therefore offer insight into the cause of the corruption.  To determine the
37  * type of an arbitrary memory buffer, however, one has historically been
38  * forced to use dcmds ::kgrep and ::whatis in alternating succession; when an
39  * object of known type is finally reached, types can be back-propagated to
40  * determine the type of the unknown object.
41  *
42  * This process is labor-intensive and error-prone.  Using CTF data and a
43  * collection of heuristics, we would like to both automate this process and
44  * improve on it.
45  *
46  * We start by constructing the pointer graph.  Each node in the graph is
47  * a memory object (either a static object from module data, or a dynamically
48  * allocated memory object); the node's outgoing edges represent pointers from
49  * the object to other memory objects in the system.
50  *
51  * Once the graph is constructed, we start at nodes of known type, and use the
52  * type information to determine the type of each pointer represented by an
53  * outgoing edge.  Determining the pointer type allows us to determine the
54  * type of the edge's destination node, and therefore to iteratively continue
55  * the process of type identification.  This process works as long as all
56  * pointed-to objects are exactly the size of their inferred types.
57  *
58  * Unfortunately, pointed-to objects are often _not_ the size of the pointed-to
59  * type.  This is largely due to three phenomena:
60  *
61  * (a)	C makes no distinction between a pointer to a single object and a
62  *	pointer to some number of objects of like type.
63  *
64  * (b)	C performs no bounds checking on array indexing, allowing declarations
65  *	of structures that are implicitly followed by arrays of the type of the
66  *	structure's last member.  These declarations most often look like:
67  *
68  *	    typedef struct foo {
69  *	            int       foo_bar;
70  *	            int       foo_baz;
71  *	            mumble_t  foo_mumble[1];
72  *	    } foo_t;
73  *
74  *	When a foo_t is allocated, the size of n - 1 mumble_t's is added to the
75  *	size of a foo_t to derive the size of the allocation; this allows for
76  *	the n trailing mumble_t's to be referenced from the allocated foo_t
77  *	using C's convenient array syntax -- without requiring an additional
78  *	memory dereference.  ISO C99 calls the last member in such a structure
79  *	the "flexible array member" (FAM); we adhere to this terminology.
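 *
 *	For example, such an object is typically allocated with something
 *	like the following (a sketch; "fp" and "n" are illustrative):
 *
 *	    foo_t *fp = kmem_alloc(sizeof (foo_t) +
 *	        (n - 1) * sizeof (mumble_t), KM_SLEEP);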
80  *
81  * (c)	It is not uncommon for structures to embed smaller structures, and
82  *	to pass pointers to these smaller structures to routines that track
83  *	the structures only by the smaller type.  This can be thought of as
84  *	a sort of crude-but-efficient polymorphism; see e.g., struct seg and
85  *	its embedded avl_node_t.  It is less common (but by no means unheard
86  *	of) for the smaller structures to be used as place holders in data
87  *	structures consisting of the larger structure.  That is, instead of an
88  *	instance of the larger structure being pointed to by the smaller
89  *	structure pointer, an instance of the smaller structure is pointed to
90  *	by the larger structure pointer; see e.g., struct buf and struct hbuf or
91  *	struct seg_pcache and struct seg_phash.  This construct is particularly
92  *	important to identify when the smaller structures are in a contiguous
93  *	array (as they are in each of the two examples provided):  by examining
94  *	only the data structure of larger structures, one would erroneously
95  *	assume that the array of the smaller structure is actually an array of
96  *	the larger structure.
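 *
 *	A sketch of the place-holder variant (all names, including NBUCKETS,
 *	are hypothetical):  pointers declared to be of the larger type chain
 *	through bucket headers that are actually instances of the smaller
 *	type, and the bucket headers themselves sit in a contiguous array:
 *
 *	    struct small {
 *	            struct big    *sm_forw;
 *	            struct big    *sm_back;
 *	    };
 *
 *	    struct big {
 *	            struct big    *bg_forw;
 *	            struct big    *bg_back;
 *	            int           bg_payload;
 *	    };
 *
 *	    struct small  bucket[NBUCKETS];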
97  *
98  * Taken together, these three phenomena imply that if we have a pointer to
99  * an object that is larger than the pointed-to type, we don't know if the
100  * object is an array of objects of the pointed-to type, the pointed-to type
101  * followed by an array of that type's last member, or some other larger type
102  * that we haven't yet discovered.
103  *
104  * Differentiating these three situations is the focus of many of the
105  * type graph heuristics.  Type graph processing is performed in an initial
106  * pass, four type-determining passes, and a final, post-pass:
107  *
108  * Initial: Graph construction
109  *
110  * The initial pass constructs the nodes from the kmem caches and module data,
111  * and constructs the edges by propagating out from module data.  Nodes that
112  * are in module data or known kmem caches (see tg_cachetab[], below) are
113  * marked with their known type.  This pass takes the longest amount of
114  * wall-clock time, for it frequently induces I/O to read the postmortem image
115  * into memory from permanent storage.
116  *
117  * pass1: Conservative propagation
118  *
119  * In pass1, we propagate types out from the known nodes, adding types to
120  * nodes' tgn_typelists as they are inferred.  Nodes are marked as they are
121  * processed to guarantee halting.  We proceed as conservatively as possible
122  * in this pass; if we discover that a node is larger than twice its inferred
123  * type (that is, we've run into one of the three phenomena described above),
124  * we add the inferred type to the node's tgn_typelist, but we don't descend.
125  *
126  * pass2: Array determination
127  *
128  * In pass2, we visit those nodes through which we refused to descend in pass1.
129  * If we find one (and only one) structural interpretation for the object, we
130  * have determined that -- to the best of our knowledge -- we are not seeing
131  * phenomenon (c).  To further differentiate (a) from (b), we check if the
132  * structure ends with an array of size one; if it does, we assume that it has
133  * a flexible array member.  Otherwise, we perform an additional check:  we
134  * calculate the size of the object modulo the size of the inferred type and
135  * subtract it from the size of the object.  If this value is less than or
136  * equal to the size of the next-smaller kmem cache, we know that it's not an
137  * array of the inferred type -- if it were an array of the inferred type, it
138  * would have been instead allocated out of the next-smaller cache.
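 * (A worked example with illustrative sizes:  for a 192-byte object with an
 * inferred type of size 40, 192 modulo 40 is 32, and 192 - 32 is 160.  If the
 * next-smaller cache is a 160-byte cache, then 160 <= 160 -- a four-element
 * array of the 40-byte type would have fit in (and therefore been allocated
 * from) the smaller cache, so the object is deemed not to be an array of the
 * inferred type.)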
139  *
140  * In either case (FAM or no FAM), we iterate through each element of the
141  * hypothesised array, checking that each pointer member points to either NULL
142  * or valid memory.  If pointer members do not satisfy these criteria, it is
143  * assumed that we have not satisfactorily determined that the given object is
144  * an array of the inferred type, and we abort processing of the node.  Note
145  * that uninitialized pointers can potentially prevent an otherwise valid
146  * array from being interpreted as such.  Because array misinterpretation
147  * can induce substantial cascading type misinterpretation, we prefer to be
148  * conservative and accurate in such cases -- even if it means a lower type
149  * recognition rate.
150  *
151  * pass3: Type coalescence
152  *
153  * pass3 coalesces type possibilities by preferring structural possibilities
154  * over non-structural ones.  For example, if an object is either of type
155  * "char" (pointed to by a caddr_t) or type "struct frotz", the possibilities
156  * will be coalesced into just "struct frotz."
157  *
158  * pass4: Non-array type inference
159  *
160  * pass4 is the least conservative:  it is assumed that phenomenon (c) has been
161  * completely ferreted out by prior passes.  All unknown types are visited, and
162  * incoming edges are checked.  If there is only one possible structural
163  * inference for the unknown type, the node is inferred to be of that type, and
164  * the type is propagated.  This pass picks up those nodes that are larger than
165  * their inferred type, but for which the inferred type is likely accurate.
166  * (struct dcentry, with its FAM of characters, is an example type that is
167  * frequently determined by this pass.)
168  *
169  * Post-pass: Greatest unknown reach
170  *
171  * If recognition rate is low (or, from a more practical perspective, if the
172  * object of interest is not automatically identified), it can be useful
173  * to know which node is the greatest impediment to further recognition.
174  * If the user can -- by hook or by crook -- determine the true type of this
175  * node (and set it with ::istype), much more type identification should be
176  * possible.  To facilitate this, we therefore define the _reach_ of a node to
177  * be the number of unknown nodes that could potentially be identified were the
178  * node's type better known.  We determine the reach by performing a
179  * depth-first pass through the graph.  The node of greatest reach (along with
180  * the reach itself) is reported upon completion of the post-pass.
181  */
182 
183 #include <mdb/mdb_param.h>
184 #include <mdb/mdb_modapi.h>
185 #include <mdb/mdb_ctf.h>
186 #include <sys/sysmacros.h>
187 #include <sys/kmem_impl.h>
188 #include <sys/vmem_impl.h>
189 #include <sys/modctl.h>
190 #include <sys/kobj.h>
192 #include "kmem.h"
193 
194 struct tg_node;
195 
196 typedef struct tg_edge {
197 	struct tg_node	*tge_src;	/* source node */
198 	struct tg_node	*tge_dest;	/* destination node */
199 	uintptr_t	tge_srcoffs;	/* offset in source node */
200 	uintptr_t	tge_destoffs;	/* offset in destination node */
201 	struct tg_edge	*tge_nextin;	/* next incoming edge */
202 	struct tg_edge	*tge_nextout;	/* next outgoing edge */
203 	int		tge_marked;	/* mark */
204 } tg_edge_t;
205 
206 typedef struct tg_type {
207 	mdb_ctf_id_t	tgt_type;	/* CTF type */
208 	mdb_ctf_id_t	tgt_utype;	/* unresolved CTF type */
209 	mdb_ctf_id_t	tgt_rtype;	/* referring type */
210 	size_t		tgt_roffs;	/* referring offset */
211 	const char	*tgt_rmember;	/* referring member */
212 	tg_edge_t	*tgt_redge;	/* referring edge */
213 	struct tg_type	*tgt_next;	/* next type */
214 	int		tgt_flags;	/* flags */
215 } tg_type_t;
216 
217 #define	TG_TYPE_ARRAY		0x0001
218 #define	TG_TYPE_NOTARRAY	0x0002
219 #define	TG_TYPE_HASFAM		0x0004
220 
221 typedef struct tg_node {
222 	uintptr_t	tgn_base;	/* address base of object */
223 	uintptr_t	tgn_limit;	/* address limit of object */
224 	tg_edge_t	*tgn_incoming;	/* incoming edges */
225 	tg_edge_t 	*tgn_outgoing;	/* outgoing edges */
226 	tg_type_t 	*tgn_typelist;	/* conjectured typelist */
227 	tg_type_t 	*tgn_fraglist;	/* type fragment list */
228 	char		tgn_marked;	/* marked */
229 	char		tgn_postmarked;	/* marked in postpass */
230 	int		tgn_smaller;	/* size of next-smaller cache */
231 	int		tgn_reach;	/* number of reachable unknown nodes */
232 	mdb_ctf_id_t	tgn_type;	/* known type */
233 } tg_node_t;
234 
235 #define	TG_NODE_SIZE(n)		((n)->tgn_limit - (n)->tgn_base)
236 
237 typedef struct tg_stats {
238 	size_t	tgs_buffers;
239 	size_t	tgs_nodes;
240 	size_t	tgs_unmarked;
241 	size_t	tgs_known;
242 	size_t	tgs_typed;
243 	size_t	tgs_conflicts;
244 	size_t	tgs_frag;
245 	size_t	tgs_candidates;
246 } tg_stats_t;
247 
248 typedef struct tg_typeoffs {
249 	mdb_ctf_id_t		tgto_type;	/* found type */
250 	ulong_t			tgto_offs;	/* offset of interest */
251 	const char		**tgto_memberp;	/* referring member name */
252 	tg_edge_t		*tgto_edge;	/* outbound edge */
253 } tg_typeoffs_t;
254 
255 typedef struct tg_buildstate {
256 	uintptr_t		tgbs_addr;	/* address of region */
257 	uintptr_t		*tgbs_buf;	/* in-core copy of region */
258 	size_t 			tgbs_ndx;	/* current pointer index */
259 	size_t			tgbs_nptrs;	/* number of pointers */
260 	tg_node_t		*tgbs_src;	/* corresponding node */
261 	struct tg_buildstate	*tgbs_next;	/* next stacked or free */
262 } tg_buildstate_t;
263 
264 typedef struct tg_poststate {
265 	tg_node_t		*tgps_node;	/* current node */
266 	tg_edge_t		*tgps_edge;	/* current edge */
267 	size_t			tgps_total;	/* current total */
268 	struct tg_poststate	*tgps_next;	/* next stacked or free */
269 } tg_poststate_t;
270 
271 typedef struct tg_todo {
272 	tg_node_t		*tgtd_node;	/* node to process */
273 	uintptr_t		tgtd_offs;	/* offset within node */
274 	mdb_ctf_id_t		tgtd_type;	/* conjectured type */
275 	struct tg_todo		*tgtd_next;	/* next todo */
276 } tg_todo_t;
277 
278 typedef struct tg_nodedata {
279 	tg_node_t 		*tgd_next;	/* next node to fill in */
280 	size_t			tgd_size;	/* size of this node */
281 } tg_nodedata_t;
282 
283 /*
284  * Some caches can be pretty arduous to identify (or are rife with conflicts).
285  * To assist type identification, specific caches are identified with the
286  * types of their contents.  Each cache need _not_ be listed here; in general,
287  * a cache should only be added to the tg_cachetab[] if the identification rate
288  * for the cache is less than 95%.  (The identification rate for a
289  * specific cache can be quickly determined by specifying the cache to
290  * ::typegraph.)
291  */
292 struct {
293 	char *tgc_name;
294 	char *tgc_type;
295 } tg_cachetab[] = {
296 	{ "streams_mblk",	"mblk_t" },
297 	{ "seg_cache",		"struct seg" },
298 	{ "segvn_cache",	"struct segvn_data" },
299 	{ "anon_cache",		"struct anon" },
300 	{ "ufs_inode_cache",	"inode_t" },
301 	{ "hme_cache",		"struct hment" },
302 	{ "queue_cache",	"queinfo_t" },
303 	{ "sock_cache",		"struct sonode" },
304 	{ "ire_cache",		"ire_t" },
305 	{ NULL,			NULL }
306 };
307 
308 /*
309  * Some types are only known by their opaque handles.  While this is a good way
310  * to keep interface clients from eating the Forbidden Fruit, it can make type
311  * identification difficult -- which can be especially important for big
312  * structures like dev_info_t.  To assist type identification, we keep a table
313  * to translate from opaque handles to their underlying structures.  A
314  * translation should only be added to the tg_typetab[] if the lack of
315  * translation is preventing substantial type identification.  (This can be
316  * determined by using the "typeunknown" walker on a dump with bufctl auditing
317  * enabled, and using "::whatis -b" to determine the types of unknown buffers;
318  * if many of these unknown types are structures behind an opaque handle, a
319  * new entry in tg_typetab[] is likely warranted.)
320  */
321 struct {
322 	char 		*tgt_type_name;		/* filled in statically */
323 	char		*tgt_actual_name;	/* filled in statically */
324 	mdb_ctf_id_t	tgt_type;		/* determined dynamically */
325 	mdb_ctf_id_t	tgt_actual_type;	/* determined dynamically */
326 } tg_typetab[] = {
327 	{ "dev_info_t",		"struct dev_info" },
328 	{ "ddi_dma_handle_t",	"ddi_dma_impl_t *" },
329 	{ NULL,			NULL }
330 };
331 
332 static enum {
333 	TG_PASS1 = 1,
334 	TG_PASS2,
335 	TG_PASS3,
336 	TG_PASS4
337 } tg_pass;
338 
339 static size_t tg_nnodes;	/* number of nodes */
340 static size_t tg_nanchored;	/* number of anchored nodes */
341 static tg_node_t *tg_node;	/* array of nodes */
342 static tg_node_t **tg_sorted;	/* sorted array of pointers into tg_node */
343 static size_t tg_nsorted;	/* number of pointers in tg_sorted */
344 static int *tg_sizes;		/* copy of kmem_alloc_sizes[] array */
345 static int tg_nsizes;		/* number of sizes in tg_sizes */
346 static hrtime_t tg_start;	/* start time */
347 static int tg_improved;		/* flag indicating that we have improved */
348 static int tg_built;		/* flag indicating that type graph is built */
349 static uint_t tg_verbose;	/* flag to increase verbosity */
350 
351 static mdb_ctf_id_t typegraph_type_offset(mdb_ctf_id_t, size_t,
352     tg_edge_t *, const char **);
353 
354 static void
355 typegraph_typetab_init(void)
356 {
357 	int i;
358 
359 	for (i = 0; tg_typetab[i].tgt_type_name != NULL; i++) {
360 		if (mdb_ctf_lookup_by_name(tg_typetab[i].tgt_type_name,
361 		    &tg_typetab[i].tgt_type) == -1) {
362 			mdb_warn("can't find type '%s'\n",
363 			    tg_typetab[i].tgt_type_name);
364 			mdb_ctf_type_invalidate(&tg_typetab[i].tgt_type);
365 			continue;
366 		}
367 
368 		if (mdb_ctf_lookup_by_name(tg_typetab[i].tgt_actual_name,
369 		    &tg_typetab[i].tgt_actual_type) == -1) {
370 			mdb_warn("can't find type '%s'\n",
371 			    tg_typetab[i].tgt_actual_name);
372 			mdb_ctf_type_invalidate(&tg_typetab[i].tgt_actual_type);
373 		}
374 	}
375 }
376 
377 /*
378  * A wrapper around mdb_ctf_type_resolve() that first checks the type
379  * translation table.
380  */
381 static mdb_ctf_id_t
382 typegraph_resolve(mdb_ctf_id_t type)
383 {
384 	int i;
385 	mdb_ctf_id_t ret;
386 
387 	/*
388 	 * This could be _much_ more efficient...
389 	 */
390 	for (i = 0; tg_typetab[i].tgt_type_name != NULL; i++) {
391 		if (mdb_ctf_type_cmp(type, tg_typetab[i].tgt_type) == 0) {
392 			type = tg_typetab[i].tgt_actual_type;
393 			break;
394 		}
395 	}
396 
397 	(void) mdb_ctf_type_resolve(type, &ret);
398 	return (ret);
399 }
400 
401 /*
402  * A wrapper around mdb_ctf_type_name() that deals with anonymous structures.
403  * Anonymous structures are those that have no name associated with them.
404  * Nearly always, these structures are referred to by a typedef (e.g.
405  * "typedef struct { int bar } foo_t"); we expect the unresolved type to
406  * be passed as utype.
407  */
408 static char *
409 typegraph_type_name(mdb_ctf_id_t type, mdb_ctf_id_t utype)
410 {
411 	static char buf[MDB_SYM_NAMLEN];
412 
413 	if (mdb_ctf_type_name(type, buf, sizeof (buf)) == NULL) {
414 		(void) strcpy(buf, "<unknown>");
415 	} else {
416 		/*
417 		 * Perhaps a CTF interface would be preferable to this kludgey
418 		 * strcmp()?  Perhaps.
419 		 */
420 		if (strcmp(buf, "struct ") == 0)
421 			(void) mdb_ctf_type_name(utype, buf, sizeof (buf));
422 	}
423 
424 	return (buf);
425 }
426 
427 /*
428  * A wrapper around mdb_ctf_type_size() that accurately accounts for arrays.
429  */
430 static ssize_t
431 typegraph_size(mdb_ctf_id_t type)
432 {
433 	mdb_ctf_arinfo_t arr;
434 	ssize_t size;
435 
436 	if (!mdb_ctf_type_valid(type))
437 		return (-1);
438 
439 	if (mdb_ctf_type_kind(type) != CTF_K_ARRAY)
440 		return (mdb_ctf_type_size(type));
441 
442 	if (mdb_ctf_array_info(type, &arr) == -1)
443 		return (-1);
444 
445 	type = typegraph_resolve(arr.mta_contents);
446 
447 	if (!mdb_ctf_type_valid(type))
448 		return (-1);
449 
450 	if ((size = mdb_ctf_type_size(type)) == -1)
451 		return (-1);
452 
453 	return (size * arr.mta_nelems);
454 }
455 
456 /*
457  * The mdb_ctf_member_iter() callback for typegraph_type_offset().
458  */
459 static int
460 typegraph_offiter(const char *name, mdb_ctf_id_t type,
461     ulong_t off, tg_typeoffs_t *toffs)
462 {
463 	int kind;
464 	ssize_t size;
465 	mdb_ctf_arinfo_t arr;
466 
467 	off /= NBBY;
468 
469 	if (off > toffs->tgto_offs) {
470 		/*
471 		 * We went past it; return failure.
472 		 */
473 		return (1);
474 	}
475 
476 	if (!mdb_ctf_type_valid(type = typegraph_resolve(type)))
477 		return (0);
478 
479 	if ((size = mdb_ctf_type_size(type)) == -1)
480 		return (0);
481 
482 	if (off < toffs->tgto_offs &&
483 	    size != 0 && off + size <= toffs->tgto_offs) {
484 		/*
485 		 * Haven't reached it yet; continue looking.
486 		 */
487 		return (0);
488 	}
489 
490 	/*
491 	 * If the base type is not a structure, an array or a union, and
492 	 * the offset equals the desired offset, we have our type.
493 	 */
494 	if ((kind = mdb_ctf_type_kind(type)) != CTF_K_STRUCT &&
495 	    kind != CTF_K_UNION && kind != CTF_K_ARRAY) {
496 		if (off == toffs->tgto_offs)
497 			toffs->tgto_type = type;
498 
499 		if (toffs->tgto_memberp != NULL)
500 			*(toffs->tgto_memberp) = name;
501 
502 		return (1);
503 	}
504 
505 	/*
506 	 * If the type is an array, see if we fall within the bounds.
507 	 */
508 	if (kind == CTF_K_ARRAY) {
509 		if (mdb_ctf_array_info(type, &arr) == -1)
510 			return (0);
511 
512 		type = typegraph_resolve(arr.mta_contents);
513 
514 		if (!mdb_ctf_type_valid(type))
515 			return (0);
516 
517 		size = mdb_ctf_type_size(type) * arr.mta_nelems;
518 
519 		if (off < toffs->tgto_offs && off + size <= toffs->tgto_offs) {
520 			/*
521 			 * Nope, haven't found it yet; continue looking.
522 			 */
523 			return (0);
524 		}
525 	}
526 
527 	toffs->tgto_type = typegraph_type_offset(type,
528 	    toffs->tgto_offs - off, toffs->tgto_edge, toffs->tgto_memberp);
529 
530 	return (1);
531 }
532 
533 /*
534  * The mdb_ctf_member_iter() callback for typegraph_type_offset() when the type
535  * is found to be of kind CTF_K_UNION.  With unions, we attempt to do better
536  * than just completely punting:  if all but one of the members is impossible
537  * (due to, say, size constraints on the destination node), we can propagate
538  * the valid member.
539  */
540 static int
541 typegraph_union(const char *name, mdb_ctf_id_t type, ulong_t off,
542     tg_typeoffs_t *toffs)
543 {
544 	const char *member = name;
545 	tg_edge_t *e = toffs->tgto_edge;
546 	mdb_ctf_id_t rtype;
547 	size_t rsize;
548 	int kind;
549 
550 	if (!mdb_ctf_type_valid(type = typegraph_resolve(type)))
551 		return (0);
552 
553 	kind = mdb_ctf_type_kind(type);
554 
555 	if (kind == CTF_K_STRUCT || kind == CTF_K_UNION ||
556 	    kind == CTF_K_ARRAY) {
557 		type = typegraph_type_offset(type,
558 		    toffs->tgto_offs - off, e, &member);
559 	}
560 
561 	if (!mdb_ctf_type_valid(type))
562 		return (0);
563 
564 	if (mdb_ctf_type_kind(type) != CTF_K_POINTER)
565 		return (0);
566 
567 	/*
568 	 * Now figure out what exactly we're pointing to.
569 	 */
570 	if (mdb_ctf_type_reference(type, &rtype) == -1)
571 		return (0);
572 
573 	if (!mdb_ctf_type_valid(rtype = typegraph_resolve(rtype)))
574 		return (0);
575 
576 	rsize = mdb_ctf_type_size(rtype);
577 
578 	/*
579 	 * Compare this size to the size of the thing we're pointing to --
580 	 * if it's larger than the node that we're pointing to, we know that
581 	 * the alleged pointer type must be an invalid interpretation of the
582 	 * union.
583 	 */
584 	if (rsize > TG_NODE_SIZE(e->tge_dest) - e->tge_destoffs) {
585 		/*
586 		 * We're in luck -- it's not possibly this pointer.
587 		 */
588 		return (0);
589 	}
590 
591 	/*
592 	 * This looks like it could be legit.  If the type hasn't been
593 	 * specified, we could be in business.
594 	 */
595 	if (mdb_ctf_type_valid(toffs->tgto_type)) {
596 		/*
597 		 * There are two potentially valid interpretations for this
598 		 * union.  Invalidate the type.
599 		 */
600 		mdb_ctf_type_invalidate(&toffs->tgto_type);
601 		return (1);
602 	}
603 
604 	toffs->tgto_type = type;
605 
606 	if (toffs->tgto_memberp != NULL)
607 		*(toffs->tgto_memberp) = member;
608 
609 	return (0);
610 }
611 
612 /*ARGSUSED*/
613 static int
614 typegraph_lastmember(const char *name,
615     mdb_ctf_id_t type, ulong_t off, void *last)
616 {
617 	*((mdb_ctf_id_t *)last) = type;
618 
619 	return (0);
620 }
621 
622 /*
623  * To determine if a structure has a flexible array member, we iterate over
624  * the members; if the structure has more than one member, and the last member
625  * is an array of size 1, we're going to assume that this structure has a
626  * flexible array member.  Yes, this heuristic is a little sloppy -- but cut me
627  * some slack:  why the hell else would you have an array of size 1?  (Don't
628  * answer that.)
629  */
630 static int
631 typegraph_hasfam(mdb_ctf_id_t type, mdb_ctf_id_t *atype)
632 {
633 	mdb_ctf_arinfo_t arr;
634 	mdb_ctf_id_t last;
635 	int kind;
636 
637 	if (!mdb_ctf_type_valid(type))
638 		return (0);
639 
640 	if ((kind = mdb_ctf_type_kind(type)) != CTF_K_STRUCT)
641 		return (0);
642 
643 	mdb_ctf_type_invalidate(&last);
644 	mdb_ctf_member_iter(type, typegraph_lastmember, &last);
645 
646 	if (!mdb_ctf_type_valid(last))
647 		return (0);
648 
649 	if ((kind = mdb_ctf_type_kind(last)) == CTF_K_STRUCT)
650 		return (typegraph_hasfam(last, atype));
651 
652 	if (kind != CTF_K_ARRAY)
653 		return (0);
654 
655 	if (typegraph_size(last) == typegraph_size(type)) {
656 		/*
657 		 * This structure has only one member; even if that member is
658 		 * an array of size 1, we'll assume that there is something
659 		 * stranger going on than a run-of-the-mill FAM (e.g., a
660 		 * kmutex_t).
661 		 */
662 		return (0);
663 	}
664 
665 	if (mdb_ctf_array_info(last, &arr) == -1)
666 		return (0);
667 
668 	if (arr.mta_nelems != 1)
669 		return (0);
670 
671 	if (atype != NULL)
672 		*atype = typegraph_resolve(arr.mta_contents);
673 
674 	return (1);
675 }
676 
677 /*
678  * This routine takes a type and offset, and returns the type at the specified
679  * offset.  It additionally takes an optional edge to help bust unions, and
680  * an optional address of a character pointer to set to the name of the member
681  * found at the specified offset.
682  */
683 static mdb_ctf_id_t
684 typegraph_type_offset(mdb_ctf_id_t type, size_t offset, tg_edge_t *e,
685     const char **member)
686 {
687 	mdb_ctf_arinfo_t arr;
688 	uint_t kind;
689 	mdb_ctf_id_t last;
690 	ssize_t size;
691 	ssize_t lsize;
692 	tg_typeoffs_t toffs;
693 	mdb_ctf_id_t inval;
694 
695 	mdb_ctf_type_invalidate(&inval);
696 
697 	if (member != NULL)
698 		*member = NULL;
699 
700 	/*
701 	 * Resolve type to its base type.
702 	 */
703 	type = typegraph_resolve(type);
704 	kind = mdb_ctf_type_kind(type);
705 
706 	switch (kind) {
707 	case CTF_K_ARRAY:
708 		/*
709 		 * If this is an array, we need to figure out what it's an
710 		 * array _of_.  We must then figure out the size of the array
711 		 * structure, and then determine our offset within that type.
712 		 * From there, we can recurse.
713 		 */
714 		if (mdb_ctf_array_info(type, &arr) == -1)
715 			return (inval);
716 
717 		type = typegraph_resolve(arr.mta_contents);
718 
719 		if (!mdb_ctf_type_valid(type))
720 			return (inval);
721 
722 		/*
723 		 * If the type is not a structure/union, then check that the
724 		 * offset doesn't point to the middle of the base type and
725 		 * return it.
726 		 */
727 		kind = mdb_ctf_type_kind(type);
728 		size = mdb_ctf_type_size(type);
729 
730 		if (kind != CTF_K_STRUCT && kind != CTF_K_UNION) {
731 			if (offset % size) {
732 				/*
733 				 * The offset is pointing to the middle of a
734 				 * type; return failure.
735 				 */
736 				return (inval);
737 			}
738 
739 			return (type);
740 		}
741 
742 		return (typegraph_type_offset(type, offset % size, e, member));
743 
744 	case CTF_K_STRUCT:
745 		/*
746 		 * If the offset is larger than the size, we need to figure
747 		 * out what exactly we're looking at.  There are several
748 		 * possibilities:
749 		 *
750 		 * (a)	A structure that has this type as its first member.
751 		 *
752 		 * (b)	An array of structures of this type.
753 		 *
754 		 * (c)	A structure that has a flexible array member.
755 		 *
756 		 * The differentiation between (a) and (b) has hopefully
757 		 * happened before entering this function.  To differentiate
758 		 * between (b) and (c), we call typegraph_hasfam().
759 		 */
760 		size = mdb_ctf_type_size(type);
761 
762 		if (offset >= size) {
763 			if (typegraph_hasfam(type, &last)) {
764 				/*
765 				 * We have a live one.  Take the size, subtract
766 				 * the size of the last element, and recurse.
767 				 */
768 				if (!mdb_ctf_type_valid(last))
769 					return (inval);
770 
771 				lsize = mdb_ctf_type_size(last);
772 
773 				return (typegraph_type_offset(last,
774 				    offset - (size - lsize), e, member));
775 			}
776 
777 			offset %= size;
778 		}
779 
780 		toffs.tgto_offs = offset;
781 		toffs.tgto_memberp = member;
782 		toffs.tgto_edge = e;
783 		mdb_ctf_type_invalidate(&toffs.tgto_type);
784 
785 		mdb_ctf_member_iter(type,
786 		    (mdb_ctf_member_f *)typegraph_offiter, &toffs);
787 
788 		return (toffs.tgto_type);
789 
790 	case CTF_K_POINTER:
791 		if (!mdb_ctf_type_valid(type = typegraph_resolve(type)))
792 			return (inval);
793 
794 		size = mdb_ctf_type_size(type);
795 
796 		if (offset % size) {
797 			/*
798 			 * The offset is pointing to the middle of a type;
799 			 * return failure.
800 			 */
801 			return (inval);
802 		}
803 
804 		return (type);
805 
806 	case CTF_K_UNION:
807 		if (e == NULL) {
808 			/*
809 			 * We've been given no outbound edge -- we have no way
810 			 * of figuring out what the hell this union is.
811 			 */
812 			return (inval);
813 		}
814 
815 		toffs.tgto_offs = offset;
816 		toffs.tgto_memberp = member;
817 		toffs.tgto_edge = e;
818 		mdb_ctf_type_invalidate(&toffs.tgto_type);
819 
820 		/*
821 		 * Try to bust the union...
822 		 */
823 		if (mdb_ctf_member_iter(type,
824 		    (mdb_ctf_member_f *)typegraph_union, &toffs) != 0) {
825 			/*
826 			 * There was at least one valid pointer in there.
827 			 * Return "void *".
828 			 */
829 			(void) mdb_ctf_lookup_by_name("void *", &type);
830 
831 			return (type);
832 		}
833 
834 		return (toffs.tgto_type);
835 
836 	default:
837 		/*
838 		 * If the offset is anything other than zero, no dice.
839 		 */
840 		if (offset != 0)
841 			return (inval);
842 
843 		return (type);
844 	}
845 }
846 
847 /*
848  * This routine takes an address and a type, and determines if the memory
849  * pointed to by the specified address could be of the specified type.
850  * This could become significantly more sophisticated, but for now it's pretty
851  * simple:  this is _not_ of the specified type if it's a pointer, and either:
852  *
853  *  (a)	The alignment is not correct given the type that is pointed to.
854  *
855  *  (b)	The memory pointed to is invalid.  Note that structures that have
856  *	uninitialized pointers may cause us to erroneously fail -- but these
857  *	structures are a bug anyway (uninitialized pointers can confuse many
858  *	analysis tools, including ::findleaks).
859  */
860 static int
861 typegraph_couldbe(uintptr_t addr, mdb_ctf_id_t type)
862 {
863 	int rkind;
864 	mdb_ctf_id_t rtype;
865 	uintptr_t val, throwaway;
866 	size_t rsize;
867 	char buf[MDB_SYM_NAMLEN];
868 
869 	if (mdb_ctf_type_kind(type) != CTF_K_POINTER)
870 		return (1);
871 
872 	if (mdb_ctf_type_reference(type, &rtype) == -1)
873 		return (1);
874 
875 	if (!mdb_ctf_type_valid(rtype = typegraph_resolve(rtype)))
876 		return (1);
877 
878 	if (mdb_vread(&val, sizeof (val), addr) == -1) {
879 		/*
880 		 * This is definitely unexpected.  We should not be getting
881 		 * back an error on a node that was successfully read in.
882 		 * Lacking something better to do, we'll print an error
883 		 * and return.
884 		 */
885 		mdb_warn("failed to evaluate pointer type at address %p", addr);
886 		return (1);
887 	}
888 
889 	rkind = mdb_ctf_type_kind(rtype);
890 
891 	if (rkind == CTF_K_STRUCT || rkind == CTF_K_UNION) {
892 		/*
893 		 * If it's a pointer to a structure or union, it must be
894 		 * aligned to sizeof (uintptr_t).
895 		 */
896 		if (val & (sizeof (uintptr_t) - 1)) {
897 			if (tg_verbose) {
898 				mdb_printf("typegraph: pass %d: rejecting "
899 				    "*%p (%p) as %s: misaligned pointer\n",
900 				    tg_pass, addr, val,
901 				    mdb_ctf_type_name(type, buf, sizeof (buf)));
902 			}
903 
904 			return (0);
905 		}
906 	}
907 
908 	rsize = mdb_ctf_type_size(rtype);
909 
910 	if (val == NULL || rsize == 0)
911 		return (1);
912 
913 	/*
914 	 * For our speculative read, we're going to clamp the referenced size
915 	 * at the size of a pointer.
916 	 */
917 	if (rsize > sizeof (uintptr_t))
918 		rsize = sizeof (uintptr_t);
919 
920 	if (mdb_vread(&throwaway, rsize, val) == -1) {
921 		if (tg_verbose) {
922 			mdb_printf("typegraph: pass %d: rejecting *%p (%p) as"
923 			    " %s: bad pointer\n", tg_pass, addr, val,
924 			    mdb_ctf_type_name(type, buf, sizeof (buf)));
925 		}
926 		return (0);
927 	}
928 
929 	return (1);
930 }
931 
932 static int
933 typegraph_nodecmp(const void *l, const void *r)
934 {
935 	tg_node_t *lhs = *(tg_node_t **)l;
936 	tg_node_t *rhs = *(tg_node_t **)r;
937 
938 	if (lhs->tgn_base < rhs->tgn_base)
939 		return (-1);
940 	if (lhs->tgn_base > rhs->tgn_base)
941 		return (1);
942 
943 	return (0);
944 }
945 
946 static tg_node_t *
947 typegraph_search(uintptr_t addr)
948 {
949 	ssize_t left = 0, right = tg_nnodes - 1, guess;
950 
951 	while (right >= left) {
952 		guess = (right + left) >> 1;
953 
954 		if (addr < tg_sorted[guess]->tgn_base) {
955 			right = guess - 1;
956 			continue;
957 		}
958 
959 		if (addr >= tg_sorted[guess]->tgn_limit) {
960 			left = guess + 1;
961 			continue;
962 		}
963 
964 		return (tg_sorted[guess]);
965 	}
966 
967 	return (NULL);
968 }
969 
970 static int
971 typegraph_interested(const kmem_cache_t *c)
972 {
973 	vmem_t vmem;
974 
975 	if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
976 		mdb_warn("cannot read arena %p for cache '%s'",
977 		    (uintptr_t)c->cache_arena, c->cache_name);
978 		return (0);
979 	}
980 
981 	/*
982 	 * If this cache isn't allocating from the kmem_default or the
983 	 * kmem_firewall vmem arena, we're not interested.
984 	 */
985 	if (strcmp(vmem.vm_name, "kmem_default") != 0 &&
986 	    strcmp(vmem.vm_name, "kmem_firewall") != 0)
987 		return (0);
988 
989 	return (1);
990 }
991 
992 static int
993 typegraph_estimate(uintptr_t addr, const kmem_cache_t *c, size_t *est)
994 {
995 	if (!typegraph_interested(c))
996 		return (WALK_NEXT);
997 
998 	*est += kmem_estimate_allocated(addr, c);
999 
1000 	return (WALK_NEXT);
1001 }
1002 
1003 static int
1004 typegraph_estimate_modctl(uintptr_t addr, const struct modctl *m, size_t *est)
1005 {
1006 	struct module mod;
1007 
1008 	if (m->mod_mp == NULL)
1009 		return (WALK_NEXT);
1010 
1011 	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
1012 		mdb_warn("couldn't read modctl %p's module", addr);
1013 		return (WALK_NEXT);
1014 	}
1015 
1016 	(*est) += mod.nsyms;
1017 
1018 	return (WALK_NEXT);
1019 }
1020 
1021 /*ARGSUSED*/
1022 static int
1023 typegraph_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
1024 {
1025 	if (strcmp(vmem->vm_name, "kmem_oversize") != 0)
1026 		return (WALK_NEXT);
1027 
1028 	*est += (size_t)(vmem->vm_kstat.vk_alloc.value.ui64 -
1029 	    vmem->vm_kstat.vk_free.value.ui64);
1030 
1031 	return (WALK_NEXT);
1032 }
1033 
1034 /*ARGSUSED*/
1035 static int
1036 typegraph_buf(uintptr_t addr, void *ignored, tg_nodedata_t *tgd)
1037 {
1038 	tg_node_t *node = tgd->tgd_next++;
1039 	uintptr_t limit = addr + tgd->tgd_size;
1040 
1041 	node->tgn_base = addr;
1042 	node->tgn_limit = limit;
1043 
1044 	return (WALK_NEXT);
1045 }
1046 
1047 /*ARGSUSED*/
1048 static int
1049 typegraph_kmem(uintptr_t addr, const kmem_cache_t *c, tg_node_t **tgp)
1050 {
1051 	tg_node_t *node = *tgp;
1052 	tg_nodedata_t tgd;
1053 	mdb_ctf_id_t type;
1054 	int i, smaller;
1055 
1056 	mdb_ctf_type_invalidate(&type);
1057 
1058 	if (!typegraph_interested(c))
1059 		return (WALK_NEXT);
1060 
1061 	tgd.tgd_size = c->cache_bufsize;
1062 	tgd.tgd_next = *tgp;
1063 
1064 	if (mdb_pwalk("kmem", (mdb_walk_cb_t)typegraph_buf, &tgd, addr) == -1) {
1065 		mdb_warn("can't walk kmem for cache %p (%s)", addr,
1066 		    c->cache_name);
1067 		return (WALK_DONE);
1068 	}
1069 
1070 	*tgp = tgd.tgd_next;
1071 
1072 	for (i = 0; tg_cachetab[i].tgc_name != NULL; i++) {
1073 		if (strcmp(tg_cachetab[i].tgc_name, c->cache_name) != 0)
1074 			continue;
1075 
1076 		if (mdb_ctf_lookup_by_name(tg_cachetab[i].tgc_type,
1077 		    &type) == -1) {
1078 			mdb_warn("could not find type '%s', allegedly type "
1079 			    "for cache %s", tg_cachetab[i].tgc_type,
1080 			    c->cache_name);
1081 			break;
1082 		}
1083 
1084 		break;
1085 	}
1086 
1087 	/*
1088 	 * If this is a named cache (i.e., not from a kmem_alloc_[n] cache),
1089 	 * the nextsize is 0.
1090 	 */
1091 	if (strncmp(c->cache_name, "kmem_alloc_", strlen("kmem_alloc_")) == 0) {
1092 		GElf_Sym sym;
1093 
1094 		if (tg_sizes == NULL) {
1095 			if (mdb_lookup_by_name("kmem_alloc_sizes",
1096 			    &sym) == -1) {
1097 				mdb_warn("failed to find 'kmem_alloc_sizes'");
1098 				return (WALK_ERR);
1099 			}
1100 
1101 			tg_sizes = mdb_zalloc(sym.st_size, UM_SLEEP);
1102 			tg_nsizes = sym.st_size / sizeof (int);
1103 
1104 			if (mdb_vread(tg_sizes, sym.st_size,
1105 			    (uintptr_t)sym.st_value) == -1) {
1106 				mdb_warn("failed to read kmem_alloc_sizes");
1107 				return (WALK_ERR);
1108 			}
1109 		}
1110 
1111 		/*
1112 		 * Yes, this is a linear search -- but we're talking about
1113 		 * a pretty small array (38 elements as of this writing), and
1114 		 * only executed a handful of times (for each sized kmem
1115 		 * cache).
1116 		 */
1117 		for (i = 0; i < tg_nsizes; i++) {
1118 			if (tg_sizes[i] == c->cache_bufsize)
1119 				break;
1120 		}
1121 
1122 		if (i == tg_nsizes) {
1123 			/*
1124 			 * Something is wrong -- this appears to be a sized
1125 			 * kmem cache, but we can't find its size in the
1126 			 * kmem_alloc_sizes array.  Emit a warning and return
1127 			 * failure.
1128 			 */
1129 			mdb_warn("couldn't find buffer size for %s (%d)"
1130 			    " in kmem_alloc_sizes array\n", c->cache_name,
1131 			    c->cache_bufsize);
1132 
1133 			return (WALK_ERR);
1134 		}
1135 
1136 		if (i == 0) {
1137 			smaller = 1;
1138 		} else {
1139 			smaller = tg_sizes[i - 1];
1140 		}
1141 	} else {
1142 		smaller = 0;
1143 	}
1144 
1145 	for (; node < *tgp; node++) {
1146 		node->tgn_type = type;
1147 		node->tgn_smaller = smaller;
1148 	}
1149 
1150 	*tgp = tgd.tgd_next;
1151 
1152 	return (WALK_NEXT);
1153 }
1154 
1155 /*ARGSUSED*/
1156 static int
1157 typegraph_seg(uintptr_t addr, const vmem_seg_t *seg, tg_node_t **tgp)
1158 {
1159 	tg_nodedata_t tgd;
1160 
1161 	tgd.tgd_next = *tgp;
1162 	tgd.tgd_size = seg->vs_end - seg->vs_start;
1163 
1164 	typegraph_buf(seg->vs_start, NULL, &tgd);
1165 
1166 	*tgp = tgd.tgd_next;
1167 	return (WALK_NEXT);
1168 }
1169 
1170 static int
1171 typegraph_vmem(uintptr_t addr, const vmem_t *vmem, tg_node_t **tgp)
1172 {
1173 	if (strcmp(vmem->vm_name, "kmem_oversize") != 0)
1174 		return (WALK_NEXT);
1175 
1176 	if (mdb_pwalk("vmem_alloc",
1177 	    (mdb_walk_cb_t)typegraph_seg, tgp, addr) == -1)
1178 		mdb_warn("can't walk vmem for arena %p", addr);
1179 
1180 	return (WALK_NEXT);
1181 }
1182 
1183 static void
1184 typegraph_build_anchored(uintptr_t addr, size_t size, mdb_ctf_id_t type)
1185 {
1186 	uintptr_t *buf;
1187 	tg_buildstate_t *state = NULL, *new_state, *free = NULL;
1188 	tg_node_t *node, *src;
1189 	tg_edge_t *edge;
1190 	size_t nptrs, ndx;
1191 	uintptr_t min = tg_sorted[0]->tgn_base;
1192 	uintptr_t max = tg_sorted[tg_nnodes - 1]->tgn_limit;
1193 	ssize_t rval;
1194 	int mask = sizeof (uintptr_t) - 1;
1195 
1196 	if (addr == NULL || size < sizeof (uintptr_t))
1197 		return;
1198 
1199 	/*
1200 	 * Add an anchored node.
1201 	 */
1202 	src = &tg_node[tg_nnodes + tg_nanchored++];
1203 	src->tgn_base = addr;
1204 	src->tgn_limit = addr + size;
1205 	src->tgn_type = type;
1206 
1207 push:
1208 	/*
1209 	 * If our address isn't pointer-aligned, we need to align it and
1210 	 * whack the size appropriately.
1211 	 */
1212 	if (addr & mask) {
1213 		if ((mask + 1) - (addr & mask) >= size)
1214 			goto out;
1215 
1216 		size -= (mask + 1) - (addr & mask);
1217 		addr += (mask + 1) - (addr & mask);
1218 	}
1219 
1220 	nptrs = size / sizeof (uintptr_t);
1221 	buf = mdb_alloc(size, UM_SLEEP);
1222 	ndx = 0;
1223 
1224 	if ((rval = mdb_vread(buf, size, addr)) != size) {
1225 		mdb_warn("couldn't read ptr at %p (size %ld); rval is %d",
1226 		    addr, size, rval);
1227 		goto out;
1228 	}
1229 pop:
1230 	for (; ndx < nptrs; ndx++) {
1231 		uintptr_t ptr = buf[ndx];
1232 
1233 		if (ptr < min || ptr >= max)
1234 			continue;
1235 
1236 		if ((node = typegraph_search(ptr)) == NULL)
1237 			continue;
1238 
1239 		/*
1240 		 * We need to record an edge to us.
1241 		 */
1242 		edge = mdb_zalloc(sizeof (tg_edge_t), UM_SLEEP);
1243 
1244 		edge->tge_src = src;
1245 		edge->tge_dest = node;
1246 		edge->tge_nextout = src->tgn_outgoing;
1247 		src->tgn_outgoing = edge;
1248 
1249 		edge->tge_srcoffs += ndx * sizeof (uintptr_t);
1250 		edge->tge_destoffs = ptr - node->tgn_base;
1251 		edge->tge_nextin = node->tgn_incoming;
1252 		node->tgn_incoming = edge;
1253 
1254 		/*
1255 		 * If this node is marked, we don't need to descend.
1256 		 */
1257 		if (node->tgn_marked)
1258 			continue;
1259 
1260 		/*
1261 		 * We need to descend.  To minimize the resource consumption
1262 		 * of type graph construction, we avoid recursing.
1263 		 */
1264 		node->tgn_marked = 1;
1265 
1266 		if (free != NULL) {
1267 			new_state = free;
1268 			free = free->tgbs_next;
1269 		} else {
1270 			new_state =
1271 			    mdb_zalloc(sizeof (tg_buildstate_t), UM_SLEEP);
1272 		}
1273 
1274 		new_state->tgbs_src = src;
1275 		src = node;
1276 
1277 		new_state->tgbs_addr = addr;
1278 		addr = node->tgn_base;
1279 		size = node->tgn_limit - addr;
1280 
1281 		new_state->tgbs_next = state;
1282 		new_state->tgbs_buf = buf;
1283 		new_state->tgbs_ndx = ndx + 1;
1284 		new_state->tgbs_nptrs = nptrs;
1285 		state = new_state;
1286 		goto push;
1287 	}
1288 
1289 	/*
1290 	 * If we're here, then we have completed this region.  We need to
1291 	 * free our buffer, and update our "resident" counter accordingly.
1292 	 */
1293 	mdb_free(buf, size);
1294 
1295 out:
1296 	/*
1297 	 * If we have pushed state, we need to pop it.
1298 	 */
1299 	if (state != NULL) {
1300 		buf = state->tgbs_buf;
1301 		ndx = state->tgbs_ndx;
1302 		src = state->tgbs_src;
1303 		nptrs = state->tgbs_nptrs;
1304 		addr = state->tgbs_addr;
1305 
1306 		size = nptrs * sizeof (uintptr_t);
1307 
1308 		new_state = state->tgbs_next;
1309 		state->tgbs_next = free;
1310 		free = state;
1311 		state = new_state;
1312 
1313 		goto pop;
1314 	}
1315 
1316 	while (free != NULL) {
1317 		state = free;
1318 		free = free->tgbs_next;
1319 		mdb_free(state, sizeof (tg_buildstate_t));
1320 	}
1321 }
1322 
1323 static void
1324 typegraph_build(uintptr_t addr, size_t size)
1325 {
1326 	uintptr_t limit = addr + size;
1327 	char name[MDB_SYM_NAMLEN];
1328 	GElf_Sym sym;
1329 	mdb_ctf_id_t type;
1330 
1331 	do {
1332 		if (mdb_lookup_by_addr(addr, MDB_SYM_EXACT, name,
1333 		    sizeof (name), &sym) == -1) {
1334 			addr++;
1335 			continue;
1336 		}
1337 
1338 		if (sym.st_size == 0) {
1339 			addr++;
1340 			continue;
1341 		}
1342 
1343 		if (strcmp(name, "kstat_initial") == 0) {
1344 			/*
1345 			 * Yes, this is a kludge.  "kstat_initial" ends up
1346 			 * backing the kstat vmem arena -- so we don't want
1347 			 * to include it as an anchor node.
1348 			 */
1349 			addr += sym.st_size;
1350 			continue;
1351 		}
1352 
1353 		/*
1354 		 * We have the symbol; now get its type.
1355 		 */
1356 		if (mdb_ctf_lookup_by_addr(addr, &type) == -1) {
1357 			addr += sym.st_size;
1358 			continue;
1359 		}
1360 
1361 		if (!mdb_ctf_type_valid(type)) {
1362 			addr += sym.st_size;
1363 			continue;
1364 		}
1365 
1366 		if (!mdb_ctf_type_valid(type = typegraph_resolve(type))) {
1367 			addr += sym.st_size;
1368 			continue;
1369 		}
1370 
1371 		typegraph_build_anchored(addr, (size_t)sym.st_size, type);
1372 		addr += sym.st_size;
1373 	} while (addr < limit);
1374 }
1375 
1376 /*ARGSUSED*/
1377 static int
1378 typegraph_thread(uintptr_t addr, const kthread_t *t, mdb_ctf_id_t *type)
1379 {
1380 	/*
1381 	 * If this thread isn't already a node, add it as an anchor.  And
1382 	 * regardless, set its type to be the specified type.
1383 	 */
1384 	tg_node_t *node;
1385 
1386 	if ((node = typegraph_search(addr)) == NULL) {
1387 		typegraph_build_anchored(addr, mdb_ctf_type_size(*type), *type);
1388 	} else {
1389 		node->tgn_type = *type;
1390 	}
1391 
1392 	return (WALK_NEXT);
1393 }
1394 
1395 /*ARGSUSED*/
1396 static int
1397 typegraph_kstat(uintptr_t addr, const vmem_seg_t *seg, mdb_ctf_id_t *type)
1398 {
1399 	size_t size = mdb_ctf_type_size(*type);
1400 
1401 	typegraph_build_anchored(seg->vs_start, size, *type);
1402 
1403 	return (WALK_NEXT);
1404 }
1405 
1406 static void
1407 typegraph_node_addtype(tg_node_t *node, tg_edge_t *edge, mdb_ctf_id_t rtype,
1408     const char *rmember, size_t roffs, mdb_ctf_id_t utype, mdb_ctf_id_t type)
1409 {
1410 	tg_type_t *tp;
1411 	tg_type_t **list;
1412 
1413 	if (edge->tge_destoffs == 0) {
1414 		list = &node->tgn_typelist;
1415 	} else {
1416 		list = &node->tgn_fraglist;
1417 	}
1418 
1419 	/*
1420 	 * First, search for this type in the type list.
1421 	 */
1422 	for (tp = *list; tp != NULL; tp = tp->tgt_next) {
1423 		if (mdb_ctf_type_cmp(tp->tgt_type, type) == 0)
1424 			return;
1425 	}
1426 
1427 	tp = mdb_zalloc(sizeof (tg_type_t), UM_SLEEP);
1428 	tp->tgt_next = *list;
1429 	tp->tgt_type = type;
1430 	tp->tgt_rtype = rtype;
1431 	tp->tgt_utype = utype;
1432 	tp->tgt_redge = edge;
1433 	tp->tgt_roffs = roffs;
1434 	tp->tgt_rmember = rmember;
1435 	*list = tp;
1436 
1437 	tg_improved = 1;
1438 }
1439 
1440 static void
1441 typegraph_stats_node(tg_node_t *node, tg_stats_t *stats)
1442 {
1443 	tg_edge_t *e;
1444 
1445 	stats->tgs_nodes++;
1446 
1447 	if (!node->tgn_marked)
1448 		stats->tgs_unmarked++;
1449 
1450 	if (mdb_ctf_type_valid(node->tgn_type)) {
1451 		stats->tgs_known++;
1452 		return;
1453 	}
1454 
1455 	if (node->tgn_typelist != NULL) {
1456 		stats->tgs_typed++;
1457 
1458 		if (node->tgn_typelist->tgt_next)
1459 			stats->tgs_conflicts++;
1460 
1461 		return;
1462 	}
1463 
1464 	if (node->tgn_fraglist != NULL) {
1465 		stats->tgs_frag++;
1466 		return;
1467 	}
1468 
1469 	/*
1470 	 * This node is not typed -- but check if any of its outgoing edges
1471 	 * were successfully typed; such nodes represent candidates for
1472 	 * an exhaustive type search.
1473 	 */
1474 	for (e = node->tgn_outgoing; e != NULL; e = e->tge_nextout) {
1475 		if (e->tge_dest->tgn_typelist) {
1476 			stats->tgs_candidates++;
1477 			break;
1478 		}
1479 	}
1480 }
1481 
1482 /*ARGSUSED*/
1483 static int
1484 typegraph_stats_buffer(uintptr_t addr, void *ignored, tg_stats_t *stats)
1485 {
1486 	tg_node_t *node;
1487 
1488 	stats->tgs_buffers++;
1489 
1490 	if ((node = typegraph_search(addr)) == NULL) {
1491 		return (WALK_NEXT);
1492 	}
1493 
1494 	typegraph_stats_node(node, stats);
1495 
1496 	return (WALK_NEXT);
1497 }
1498 
1499 /*ARGSUSED*/
1500 void
1501 typegraph_stat_print(char *name, size_t stat)
1502 {
1503 	mdb_printf("typegraph: ");
1504 
1505 	if (name == NULL) {
1506 		mdb_printf("\n");
1507 		return;
1508 	}
1509 
1510 	mdb_printf("%30s => %ld\n", name, stat);
1511 }
1512 
1513 static void
1514 typegraph_stat_str(char *name, char *str)
1515 {
1516 	mdb_printf("typegraph: %30s => %s\n", name, str);
1517 }
1518 
1519 static void
1520 typegraph_stat_perc(char *name, size_t stat, size_t total)
1521 {
1522 	int perc = (stat * 100) / total;
1523 	int tenths = ((stat * 1000) / total) % 10;
1524 
1525 	mdb_printf("typegraph: %30s => %-13ld (%2d.%1d%%)\n", name, stat,
1526 	    perc, tenths);
1527 }
1528 
1529 static void
1530 typegraph_stat_time(int last)
1531 {
1532 	static hrtime_t ts;
1533 	hrtime_t pass;
1534 
1535 	if (ts == 0) {
1536 		pass = (ts = gethrtime()) - tg_start;
1537 	} else {
1538 		hrtime_t now = gethrtime();
1539 
1540 		pass = now - ts;
1541 		ts = now;
1542 	}
1543 
1544 	mdb_printf("typegraph: %30s => %lld seconds\n",
1545 	    "time elapsed, this pass", pass / NANOSEC);
1546 	mdb_printf("typegraph: %30s => %lld seconds\n",
1547 	    "time elapsed, total", (ts - tg_start) / NANOSEC);
1548 	mdb_printf("typegraph:\n");
1549 
1550 	if (last)
1551 		ts = 0;
1552 }
1553 
1554 static void
1555 typegraph_stats(void)
1556 {
1557 	size_t i, n;
1558 	tg_stats_t stats;
1559 
1560 	bzero(&stats, sizeof (stats));
1561 
1562 	for (i = 0; i < tg_nnodes - tg_nanchored; i++)
1563 		typegraph_stats_node(&tg_node[i], &stats);
1564 
1565 	n = stats.tgs_nodes;
1566 
1567 	typegraph_stat_print("pass", tg_pass);
1568 	typegraph_stat_print("nodes", n);
1569 	typegraph_stat_perc("unmarked", stats.tgs_unmarked, n);
1570 	typegraph_stat_perc("known", stats.tgs_known, n);
1571 	typegraph_stat_perc("conjectured", stats.tgs_typed, n);
1572 	typegraph_stat_perc("conjectured fragments", stats.tgs_frag, n);
1573 	typegraph_stat_perc("known or conjectured",
1574 	    stats.tgs_known + stats.tgs_typed + stats.tgs_frag, n);
1575 	typegraph_stat_print("conflicts", stats.tgs_conflicts);
1576 	typegraph_stat_print("candidates", stats.tgs_candidates);
1577 	typegraph_stat_time(0);
1578 }
1579 
1580 /*
1581  * This is called both in pass1 and in subsequent passes (to propagate new type
1582  * inferences).
1583  */
1584 static void
1585 typegraph_pass1_node(tg_node_t *node, mdb_ctf_id_t type)
1586 {
1587 	tg_todo_t *first = NULL, *last = NULL, *free = NULL, *this = NULL;
1588 	tg_todo_t *todo;
1589 	tg_edge_t *e;
1590 	uintptr_t offs = 0;
1591 	size_t size;
1592 	const char *member;
1593 
1594 	if (!mdb_ctf_type_valid(type))
1595 		return;
1596 again:
1597 	/*
1598 	 * For each of the nodes corresponding to our outgoing edges,
1599 	 * determine their type.
1600 	 */
1601 	size = typegraph_size(type);
1602 
1603 	for (e = node->tgn_outgoing; e != NULL; e = e->tge_nextout) {
1604 		mdb_ctf_id_t ntype, rtype;
1605 		size_t nsize;
1606 		int kind;
1607 
1608 		/*
1609 		 * If we're being called in pass1, we're very conservative:
1610 		 *
1611 		 * (a)	If the outgoing edge is beyond the size of the type
1612 		 *	(and the current node is not the root), we refuse to
1613 		 *	descend.  This situation isn't actually hopeless -- we
1614 		 *	could be looking at array of the projected type -- but
1615 		 *	we'll allow a later phase to pass in that node and its
1616 		 *	conjectured type as the root.
1617 		 *
1618 		 * (b)	If the outgoing edge has a destination offset of
1619 		 *	something other than 0, we'll descend, but we won't
1620 		 *	add the type to the type list of the destination node.
1621 		 *	This allows us to gather information that can later be
1622 		 *	used to perform a more constrained search.
1623 		 */
1624 		if (tg_pass == TG_PASS1 && e->tge_srcoffs - offs > size)
1625 			continue;
1626 
1627 		if (offs >= typegraph_size(type))
1628 			continue;
1629 
1630 		if (e->tge_srcoffs < offs)
1631 			continue;
1632 
1633 		if (e->tge_marked)
1634 			continue;
1635 
1636 		ntype = typegraph_type_offset(type,
1637 		    e->tge_srcoffs - offs, e, &member);
1638 
1639 		if (!mdb_ctf_type_valid(ntype))
1640 			continue;
1641 
1642 		if ((kind = mdb_ctf_type_kind(ntype)) != CTF_K_POINTER)
1643 			continue;
1644 
1645 		if (mdb_ctf_type_reference(ntype, &rtype) == -1)
1646 			continue;
1647 
1648 		if (!mdb_ctf_type_valid(ntype = typegraph_resolve(rtype)))
1649 			continue;
1650 
1651 		kind = mdb_ctf_type_kind(ntype);
1652 		nsize = mdb_ctf_type_size(ntype);
1653 
1654 		if (nsize > TG_NODE_SIZE(e->tge_dest) - e->tge_destoffs)
1655 			continue;
1656 
1657 		typegraph_node_addtype(e->tge_dest, e, type,
1658 		    member, e->tge_srcoffs - offs, rtype, ntype);
1659 
1660 		if (e->tge_dest->tgn_marked)
1661 			continue;
1662 
1663 		/*
1664 		 * If our destination offset is 0 and the type that we marked
1665 		 * it with is useful, mark the node that we're
1666 		 * going to visit.  And regardless, mark the edge.
1667 		 */
1668 		if (e->tge_destoffs == 0 && kind == CTF_K_STRUCT)
1669 			e->tge_dest->tgn_marked = 1;
1670 
1671 		e->tge_marked = 1;
1672 
1673 		/*
1674 		 * If this isn't a structure, it's pointless to descend.
1675 		 */
1676 		if (kind != CTF_K_STRUCT)
1677 			continue;
1678 
1679 		if (nsize <= TG_NODE_SIZE(e->tge_dest) / 2) {
1680 			tg_node_t *dest = e->tge_dest;
1681 
1682 			/*
1683 			 * If the conjectured type is less than half of the
1684 			 * size of the object, we might be dealing with a
1685 			 * polymorphic type.  It's dangerous to descend in
1686 			 * this case -- if our conjectured type is larger than
1687 			 * the actual type, we will mispropagate.  (See the
1688 			 * description for phenomenon (c) in the block comment
1689 			 * for how this condition can arise.)  We therefore
1690 			 * only descend if we are in pass4 and there is only
1691 			 * one inference for this node.
1692 			 */
1693 			if (tg_pass < TG_PASS4)
1694 				continue;
1695 
1696 			if (dest->tgn_typelist == NULL ||
1697 			    dest->tgn_typelist->tgt_next != NULL) {
1698 				/*
1699 				 * There is either no inference for this node,
1700 				 * or more than one -- in either case, chicken
1701 				 * out.
1702 				 */
1703 				continue;
1704 			}
1705 		}
1706 
1707 		if (free != NULL) {
1708 			todo = free;
1709 			free = free->tgtd_next;
1710 		} else {
1711 			todo = mdb_alloc(sizeof (tg_todo_t), UM_SLEEP);
1712 		}
1713 
1714 		todo->tgtd_node = e->tge_dest;
1715 		todo->tgtd_type = ntype;
1716 		todo->tgtd_offs = e->tge_destoffs;
1717 		todo->tgtd_next = NULL;
1718 
1719 		if (last == NULL) {
1720 			first = last = todo;
1721 		} else {
1722 			last->tgtd_next = todo;
1723 			last = todo;
1724 		}
1725 	}
1726 
1727 	/*
1728 	 * If this was from a to-do list, it needs to be freed.
1729 	 */
1730 	if (this != NULL) {
1731 		this->tgtd_next = free;
1732 		free = this;
1733 	}
1734 
1735 	/*
1736 	 * Now peel something off of the to-do list.
1737 	 */
1738 	if (first != NULL) {
1739 		this = first;
1740 		first = first->tgtd_next;
1741 		if (first == NULL)
1742 			last = NULL;
1743 
1744 		node = this->tgtd_node;
1745 		offs = this->tgtd_offs;
1746 		type = this->tgtd_type;
1747 		goto again;
1748 	}
1749 
1750 	/*
1751 	 * Nothing more to do -- free the to-do list.
1752 	 */
1753 	while (free != NULL) {
1754 		this = free->tgtd_next;
1755 		mdb_free(free, sizeof (tg_todo_t));
1756 		free = this;
1757 	}
1758 }
1759 
1760 static void
1761 typegraph_pass1(void)
1762 {
1763 	int i;
1764 
1765 	tg_pass = TG_PASS1;
1766 	for (i = 0; i < tg_nnodes; i++)
1767 		typegraph_pass1_node(&tg_node[i], tg_node[i].tgn_type);
1768 }
1769 
1770 static void
1771 typegraph_pass2_node(tg_node_t *node)
1772 {
1773 	mdb_ctf_id_t type, ntype;
1774 	size_t tsize, nsize, rem, offs, limit;
1775 	uintptr_t base, addr;
1776 	int fam, kind;
1777 	tg_type_t *tp, *found = NULL;
1778 
1779 	if (mdb_ctf_type_valid(node->tgn_type))
1780 		return;
1781 
1782 	for (tp = node->tgn_typelist; tp != NULL; tp = tp->tgt_next) {
1783 		if ((kind = mdb_ctf_type_kind(tp->tgt_type)) == CTF_K_UNION) {
1784 			/*
1785 			 * Fucking unions...
1786 			 */
1787 			found = NULL;
1788 			break;
1789 		}
1790 
1791 		if (kind == CTF_K_POINTER || kind == CTF_K_STRUCT) {
1792 			if (found != NULL) {
1793 				/*
1794 				 * There's more than one interpretation for
1795 				 * this structure; for now, we punt.
1796 				 */
1797 				found = NULL;
1798 				break;
1799 			}
1800 			found = tp;
1801 		}
1802 	}
1803 
1804 	if (found == NULL ||
1805 	    (found->tgt_flags & (TG_TYPE_ARRAY | TG_TYPE_NOTARRAY)))
1806 		return;
1807 
1808 	fam = typegraph_hasfam(type = found->tgt_type, &ntype);
1809 
1810 	if (fam) {
1811 		/*
1812 		 * If this structure has a flexible array member, and the
1813 		 * FAM type isn't a struct or pointer, we're going to treat
1814 		 * it as if it did not have a FAM.
1815 		 */
1816 		kind = mdb_ctf_type_kind(ntype);
1817 
1818 		if (kind != CTF_K_POINTER && kind != CTF_K_STRUCT)
1819 			fam = 0;
1820 	}
1821 
1822 	tsize = typegraph_size(type);
1823 	nsize = TG_NODE_SIZE(node);
1824 
1825 	if (!fam) {
1826 		/*
1827 		 * If this doesn't have a flexible array member, and our
1828 		 * preferred type is greater than half the size of the node, we
1829 		 * won't try to treat it as an array.
1830 		 */
1831 		if (tsize > nsize / 2)
1832 			return;
1833 
1834 		if ((rem = (nsize % tsize)) != 0) {
1835 			/*
1836 			 * If the next-smaller cache size is zero, we were
1837 			 * expecting the type size to evenly divide the node
1838 			 * size -- we must not have the right type.
1839 			 */
1840 			if (node->tgn_smaller == 0)
1841 				return;
1842 
1843 			if (nsize - rem <= node->tgn_smaller) {
1844 				/*
1845 				 * If this were really an array of this type,
1846 				 * we would have allocated it out of a smaller
1847 				 * cache -- it's either not an array (e.g.,
1848 				 * it's a bigger, unknown structure) or it's an
1849 				 * array of some other type.  In either case,
1850 				 * we punt.
1851 				 */
1852 				return;
1853 			}
1854 		}
1855 	}
1856 
1857 	/*
1858 	 * So far, this looks like it might be an array.
1859 	 */
1860 	if (node->tgn_smaller != 0) {
1861 		limit = node->tgn_smaller;
1862 	} else {
1863 		limit = TG_NODE_SIZE(node);
1864 	}
1865 
1866 	base = node->tgn_base;
1867 
1868 	if (fam) {
1869 		found->tgt_flags |= TG_TYPE_HASFAM;
1870 
1871 		for (offs = 0; offs < limit; ) {
1872 			ntype = typegraph_type_offset(type, offs, NULL, NULL);
1873 
1874 			if (!mdb_ctf_type_valid(ntype)) {
1875 				offs++;
1876 				continue;
1877 			}
1878 
1879 			if (!typegraph_couldbe(base + offs, ntype)) {
1880 				found->tgt_flags |= TG_TYPE_NOTARRAY;
1881 				return;
1882 			}
1883 
1884 			offs += mdb_ctf_type_size(ntype);
1885 		}
1886 	} else {
1887 		for (offs = 0; offs < tsize; ) {
1888 			ntype = typegraph_type_offset(type, offs, NULL, NULL);
1889 
1890 			if (!mdb_ctf_type_valid(ntype)) {
1891 				offs++;
1892 				continue;
1893 			}
1894 
1895 			for (addr = base + offs; addr < base + limit;
1896 			    addr += tsize) {
1897 				if (typegraph_couldbe(addr, ntype))
1898 					continue;
1899 
1900 				found->tgt_flags |= TG_TYPE_NOTARRAY;
1901 				return;
1902 			}
1903 
			offs += mdb_ctf_type_size(ntype);
		}
1907 	}
1908 
1909 	/*
1910 	 * Now mark this type as an array, and reattempt pass1 from this node.
1911 	 */
1912 	found->tgt_flags |= TG_TYPE_ARRAY;
1913 	typegraph_pass1_node(node, type);
1914 }
1915 
1916 static void
1917 typegraph_pass2(void)
1918 {
1919 	int i;
1920 
1921 	tg_pass = TG_PASS2;
1922 	do {
1923 		tg_improved = 0;
1924 
1925 		for (i = 0; i < tg_nnodes; i++)
1926 			typegraph_pass2_node(&tg_node[i]);
1927 	} while (tg_improved);
1928 }
1929 
1930 static void
1931 typegraph_pass3(void)
1932 {
1933 	tg_node_t *node;
1934 	tg_type_t *tp;
1935 	size_t i;
1936 	uintptr_t loffs;
1937 
1938 	tg_pass = TG_PASS3;
1939 	loffs = offsetof(tg_node_t, tgn_typelist);
1940 
1941 again:
1942 	/*
1943 	 * In this pass, we're going to coalesce types.  We're looking for
1944 	 * nodes where one possible type is a structure, and another is
1945 	 * either a CTF_K_INTEGER variant (e.g. "char", "void") or a
1946 	 * CTF_K_FORWARD (an opaque forward definition).
1947 	 *
1948 	 * N.B.  It might appear to be beneficial to coalesce types when
1949 	 * the possibilities include two structures, and one is contained
1950 	 * within the other (e.g., "door_node" contains a "vnode" as its
1951 	 * first member; "vnode" could be smooshed, leaving just "door_node").
1952 	 * This optimization is overly aggressive, however:  there are too
1953 	 * many places where we pull stunts with structures such that they're
1954 	 * actually polymorphic (e.g., "nc_cache" and "ncache").  Performing
1955 	 * this optimization would run the risk of false propagation --
1956 	 * which we want to avoid if at all possible.
1957 	 */
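	/*
	 * To make the coalescing concrete, consider a (purely hypothetical)
	 * node whose type list reads:
	 *
	 *     { "char", "struct frotz" (forward), "struct door_node" }
	 *
	 * Because the list contains a structure, the CTF_K_INTEGER entry
	 * ("char") and the CTF_K_FORWARD entry ("struct frotz") are removed,
	 * leaving only "struct door_node".  A list that contains no structure
	 * at all is left untouched.
	 */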
1958 	for (i = 0; i < tg_nnodes; i++) {
1959 		tg_type_t **list;
1960 
1961 		list = (tg_type_t **)((uintptr_t)(node = &tg_node[i]) + loffs);
1962 
1963 		if (mdb_ctf_type_valid(node->tgn_type))
1964 			continue;
1965 
1966 		if (*list == NULL)
1967 			continue;
1968 
1969 		/*
1970 		 * First, scan for type CTF_K_STRUCT.  If we find it, eliminate
1971 		 * everything that's a CTF_K_INTEGER or CTF_K_FORWARD.
1972 		 */
1973 		for (tp = *list; tp != NULL; tp = tp->tgt_next) {
1974 			if (mdb_ctf_type_kind(tp->tgt_type) == CTF_K_STRUCT)
1975 				break;
1976 		}
1977 
1978 		if (tp != NULL) {
1979 			tg_type_t *prev = NULL, *next;
1980 
1981 			for (tp = *list; tp != NULL; tp = next) {
1982 				int kind = mdb_ctf_type_kind(tp->tgt_type);
1983 
1984 				next = tp->tgt_next;
1985 
1986 				if (kind == CTF_K_INTEGER ||
1987 				    kind == CTF_K_FORWARD) {
1988 					if (prev == NULL) {
1989 						*list = next;
1990 					} else {
1991 						prev->tgt_next = next;
1992 					}
1993 
1994 					mdb_free(tp, sizeof (tg_type_t));
1995 				} else {
1996 					prev = tp;
1997 				}
1998 			}
1999 		}
2000 	}
2001 
2002 	if (loffs == offsetof(tg_node_t, tgn_typelist)) {
2003 		loffs = offsetof(tg_node_t, tgn_fraglist);
2004 		goto again;
2005 	}
2006 }
2007 
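/*
 * Pass 4 attacks nodes that have no inferences at all by looking at their
 * referrers:  for each incoming edge that points to offset 0 from a node of
 * known (or singly conjectured) struct or pointer type, we note the
 * referrer's type.  If all such referrers agree on a single type, we
 * re-propagate from one of them, in the hope that the pointer's type will
 * resolve this node.
 */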
2008 static void
2009 typegraph_pass4_node(tg_node_t *node)
2010 {
2011 	tg_edge_t *e;
2012 	mdb_ctf_id_t type, ntype;
2013 	tg_node_t *src = NULL;
2014 	int kind;
2015 
2016 	if (mdb_ctf_type_valid(node->tgn_type))
2017 		return;
2018 
2019 	if (node->tgn_typelist != NULL)
2020 		return;
2021 
2022 	mdb_ctf_type_invalidate(&type);
2023 
2024 	/*
2025 	 * Now we want to iterate over all incoming edges.  If we can find an
2026 	 * incoming edge pointing to offset 0 from a node of known or
2027 	 * conjectured type, check the types of the referring node.
2028 	 */
2029 	for (e = node->tgn_incoming; e != NULL; e = e->tge_nextin) {
2030 		tg_node_t *n = e->tge_src;
2031 
2032 		if (e->tge_destoffs != 0)
2033 			continue;
2034 
2035 		if (!mdb_ctf_type_valid(ntype = n->tgn_type)) {
2036 			if (n->tgn_typelist != NULL &&
2037 			    n->tgn_typelist->tgt_next == NULL) {
2038 				ntype = n->tgn_typelist->tgt_type;
2039 			}
2040 
2041 			if (!mdb_ctf_type_valid(ntype))
2042 				continue;
2043 		}
2044 
2045 		kind = mdb_ctf_type_kind(ntype);
2046 
2047 		if (kind != CTF_K_STRUCT && kind != CTF_K_POINTER)
2048 			continue;
2049 
2050 		if (src != NULL && mdb_ctf_type_cmp(type, ntype) != 0) {
2051 			/*
2052 			 * We have two valid, potentially conflicting
2053 			 * interpretations for this node -- chicken out.
2054 			 */
2055 			src = NULL;
2056 			break;
2057 		}
2058 
2059 		src = n;
2060 		type = ntype;
2061 	}
2062 
2063 	if (src != NULL)
2064 		typegraph_pass1_node(src, type);
2065 }
2066 
2067 static void
2068 typegraph_pass4(void)
2069 {
2070 	size_t i, conjectured[2], gen = 0;
2071 
2072 	conjectured[1] = tg_nnodes;
2073 
2074 	tg_pass = TG_PASS4;
2075 	do {
2076 		conjectured[gen] = 0;
2077 
2078 		for (i = 0; i < tg_nnodes; i++) {
2079 			if (tg_node[i].tgn_typelist != NULL)
2080 				conjectured[gen]++;
2081 			typegraph_pass4_node(&tg_node[i]);
2082 		}
2083 
2084 		/*
2085 		 * Perform another pass3 to coalesce any new conflicts.
2086 		 */
2087 		typegraph_pass3();
2088 		tg_pass = TG_PASS4;
2089 		gen ^= 1;
2090 	} while (conjectured[gen ^ 1] < conjectured[gen]);
2091 }
2092 
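/*
 * The post-pass walks the graph depth-first from each node, using an
 * explicit stack (rather than recursion) to keep the stack depth bounded.
 * For each node it computes tgn_reach:  an estimate of the number of
 * unknown-type nodes reachable from it through other unknown-type nodes
 * (including the node itself, if its type is unknown).  The reach is later
 * used to report the unknown node whose identification would pay the
 * greatest dividend.
 */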
2093 static void
2094 typegraph_postpass_node(tg_node_t *node)
2095 {
2096 	size_t total = 0;
2097 	tg_edge_t *e, *edge = node->tgn_outgoing;
2098 	tg_poststate_t *free = NULL, *stack = NULL, *state;
2099 	tg_node_t *dest;
2100 
2101 	if (node->tgn_postmarked)
2102 		return;
2103 
2104 push:
2105 	node->tgn_postmarked = 1;
2106 	node->tgn_reach = 0;
2107 
2108 pop:
2109 	for (e = edge; e != NULL; e = e->tge_nextout) {
2110 		dest = e->tge_dest;
2111 
2112 		if (dest->tgn_postmarked)
2113 			continue;
2114 
2115 		/*
2116 		 * Add a new element and descend.
2117 		 */
2118 		if (free == NULL) {
2119 			state = mdb_alloc(sizeof (tg_poststate_t), UM_SLEEP);
2120 		} else {
2121 			state = free;
2122 			free = free->tgps_next;
2123 		}
2124 
2125 		state->tgps_node = node;
2126 		state->tgps_edge = e;
2127 		state->tgps_total = total;
2128 		state->tgps_next = stack;
2129 		stack = state;
2130 
2131 		node = dest;
2132 		edge = dest->tgn_outgoing;
2133 		goto push;
2134 	}
2135 
2136 	if (!mdb_ctf_type_valid(node->tgn_type) &&
2137 	    node->tgn_typelist == NULL && node->tgn_fraglist == NULL) {
2138 		/*
2139 		 * We are an unknown node; our count must reflect this.
2140 		 */
2141 		node->tgn_reach++;
2142 	}
2143 
2144 	/*
2145 	 * Now we need to check for state to pop.
2146 	 */
2147 	if ((state = stack) != NULL) {
2148 		edge = state->tgps_edge;
2149 		node = state->tgps_node;
2150 		total = state->tgps_total;
2151 		dest = edge->tge_dest;
2152 
2153 		stack = state->tgps_next;
2154 		state->tgps_next = free;
2155 		free = state;
2156 
2157 		if (!mdb_ctf_type_valid(dest->tgn_type) &&
2158 		    dest->tgn_typelist == NULL && dest->tgn_fraglist == NULL) {
2159 			/*
2160 			 * We only sum our child's reach into our own if
2161 			 * that child is of unknown type.  This prevents long
2162 			 * chains of non-increasing reach.
2163 			 */
2164 			node->tgn_reach += dest->tgn_reach;
2165 		}
2166 
2167 		edge = edge->tge_nextout;
2168 		goto pop;
2169 	}
2170 
2171 	/*
2172 	 * We need to free our freelist.
2173 	 */
2174 	while (free != NULL) {
2175 		state = free;
2176 		free = free->tgps_next;
2177 		mdb_free(state, sizeof (tg_poststate_t));
2178 	}
2179 }
2180 
2181 static void
2182 typegraph_postpass(void)
2183 {
2184 	int i, max = 0;
2185 	tg_node_t *node, *maxnode = NULL;
2186 	char c[256];
2187 
2188 	for (i = 0; i < tg_nnodes; i++)
2189 		tg_node[i].tgn_postmarked = 0;
2190 
2191 	/*
2192 	 * From those nodes with unknown type and no outgoing edges, we want
	 * to emanate towards the root.
2194 	 */
2195 	for (i = tg_nnodes - tg_nanchored; i < tg_nnodes; i++) {
2196 		node = &tg_node[i];
2197 
2198 		typegraph_postpass_node(node);
2199 	}
2200 
2201 	for (i = 0; i < tg_nnodes - tg_nanchored; i++) {
2202 		node = &tg_node[i];
2203 
2204 		if (mdb_ctf_type_valid(node->tgn_type))
2205 			continue;
2206 
2207 		if (node->tgn_reach < max)
2208 			continue;
2209 
2210 		maxnode = node;
2211 		max = node->tgn_reach;
2212 	}
2213 
2214 	typegraph_stat_str("pass", "post");
2215 
2216 	if (maxnode != NULL) {
		mdb_snprintf(c, sizeof (c), "%p", maxnode->tgn_base);
2219 	} else {
2220 		strcpy(c, "-");
2221 	}
2222 
2223 	typegraph_stat_print("nodes", tg_nnodes - tg_nanchored);
2224 	typegraph_stat_str("greatest unknown node reach", c);
2225 	typegraph_stat_perc("reachable unknown nodes",
2226 	    max, tg_nnodes - tg_nanchored);
2227 	typegraph_stat_time(1);
2228 }
2229 
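/*
 * Reset all node and edge marks, then run passes 1 through 4 (printing
 * statistics after each) followed by the post-pass.
 */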
2230 static void
2231 typegraph_allpass(int first)
2232 {
2233 	size_t i;
2234 	tg_edge_t *e;
2235 
2236 	if (!first)
2237 		tg_start = gethrtime();
2238 
2239 	for (i = 0; i < tg_nnodes; i++) {
2240 		tg_node[i].tgn_marked = 0;
2241 		tg_node[i].tgn_postmarked = 0;
2242 
2243 		for (e = tg_node[i].tgn_incoming; e != NULL; e = e->tge_nextin)
2244 			e->tge_marked = 0;
2245 	}
2246 
2247 	typegraph_pass1();
2248 	typegraph_stats();
2249 	typegraph_pass2();
2250 	typegraph_stats();
2251 	typegraph_pass3();
2252 	typegraph_stats();
2253 	typegraph_pass4();
2254 	typegraph_stats();
2255 	typegraph_postpass();
2256 }
2257 
2258 /*ARGSUSED*/
2259 static int
2260 typegraph_modctl(uintptr_t addr, const struct modctl *m, int *ignored)
2261 {
2262 	struct module mod;
2263 	tg_node_t *node;
2264 	mdb_ctf_id_t type;
2265 
2266 	if (m->mod_mp == NULL)
2267 		return (WALK_NEXT);
2268 
2269 	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
2270 		mdb_warn("couldn't read modctl %p's module", addr);
2271 		return (WALK_NEXT);
2272 	}
2273 
2274 	/*
2275 	 * As long as we're here, we're going to mark the address pointed
2276 	 * to by mod_mp as a "struct module" (mod_mp is defined to be a
2277 	 * void *).  Yes, this is a horrible kludge -- but it's not like
2278 	 * this code isn't already depending on the fact that mod_mp is
2279 	 * actually a pointer to "struct module" (see the mdb_vread(), above).
2280 	 * Without this, we can't identify any of the objects allocated by
2281 	 * krtld.
2282 	 */
2283 	if ((node = typegraph_search((uintptr_t)m->mod_mp)) != NULL) {
2284 		if (mdb_ctf_lookup_by_name("struct module", &type) != -1)
2285 			node->tgn_type = type;
2286 	}
2287 
2288 	typegraph_build((uintptr_t)mod.data, mod.data_size);
2289 	typegraph_build((uintptr_t)mod.bss, mod.bss_size);
2290 
2291 	return (WALK_NEXT);
2292 }
2293 
2294 static void
2295 typegraph_sort(void)
2296 {
2297 	size_t i;
2298 
2299 	if (tg_sorted)
2300 		mdb_free(tg_sorted, tg_nsorted * sizeof (tg_node_t *));
2301 
2302 	tg_nsorted = tg_nnodes;
2303 	tg_sorted = mdb_alloc(tg_nsorted * sizeof (tg_node_t *), UM_SLEEP);
2304 
2305 	for (i = 0; i < tg_nsorted; i++)
2306 		tg_sorted[i] = &tg_node[i];
2307 
2308 	qsort(tg_sorted, tg_nsorted, sizeof (tg_node_t *), typegraph_nodecmp);
2309 }
2310 
2311 static void
2312 typegraph_known_node(uintptr_t addr, const char *typename)
2313 {
2314 	tg_node_t *node;
2315 	mdb_ctf_id_t type;
2316 
2317 	if ((node = typegraph_search(addr)) == NULL) {
2318 		mdb_warn("couldn't find node corresponding to "
2319 		    "%s at %p\n", typename, addr);
2320 		return;
2321 	}
2322 
2323 	if (mdb_ctf_lookup_by_name(typename, &type) == -1) {
2324 		mdb_warn("couldn't find type for '%s'", typename);
2325 		return;
2326 	}
2327 
2328 	node->tgn_type = type;
2329 }
2330 
2331 /*
2332  * There are a few important nodes that are impossible to figure out without
2333  * some carnal knowledge.
2334  */
2335 static void
2336 typegraph_known_nodes(void)
2337 {
2338 	uintptr_t segkp;
2339 
2340 	if (mdb_readvar(&segkp, "segkp") == -1) {
2341 		mdb_warn("couldn't read 'segkp'");
2342 	} else {
2343 		struct seg seg;
2344 
2345 		if (mdb_vread(&seg, sizeof (seg), segkp) == -1) {
2346 			mdb_warn("couldn't read seg at %p", segkp);
2347 		} else {
2348 			typegraph_known_node((uintptr_t)seg.s_data,
2349 			    "struct segkp_segdata");
2350 		}
2351 	}
2352 }
2353 
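/*
 * ::typegraph -- build the type graph (if it has not yet been built) and run
 * the full set of identification passes.  Nodes are seeded from the kmem
 * caches, the vmem arenas and module data/bss; threads, kstats and modules
 * provide the initial nodes of known type.  If the graph has already been
 * built and an address is specified, the address is taken to be a kmem
 * cache, and per-cache identification statistics are printed instead.
 */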
2354 /*ARGSUSED*/
2355 int
2356 typegraph(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
2357 {
2358 	size_t est = 0;
2359 	tg_node_t *tgp;
2360 	kmem_cache_t c;
2361 	tg_stats_t stats;
2362 	mdb_ctf_id_t type;
2363 	int wasbuilt = tg_built;
2364 	uintptr_t kstat_arena;
2365 	uint_t perc;
2366 	int i;
2367 
2368 	if (!mdb_prop_postmortem) {
2369 		mdb_warn("typegraph: can only be run on a system "
2370 		    "dump; see dumpadm(1M)\n");
2371 		return (DCMD_ERR);
2372 	}
2373 
2374 	tg_verbose = 0;
2375 
2376 	if (mdb_getopts(argc, argv,
2377 	    'v', MDB_OPT_SETBITS, TRUE, &tg_verbose, NULL) != argc)
2378 		return (DCMD_USAGE);
2379 
2380 	if (tg_built)
2381 		goto trace;
2382 
2383 	tg_start = gethrtime();
2384 	typegraph_stat_str("pass", "initial");
2385 	typegraph_typetab_init();
2386 
2387 	/*
2388 	 * First, we need an estimate on the number of buffers.
2389 	 */
2390 	if (mdb_walk("kmem_cache",
2391 	    (mdb_walk_cb_t)typegraph_estimate, &est) == -1) {
2392 		mdb_warn("couldn't walk 'kmem_cache'");
2393 		return (DCMD_ERR);
2394 	}
2395 
2396 	if (mdb_walk("modctl",
2397 	    (mdb_walk_cb_t)typegraph_estimate_modctl, &est) == -1) {
2398 		mdb_warn("couldn't walk 'modctl'");
2399 		return (DCMD_ERR);
2400 	}
2401 
2402 	if (mdb_walk("vmem",
2403 	    (mdb_walk_cb_t)typegraph_estimate_vmem, &est) == -1) {
2404 		mdb_warn("couldn't walk 'vmem'");
2405 		return (DCMD_ERR);
2406 	}
2407 
2408 	typegraph_stat_print("maximum nodes", est);
2409 
2410 	tgp = tg_node = mdb_zalloc(sizeof (tg_node_t) * est, UM_SLEEP);
2411 	for (i = 0; i < est; i++)
2412 		mdb_ctf_type_invalidate(&tg_node[i].tgn_type);
2413 
2414 	if (mdb_walk("vmem", (mdb_walk_cb_t)typegraph_vmem, &tgp) == -1) {
2415 		mdb_warn("couldn't walk 'vmem'");
2416 		return (DCMD_ERR);
2417 	}
2418 
2419 	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)typegraph_kmem, &tgp) == -1) {
2420 		mdb_warn("couldn't walk 'kmem_cache'");
2421 		return (DCMD_ERR);
2422 	}
2423 
2424 	tg_nnodes = tgp - tg_node;
2425 
2426 	typegraph_stat_print("actual nodes", tg_nnodes);
2427 
2428 	typegraph_sort();
2429 
2430 	if (mdb_ctf_lookup_by_name("kthread_t", &type) == -1) {
2431 		mdb_warn("couldn't find 'kthread_t'");
2432 		return (DCMD_ERR);
2433 	}
2434 
2435 	if (mdb_walk("thread", (mdb_walk_cb_t)typegraph_thread, &type) == -1) {
2436 		mdb_warn("couldn't walk 'thread'");
2437 		return (DCMD_ERR);
2438 	}
2439 
2440 	if (mdb_ctf_lookup_by_name("ekstat_t", &type) == -1) {
2441 		mdb_warn("couldn't find 'ekstat_t'");
2442 		return (DCMD_ERR);
2443 	}
2444 
2445 	if (mdb_readvar(&kstat_arena, "kstat_arena") == -1) {
2446 		mdb_warn("couldn't read 'kstat_arena'");
2447 		return (DCMD_ERR);
2448 	}
2449 
2450 	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)typegraph_kstat,
2451 	    &type, kstat_arena) == -1) {
2452 		mdb_warn("couldn't walk kstat vmem arena");
2453 		return (DCMD_ERR);
2454 	}
2455 
2456 	if (mdb_walk("modctl", (mdb_walk_cb_t)typegraph_modctl, NULL) == -1) {
2457 		mdb_warn("couldn't walk 'modctl'");
2458 		return (DCMD_ERR);
2459 	}
2460 
2461 	typegraph_stat_print("anchored nodes", tg_nanchored);
2462 	tg_nnodes += tg_nanchored;
2463 	typegraph_sort();
2464 	typegraph_known_nodes();
2465 	typegraph_stat_time(0);
2466 	tg_built = 1;
2467 
2468 trace:
2469 	if (!wasbuilt || !(flags & DCMD_ADDRSPEC)) {
2470 		typegraph_allpass(!wasbuilt);
2471 		return (DCMD_OK);
2472 	}
2473 
2474 	bzero(&stats, sizeof (stats));
2475 
2476 	/*
2477 	 * If we've been given an address, it's a kmem cache.
2478 	 */
2479 	if (mdb_vread(&c, sizeof (c), addr) == -1) {
2480 		mdb_warn("couldn't read kmem_cache at %p", addr);
2481 		return (DCMD_ERR);
2482 	}
2483 
2484 	if (mdb_pwalk("kmem",
2485 	    (mdb_walk_cb_t)typegraph_stats_buffer, &stats, addr) == -1) {
2486 		mdb_warn("can't walk kmem for cache %p", addr);
2487 		return (DCMD_ERR);
2488 	}
2489 
2490 	if (DCMD_HDRSPEC(flags))
2491 		mdb_printf("%-25s %7s %7s %7s %7s %7s %7s %5s\n", "NAME",
2492 		    "BUFS", "NODES", "UNMRK", "KNOWN",
2493 		    "INFER", "FRAG", "HIT%");
2494 
2495 	if (stats.tgs_nodes) {
2496 		perc = ((stats.tgs_known + stats.tgs_typed +
2497 		    stats.tgs_frag) * 1000) / stats.tgs_nodes;
2498 	} else {
2499 		perc = 0;
2500 	}
2501 
2502 	mdb_printf("%-25s %7ld %7ld %7ld %7ld %7ld %7ld %3d.%1d\n",
2503 	    c.cache_name, stats.tgs_buffers, stats.tgs_nodes,
2504 	    stats.tgs_unmarked, stats.tgs_known, stats.tgs_typed,
2505 	    stats.tgs_frag, perc / 10, perc % 10);
2506 
2507 	return (DCMD_OK);
2508 }
2509 
2510 int
2511 typegraph_built(void)
2512 {
2513 	if (!tg_built) {
2514 		mdb_warn("type graph not yet built; run ::typegraph.\n");
2515 		return (0);
2516 	}
2517 
2518 	return (1);
2519 }
2520 
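/*
 * ::whattype -- report everything known (or suspected) about the node
 * containing the specified address:  its known type if we have one, or else
 * its possible inferred and fragment types.  With -v, the node's incoming
 * and outgoing edges are dumped as well.
 */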
2521 int
2522 whattype(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
2523 {
2524 	tg_node_t *node;
2525 	tg_edge_t *e;
2526 	char buf[MDB_SYM_NAMLEN];
2527 	tg_type_t *tp;
2528 	int verbose = 0;
2529 
2530 	if (mdb_getopts(argc, argv,
2531 	    'v', MDB_OPT_SETBITS, TRUE, &verbose, NULL) != argc)
2532 		return (DCMD_USAGE);
2533 
2534 	if (!(flags & DCMD_ADDRSPEC))
2535 		return (DCMD_USAGE);
2536 
2537 	if (!typegraph_built())
2538 		return (DCMD_ABORT);
2539 
2540 	if ((node = typegraph_search(addr)) == NULL) {
2541 		mdb_warn("%p does not correspond to a node.\n", addr);
2542 		return (DCMD_OK);
2543 	}
2544 
2545 	if (!verbose) {
2546 		mdb_printf("%p is %p+%p, ", addr, node->tgn_base,
2547 		    addr - node->tgn_base);
2548 
2549 		if (mdb_ctf_type_valid(node->tgn_type)) {
2550 			mdb_printf("%s\n", mdb_ctf_type_name(node->tgn_type,
2551 			    buf, sizeof (buf)));
2552 			return (DCMD_OK);
2553 		}
2554 
2555 		if ((tp = node->tgn_typelist) == NULL) {
2556 			if ((tp = node->tgn_fraglist) == NULL) {
2557 				mdb_printf("unknown type\n");
2558 				return (DCMD_OK);
2559 			}
2560 		}
2561 
2562 		if (tp->tgt_next == NULL && mdb_ctf_type_valid(tp->tgt_type)) {
2563 			int kind = mdb_ctf_type_kind(tp->tgt_type);
2564 			size_t offs = tp->tgt_redge->tge_destoffs;
2565 
2566 			mdb_printf("possibly %s%s ",
2567 			    tp->tgt_flags & TG_TYPE_ARRAY ? "array of " : "",
2568 			    typegraph_type_name(tp->tgt_type, tp->tgt_utype));
2569 
2570 			if (kind != CTF_K_STRUCT && kind != CTF_K_UNION &&
2571 			    mdb_ctf_type_valid(tp->tgt_rtype) &&
2572 			    tp->tgt_rmember != NULL) {
2573 				mdb_printf("(%s.%s) ",
2574 				    mdb_ctf_type_name(tp->tgt_rtype, buf,
2575 				    sizeof (buf)), tp->tgt_rmember);
2576 			}
2577 
2578 			if (offs != 0)
2579 				mdb_printf("at %p", node->tgn_base + offs);
2580 
2581 			mdb_printf("\n");
2582 			return (DCMD_OK);
2583 		}
2584 
2585 		mdb_printf("possibly one of the following:\n");
2586 
2587 		for (; tp != NULL; tp = tp->tgt_next) {
2588 			size_t offs = tp->tgt_redge->tge_destoffs;
2589 
2590 			mdb_printf("  %s%s ",
2591 			    tp->tgt_flags & TG_TYPE_ARRAY ? "array of " : "",
2592 			    typegraph_type_name(tp->tgt_type, tp->tgt_utype));
2593 
2594 			if (offs != 0)
2595 				mdb_printf("at %p ", node->tgn_base + offs);
2596 
2597 			mdb_printf("(from %p+%p, type %s)\n",
2598 			    tp->tgt_redge->tge_src->tgn_base,
2599 			    tp->tgt_redge->tge_srcoffs,
2600 			    mdb_ctf_type_name(tp->tgt_rtype,
2601 			    buf, sizeof (buf)) != NULL ? buf : "<unknown>");
2602 		}
2603 
2604 		mdb_printf("\n");
2605 
2606 		return (DCMD_OK);
2607 	}
2608 
2609 	mdb_printf("%-?s %-?s %-29s %5s %5s %s\n", "BASE", "LIMIT", "TYPE",
2610 	    "SIZE", "REACH", "MRK");
2611 	mdb_printf("%-?p %-?p %-29s %5d %5d %s\n",
2612 	    node->tgn_base, node->tgn_limit,
2613 	    mdb_ctf_type_name(node->tgn_type,
2614 	    buf, sizeof (buf)) != NULL ? buf : "<unknown>",
2615 	    typegraph_size(node->tgn_type), node->tgn_reach,
2616 	    node->tgn_marked ? "yes" : "no");
2617 
2618 	mdb_printf("\n");
2619 	mdb_printf("  %-20s %?s %8s %-20s %s\n",
2620 	    "INFERENCE", "FROM", "SRCOFFS", "REFTYPE", "REFMEMBER");
2621 
2622 	for (tp = node->tgn_typelist; tp != NULL; tp = tp->tgt_next) {
2623 		mdb_printf("  %-20s %?p %8p %-20s %s\n",
2624 		    typegraph_type_name(tp->tgt_type, tp->tgt_utype),
2625 		    tp->tgt_redge->tge_src->tgn_base,
2626 		    tp->tgt_redge->tge_srcoffs,
2627 		    mdb_ctf_type_name(tp->tgt_rtype,
2628 		    buf, sizeof (buf)) != NULL ? buf : "<unknown>",
2629 		    tp->tgt_rmember != NULL ? tp->tgt_rmember : "-");
2630 	}
2631 
2632 	mdb_printf("\n");
2633 	mdb_printf("  %-20s %?s %8s %-20s %s\n",
2634 	    "FRAGMENT", "FROM", "SRCOFFS", "REFTYPE", "REFMEMBER");
2635 
2636 	for (tp = node->tgn_fraglist; tp != NULL; tp = tp->tgt_next) {
2637 		mdb_printf("  %-20s %?p %8p %-20s %s\n",
2638 		    typegraph_type_name(tp->tgt_type, tp->tgt_utype),
2639 		    tp->tgt_redge->tge_src->tgn_base,
2640 		    tp->tgt_redge->tge_srcoffs,
2641 		    mdb_ctf_type_name(tp->tgt_rtype,
2642 		    buf, sizeof (buf)) != NULL ? buf : "<unknown>",
2643 		    tp->tgt_rmember != NULL ? tp->tgt_rmember : "-");
2644 	}
2645 
2646 	mdb_printf("\n");
2647 
2648 	mdb_printf("  %?s %8s %8s %6s %6s %5s\n", "FROM", "SRCOFFS", "DESTOFFS",
2649 	    "MARKED", "STATUS", "REACH");
2650 
2651 	for (e = node->tgn_incoming; e != NULL; e = e->tge_nextin) {
2652 		tg_node_t *n = e->tge_src;
2653 
2654 		mdb_printf("  %?p %8p %8p %6s %6s %ld\n",
2655 		    n->tgn_base, e->tge_srcoffs, e->tge_destoffs,
2656 		    e->tge_marked ? "yes" : "no",
2657 		    mdb_ctf_type_valid(n->tgn_type) ? "known" :
2658 		    n->tgn_typelist != NULL ? "inferd" :
2659 		    n->tgn_fraglist != NULL ? "frgmnt" : "unknwn",
2660 		    n->tgn_reach);
2661 	}
2662 
2663 	mdb_printf("\n  %?s %8s %8s %6s %6s %5s\n", "TO", "SRCOFFS", "DESTOFFS",
2664 	    "MARKED", "STATUS", "REACH");
2665 
2666 	for (e = node->tgn_outgoing; e != NULL; e = e->tge_nextout) {
2667 		tg_node_t *n = e->tge_dest;
2668 
2669 		mdb_printf("  %?p %8p %8p %6s %6s %ld\n",
2670 		    n->tgn_base, e->tge_srcoffs, e->tge_destoffs,
2671 		    e->tge_marked ? "yes" : "no",
2672 		    mdb_ctf_type_valid(n->tgn_type) ? "known" :
2673 		    n->tgn_typelist != NULL ? "inferd" :
2674 		    n->tgn_fraglist != NULL ? "frgmnt" : "unknwn",
2675 		    n->tgn_reach);
2676 	}
2677 
2678 	mdb_printf("\n");
2679 
2680 	return (DCMD_OK);
2681 }
2682 
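/*
 * ::istype -- manually assert that the node containing the specified address
 * is of the specified type, and rerun all passes to propagate the new
 * information through the graph.
 */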
2683 int
2684 istype(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
2685 {
2686 	tg_node_t *node;
2687 	mdb_ctf_id_t type;
2688 
2689 	if (!(flags & DCMD_ADDRSPEC) || argc != 1 ||
2690 	    argv[0].a_type != MDB_TYPE_STRING)
2691 		return (DCMD_USAGE);
2692 
2693 	if (!typegraph_built())
2694 		return (DCMD_ABORT);
2695 
2696 	/*
2697 	 * Determine the node corresponding to the passed address.
2698 	 */
2699 	if ((node = typegraph_search(addr)) == NULL) {
2700 		mdb_warn("%p not found\n", addr);
2701 		return (DCMD_ERR);
2702 	}
2703 
2704 	/*
2705 	 * Now look up the specified type.
2706 	 */
2707 	if (mdb_ctf_lookup_by_name(argv[0].a_un.a_str, &type) == -1) {
2708 		mdb_warn("could not find type %s", argv[0].a_un.a_str);
2709 		return (DCMD_ERR);
2710 	}
2711 
2712 	node->tgn_type = type;
2713 	typegraph_allpass(0);
2714 
2715 	return (DCMD_OK);
2716 }
2717 
2718 /*ARGSUSED*/
2719 int
2720 notype(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
2721 {
2722 	tg_node_t *node;
2723 
2724 	if (!(flags & DCMD_ADDRSPEC) || argc != 0)
2725 		return (DCMD_USAGE);
2726 
2727 	if (!typegraph_built())
2728 		return (DCMD_ABORT);
2729 
2730 	if ((node = typegraph_search(addr)) == NULL) {
2731 		mdb_warn("%p not found\n", addr);
2732 		return (DCMD_ERR);
2733 	}
2734 
2735 	mdb_ctf_type_invalidate(&node->tgn_type);
2736 	typegraph_allpass(0);
2737 
2738 	return (DCMD_OK);
2739 }
2740 
2741 int
2742 typegraph_walk_init(mdb_walk_state_t *wsp)
2743 {
2744 	wsp->walk_data = (void *)0;
2745 	return (WALK_NEXT);
2746 }
2747 
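/*
 * The "typeconflict" walker steps through nodes of unknown type for which
 * more than one type has been inferred.
 */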
2748 int
2749 typeconflict_walk_step(mdb_walk_state_t *wsp)
2750 {
2751 	size_t ndx;
2752 	tg_node_t *node;
2753 
2754 	for (ndx = (size_t)wsp->walk_data; ndx < tg_nnodes; ndx++) {
2755 		node = &tg_node[ndx];
2756 
2757 		if (mdb_ctf_type_valid(node->tgn_type))
2758 			continue;
2759 
2760 		if (node->tgn_typelist == NULL)
2761 			continue;
2762 
2763 		if (node->tgn_typelist->tgt_next == NULL)
2764 			continue;
2765 
2766 		break;
2767 	}
2768 
2769 	if (ndx == tg_nnodes)
2770 		return (WALK_DONE);
2771 
2772 	wsp->walk_data = (void *)++ndx;
2773 	return (wsp->walk_callback(node->tgn_base, NULL, wsp->walk_cbdata));
2774 }
2775 
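/*
 * The "typeunknown" walker steps through nodes for which we have no type
 * information whatsoever:  no known type, no inferences and no fragments.
 */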
2776 int
2777 typeunknown_walk_step(mdb_walk_state_t *wsp)
2778 {
2779 	size_t ndx;
2780 	tg_node_t *node;
2781 
2782 	for (ndx = (size_t)wsp->walk_data; ndx < tg_nnodes; ndx++) {
2783 		node = &tg_node[ndx];
2784 
2785 		if (mdb_ctf_type_valid(node->tgn_type))
2786 			continue;
2787 
2788 		if (node->tgn_typelist != NULL)
2789 			continue;
2790 
2791 		if (node->tgn_fraglist != NULL)
2792 			continue;
2793 
2794 		break;
2795 	}
2796 
2797 	if (ndx == tg_nnodes)
2798 		return (WALK_DONE);
2799 
2800 	wsp->walk_data = (void *)++ndx;
2801 	return (wsp->walk_callback(node->tgn_base, NULL, wsp->walk_cbdata));
2802 }
2803 
2804 #define	FINDLOCKS_DEPTH		32
2805 
2806 typedef struct foundlock {
2807 	uintptr_t	fnd_addr;
2808 	uintptr_t	fnd_owner;
2809 	const char	*fnd_member[FINDLOCKS_DEPTH];
2810 	mdb_ctf_id_t	fnd_parent;
2811 	tg_node_t	*fnd_node;
2812 } foundlock_t;
2813 
2814 typedef struct findlocks {
2815 	uintptr_t	fl_addr;
2816 	uintptr_t	fl_thread;
2817 	size_t		fl_ndx;
2818 	size_t		fl_nlocks;
2819 	foundlock_t	*fl_locks;
2820 	mdb_ctf_id_t	fl_parent;
2821 	tg_node_t	*fl_node;
2822 	const char	*fl_member[FINDLOCKS_DEPTH - 1];
2823 	int		fl_depth;
2824 } findlocks_t;
2825 
2826 /*ARGSUSED*/
2827 static int
2828 findlocks_owner(uintptr_t addr, const void *data, void *owner)
2829 {
2830 	*((uintptr_t *)owner) = addr;
2831 
2832 	return (WALK_NEXT);
2833 }
2834 
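/*
 * findlocks_findmutex() recursively descends a type -- through its members
 * and array elements -- looking for kmutex_t's.  For each mutex found, the
 * "mutex_owner" walker is used to find the owning thread; if the owner is a
 * kthread_t (and matches the thread of interest, if one was specified), the
 * held lock is recorded in the findlocks_t, along with the member path that
 * led to it.
 */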
2835 static int
2836 findlocks_findmutex(const char *name, mdb_ctf_id_t type, ulong_t offs,
2837     findlocks_t *fl)
2838 {
2839 	static int called = 0;
2840 	static mdb_ctf_id_t mutex;
2841 	static mdb_ctf_id_t thread;
2842 	mdb_ctf_id_t parent = fl->fl_parent;
2843 	uintptr_t addr = fl->fl_addr;
2844 	int kind, depth = fl->fl_depth, i;
2845 	foundlock_t *found;
2846 
2847 	offs /= NBBY;
2848 
2849 	if (!called) {
2850 		if (mdb_ctf_lookup_by_name("kmutex_t", &mutex) == -1) {
2851 			mdb_warn("can't find 'kmutex_t' type");
2852 			return (1);
2853 		}
2854 
2855 		if (!mdb_ctf_type_valid(mutex = typegraph_resolve(mutex))) {
2856 			mdb_warn("can't resolve 'kmutex_t' type");
2857 			return (1);
2858 		}
2859 
2860 		if (mdb_ctf_lookup_by_name("kthread_t", &thread) == -1) {
2861 			mdb_warn("can't find 'kthread_t' type");
2862 			return (1);
2863 		}
2864 
2865 		if (!mdb_ctf_type_valid(thread = typegraph_resolve(thread))) {
2866 			mdb_warn("can't resolve 'kthread_t' type");
2867 			return (1);
2868 		}
2869 
2870 		called = 1;
2871 	}
2872 
2873 	if (!mdb_ctf_type_valid(type))
2874 		return (0);
2875 
2876 	type = typegraph_resolve(type);
2877 	kind = mdb_ctf_type_kind(type);
2878 
2879 	if (!mdb_ctf_type_valid(type))
2880 		return (0);
2881 
2882 	if (kind == CTF_K_ARRAY) {
2883 		mdb_ctf_arinfo_t arr;
2884 		ssize_t size;
2885 
2886 		if (mdb_ctf_array_info(type, &arr) == -1)
2887 			return (0);
2888 
2889 		type = typegraph_resolve(arr.mta_contents);
2890 
2891 		if (!mdb_ctf_type_valid(type))
2892 			return (0);
2893 
2894 		/*
2895 		 * Small optimization:  don't bother running through the array
2896 		 * if we know that we can't process the type.
2897 		 */
2898 		kind = mdb_ctf_type_kind(type);
2899 		size = mdb_ctf_type_size(type);
2900 
2901 		if (kind == CTF_K_POINTER || kind == CTF_K_INTEGER)
2902 			return (0);
2903 
2904 		for (i = 0; i < arr.mta_nelems; i++) {
2905 			fl->fl_addr = addr + offs + (i * size);
2906 			findlocks_findmutex(name, type, 0, fl);
2907 		}
2908 
2909 		fl->fl_addr = addr;
2910 
2911 		return (0);
2912 	}
2913 
2914 	if (kind != CTF_K_STRUCT)
2915 		return (0);
2916 
2917 	if (mdb_ctf_type_cmp(type, mutex) == 0) {
2918 		mdb_ctf_id_t ttype;
2919 		uintptr_t owner = NULL;
2920 		tg_node_t *node;
2921 
2922 		if (mdb_pwalk("mutex_owner",
2923 		    findlocks_owner, &owner, addr + offs) == -1) {
2924 			return (0);
2925 		}
2926 
2927 		/*
2928 		 * Check to see if the owner is a thread.
2929 		 */
2930 		if (owner == NULL || (node = typegraph_search(owner)) == NULL)
2931 			return (0);
2932 
2933 		if (!mdb_ctf_type_valid(node->tgn_type))
2934 			return (0);
2935 
2936 		ttype = typegraph_resolve(node->tgn_type);
2937 
2938 		if (!mdb_ctf_type_valid(ttype))
2939 			return (0);
2940 
2941 		if (mdb_ctf_type_cmp(ttype, thread) != 0)
2942 			return (0);
2943 
2944 		if (fl->fl_thread != NULL && owner != fl->fl_thread)
2945 			return (0);
2946 
2947 		if (fl->fl_ndx >= fl->fl_nlocks) {
2948 			size_t nlocks, osize, size;
2949 			foundlock_t *locks;
2950 
2951 			if ((nlocks = (fl->fl_nlocks << 1)) == 0)
2952 				nlocks = 1;
2953 
2954 			osize = fl->fl_nlocks * sizeof (foundlock_t);
2955 			size = nlocks * sizeof (foundlock_t);
2956 
2957 			locks = mdb_zalloc(size, UM_SLEEP);
2958 
2959 			if (fl->fl_locks) {
2960 				bcopy(fl->fl_locks, locks, osize);
2961 				mdb_free(fl->fl_locks, osize);
2962 			}
2963 
2964 			fl->fl_locks = locks;
2965 			fl->fl_nlocks = nlocks;
2966 		}
2967 
2968 		found = &fl->fl_locks[fl->fl_ndx++];
2969 		found->fnd_addr = (uintptr_t)addr + offs;
2970 		found->fnd_owner = owner;
2971 
2972 		for (i = 0; i < fl->fl_depth; i++)
2973 			found->fnd_member[i] = fl->fl_member[i];
2974 
2975 		found->fnd_member[i] = name;
2976 		found->fnd_parent = fl->fl_parent;
2977 		found->fnd_node = fl->fl_node;
2978 
2979 		return (0);
2980 	}
2981 
2982 	fl->fl_addr = (uintptr_t)addr + offs;
2983 
2984 	if (name == NULL) {
2985 		fl->fl_parent = type;
2986 	} else if (depth < FINDLOCKS_DEPTH - 1) {
2987 		fl->fl_member[depth] = name;
2988 		fl->fl_depth++;
2989 	}
2990 
2991 	mdb_ctf_member_iter(type, (mdb_ctf_member_f *)findlocks_findmutex, fl);
2992 
2993 	fl->fl_addr = addr;
2994 	fl->fl_parent = parent;
2995 	fl->fl_depth = depth;
2996 
2997 	return (0);
2998 }
2999 
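/*
 * findlocks_node() selects the best structural interpretation of a node
 * (punting on unions and on nodes with multiple possible interpretations),
 * and then searches it -- or, for inferred arrays, each of its elements --
 * for held mutexes.  For nodes of unknown type, fragment types are searched
 * as well.
 */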
3000 static void
3001 findlocks_node(tg_node_t *node, findlocks_t *fl)
3002 {
3003 	mdb_ctf_id_t type = node->tgn_type, ntype;
3004 	int kind;
3005 	tg_type_t *tp, *found = NULL;
3006 
3007 	if (!mdb_ctf_type_valid(type)) {
3008 		mdb_ctf_type_invalidate(&type);
3009 
3010 		for (tp = node->tgn_typelist; tp != NULL; tp = tp->tgt_next) {
3011 			kind = mdb_ctf_type_kind(ntype = tp->tgt_type);
3012 
3013 			if (kind == CTF_K_UNION) {
				/*
				 * Unions are ambiguous; we can't safely pick
				 * an interpretation, so skip this node.
				 */
3017 				return;
3018 			}
3019 
3020 			if (kind != CTF_K_STRUCT && kind != CTF_K_ARRAY)
3021 				continue;
3022 
3023 			if (found != NULL) {
3024 				/*
3025 				 * There are multiple interpretations for this
3026 				 * node; we have to punt.
3027 				 */
3028 				return;
3029 			}
3030 
3031 			found = tp;
3032 		}
3033 	}
3034 
3035 	if (found != NULL)
3036 		type = found->tgt_type;
3037 
3038 	fl->fl_parent = type;
3039 	fl->fl_node = node;
3040 
3041 	/*
3042 	 * We have our type.  Now iterate for locks.  Note that we don't yet
3043 	 * deal with locks in flexible array members.
3044 	 */
3045 	if (found != NULL && (found->tgt_flags & TG_TYPE_ARRAY) &&
3046 	    !(found->tgt_flags & TG_TYPE_HASFAM)) {
3047 		uintptr_t base, limit = node->tgn_limit;
3048 		size_t size = mdb_ctf_type_size(found->tgt_type);
3049 
3050 		for (base = node->tgn_base; base < limit; base += size) {
3051 			fl->fl_addr = base;
3052 			findlocks_findmutex(NULL, type, 0, fl);
3053 		}
3054 	} else {
3055 		fl->fl_addr = node->tgn_base;
3056 		findlocks_findmutex(NULL, type, 0, fl);
3057 	}
3058 
3059 	if (mdb_ctf_type_valid(type))
3060 		return;
3061 
3062 	for (tp = node->tgn_fraglist; tp != NULL; tp = tp->tgt_next) {
3063 		kind = mdb_ctf_type_kind(ntype = tp->tgt_type);
3064 
3065 		if (kind != CTF_K_STRUCT && kind != CTF_K_ARRAY)
3066 			continue;
3067 
3068 		fl->fl_addr = node->tgn_base + tp->tgt_redge->tge_destoffs;
3069 		fl->fl_parent = ntype;
3070 		findlocks_findmutex(NULL, ntype, 0, fl);
3071 	}
3072 }
3073 
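/*
 * ::findlocks -- report all held kmutex_t's discovered via the type graph.
 * If a thread address is specified, only locks owned by that thread are
 * reported.
 */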
3074 /*ARGSUSED*/
3075 int
3076 findlocks(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
3077 {
3078 	size_t i, j;
3079 	findlocks_t fl;
3080 
3081 	if (argc != 0)
3082 		return (DCMD_USAGE);
3083 
3084 	if (!typegraph_built())
3085 		return (DCMD_ABORT);
3086 
3087 	if (!(flags & DCMD_ADDRSPEC))
3088 		addr = 0;
3089 
3090 	bzero(&fl, sizeof (fl));
3091 	fl.fl_thread = addr;
3092 
3093 	for (i = 0; i < tg_nnodes; i++) {
3094 		findlocks_node(&tg_node[i], &fl);
3095 	}
3096 
3097 	for (i = 0; i < fl.fl_ndx; i++) {
3098 		foundlock_t *found = &fl.fl_locks[i];
3099 		char buf[MDB_SYM_NAMLEN];
3100 
3101 		if (found->fnd_member[0] != NULL) {
3102 			mdb_printf("%p (%s",
3103 			    found->fnd_addr,
3104 			    mdb_ctf_type_name(found->fnd_parent, buf,
3105 			    sizeof (buf)));
3106 
3107 			for (j = 0; found->fnd_member[j] != NULL; j++)
3108 				mdb_printf(".%s", found->fnd_member[j]);
3109 
3110 			mdb_printf(") is owned by %p\n", found->fnd_owner);
3111 		} else {
3112 			if (found->fnd_node->tgn_incoming == NULL) {
3113 				mdb_printf("%p (%a) is owned by %p\n",
3114 				    found->fnd_addr, found->fnd_addr,
3115 				    found->fnd_owner);
3116 			} else {
3117 				mdb_printf("%p is owned by %p\n",
3118 				    found->fnd_addr, found->fnd_owner);
3119 			}
3120 		}
3121 	}
3122 
3123 	mdb_printf("findlocks: nota bene: %slocks may be held",
3124 	    fl.fl_nlocks ? "other " : "");
3125 
3126 	if (addr == NULL) {
3127 		mdb_printf("\n");
3128 	} else {
3129 		mdb_printf(" by %p\n", addr);
3130 	}
3131 
3132 	if (fl.fl_nlocks)
3133 		mdb_free(fl.fl_locks, fl.fl_nlocks * sizeof (foundlock_t));
3134 
3135 	return (DCMD_OK);
3136 }
3137 
3138 /*
3139  * ::findfalse:  Using type knowledge to detect potential false sharing
3140  *
3141  * In caching SMP systems, memory is kept coherent through bus-based snooping
3142  * protocols.  Under these protocols, only a single cache may have a given line
3143  * of memory in a dirty state.  If a different cache wishes to write to the
3144  * dirty line, the new cache must first read-to-own the dirty line from the
3145  * owning cache.  The size of the line used for coherence (the coherence
3146  * granularity) has an immediate ramification for parallel software:  because
3147  * only one cache may own a line at a given time, one wishes to avoid a
3148  * situation where two or more small, disjoint data structures are both
3149  * (a) contained within a single line and (b) accessed in parallel on disjoint
3150  * CPUs.  This situation -- so-called "false sharing" -- can induce suboptimal
3151  * scalability in otherwise scalable software.
3152  *
3153  * Historically, one has been able to find false sharing only with some
3154  * combination of keen intuition and good luck.  And where false sharing has
3155  * been discovered, it has almost always been after having induced suboptimal
3156  * scaling; one has historically not been able to detect false sharing before
3157  * the fact.
3158  *
3159  * Building on the mechanism for postmortem type information, however, we
 * can -- from a system crash dump -- detect the potentially most egregious
3161  * cases of false sharing.  Specifically, after having run through the type
3162  * identification passes described above, we can iterate over all nodes,
3163  * looking for nodes that satisfy the following criteria:
3164  *
3165  *  (a)	The node is an array.  That is, the node was either determined to
3166  * 	be of type CTF_K_ARRAY, or the node was inferred to be an array in
3167  *	pass2 of type identification (described above).
3168  *
3169  *  (b)	Each element of the array is a structure that is smaller than the
3170  *	coherence granularity.
3171  *
3172  *  (c)	The total size of the array is greater than the coherence granularity.
3173  *
3174  *  (d)	Each element of the array is a structure that contains within it a
3175  *	synchronization primitive (mutex, readers/writer lock, condition
3176  *	variable or semaphore).  We use the presence of a synchronization
3177  *	primitive as a crude indicator that the disjoint elements of the
3178  *	array are accessed in parallel.
3179  *
3180  * Any node satisfying these criteria is identified as an object that could
3181  * potentially suffer from false sharing, and the node's address, symbolic
3182  * name (if any), type, type size and total size are provided as output.
3183  *
3184  * While there are some instances of false sharing that do not meet the
3185  * above criteria (e.g., if the synchronization for each element is handled
3186  * in a separate structure, or if the elements are only manipulated with
3187  * atomic memory operations), these criteria yield many examples of false
3188  * sharing without swamping the user with false positives.
3189  */
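/*
 * As a purely hypothetical illustration, a statically declared array such as
 *
 *     typedef struct frotz {
 *             kmutex_t  f_lock;
 *             uint32_t  f_count;
 *     } frotz_t;
 *
 *     frotz_t frotz[NCPU];
 *
 * would be reported:  each frotz_t is a structure smaller than the coherence
 * granularity that contains a synchronization primitive, and (for any
 * non-trivial NCPU) the array as a whole is larger than the coherence
 * granularity.
 */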
3190 #define	FINDFALSE_COHERENCE_SIZE	64
3191 
3192 /*ARGSUSED*/
3193 static int
3194 findfalse_findsync(const char *name, mdb_ctf_id_t type, ulong_t offs,
3195     void *ignored)
3196 {
3197 	int i, kind;
3198 	static int called = 0;
3199 	static struct {
3200 		char *name;
3201 		mdb_ctf_id_t type;
3202 	} sync[] = {
3203 		{ "kmutex_t" },
3204 		{ "krwlock_t" },
3205 		{ "kcondvar_t" },
3206 		{ "ksema_t" },
3207 		{ NULL }
3208 	};
3209 
3210 	if (!called) {
3211 		char *name;
3212 
3213 		called = 1;
3214 
3215 		for (i = 0; (name = sync[i].name) != NULL; i++) {
3216 			if (mdb_ctf_lookup_by_name(name, &sync[i].type) == -1) {
3217 				mdb_warn("can't find '%s' type", name);
3218 				return (0);
3219 			}
3220 
3221 			sync[i].type = typegraph_resolve(sync[i].type);
3222 
3223 			if (!mdb_ctf_type_valid(sync[i].type)) {
3224 				mdb_warn("can't resolve '%s' type", name);
3225 				return (0);
3226 			}
3227 		}
3228 	}
3229 
3230 	/*
3231 	 * See if this type is any of the synchronization primitives.
3232 	 */
3233 	if (!mdb_ctf_type_valid(type))
3234 		return (0);
3235 
3236 	type = typegraph_resolve(type);
3237 
3238 	for (i = 0; sync[i].name != NULL; i++) {
3239 		if (mdb_ctf_type_cmp(type, sync[i].type) == 0) {
3240 			/*
3241 			 * We have a winner!
3242 			 */
3243 			return (1);
3244 		}
3245 	}
3246 
3247 	if ((kind = mdb_ctf_type_kind(type)) == CTF_K_ARRAY) {
3248 		mdb_ctf_arinfo_t arr;
3249 
3250 		if (mdb_ctf_array_info(type, &arr) == -1)
3251 			return (0);
3252 
3253 		type = typegraph_resolve(arr.mta_contents);
3254 
3255 		return (findfalse_findsync(name, type, 0, NULL));
3256 	}
3257 
3258 	if (kind != CTF_K_STRUCT)
3259 		return (0);
3260 
3261 	if (mdb_ctf_member_iter(type,
3262 	    (mdb_ctf_member_f *)findfalse_findsync, NULL) != 0)
3263 		return (1);
3264 
3265 	return (0);
3266 }
3267 
3268 static void
3269 findfalse_node(tg_node_t *node)
3270 {
3271 	mdb_ctf_id_t type = node->tgn_type;
3272 	tg_type_t *tp, *found = NULL;
3273 	ssize_t size;
3274 	int kind;
3275 	char buf[MDB_SYM_NAMLEN + 1];
3276 	GElf_Sym sym;
3277 
3278 	if (!mdb_ctf_type_valid(type)) {
3279 		mdb_ctf_type_invalidate(&type);
3280 
3281 		for (tp = node->tgn_typelist; tp != NULL; tp = tp->tgt_next) {
3282 			kind = mdb_ctf_type_kind(tp->tgt_type);
3283 
3284 			if (kind == CTF_K_UNION) {
3285 				/*
3286 				 * Once again, the unions impede progress...
3287 				 */
3288 				return;
3289 			}
3290 
3291 			if (kind != CTF_K_STRUCT && kind != CTF_K_ARRAY)
3292 				continue;
3293 
3294 			if (found != NULL) {
3295 				/*
3296 				 * There are multiple interpretations for this
3297 				 * node; we have to punt.
3298 				 */
3299 				return;
3300 			}
3301 
3302 			found = tp;
3303 		}
3304 	}
3305 
3306 	if (found != NULL)
3307 		type = found->tgt_type;
3308 
3309 	if (!mdb_ctf_type_valid(type))
3310 		return;
3311 
3312 	kind = mdb_ctf_type_kind(type);
3313 
3314 	/*
3315 	 * If this isn't an array (or treated as one), it can't induce false
3316 	 * sharing.  (Or at least, we can't detect it.)
3317 	 */
3318 	if (found != NULL) {
3319 		if (!(found->tgt_flags & TG_TYPE_ARRAY))
3320 			return;
3321 
3322 		if (found->tgt_flags & TG_TYPE_HASFAM)
3323 			return;
3324 	} else {
3325 		if (kind != CTF_K_ARRAY)
3326 			return;
3327 	}
3328 
3329 	if (kind == CTF_K_ARRAY) {
3330 		mdb_ctf_arinfo_t arr;
3331 
3332 		if (mdb_ctf_array_info(type, &arr) == -1)
3333 			return;
3334 
3335 		type = typegraph_resolve(arr.mta_contents);
3336 
3337 		if (!mdb_ctf_type_valid(type))
			return;
	}
3341 
3342 	size = mdb_ctf_type_size(type);
3343 
3344 	/*
3345 	 * If the size is greater than or equal to the cache line size, it's
3346 	 * not false sharing.  (Or at least, the false sharing is benign.)
3347 	 */
3348 	if (size >= FINDFALSE_COHERENCE_SIZE)
3349 		return;
3350 
3351 	if (TG_NODE_SIZE(node) <= FINDFALSE_COHERENCE_SIZE)
3352 		return;
3353 
3354 	/*
3355 	 * This looks like it could be a falsely shared structure.  If this
3356 	 * type contains a mutex, rwlock, semaphore or condition variable,
3357 	 * we're going to report it.
3358 	 */
3359 	if (!findfalse_findsync(NULL, type, 0, NULL))
3360 		return;
3361 
3362 	mdb_printf("%?p ", node->tgn_base);
3363 
3364 	if (mdb_lookup_by_addr(node->tgn_base, MDB_SYM_EXACT, buf,
3365 	    sizeof (buf), &sym) != -1) {
3366 		mdb_printf("%-28s ", buf);
3367 	} else {
3368 		mdb_printf("%-28s ", "-");
3369 	}
3370 
3371 	mdb_printf("%-22s %2d %7ld\n",
3372 	    mdb_ctf_type_name(type, buf, sizeof (buf)), size,
3373 	    TG_NODE_SIZE(node));
3374 }
3375 
3376 /*ARGSUSED*/
3377 int
3378 findfalse(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
3379 {
3380 	ssize_t i;
3381 
3382 	if (argc != 0 || (flags & DCMD_ADDRSPEC))
3383 		return (DCMD_USAGE);
3384 
3385 	if (!typegraph_built())
3386 		return (DCMD_ABORT);
3387 
3388 	mdb_printf("%?s %-28s %-22s %2s %7s\n", "ADDR", "SYMBOL", "TYPE",
3389 	    "SZ", "TOTSIZE");
3390 
3391 	/*
3392 	 * We go from the back of the bus and move forward to report false
3393 	 * sharing in named symbols before reporting false sharing in dynamic
3394 	 * structures.
3395 	 */
3396 	for (i = tg_nnodes - 1; i >= 0; i--)
3397 		findfalse_node(&tg_node[i]);
3398 
3399 	return (DCMD_OK);
3400 }
3401