1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * MDB Target Layer
31  *
32  * The *target* is the program being inspected by the debugger.  The MDB target
33  * layer provides a set of functions that insulate common debugger code,
34  * including the MDB Module API, from the implementation details of how the
35  * debugger accesses information from a given target.  Each target exports a
36  * standard set of properties, including one or more address spaces, one or
37  * more symbol tables, a set of load objects, and a set of threads that can be
38  * examined using the interfaces in <mdb/mdb_target.h>.  This technique has
39  * been employed successfully in other debuggers, including [1], primarily
40  * to improve portability, although the term "target" often refers to the
41  * encapsulation of architectural or operating system-specific details.  The
42  * target abstraction is useful for MDB because it allows us to easily extend
43  * the debugger to examine a variety of different program forms.  Primarily,
44  * the target functions validate input arguments and then call an appropriate
45  * function in the target ops vector, defined in <mdb/mdb_target_impl.h>.
46  * However, this interface layer provides a very high level of flexibility for
47  * separating the debugger interface from instrumentation details.  Experience
48  * has shown this kind of design can facilitate separating out debugger
49  * instrumentation into an external agent [2] and enable the development of
50  * advanced instrumentation frameworks [3].  We want MDB to be an ideal
51  * extensible framework for the development of such applications.
52  *
53  * Aside from a set of wrapper functions, the target layer also provides event
54  * management for targets that represent live executing programs.  Our model of
55  * events is also extensible, and is based upon work in [3] and [4].  We define
56  * a *software event* as a state transition in the target program (for example,
57  * the transition of the program counter to a location of interest) that is
58  * observed by the debugger or its agent.  A *software event specifier* is a
59  * description of a class of software events that is used by the debugger to
60  * instrument the target so that the corresponding software events can be
61  * observed.  In MDB, software event specifiers are represented by the
62  * mdb_sespec_t structure, defined in <mdb/mdb_target_impl.h>.  As the user,
63  * the internal debugger code, and MDB modules may all wish to observe software
64  * events and receive appropriate notification and callbacks, we do not expose
65  * software event specifiers directly as part of the user interface.  Instead,
66  * clients of the target layer request that events be observed by creating
67  * new *virtual event specifiers*.  Each virtual specifier is named by a unique
68  * non-zero integer (the VID), and is represented by a mdb_vespec_t structure.
69  * One or more virtual specifiers are then associated with each underlying
70  * software event specifier.  This design enforces the constraint that the
71  * target must only insert one set of instrumentation, regardless of how many
72  * times the target layer was asked to trace a given event.  For example, if
73  * multiple clients request a breakpoint at a particular address, the virtual
74  * specifiers will map to the same sespec, ensuring that only one breakpoint
75  * trap instruction is actually planted at the given target address.  When no
76  * virtual specifiers refer to an sespec, it is no longer needed and can be
77  * removed, along with the corresponding instrumentation.
78  *
79  * The following state transition diagram illustrates the life cycle of a
80  * software event specifier and example transitions:
81  *
82  *                                         cont/
83  *     +--------+   delete   +--------+    stop    +-------+
84  *    (|( DEAD )|) <------- (  ACTIVE  ) <------> (  ARMED  )
85  *     +--------+            +--------+            +-------+
86  *          ^   load/unload  ^        ^   failure/     |
87  *   delete |        object /          \  reset        | failure
88  *          |              v            v              |
89  *          |      +--------+          +-------+       |
90  *          +---- (   IDLE   )        (   ERR   ) <----+
91  *          |      +--------+          +-------+
92  *          |                              |
93  *          +------------------------------+
94  *
95  * The MDB execution control model is based upon the synchronous debugging
96  * model exported by Solaris proc(4).  A target program is set running or the
97  * debugger is attached to a running target.  On ISTOP (stop on event of
98  * interest), one target thread is selected as the representative.  The
99  * algorithm for selecting the representative is target-specific, but we assume
100  * that if an observed software event has occurred, the target will select the
101  * thread that triggered the state transition of interest.  The other threads
102  * are stopped in sympathy with the representative as soon as possible.  Prior
103  * to continuing the target, we plant our instrumentation, transitioning event
104  * specifiers from the ACTIVE to the ARMED state, and then back again when the
105  * target stops.  We then query each active event specifier to learn which ones
106  * are matched, and then invoke the callbacks associated with their vespecs.
107  * If an OS error occurs while attempting to arm or disarm a specifier, the
108  * specifier is transitioned to the ERROR state; we will attempt to arm it
109  * again at the next continue.  If no target process is under our control or
110  * if an event is not currently applicable (e.g. a deferred breakpoint on an
111  * object that is not yet loaded), it remains in the IDLE state.  The target
112  * implementation should intercept object load events and then transition the
113  * specifier to the ACTIVE state when the corresponding object is loaded.
114  *
115  * To simplify the debugger implementation and allow targets to easily provide
116  * new types of observable events, most of the event specifier management is
117  * done by the target layer.  Each software event specifier provides an ops
118  * vector of subroutines that the target layer can call to perform the
119  * various state transitions described above.  The target maintains two lists
120  * of mdb_sespec_t's: the t_idle list (IDLE state) and the t_active list
121  * (ACTIVE, ARMED, and ERROR states).  Each mdb_sespec_t maintains a list of
122  * associated mdb_vespec_t's.  If an sespec is IDLE or ERROR, its se_errno
123  * field will have an errno value specifying the reason for its inactivity.
124  * The vespec stores the client's callback function and private data, and the
125  * arguments used to construct the sespec.  All objects are reference counted
126  * so we can destroy an object when it is no longer needed.  The mdb_sespec_t
127  * invariants for the respective states are as follows:
128  *
129  *   IDLE: on t_idle list, se_data == NULL, se_errno != 0, se_ctor not called
130  * ACTIVE: on t_active list, se_data valid, se_errno == 0, se_ctor called
131  *  ARMED: on t_active list, se_data valid, se_errno == 0, se_ctor called
132  *  ERROR: on t_active list, se_data valid, se_errno != 0, se_ctor called
133  *
134  * Additional commentary on specific state transitions and issues involving
135  * event management can be found below near the target layer functions.
136  *
137  * References
138  *
139  * [1] John Gilmore, "Working in GDB", Technical Report, Cygnus Support,
140  *     1.84 edition, 1994.
141  *
142  * [2] David R. Hanson and Mukund Raghavachari, "A Machine-Independent
143  *     Debugger", Software--Practice and Experience, 26(11), 1277-1299 (1996).
144  *
145  * [3] Michael W. Shapiro, "RDB: A System for Incremental Replay Debugging",
146  *     Technical Report CS-97-12, Department of Computer Science,
147  *     Brown University.
148  *
149  * [4] Daniel B. Price, "New Techniques for Replay Debugging", Technical
150  *     Report CS-98-05, Department of Computer Science, Brown University.
151  */
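
/*
 * The sketch below is illustrative only (the callback "my_bp_hit" and the
 * address variable "addr" are hypothetical, not part of this file); it shows
 * how a client of the target layer might request notification of a
 * breakpoint event using the wrapper functions defined later in this file.
 * A non-zero return value is the VID naming the new virtual specifier; zero
 * indicates failure with errno set:
 *
 *     static void
 *     my_bp_hit(mdb_tgt_t *t, int vid, void *data)
 *     {
 *             mdb_printf("breakpoint [ %d ] hit\n", vid);
 *     }
 *
 *     int vid = mdb_tgt_add_vbrkpt(t, addr, MDB_TGT_SPEC_STICKY,
 *         my_bp_hit, NULL);
 *
 * If a second client later plants a breakpoint at the same address, it
 * receives its own VID, but both vespecs share a single underlying sespec,
 * so only one breakpoint trap instruction is inserted in the target.
 */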
152 
153 #include <mdb/mdb_target_impl.h>
154 #include <mdb/mdb_debug.h>
155 #include <mdb/mdb_modapi.h>
156 #include <mdb/mdb_err.h>
157 #include <mdb/mdb_callb.h>
158 #include <mdb/mdb_gelf.h>
159 #include <mdb/mdb_io_impl.h>
160 #include <mdb/mdb_string.h>
161 #include <mdb/mdb_signal.h>
162 #include <mdb/mdb_frame.h>
163 #include <mdb/mdb.h>
164 
165 #include <sys/stat.h>
166 #include <sys/param.h>
167 #include <sys/signal.h>
168 #include <strings.h>
169 #include <stdlib.h>
170 #include <errno.h>
171 
172 /*
173  * Define convenience macros for referencing the set of vespec flag bits that
174  * are preserved by the target implementation, and the set of bits that
175  * determine automatic ve_hits == ve_limit behavior.
176  */
177 #define	T_IMPL_BITS	\
178 	(MDB_TGT_SPEC_INTERNAL | MDB_TGT_SPEC_SILENT | MDB_TGT_SPEC_MATCHED | \
179 	MDB_TGT_SPEC_DELETED)
180 
181 #define	T_AUTO_BITS	\
182 	(MDB_TGT_SPEC_AUTOSTOP | MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS)
183 
184 /*
185  * Define convenience macro for referencing target flag pending continue bits.
186  */
187 #define	T_CONT_BITS	\
188 	(MDB_TGT_F_STEP | MDB_TGT_F_STEP_OUT | MDB_TGT_F_STEP_BRANCH | \
189 	MDB_TGT_F_NEXT | MDB_TGT_F_CONT)
190 
191 mdb_tgt_t *
192 mdb_tgt_create(mdb_tgt_ctor_f *ctor, int flags, int argc, const char *argv[])
193 {
194 	mdb_module_t *mp;
195 	mdb_tgt_t *t;
196 
197 	if (flags & ~MDB_TGT_F_ALL) {
198 		(void) set_errno(EINVAL);
199 		return (NULL);
200 	}
201 
202 	t = mdb_zalloc(sizeof (mdb_tgt_t), UM_SLEEP);
203 	mdb_list_append(&mdb.m_tgtlist, t);
204 
205 	t->t_module = &mdb.m_rmod;
206 	t->t_matched = T_SE_END;
207 	t->t_flags = flags;
208 	t->t_vepos = 1;
209 	t->t_veneg = 1;
210 
211 	for (mp = mdb.m_mhead; mp != NULL; mp = mp->mod_next) {
212 		if (ctor == mp->mod_tgt_ctor) {
213 			t->t_module = mp;
214 			break;
215 		}
216 	}
217 
218 	if (ctor(t, argc, argv) != 0) {
219 		mdb_list_delete(&mdb.m_tgtlist, t);
220 		mdb_free(t, sizeof (mdb_tgt_t));
221 		return (NULL);
222 	}
223 
224 	mdb_dprintf(MDB_DBG_TGT, "t_create %s (%p)\n",
225 	    t->t_module->mod_name, (void *)t);
226 
227 	(void) t->t_ops->t_status(t, &t->t_status);
228 	return (t);
229 }
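
/*
 * For illustration (a sketch, not code from this file): the debugger core
 * builds a target by passing a target-specific constructor to
 * mdb_tgt_create() and then making the result the current target.  The
 * constructor name mdb_proc_tgt_create and the flags/argc/argv values below
 * are assumptions supplied by the caller:
 *
 *     mdb_tgt_t *t;
 *
 *     if ((t = mdb_tgt_create(mdb_proc_tgt_create, flags,
 *         argc, argv)) == NULL)
 *             return (-1);
 *
 *     mdb_tgt_activate(t);
 *
 * The constructor is responsible for filling in t->t_ops and any private
 * target data; if it fails, the partially constructed target is freed and
 * NULL is returned.
 */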
230 
231 int
232 mdb_tgt_getflags(mdb_tgt_t *t)
233 {
234 	return (t->t_flags);
235 }
236 
237 int
238 mdb_tgt_setflags(mdb_tgt_t *t, int flags)
239 {
240 	if (flags & ~MDB_TGT_F_ALL)
241 		return (set_errno(EINVAL));
242 
243 	return (t->t_ops->t_setflags(t, flags));
244 }
245 
246 int
247 mdb_tgt_setcontext(mdb_tgt_t *t, void *context)
248 {
249 	return (t->t_ops->t_setcontext(t, context));
250 }
251 
252 /*ARGSUSED*/
253 static int
254 tgt_delete_vespec(mdb_tgt_t *t, void *private, int vid, void *data)
255 {
256 	(void) mdb_tgt_vespec_delete(t, vid);
257 	return (0);
258 }
259 
260 void
261 mdb_tgt_destroy(mdb_tgt_t *t)
262 {
263 	mdb_xdata_t *xdp, *nxdp;
264 
265 	if (mdb.m_target == t) {
266 		mdb_dprintf(MDB_DBG_TGT, "t_deactivate %s (%p)\n",
267 		    t->t_module->mod_name, (void *)t);
268 		t->t_ops->t_deactivate(t);
269 		mdb.m_target = NULL;
270 	}
271 
272 	mdb_dprintf(MDB_DBG_TGT, "t_destroy %s (%p)\n",
273 	    t->t_module->mod_name, (void *)t);
274 
275 	for (xdp = mdb_list_next(&t->t_xdlist); xdp != NULL; xdp = nxdp) {
276 		nxdp = mdb_list_next(xdp);
277 		mdb_list_delete(&t->t_xdlist, xdp);
278 		mdb_free(xdp, sizeof (mdb_xdata_t));
279 	}
280 
281 	mdb_tgt_sespec_idle_all(t, EBUSY, TRUE);
282 	(void) mdb_tgt_vespec_iter(t, tgt_delete_vespec, NULL);
283 	t->t_ops->t_destroy(t);
284 
285 	mdb_list_delete(&mdb.m_tgtlist, t);
286 	mdb_free(t, sizeof (mdb_tgt_t));
287 
288 	if (mdb.m_target == NULL)
289 		mdb_tgt_activate(mdb_list_prev(&mdb.m_tgtlist));
290 }
291 
292 void
293 mdb_tgt_activate(mdb_tgt_t *t)
294 {
295 	mdb_tgt_t *otgt = mdb.m_target;
296 
297 	if (mdb.m_target != NULL) {
298 		mdb_dprintf(MDB_DBG_TGT, "t_deactivate %s (%p)\n",
299 		    mdb.m_target->t_module->mod_name, (void *)mdb.m_target);
300 		mdb.m_target->t_ops->t_deactivate(mdb.m_target);
301 	}
302 
303 	if ((mdb.m_target = t) != NULL) {
304 		const char *v = strstr(mdb.m_root, "%V");
305 
306 		mdb_dprintf(MDB_DBG_TGT, "t_activate %s (%p)\n",
307 		    t->t_module->mod_name, (void *)t);
308 
309 		/*
310 		 * If the root was explicitly set with -R and contains %V,
311 		 * expand it like a path.  If the resulting directory is
312 		 * not present, then replace %V with "latest" and re-evaluate.
313 		 */
314 		if (v != NULL) {
315 			char old_root[MAXPATHLEN];
316 			const char **p;
317 #ifndef _KMDB
318 			struct stat s;
319 #endif
320 			size_t len;
321 
322 			p = mdb_path_alloc(mdb.m_root, &len);
323 			(void) strcpy(old_root, mdb.m_root);
324 			(void) strncpy(mdb.m_root, p[0], MAXPATHLEN);
325 			mdb.m_root[MAXPATHLEN - 1] = '\0';
326 			mdb_path_free(p, len);
327 
328 #ifndef _KMDB
329 			if (stat(mdb.m_root, &s) == -1 && errno == ENOENT) {
330 				mdb.m_flags |= MDB_FL_LATEST;
331 				p = mdb_path_alloc(old_root, &len);
332 				(void) strncpy(mdb.m_root, p[0], MAXPATHLEN);
333 				mdb.m_root[MAXPATHLEN - 1] = '\0';
334 				mdb_path_free(p, len);
335 			}
336 #endif
337 		}
338 
339 		/*
340 		 * Re-evaluate the macro and dmod paths now that we have the
341 		 * new target set and m_root figured out.
342 		 */
343 		if (otgt == NULL) {
344 			mdb_set_ipath(mdb.m_ipathstr);
345 			mdb_set_lpath(mdb.m_lpathstr);
346 		}
347 
348 		t->t_ops->t_activate(t);
349 	}
350 }
351 
352 void
353 mdb_tgt_periodic(mdb_tgt_t *t)
354 {
355 	t->t_ops->t_periodic(t);
356 }
357 
358 const char *
359 mdb_tgt_name(mdb_tgt_t *t)
360 {
361 	return (t->t_ops->t_name(t));
362 }
363 
364 const char *
365 mdb_tgt_isa(mdb_tgt_t *t)
366 {
367 	return (t->t_ops->t_isa(t));
368 }
369 
370 const char *
371 mdb_tgt_platform(mdb_tgt_t *t)
372 {
373 	return (t->t_ops->t_platform(t));
374 }
375 
376 int
377 mdb_tgt_uname(mdb_tgt_t *t, struct utsname *utsp)
378 {
379 	return (t->t_ops->t_uname(t, utsp));
380 }
381 
382 int
383 mdb_tgt_dmodel(mdb_tgt_t *t)
384 {
385 	return (t->t_ops->t_dmodel(t));
386 }
387 
388 ssize_t
389 mdb_tgt_aread(mdb_tgt_t *t, mdb_tgt_as_t as,
390 	void *buf, size_t n, mdb_tgt_addr_t addr)
391 {
392 	if (t->t_flags & MDB_TGT_F_ASIO)
393 		return (t->t_ops->t_aread(t, as, buf, n, addr));
394 
395 	switch ((uintptr_t)as) {
396 	case (uintptr_t)MDB_TGT_AS_VIRT:
397 		return (t->t_ops->t_vread(t, buf, n, addr));
398 	case (uintptr_t)MDB_TGT_AS_PHYS:
399 		return (t->t_ops->t_pread(t, buf, n, addr));
400 	case (uintptr_t)MDB_TGT_AS_FILE:
401 		return (t->t_ops->t_fread(t, buf, n, addr));
402 	case (uintptr_t)MDB_TGT_AS_IO:
403 		return (t->t_ops->t_ioread(t, buf, n, addr));
404 	}
405 	return (t->t_ops->t_aread(t, as, buf, n, addr));
406 }
407 
408 ssize_t
409 mdb_tgt_awrite(mdb_tgt_t *t, mdb_tgt_as_t as,
410 	const void *buf, size_t n, mdb_tgt_addr_t addr)
411 {
412 	if (!(t->t_flags & MDB_TGT_F_RDWR))
413 		return (set_errno(EMDB_TGTRDONLY));
414 
415 	if (t->t_flags & MDB_TGT_F_ASIO)
416 		return (t->t_ops->t_awrite(t, as, buf, n, addr));
417 
418 	switch ((uintptr_t)as) {
419 	case (uintptr_t)MDB_TGT_AS_VIRT:
420 		return (t->t_ops->t_vwrite(t, buf, n, addr));
421 	case (uintptr_t)MDB_TGT_AS_PHYS:
422 		return (t->t_ops->t_pwrite(t, buf, n, addr));
423 	case (uintptr_t)MDB_TGT_AS_FILE:
424 		return (t->t_ops->t_fwrite(t, buf, n, addr));
425 	case (uintptr_t)MDB_TGT_AS_IO:
426 		return (t->t_ops->t_iowrite(t, buf, n, addr));
427 	}
428 	return (t->t_ops->t_awrite(t, as, buf, n, addr));
429 }
430 
431 ssize_t
432 mdb_tgt_vread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
433 {
434 	return (t->t_ops->t_vread(t, buf, n, addr));
435 }
436 
437 ssize_t
438 mdb_tgt_vwrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
439 {
440 	if (t->t_flags & MDB_TGT_F_RDWR)
441 		return (t->t_ops->t_vwrite(t, buf, n, addr));
442 
443 	return (set_errno(EMDB_TGTRDONLY));
444 }
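
/*
 * A brief usage sketch (the target "t" and address "addr" are assumed, not
 * defined here): for targets without MDB_TGT_F_ASIO, the generic entry point
 * dispatches on the address space argument, so the two reads below are
 * equivalent:
 *
 *     uint32_t word;
 *
 *     if (mdb_tgt_aread(t, MDB_TGT_AS_VIRT, &word, sizeof (word),
 *         addr) != sizeof (word))
 *             return (-1);
 *
 *     if (mdb_tgt_vread(t, &word, sizeof (word), addr) != sizeof (word))
 *             return (-1);
 *
 * A return value other than the requested byte count indicates a failed or
 * short transfer; on error the underlying target implementation sets errno.
 */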
445 
446 ssize_t
447 mdb_tgt_pread(mdb_tgt_t *t, void *buf, size_t n, physaddr_t addr)
448 {
449 	return (t->t_ops->t_pread(t, buf, n, addr));
450 }
451 
452 ssize_t
453 mdb_tgt_pwrite(mdb_tgt_t *t, const void *buf, size_t n, physaddr_t addr)
454 {
455 	if (t->t_flags & MDB_TGT_F_RDWR)
456 		return (t->t_ops->t_pwrite(t, buf, n, addr));
457 
458 	return (set_errno(EMDB_TGTRDONLY));
459 }
460 
461 ssize_t
462 mdb_tgt_fread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
463 {
464 	return (t->t_ops->t_fread(t, buf, n, addr));
465 }
466 
467 ssize_t
468 mdb_tgt_fwrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
469 {
470 	if (t->t_flags & MDB_TGT_F_RDWR)
471 		return (t->t_ops->t_fwrite(t, buf, n, addr));
472 
473 	return (set_errno(EMDB_TGTRDONLY));
474 }
475 
476 ssize_t
477 mdb_tgt_ioread(mdb_tgt_t *t, void *buf, size_t n, uintptr_t addr)
478 {
479 	return (t->t_ops->t_ioread(t, buf, n, addr));
480 }
481 
482 ssize_t
483 mdb_tgt_iowrite(mdb_tgt_t *t, const void *buf, size_t n, uintptr_t addr)
484 {
485 	if (t->t_flags & MDB_TGT_F_RDWR)
486 		return (t->t_ops->t_iowrite(t, buf, n, addr));
487 
488 	return (set_errno(EMDB_TGTRDONLY));
489 }
490 
491 int
492 mdb_tgt_vtop(mdb_tgt_t *t, mdb_tgt_as_t as, uintptr_t va, physaddr_t *pap)
493 {
494 	return (t->t_ops->t_vtop(t, as, va, pap));
495 }
496 
497 ssize_t
498 mdb_tgt_readstr(mdb_tgt_t *t, mdb_tgt_as_t as, char *buf,
499 	size_t nbytes, mdb_tgt_addr_t addr)
500 {
501 	ssize_t n, nread = mdb_tgt_aread(t, as, buf, nbytes, addr);
502 	char *p;
503 
504 	if (nread >= 0) {
505 		if ((p = memchr(buf, '\0', nread)) != NULL)
506 			nread = (size_t)(p - buf);
507 		goto done;
508 	}
509 
510 	nread = 0;
511 	p = &buf[0];
512 
513 	while (nread < nbytes && (n = mdb_tgt_aread(t, as, p, 1, addr)) == 1) {
514 		if (*p == '\0')
515 			return (nread);
516 		nread++;
517 		addr++;
518 		p++;
519 	}
520 
521 	if (nread == 0 && n == -1)
522 		return (-1); /* If we can't even read a byte, return -1 */
523 
524 done:
525 	if (nbytes != 0)
526 		buf[MIN(nread, nbytes - 1)] = '\0';
527 
528 	return (nread);
529 }
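
/*
 * For example (a sketch; "t" and "addr" are assumed), a caller reading a
 * NUL-terminated string out of the target's virtual address space would do:
 *
 *     char name[64];
 *     ssize_t len;
 *
 *     if ((len = mdb_tgt_readstr(t, MDB_TGT_AS_VIRT, name,
 *         sizeof (name), addr)) == -1)
 *             return (-1);
 *
 * On success, "name" is always NUL-terminated (and truncated if the string
 * did not fit), and "len" is the number of string bytes read, not counting
 * any terminating NUL that was found.
 */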
530 
531 ssize_t
532 mdb_tgt_writestr(mdb_tgt_t *t, mdb_tgt_as_t as,
533 	const char *buf, mdb_tgt_addr_t addr)
534 {
535 	ssize_t nwritten = mdb_tgt_awrite(t, as, buf, strlen(buf) + 1, addr);
536 	return (nwritten > 0 ? nwritten - 1 : nwritten);
537 }
538 
539 int
540 mdb_tgt_lookup_by_name(mdb_tgt_t *t, const char *obj,
541 	const char *name, GElf_Sym *symp, mdb_syminfo_t *sip)
542 {
543 	mdb_syminfo_t info;
544 	GElf_Sym sym;
545 	uint_t id;
546 
547 	if (name == NULL || t == NULL)
548 		return (set_errno(EINVAL));
549 
550 	if (obj == MDB_TGT_OBJ_EVERY &&
551 	    mdb_gelf_symtab_lookup_by_name(mdb.m_prsym, name, &sym, &id) == 0) {
552 		info.sym_table = MDB_TGT_PRVSYM;
553 		info.sym_id = id;
554 		goto found;
555 	}
556 
557 	if (t->t_ops->t_lookup_by_name(t, obj, name, &sym, &info) == 0)
558 		goto found;
559 
560 	return (-1);
561 
562 found:
563 	if (symp != NULL)
564 		*symp = sym;
565 	if (sip != NULL)
566 		*sip = info;
567 	return (0);
568 }
569 
570 int
571 mdb_tgt_lookup_by_addr(mdb_tgt_t *t, uintptr_t addr, uint_t flags,
572 	char *buf, size_t len, GElf_Sym *symp, mdb_syminfo_t *sip)
573 {
574 	mdb_syminfo_t info;
575 	GElf_Sym sym;
576 
577 	if (t == NULL)
578 		return (set_errno(EINVAL));
579 
580 	if (t->t_ops->t_lookup_by_addr(t, addr, flags,
581 	    buf, len, &sym, &info) == 0) {
582 		if (symp != NULL)
583 			*symp = sym;
584 		if (sip != NULL)
585 			*sip = info;
586 		return (0);
587 	}
588 
589 	return (-1);
590 }
591 
592 /*
593  * The mdb_tgt_lookup_by_scope function is a convenience routine for code that
594  * wants to look up a scoped symbol name such as "object`symbol".  It is
595  * implemented as a simple wrapper around mdb_tgt_lookup_by_name.  Note that
596  * we split on the *last* occurrence of "`", so the object name itself may
597  * contain additional scopes whose evaluation is left to the target.  This
598  * allows targets to implement additional scopes, such as source files,
599  * function names, link map identifiers, etc.
600  */
601 int
602 mdb_tgt_lookup_by_scope(mdb_tgt_t *t, const char *s, GElf_Sym *symp,
603 	mdb_syminfo_t *sip)
604 {
605 	const char *object = MDB_TGT_OBJ_EVERY;
606 	const char *name = s;
607 
608 	if (t == NULL)
609 		return (set_errno(EINVAL));
610 
611 	if (strchr(name, '`') != NULL) {
612 		char buf[MDB_TGT_SYM_NAMLEN];
613 
614 		(void) strncpy(buf, s, sizeof (buf));
615 		buf[sizeof (buf) - 1] = '\0';
616 		name = buf;
617 
618 		if ((s = strrsplit(buf, '`')) != NULL) {
619 			object = buf;
620 			name = s;
621 			if (*object == '\0')
622 				return (set_errno(EMDB_NOOBJ));
623 			if (*name == '\0')
624 				return (set_errno(EMDB_NOSYM));
625 		}
626 	}
627 
628 	return (mdb_tgt_lookup_by_name(t, object, name, symp, sip));
629 }
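
/*
 * For example (illustrative only), resolving the scoped name
 * "libc.so.1`malloc" with this function:
 *
 *     GElf_Sym sym;
 *     mdb_syminfo_t si;
 *
 *     if (mdb_tgt_lookup_by_scope(t, "libc.so.1`malloc", &sym, &si) == 0)
 *             mdb_printf("malloc is at %llx\n", (u_longlong_t)sym.st_value);
 *
 * is equivalent to calling mdb_tgt_lookup_by_name(t, "libc.so.1", "malloc",
 * &sym, &si) directly.
 */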
630 
631 int
632 mdb_tgt_symbol_iter(mdb_tgt_t *t, const char *obj, uint_t which,
633 	uint_t type, mdb_tgt_sym_f *cb, void *p)
634 {
635 	if ((which != MDB_TGT_SYMTAB && which != MDB_TGT_DYNSYM) ||
636 	    (type & ~(MDB_TGT_BIND_ANY | MDB_TGT_TYPE_ANY)) != 0)
637 		return (set_errno(EINVAL));
638 
639 	return (t->t_ops->t_symbol_iter(t, obj, which, type, cb, p));
640 }
641 
642 ssize_t
643 mdb_tgt_readsym(mdb_tgt_t *t, mdb_tgt_as_t as, void *buf, size_t nbytes,
644 	const char *obj, const char *name)
645 {
646 	GElf_Sym sym;
647 
648 	if (mdb_tgt_lookup_by_name(t, obj, name, &sym, NULL) == 0)
649 		return (mdb_tgt_aread(t, as, buf, nbytes, sym.st_value));
650 
651 	return (-1);
652 }
653 
654 ssize_t
655 mdb_tgt_writesym(mdb_tgt_t *t, mdb_tgt_as_t as, const void *buf,
656 	size_t nbytes, const char *obj, const char *name)
657 {
658 	GElf_Sym sym;
659 
660 	if (mdb_tgt_lookup_by_name(t, obj, name, &sym, NULL) == 0)
661 		return (mdb_tgt_awrite(t, as, buf, nbytes, sym.st_value));
662 
663 	return (-1);
664 }
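
/*
 * For example (a sketch; the symbol name "foo_count" is hypothetical),
 * reading the current value of a global integer from any object in the
 * target by name:
 *
 *     int cnt;
 *
 *     if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &cnt, sizeof (cnt),
 *         MDB_TGT_OBJ_EVERY, "foo_count") != sizeof (cnt))
 *             return (-1);
 */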
665 
666 int
667 mdb_tgt_mapping_iter(mdb_tgt_t *t, mdb_tgt_map_f *cb, void *p)
668 {
669 	return (t->t_ops->t_mapping_iter(t, cb, p));
670 }
671 
672 int
673 mdb_tgt_object_iter(mdb_tgt_t *t, mdb_tgt_map_f *cb, void *p)
674 {
675 	return (t->t_ops->t_object_iter(t, cb, p));
676 }
677 
678 const mdb_map_t *
679 mdb_tgt_addr_to_map(mdb_tgt_t *t, uintptr_t addr)
680 {
681 	return (t->t_ops->t_addr_to_map(t, addr));
682 }
683 
684 const mdb_map_t *
685 mdb_tgt_name_to_map(mdb_tgt_t *t, const char *name)
686 {
687 	return (t->t_ops->t_name_to_map(t, name));
688 }
689 
690 struct ctf_file *
691 mdb_tgt_addr_to_ctf(mdb_tgt_t *t, uintptr_t addr)
692 {
693 	return (t->t_ops->t_addr_to_ctf(t, addr));
694 }
695 
696 struct ctf_file *
697 mdb_tgt_name_to_ctf(mdb_tgt_t *t, const char *name)
698 {
699 	return (t->t_ops->t_name_to_ctf(t, name));
700 }
701 
702 /*
703  * Return the latest target status.  We just copy out our cached copy.  The
704  * status only needs to change when the target is run, stepped, or continued.
705  */
706 int
707 mdb_tgt_status(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
708 {
709 	uint_t dstop = (t->t_status.st_flags & MDB_TGT_DSTOP);
710 	uint_t istop = (t->t_status.st_flags & MDB_TGT_ISTOP);
711 	uint_t state = t->t_status.st_state;
712 
713 	if (tsp == NULL)
714 		return (set_errno(EINVAL));
715 
716 	/*
717 	 * If we're called with the address of the target's internal status,
718 	 * then call down to update it; otherwise copy out the saved status.
719 	 */
720 	if (tsp == &t->t_status && t->t_ops->t_status(t, &t->t_status) != 0)
721 		return (-1); /* errno is set for us */
722 
723 	/*
724 	 * Assert that our state is valid before returning it.  The state must
725 	 * be valid, and DSTOP and ISTOP cannot be set simultaneously.  ISTOP
726 	 * is only valid when stopped.  DSTOP is only valid when running or
727 	 * stopped.  If any test fails, abort the debugger.
728 	 */
729 	if (state > MDB_TGT_LOST)
730 		fail("invalid target state (%u)\n", state);
731 	if (state != MDB_TGT_STOPPED && istop)
732 		fail("target state is (%u) and ISTOP is set\n", state);
733 	if (state != MDB_TGT_STOPPED && state != MDB_TGT_RUNNING && dstop)
734 		fail("target state is (%u) and DSTOP is set\n", state);
735 	if (istop && dstop)
736 		fail("target has ISTOP and DSTOP set simultaneously\n");
737 
738 	if (tsp != &t->t_status)
739 		bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));
740 
741 	return (0);
742 }
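
/*
 * For example (illustrative), a caller that needs to know whether the target
 * is currently stopped on an event of interest can check the status:
 *
 *     mdb_tgt_status_t st;
 *
 *     if (mdb_tgt_status(t, &st) == 0 &&
 *         st.st_state == MDB_TGT_STOPPED &&
 *         (st.st_flags & MDB_TGT_ISTOP))
 *             mdb_printf("stopped at %p\n", (void *)st.st_pc);
 *
 * As the assertions above require, MDB_TGT_ISTOP is only set when the
 * target is stopped.
 */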
743 
744 /*
745  * For the given sespec, scan its list of vespecs for ones that are marked
746  * temporary and delete them.  We use the same method as vespec_delete below.
747  */
748 /*ARGSUSED*/
749 void
750 mdb_tgt_sespec_prune_one(mdb_tgt_t *t, mdb_sespec_t *sep)
751 {
752 	mdb_vespec_t *vep, *nvep;
753 
754 	for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
755 		nvep = mdb_list_next(vep);
756 
757 		if ((vep->ve_flags & (MDB_TGT_SPEC_DELETED |
758 		    MDB_TGT_SPEC_TEMPORARY)) == MDB_TGT_SPEC_TEMPORARY) {
759 			vep->ve_flags |= MDB_TGT_SPEC_DELETED;
760 			mdb_tgt_vespec_rele(t, vep);
761 		}
762 	}
763 }
764 
765 /*
766  * Prune each sespec on the active list of temporary vespecs.  This function
767  * is called, for example, after the target finishes a continue operation.
768  */
769 void
770 mdb_tgt_sespec_prune_all(mdb_tgt_t *t)
771 {
772 	mdb_sespec_t *sep, *nsep;
773 
774 	for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
775 		nsep = mdb_list_next(sep);
776 		mdb_tgt_sespec_prune_one(t, sep);
777 	}
778 }
779 
780 /*
781  * Transition the given sespec to the IDLE state.  We invoke the destructor,
782  * and then move the sespec from the active list to the idle list.
783  */
784 void
785 mdb_tgt_sespec_idle_one(mdb_tgt_t *t, mdb_sespec_t *sep, int reason)
786 {
787 	ASSERT(sep->se_state != MDB_TGT_SPEC_IDLE);
788 
789 	if (sep->se_state == MDB_TGT_SPEC_ARMED)
790 		(void) sep->se_ops->se_disarm(t, sep);
791 
792 	sep->se_ops->se_dtor(t, sep);
793 	sep->se_data = NULL;
794 
795 	sep->se_state = MDB_TGT_SPEC_IDLE;
796 	sep->se_errno = reason;
797 
798 	mdb_list_delete(&t->t_active, sep);
799 	mdb_list_append(&t->t_idle, sep);
800 
801 	mdb_tgt_sespec_prune_one(t, sep);
802 }
803 
804 /*
805  * Transition each sespec on the active list to the IDLE state.  This function
806  * is called, for example, after the target terminates execution.
807  */
808 void
809 mdb_tgt_sespec_idle_all(mdb_tgt_t *t, int reason, int clear_matched)
810 {
811 	mdb_sespec_t *sep, *nsep;
812 	mdb_vespec_t *vep;
813 
814 	while ((sep = t->t_matched) != T_SE_END && clear_matched) {
815 		for (vep = mdb_list_next(&sep->se_velist); vep != NULL; ) {
816 			vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
817 			vep = mdb_list_next(vep);
818 		}
819 
820 		t->t_matched = sep->se_matched;
821 		sep->se_matched = NULL;
822 		mdb_tgt_sespec_rele(t, sep);
823 	}
824 
825 	for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
826 		nsep = mdb_list_next(sep);
827 		mdb_tgt_sespec_idle_one(t, sep, reason);
828 	}
829 }
830 
831 /*
832  * Attempt to transition the given sespec from the IDLE to ACTIVE state.  We
833  * do this by invoking se_ctor -- if this fails, we save the reason in se_errno
834  * and return -1 with errno set.  One strange case we need to deal with here is
835  * the possibility that a given vespec is sitting on the idle list with its
836  * corresponding sespec, but it is actually a duplicate of another sespec on the
837  * active list.  This can happen if the sespec is associated with a
838  * MDB_TGT_SPEC_DISABLED vespec that was just enabled, and is now ready to be
839  * activated.  A more interesting reason this situation might arise is the case
840  * where a virtual address breakpoint is set at an address just mmap'ed by
841  * dlmopen.  Since no symbol table information is available for this mapping
842  * yet, a pre-existing deferred symbolic breakpoint may already exist for this
843  * address, but it is on the idle list.  When the symbol table is ready and the
844  * DLACTIVITY event occurs, we now discover that the virtual address obtained by
845  * evaluating the symbolic breakpoint matches the explicit virtual address of
846  * the active virtual breakpoint.  To resolve this conflict in either case, we
847  * destroy the idle sespec, and attach its list of vespecs to the existing
848  * active sespec.
849  */
850 int
851 mdb_tgt_sespec_activate_one(mdb_tgt_t *t, mdb_sespec_t *sep)
852 {
853 	mdb_vespec_t *vep = mdb_list_next(&sep->se_velist);
854 
855 	mdb_vespec_t *nvep;
856 	mdb_sespec_t *dup;
857 
858 	ASSERT(sep->se_state == MDB_TGT_SPEC_IDLE);
859 	ASSERT(vep != NULL);
860 
861 	if (vep->ve_flags & MDB_TGT_SPEC_DISABLED)
862 		return (0); /* cannot be activated while disabled bit set */
863 
864 	/*
865 	 * First search the active list for an existing, duplicate sespec to
866 	 * handle the special case described above.
867 	 */
868 	for (dup = mdb_list_next(&t->t_active); dup; dup = mdb_list_next(dup)) {
869 		if (dup->se_ops == sep->se_ops &&
870 		    dup->se_ops->se_secmp(t, dup, vep->ve_args)) {
871 			ASSERT(dup != sep);
872 			break;
873 		}
874 	}
875 
876 	/*
877 	 * If a duplicate is found, destroy the existing, idle sespec, and
878 	 * attach all of its vespecs to the duplicate sespec.
879 	 */
880 	if (dup != NULL) {
881 		for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
882 			mdb_dprintf(MDB_DBG_TGT, "merge [ %d ] to sespec %p\n",
883 			    vep->ve_id, (void *)dup);
884 
885 			if (dup->se_matched != NULL)
886 				vep->ve_flags |= MDB_TGT_SPEC_MATCHED;
887 
888 			nvep = mdb_list_next(vep);
889 			vep->ve_hits = 0;
890 
891 			mdb_list_delete(&sep->se_velist, vep);
892 			mdb_tgt_sespec_rele(t, sep);
893 
894 			mdb_list_append(&dup->se_velist, vep);
895 			mdb_tgt_sespec_hold(t, dup);
896 			vep->ve_se = dup;
897 		}
898 
899 		mdb_dprintf(MDB_DBG_TGT, "merged idle sespec %p with %p\n",
900 		    (void *)sep, (void *)dup);
901 		return (0);
902 	}
903 
904 	/*
905 	 * If no duplicate is found, call the sespec's constructor.  If this
906 	 * is successful, move the sespec to the active list.
907 	 */
908 	if (sep->se_ops->se_ctor(t, sep, vep->ve_args) < 0) {
909 		sep->se_errno = errno;
910 		sep->se_data = NULL;
911 
912 		return (-1);
913 	}
914 
915 	for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
916 		nvep = mdb_list_next(vep);
917 		vep->ve_hits = 0;
918 	}
919 	mdb_list_delete(&t->t_idle, sep);
920 	mdb_list_append(&t->t_active, sep);
921 	sep->se_state = MDB_TGT_SPEC_ACTIVE;
922 	sep->se_errno = 0;
923 
924 	return (0);
925 }
926 
927 /*
928  * Transition each sespec on the idle list to the ACTIVE state.  This function
929  * is called, for example, after the target's t_run() function returns.  If
930  * the se_ctor() function fails, the specifier is not yet applicable; it will
931  * remain on the idle list and can be activated later.
932  *
933  * Returns 1 if there weren't any unexpected activation failures; 0 if there
934  * were.
935  */
936 int
937 mdb_tgt_sespec_activate_all(mdb_tgt_t *t)
938 {
939 	mdb_sespec_t *sep, *nsep;
940 	int rc = 1;
941 
942 	for (sep = mdb_list_next(&t->t_idle); sep != NULL; sep = nsep) {
943 		nsep = mdb_list_next(sep);
944 
945 		if (mdb_tgt_sespec_activate_one(t, sep) < 0 &&
946 		    sep->se_errno != EMDB_NOOBJ)
947 			rc = 0;
948 	}
949 
950 	return (rc);
951 }
952 
953 /*
954  * Transition the given sespec to the ARMED state.  Note that we attempt to
955  * re-arm sespecs previously in the ERROR state.  If se_arm() fails the sespec
956  * transitions to the ERROR state but stays on the active list.
957  */
958 void
959 mdb_tgt_sespec_arm_one(mdb_tgt_t *t, mdb_sespec_t *sep)
960 {
961 	ASSERT(sep->se_state != MDB_TGT_SPEC_IDLE);
962 
963 	if (sep->se_state == MDB_TGT_SPEC_ARMED)
964 		return; /* do not arm sespecs more than once */
965 
966 	if (sep->se_ops->se_arm(t, sep) == -1) {
967 		sep->se_state = MDB_TGT_SPEC_ERROR;
968 		sep->se_errno = errno;
969 	} else {
970 		sep->se_state = MDB_TGT_SPEC_ARMED;
971 		sep->se_errno = 0;
972 	}
973 }
974 
975 /*
976  * Transition each sespec on the active list (except matched specs) to the
977  * ARMED state.  This function is called prior to continuing the target.
978  */
979 void
980 mdb_tgt_sespec_arm_all(mdb_tgt_t *t)
981 {
982 	mdb_sespec_t *sep, *nsep;
983 
984 	for (sep = mdb_list_next(&t->t_active); sep != NULL; sep = nsep) {
985 		nsep = mdb_list_next(sep);
986 		if (sep->se_matched == NULL)
987 			mdb_tgt_sespec_arm_one(t, sep);
988 	}
989 }
990 
991 /*
992  * Transition each sespec on the active list that is in the ARMED state to
993  * the ACTIVE state.  If se_disarm() fails, the sespec is transitioned to
994  * the ERROR state instead, but left on the active list.
995  */
996 static void
997 tgt_disarm_sespecs(mdb_tgt_t *t)
998 {
999 	mdb_sespec_t *sep;
1000 
1001 	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
1002 		if (sep->se_state != MDB_TGT_SPEC_ARMED)
1003 			continue; /* do not disarm if in ERROR state */
1004 
1005 		if (sep->se_ops->se_disarm(t, sep) == -1) {
1006 			sep->se_state = MDB_TGT_SPEC_ERROR;
1007 			sep->se_errno = errno;
1008 		} else {
1009 			sep->se_state = MDB_TGT_SPEC_ACTIVE;
1010 			sep->se_errno = 0;
1011 		}
1012 	}
1013 }
1014 
1015 /*
1016  * Determine if the software event that triggered the most recent stop matches
1017  * any of the active event specifiers.  If 'all' is TRUE, we consider all
1018  * sespecs in our search.  If 'all' is FALSE, we only consider ARMED sespecs.
1019  * If we successfully match an event, we add it to the t_matched list and
1020  * place an additional hold on it.
1021  */
1022 static mdb_sespec_t *
1023 tgt_match_sespecs(mdb_tgt_t *t, int all)
1024 {
1025 	mdb_sespec_t *sep;
1026 
1027 	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
1028 		if (all == FALSE && sep->se_state != MDB_TGT_SPEC_ARMED)
1029 			continue; /* restrict search to ARMED sespecs */
1030 
1031 		if (sep->se_state != MDB_TGT_SPEC_ERROR &&
1032 		    sep->se_ops->se_match(t, sep, &t->t_status)) {
1033 			mdb_dprintf(MDB_DBG_TGT, "match se %p\n", (void *)sep);
1034 			mdb_tgt_sespec_hold(t, sep);
1035 			sep->se_matched = t->t_matched;
1036 			t->t_matched = sep;
1037 		}
1038 	}
1039 
1040 	return (t->t_matched);
1041 }
1042 
1043 /*
1044  * This function provides the low-level target continue algorithm.  We proceed
1045  * in three phases: (1) we arm the active sespecs, except the specs matched at
1046  * the time we last stopped, (2) we call se_cont() on any matched sespecs to
1047  * step over these event transitions, and then arm the corresponding sespecs,
1048  * and (3) we call the appropriate low-level continue routine.  Once the
1049  * target stops again, we determine which sespecs were matched, and invoke the
1050  * appropriate vespec callbacks and perform other vespec maintenance.
1051  */
1052 static int
1053 tgt_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp,
1054     int (*t_cont)(mdb_tgt_t *, mdb_tgt_status_t *))
1055 {
1056 	mdb_var_t *hitv = mdb_nv_lookup(&mdb.m_nv, "hits");
1057 	uintptr_t pc = t->t_status.st_pc;
1058 	int error = 0;
1059 
1060 	mdb_sespec_t *sep, *nsep, *matched;
1061 	mdb_vespec_t *vep, *nvep;
1062 	uintptr_t addr;
1063 
1064 	uint_t cbits = 0;	/* union of pending continue bits */
1065 	uint_t ncont = 0;	/* # of callbacks that requested cont */
1066 	uint_t n = 0;		/* # of callbacks */
1067 
1068 	/*
1069 	 * If the target is undead, dead, or lost, we no longer allow continue.
1070 	 * This effectively forces the user to use ::kill or ::run after death.
1071 	 */
1072 	if (t->t_status.st_state == MDB_TGT_UNDEAD)
1073 		return (set_errno(EMDB_TGTZOMB));
1074 	if (t->t_status.st_state == MDB_TGT_DEAD)
1075 		return (set_errno(EMDB_TGTCORE));
1076 	if (t->t_status.st_state == MDB_TGT_LOST)
1077 		return (set_errno(EMDB_TGTLOST));
1078 
1079 	/*
1080 	 * If any of single-step, step-over, or step-out is pending, it takes
1081 	 * precedence over an explicit or pending continue, because these are
1082 	 * all different specialized forms of continue.
1083 	 */
1084 	if (t->t_flags & MDB_TGT_F_STEP)
1085 		t_cont = t->t_ops->t_step;
1086 	else if (t->t_flags & MDB_TGT_F_NEXT)
1087 		t_cont = t->t_ops->t_step;
1088 	else if (t->t_flags & MDB_TGT_F_STEP_BRANCH)
1089 		t_cont = t->t_ops->t_cont;
1090 	else if (t->t_flags & MDB_TGT_F_STEP_OUT)
1091 		t_cont = t->t_ops->t_cont;
1092 
1093 	/*
1094 	 * To handle step-over, we ask the target to find the address past the
1095 	 * next control transfer instruction.  If an address is found, we plant
1096 	 * a temporary breakpoint there and continue; otherwise just step.
1097 	 */
1098 	if ((t->t_flags & MDB_TGT_F_NEXT) && !(t->t_flags & MDB_TGT_F_STEP)) {
1099 		if (t->t_ops->t_next(t, &addr) == -1 || mdb_tgt_add_vbrkpt(t,
1100 		    addr, MDB_TGT_SPEC_HIDDEN | MDB_TGT_SPEC_TEMPORARY,
1101 		    no_se_f, NULL) == 0) {
1102 			mdb_dprintf(MDB_DBG_TGT, "next falling back to step: "
1103 			    "%s\n", mdb_strerror(errno));
1104 		} else
1105 			t_cont = t->t_ops->t_cont;
1106 	}
1107 
1108 	/*
1109 	 * To handle step-out, we ask the target to find the return address of
1110 	 * the current frame, plant a temporary breakpoint there, and continue.
1111 	 */
1112 	if (t->t_flags & MDB_TGT_F_STEP_OUT) {
1113 		if (t->t_ops->t_step_out(t, &addr) == -1)
1114 			return (-1); /* errno is set for us */
1115 
1116 		if (mdb_tgt_add_vbrkpt(t, addr, MDB_TGT_SPEC_HIDDEN |
1117 		    MDB_TGT_SPEC_TEMPORARY, no_se_f, NULL) == 0)
1118 			return (-1); /* errno is set for us */
1119 	}
1120 
1121 	/*
1122 	 * To handle step-branch, we ask the target to enable it for the coming
1123 	 * continue.  Step-branch is incompatible with step, so don't enable it
1124 	 * if we're going to be stepping.
1125 	 */
1126 	if (t->t_flags & MDB_TGT_F_STEP_BRANCH && t_cont == t->t_ops->t_cont) {
1127 		if (t->t_ops->t_step_branch(t) == -1)
1128 			return (-1); /* errno is set for us */
1129 	}
1130 
1131 	(void) mdb_signal_block(SIGHUP);
1132 	(void) mdb_signal_block(SIGTERM);
1133 	mdb_intr_disable();
1134 
1135 	t->t_flags &= ~T_CONT_BITS;
1136 	t->t_flags |= MDB_TGT_F_BUSY;
1137 	mdb_tgt_sespec_arm_all(t);
1138 
1139 	ASSERT(t->t_matched != NULL);
1140 	matched = t->t_matched;
1141 	t->t_matched = T_SE_END;
1142 
1143 	if (mdb.m_term != NULL)
1144 		IOP_SUSPEND(mdb.m_term);
1145 
1146 	/*
1147 	 * Iterate over the matched sespec list, performing autostop processing
1148 	 * and clearing the matched bit for each associated vespec.  We then
1149 	 * invoke each sespec's se_cont callback in order to continue past
1150 	 * the corresponding event.  If the matched list has more than one
1151 	 * sespec, we assume that the se_cont callbacks are non-interfering.
1152 	 */
1153 	for (sep = matched; sep != T_SE_END; sep = sep->se_matched) {
1154 		for (vep = mdb_list_next(&sep->se_velist); vep != NULL; ) {
1155 			if ((vep->ve_flags & MDB_TGT_SPEC_AUTOSTOP) &&
1156 			    (vep->ve_limit && vep->ve_hits == vep->ve_limit))
1157 				vep->ve_hits = 0;
1158 
1159 			vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
1160 			vep = mdb_list_next(vep);
1161 		}
1162 
1163 		if (sep->se_ops->se_cont(t, sep, &t->t_status) == -1) {
1164 			error = errno ? errno : -1;
1165 			tgt_disarm_sespecs(t);
1166 			break;
1167 		}
1168 
1169 		if (!(t->t_status.st_flags & MDB_TGT_ISTOP)) {
1170 			tgt_disarm_sespecs(t);
1171 			if (t->t_status.st_state == MDB_TGT_UNDEAD)
1172 				mdb_tgt_sespec_idle_all(t, EMDB_TGTZOMB, TRUE);
1173 			else if (t->t_status.st_state == MDB_TGT_LOST)
1174 				mdb_tgt_sespec_idle_all(t, EMDB_TGTLOST, TRUE);
1175 			break;
1176 		}
1177 	}
1178 
1179 	/*
1180 	 * Clear the se_matched field for each matched sespec, and drop the
1181 	 * reference count since the sespec is no longer on the matched list.
1182 	 */
1183 	for (sep = matched; sep != T_SE_END; sep = nsep) {
1184 		nsep = sep->se_matched;
1185 		sep->se_matched = NULL;
1186 		mdb_tgt_sespec_rele(t, sep);
1187 	}
1188 
1189 	/*
1190 	 * If the matched list was non-empty, see if we hit another event while
1191 	 * performing se_cont() processing.  If so, don't bother continuing any
1192 	 * further.  If not, arm the sespecs on the old matched list by calling
1193 	 * mdb_tgt_sespec_arm_all() again and then continue by calling t_cont.
1194 	 */
1195 	if (matched != T_SE_END) {
1196 		if (error != 0 || !(t->t_status.st_flags & MDB_TGT_ISTOP))
1197 			goto out; /* abort now if se_cont() failed */
1198 
1199 		if ((t->t_matched = tgt_match_sespecs(t, FALSE)) != T_SE_END) {
1200 			tgt_disarm_sespecs(t);
1201 			goto out;
1202 		}
1203 
1204 		mdb_tgt_sespec_arm_all(t);
1205 	}
1206 
1207 	if (t_cont != t->t_ops->t_step || pc == t->t_status.st_pc) {
1208 		if (t_cont(t, &t->t_status) != 0)
1209 			error = errno ? errno : -1;
1210 	}
1211 
1212 	tgt_disarm_sespecs(t);
1213 
1214 	if (t->t_flags & MDB_TGT_F_UNLOAD)
1215 		longjmp(mdb.m_frame->f_pcb, MDB_ERR_QUIT);
1216 
1217 	if (t->t_status.st_state == MDB_TGT_UNDEAD)
1218 		mdb_tgt_sespec_idle_all(t, EMDB_TGTZOMB, TRUE);
1219 	else if (t->t_status.st_state == MDB_TGT_LOST)
1220 		mdb_tgt_sespec_idle_all(t, EMDB_TGTLOST, TRUE);
1221 	else if (t->t_status.st_flags & MDB_TGT_ISTOP)
1222 		t->t_matched = tgt_match_sespecs(t, TRUE);
1223 out:
1224 	if (mdb.m_term != NULL)
1225 		IOP_RESUME(mdb.m_term);
1226 
1227 	(void) mdb_signal_unblock(SIGTERM);
1228 	(void) mdb_signal_unblock(SIGHUP);
1229 	mdb_intr_enable();
1230 
1231 	for (sep = t->t_matched; sep != T_SE_END; sep = sep->se_matched) {
1232 		/*
1233 		 * When we invoke a ve_callback, it may in turn request that the
1234 		 * target continue immediately after callback processing is
1235 		 * complete.  We only allow this to occur if *all* callbacks
1236 		 * agree to continue.  To implement this behavior, we keep a
1237 		 * count (ncont) of such requests, and only apply the cumulative
1238 		 * continue bits (cbits) to the target if ncont is equal to the
1239 		 * total number of callbacks that are invoked (n).
1240 		 */
1241 		for (vep = mdb_list_next(&sep->se_velist);
1242 		    vep != NULL; vep = nvep, n++) {
1243 			/*
1244 			 * Place an extra hold on the current vespec and pick
1245 			 * up the next pointer before invoking the callback: we
1246 			 * must be prepared for the vespec to be deleted or
1247 			 * moved to a different list by the callback.
1248 			 */
1249 			mdb_tgt_vespec_hold(t, vep);
1250 			nvep = mdb_list_next(vep);
1251 
1252 			vep->ve_flags |= MDB_TGT_SPEC_MATCHED;
1253 			vep->ve_hits++;
1254 
1255 			mdb_nv_set_value(mdb.m_dot, t->t_status.st_pc);
1256 			mdb_nv_set_value(hitv, vep->ve_hits);
1257 
1258 			ASSERT((t->t_flags & T_CONT_BITS) == 0);
1259 			vep->ve_callback(t, vep->ve_id, vep->ve_data);
1260 
1261 			ncont += (t->t_flags & T_CONT_BITS) != 0;
1262 			cbits |= (t->t_flags & T_CONT_BITS);
1263 			t->t_flags &= ~T_CONT_BITS;
1264 
1265 			if (vep->ve_limit && vep->ve_hits == vep->ve_limit) {
1266 				if (vep->ve_flags & MDB_TGT_SPEC_AUTODEL)
1267 					(void) mdb_tgt_vespec_delete(t,
1268 					    vep->ve_id);
1269 				else if (vep->ve_flags & MDB_TGT_SPEC_AUTODIS)
1270 					(void) mdb_tgt_vespec_disable(t,
1271 					    vep->ve_id);
1272 			}
1273 
1274 			if (vep->ve_limit && vep->ve_hits < vep->ve_limit) {
1275 				if (vep->ve_flags & MDB_TGT_SPEC_AUTOSTOP)
1276 					(void) mdb_tgt_continue(t, NULL);
1277 			}
1278 
1279 			mdb_tgt_vespec_rele(t, vep);
1280 		}
1281 	}
1282 
1283 	if (t->t_matched != T_SE_END && ncont == n)
1284 		t->t_flags |= cbits; /* apply continues (see above) */
1285 
1286 	mdb_tgt_sespec_prune_all(t);
1287 
1288 	t->t_status.st_flags &= ~MDB_TGT_BUSY;
1289 	t->t_flags &= ~MDB_TGT_F_BUSY;
1290 
1291 	if (tsp != NULL)
1292 		bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));
1293 
1294 	if (error != 0)
1295 		return (set_errno(error));
1296 
1297 	return (0);
1298 }
1299 
1300 /*
1301  * This function is the common glue that connects the high-level target layer
1302  * continue functions (e.g. step and cont below) with the low-level
1303  * tgt_continue() function above.  Since vespec callbacks may perform any
1304  * actions, including attempting to continue the target itself, we must be
1305  * prepared to be called while the target is still marked F_BUSY.  In this
1306  * case, we just set a pending bit and return.  When we return from the call
1307  * to tgt_continue() that made us busy into the tgt_request_continue() call
1308  * that is still on the stack, we will loop around and call tgt_continue()
1309  * again.  This allows vespecs to continue the target without recursion.
1310  */
1311 static int
1312 tgt_request_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp, uint_t tflag,
1313     int (*t_cont)(mdb_tgt_t *, mdb_tgt_status_t *))
1314 {
1315 	mdb_tgt_spec_desc_t desc;
1316 	mdb_sespec_t *sep;
1317 	char buf[BUFSIZ];
1318 	int status;
1319 
1320 	if (t->t_flags & MDB_TGT_F_BUSY) {
1321 		t->t_flags |= tflag;
1322 		return (0);
1323 	}
1324 
1325 	do {
1326 		status = tgt_continue(t, tsp, t_cont);
1327 	} while (status == 0 && (t->t_flags & T_CONT_BITS));
1328 
1329 	if (status == 0) {
1330 		for (sep = t->t_matched; sep != T_SE_END;
1331 		    sep = sep->se_matched) {
1332 			mdb_vespec_t *vep;
1333 
1334 			for (vep = mdb_list_next(&sep->se_velist); vep;
1335 			    vep = mdb_list_next(vep)) {
1336 				if (vep->ve_flags & MDB_TGT_SPEC_SILENT)
1337 					continue;
1338 				warn("%s\n", sep->se_ops->se_info(t, sep,
1339 				    vep, &desc, buf, sizeof (buf)));
1340 			}
1341 		}
1342 
1343 		mdb_callb_fire(MDB_CALLB_STCHG);
1344 	}
1345 
1346 	t->t_flags &= ~T_CONT_BITS;
1347 	return (status);
1348 }
1349 
1350 /*
1351  * Restart target execution: we rely upon the underlying target implementation
1352  * to do most of the work for us.  In particular, we assume it will properly
1353  * preserve the state of our event lists if the run fails for some reason,
1354  * and that it will reset all events to the IDLE state if the run succeeds.
1355  * If it is successful, we attempt to activate all of the idle sespecs.  The
1356  * t_run() operation is defined to leave the target stopped at the earliest
1357  * possible point in execution, and then return control to the debugger,
1358  * awaiting a step or continue operation to set it running again.
1359  */
1360 int
1361 mdb_tgt_run(mdb_tgt_t *t, int argc, const mdb_arg_t *argv)
1362 {
1363 	int i;
1364 
1365 	for (i = 0; i < argc; i++) {
1366 		if (argv[i].a_type != MDB_TYPE_STRING)
1367 			return (set_errno(EINVAL));
1368 	}
1369 
1370 	if (t->t_ops->t_run(t, argc, argv) == -1)
1371 		return (-1); /* errno is set for us */
1372 
1373 	t->t_flags &= ~T_CONT_BITS;
1374 	(void) mdb_tgt_sespec_activate_all(t);
1375 
1376 	if (mdb.m_term != NULL)
1377 		IOP_CTL(mdb.m_term, MDB_IOC_CTTY, NULL);
1378 
1379 	return (0);
1380 }
1381 
1382 int
1383 mdb_tgt_step(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1384 {
1385 	return (tgt_request_continue(t, tsp, MDB_TGT_F_STEP, t->t_ops->t_step));
1386 }
1387 
1388 int
1389 mdb_tgt_step_out(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1390 {
1391 	t->t_flags |= MDB_TGT_F_STEP_OUT; /* set flag even if tgt not busy */
1392 	return (tgt_request_continue(t, tsp, 0, t->t_ops->t_cont));
1393 }
1394 
1395 int
1396 mdb_tgt_step_branch(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1397 {
1398 	t->t_flags |= MDB_TGT_F_STEP_BRANCH; /* set flag even if tgt not busy */
1399 	return (tgt_request_continue(t, tsp, 0, t->t_ops->t_cont));
1400 }
1401 
1402 int
1403 mdb_tgt_next(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1404 {
1405 	t->t_flags |= MDB_TGT_F_NEXT; /* set flag even if tgt not busy */
1406 	return (tgt_request_continue(t, tsp, 0, t->t_ops->t_step));
1407 }
1408 
1409 int
1410 mdb_tgt_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1411 {
1412 	return (tgt_request_continue(t, tsp, MDB_TGT_F_CONT, t->t_ops->t_cont));
1413 }
1414 
1415 int
1416 mdb_tgt_signal(mdb_tgt_t *t, int sig)
1417 {
1418 	return (t->t_ops->t_signal(t, sig));
1419 }
1420 
1421 void *
1422 mdb_tgt_vespec_data(mdb_tgt_t *t, int vid)
1423 {
1424 	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, vid);
1425 
1426 	if (vep == NULL) {
1427 		(void) set_errno(EMDB_NOSESPEC);
1428 		return (NULL);
1429 	}
1430 
1431 	return (vep->ve_data);
1432 }
1433 
1434 /*
1435  * Return a structured description and comment string for the given vespec.
1436  * We fill in the common information from the vespec, and then call down to
1437  * the underlying sespec to provide the comment string and modify any
1438  * event type-specific information.
1439  */
1440 char *
1441 mdb_tgt_vespec_info(mdb_tgt_t *t, int vid, mdb_tgt_spec_desc_t *sp,
1442     char *buf, size_t nbytes)
1443 {
1444 	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, vid);
1445 
1446 	mdb_tgt_spec_desc_t desc;
1447 	mdb_sespec_t *sep;
1448 
1449 	if (vep == NULL) {
1450 		if (sp != NULL)
1451 			bzero(sp, sizeof (mdb_tgt_spec_desc_t));
1452 		(void) set_errno(EMDB_NOSESPEC);
1453 		return (NULL);
1454 	}
1455 
1456 	if (sp == NULL)
1457 		sp = &desc;
1458 
1459 	sep = vep->ve_se;
1460 
1461 	sp->spec_id = vep->ve_id;
1462 	sp->spec_flags = vep->ve_flags;
1463 	sp->spec_hits = vep->ve_hits;
1464 	sp->spec_limit = vep->ve_limit;
1465 	sp->spec_state = sep->se_state;
1466 	sp->spec_errno = sep->se_errno;
1467 	sp->spec_base = NULL;
1468 	sp->spec_size = 0;
1469 	sp->spec_data = vep->ve_data;
1470 
1471 	return (sep->se_ops->se_info(t, sep, vep, sp, buf, nbytes));
1472 }
1473 
1474 /*
1475  * Qsort callback for sorting vespecs by VID, used below.
1476  */
1477 static int
1478 tgt_vespec_compare(const mdb_vespec_t **lp, const mdb_vespec_t **rp)
1479 {
1480 	return ((*lp)->ve_id - (*rp)->ve_id);
1481 }
1482 
1483 /*
1484  * Iterate over all vespecs and call the specified callback function with the
1485  * corresponding VID and caller data pointer.  We want the callback function
1486  * to see a consistent, sorted snapshot of the vespecs, and allow the callback
1487  * to take actions such as deleting the vespec itself, so we cannot simply
1488  * iterate over the lists.  Instead, we pre-allocate an array of vespec
1489  * pointers, fill it in and place an additional hold on each vespec, and then
1490  * sort it.  After the callback has been executed on each vespec in the
1491  * sorted array, we remove our hold and free the temporary array.
1492  */
1493 int
1494 mdb_tgt_vespec_iter(mdb_tgt_t *t, mdb_tgt_vespec_f *func, void *p)
1495 {
1496 	mdb_vespec_t **veps, **vepp, **vend;
1497 	mdb_vespec_t *vep, *nvep;
1498 	mdb_sespec_t *sep;
1499 
1500 	uint_t vecnt = t->t_vecnt;
1501 
1502 	veps = mdb_alloc(sizeof (mdb_vespec_t *) * vecnt, UM_SLEEP);
1503 	vend = veps + vecnt;
1504 	vepp = veps;
1505 
1506 	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
1507 		for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
1508 			mdb_tgt_vespec_hold(t, vep);
1509 			nvep = mdb_list_next(vep);
1510 			*vepp++ = vep;
1511 		}
1512 	}
1513 
1514 	for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
1515 		for (vep = mdb_list_next(&sep->se_velist); vep; vep = nvep) {
1516 			mdb_tgt_vespec_hold(t, vep);
1517 			nvep = mdb_list_next(vep);
1518 			*vepp++ = vep;
1519 		}
1520 	}
1521 
1522 	if (vepp != vend) {
1523 		fail("target has %u vespecs on list but vecnt shows %u\n",
1524 		    (uint_t)(vepp - veps), vecnt);
1525 	}
1526 
1527 	qsort(veps, vecnt, sizeof (mdb_vespec_t *),
1528 	    (int (*)(const void *, const void *))tgt_vespec_compare);
1529 
1530 	for (vepp = veps; vepp < vend; vepp++) {
1531 		if (func(t, p, (*vepp)->ve_id, (*vepp)->ve_data) != 0)
1532 			break;
1533 	}
1534 
1535 	for (vepp = veps; vepp < vend; vepp++)
1536 		mdb_tgt_vespec_rele(t, *vepp);
1537 
1538 	mdb_free(veps, sizeof (mdb_vespec_t *) * vecnt);
1539 	return (0);
1540 }
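
/*
 * For example (a sketch; the callback "count_one" is hypothetical), a caller
 * can count the current vespecs as follows.  Returning non-zero from the
 * callback terminates the iteration early:
 *
 *     static int
 *     count_one(mdb_tgt_t *t, void *private, int vid, void *data)
 *     {
 *             (*(uint_t *)private)++;
 *             return (0);
 *     }
 *
 *     uint_t n = 0;
 *     (void) mdb_tgt_vespec_iter(t, count_one, &n);
 */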
1541 
1542 /*
1543  * Reset the vespec flags, match limit, and callback data to the specified
1544  * values.  We silently correct invalid parameters, except for the VID.
1545  * The caller is required to query the existing properties and pass back
1546  * the existing values for any properties that should not be modified.
1547  * If the callback data is modified, the caller is responsible for cleaning
1548  * up any state associated with the previous value.
1549  */
1550 int
1551 mdb_tgt_vespec_modify(mdb_tgt_t *t, int id, uint_t flags,
1552     uint_t limit, void *data)
1553 {
1554 	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
1555 
1556 	if (vep == NULL)
1557 		return (set_errno(EMDB_NOSESPEC));
1558 
1559 	/*
1560 	 * If the value of the MDB_TGT_SPEC_DISABLED bit is changing, call the
1561 	 * appropriate vespec function to do the enable/disable work.
1562 	 */
1563 	if ((flags & MDB_TGT_SPEC_DISABLED) !=
1564 	    (vep->ve_flags & MDB_TGT_SPEC_DISABLED)) {
1565 		if (flags & MDB_TGT_SPEC_DISABLED)
1566 			(void) mdb_tgt_vespec_disable(t, id);
1567 		else
1568 			(void) mdb_tgt_vespec_enable(t, id);
1569 	}
1570 
1571 	/*
1572 	 * Make sure only one MDB_TGT_SPEC_AUTO* bit is set in the new flags
1573 	 * value: extra bits are cleared according to order of precedence.
1574 	 */
1575 	if (flags & MDB_TGT_SPEC_AUTOSTOP)
1576 		flags &= ~(MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS);
1577 	else if (flags & MDB_TGT_SPEC_AUTODEL)
1578 		flags &= ~MDB_TGT_SPEC_AUTODIS;
1579 
1580 	/*
1581 	 * The TEMPORARY property always takes precedence over STICKY.
1582 	 */
1583 	if (flags & MDB_TGT_SPEC_TEMPORARY)
1584 		flags &= ~MDB_TGT_SPEC_STICKY;
1585 
1586 	/*
1587 	 * If any MDB_TGT_SPEC_AUTO* bits are changing, reset the hit count
1588 	 * back to zero and clear all of the old auto bits.
1589 	 */
1590 	if ((flags & T_AUTO_BITS) != (vep->ve_flags & T_AUTO_BITS)) {
1591 		vep->ve_flags &= ~T_AUTO_BITS;
1592 		vep->ve_hits = 0;
1593 	}
1594 
1595 	vep->ve_flags = (vep->ve_flags & T_IMPL_BITS) | (flags & ~T_IMPL_BITS);
1596 	vep->ve_data = data;
1597 
1598 	/*
1599 	 * If any MDB_TGT_SPEC_AUTO* flags are set, make sure the limit is at
1600 	 * least one.  If none are set, reset it back to zero.
1601 	 */
1602 	if (vep->ve_flags & T_AUTO_BITS)
1603 		vep->ve_limit = MAX(limit, 1);
1604 	else
1605 		vep->ve_limit = 0;
1606 
1607 	/*
1608 	 * As a convenience, we allow the caller to specify SPEC_DELETED in
1609 	 * the flags field as an indication that the event should be deleted.
1610 	 */
1611 	if (flags & MDB_TGT_SPEC_DELETED)
1612 		(void) mdb_tgt_vespec_delete(t, id);
1613 
1614 	return (0);
1615 }
1616 
1617 /*
1618  * Remove the user disabled bit from the specified vespec, and attempt to
1619  * activate the underlying sespec and move it to the active list if possible.
1620  */
1621 int
1622 mdb_tgt_vespec_enable(mdb_tgt_t *t, int id)
1623 {
1624 	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
1625 
1626 	if (vep == NULL)
1627 		return (set_errno(EMDB_NOSESPEC));
1628 
1629 	if (vep->ve_flags & MDB_TGT_SPEC_DISABLED) {
1630 		ASSERT(mdb_list_next(vep) == NULL);
1631 		vep->ve_flags &= ~MDB_TGT_SPEC_DISABLED;
1632 		if (mdb_tgt_sespec_activate_one(t, vep->ve_se) < 0)
1633 			return (-1); /* errno is set for us */
1634 	}
1635 
1636 	return (0);
1637 }
1638 
1639 /*
1640  * Set the user disabled bit on the specified vespec, and move it to the idle
1641  * list.  If the vespec is not alone with its sespec or if it is a currently
1642  * matched event, we must always create a new idle sespec and move the vespec
1643  * there.  If the vespec was alone and active, we can simply idle the sespec.
1644  */
1645 int
1646 mdb_tgt_vespec_disable(mdb_tgt_t *t, int id)
1647 {
1648 	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
1649 	mdb_sespec_t *sep;
1650 
1651 	if (vep == NULL)
1652 		return (set_errno(EMDB_NOSESPEC));
1653 
1654 	if (vep->ve_flags & MDB_TGT_SPEC_DISABLED)
1655 		return (0); /* already disabled */
1656 
1657 	if (mdb_list_prev(vep) != NULL || mdb_list_next(vep) != NULL ||
1658 	    vep->ve_se->se_matched != NULL) {
1659 
1660 		sep = mdb_tgt_sespec_insert(t, vep->ve_se->se_ops, &t->t_idle);
1661 
1662 		mdb_list_delete(&vep->ve_se->se_velist, vep);
1663 		mdb_tgt_sespec_rele(t, vep->ve_se);
1664 
1665 		mdb_list_append(&sep->se_velist, vep);
1666 		mdb_tgt_sespec_hold(t, sep);
1667 
1668 		vep->ve_flags &= ~MDB_TGT_SPEC_MATCHED;
1669 		vep->ve_se = sep;
1670 
1671 	} else if (vep->ve_se->se_state != MDB_TGT_SPEC_IDLE)
1672 		mdb_tgt_sespec_idle_one(t, vep->ve_se, EMDB_SPECDIS);
1673 
1674 	vep->ve_flags |= MDB_TGT_SPEC_DISABLED;
1675 	return (0);
1676 }
1677 
/*
 * Delete the given vespec.  We use the MDB_TGT_SPEC_DELETED flag to ensure
 * that multiple calls to mdb_tgt_vespec_delete do not attempt to decrement
 * the reference count on the vespec more than once.  This is because the
 * vespec may remain referenced if it is currently held by another routine
 * (e.g. vespec_iter), and so the user could attempt to delete it more than
 * once since its reference count will be >= 2 prior to the first delete call.
 */
1686 int
1687 mdb_tgt_vespec_delete(mdb_tgt_t *t, int id)
1688 {
1689 	mdb_vespec_t *vep = mdb_tgt_vespec_lookup(t, id);
1690 
1691 	if (vep == NULL)
1692 		return (set_errno(EMDB_NOSESPEC));
1693 
1694 	if (vep->ve_flags & MDB_TGT_SPEC_DELETED)
1695 		return (set_errno(EBUSY));
1696 
1697 	vep->ve_flags |= MDB_TGT_SPEC_DELETED;
1698 	mdb_tgt_vespec_rele(t, vep);
1699 	return (0);
1700 }
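
/*
 * Illustrative sketch (hypothetical VID): if another routine still holds a
 * reference to the vespec, a second delete fails instead of dropping the
 * reference count again:
 *
 *	(void) mdb_tgt_vespec_delete(t, id);	returns 0, vespec marked DELETED
 *	(void) mdb_tgt_vespec_delete(t, id);	returns -1 with errno == EBUSY
 */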
1701 
1702 int
1703 mdb_tgt_add_vbrkpt(mdb_tgt_t *t, uintptr_t addr,
1704     int spec_flags, mdb_tgt_se_f *func, void *p)
1705 {
1706 	return (t->t_ops->t_add_vbrkpt(t, addr, spec_flags, func, p));
1707 }
1708 
1709 int
1710 mdb_tgt_add_sbrkpt(mdb_tgt_t *t, const char *symbol,
1711     int spec_flags, mdb_tgt_se_f *func, void *p)
1712 {
1713 	return (t->t_ops->t_add_sbrkpt(t, symbol, spec_flags, func, p));
1714 }
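
/*
 * Illustrative sketch of arming a symbolic breakpoint through the wrapper
 * above.  The callback signature matches mdb_tgt_se_f (see no_se_f below);
 * the symbol name, flags and variable names are hypothetical:
 *
 *	static void
 *	example_bp_cb(mdb_tgt_t *t, int vid, void *data)
 *	{
 *		mdb_printf("breakpoint [ %d ] hit\n", vid);
 *	}
 *
 *	id = mdb_tgt_add_sbrkpt(t, "libc.so.1`malloc",
 *	    MDB_TGT_SPEC_AUTODEL, example_bp_cb, NULL);
 */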
1715 
1716 int
1717 mdb_tgt_add_pwapt(mdb_tgt_t *t, physaddr_t pa, size_t n, uint_t flags,
1718     int spec_flags, mdb_tgt_se_f *func, void *p)
1719 {
1720 	if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1721 		(void) set_errno(EINVAL);
1722 		return (0);
1723 	}
1724 
1725 	if (pa + n < pa) {
1726 		(void) set_errno(EMDB_WPRANGE);
1727 		return (0);
1728 	}
1729 
1730 	return (t->t_ops->t_add_pwapt(t, pa, n, flags, spec_flags, func, p));
1731 }
1732 
1733 int
1734 mdb_tgt_add_vwapt(mdb_tgt_t *t, uintptr_t va, size_t n, uint_t flags,
1735     int spec_flags, mdb_tgt_se_f *func, void *p)
1736 {
1737 	if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1738 		(void) set_errno(EINVAL);
1739 		return (0);
1740 	}
1741 
1742 	if (va + n < va) {
1743 		(void) set_errno(EMDB_WPRANGE);
1744 		return (0);
1745 	}
1746 
1747 	return (t->t_ops->t_add_vwapt(t, va, n, flags, spec_flags, func, p));
1748 }
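
/*
 * Illustrative sketch (hypothetical address and callback): watch one 64-bit
 * word for write access.  A flags value outside MDB_TGT_WA_RWX, an empty
 * flags value, or a range that wraps past the end of the address space is
 * rejected here before the target ops vector is consulted:
 *
 *	id = mdb_tgt_add_vwapt(t, addr, sizeof (uint64_t),
 *	    MDB_TGT_WA_W, MDB_TGT_SPEC_STICKY, example_wp_cb, NULL);
 */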
1749 
1750 int
1751 mdb_tgt_add_iowapt(mdb_tgt_t *t, uintptr_t addr, size_t n, uint_t flags,
1752     int spec_flags, mdb_tgt_se_f *func, void *p)
1753 {
1754 	if ((flags & ~MDB_TGT_WA_RWX) || flags == 0) {
1755 		(void) set_errno(EINVAL);
1756 		return (0);
1757 	}
1758 
1759 	if (addr + n < addr) {
1760 		(void) set_errno(EMDB_WPRANGE);
1761 		return (0);
1762 	}
1763 
1764 	return (t->t_ops->t_add_iowapt(t, addr, n, flags, spec_flags, func, p));
1765 }
1766 
1767 int
1768 mdb_tgt_add_sysenter(mdb_tgt_t *t, int sysnum,
1769     int spec_flags, mdb_tgt_se_f *func, void *p)
1770 {
1771 	return (t->t_ops->t_add_sysenter(t, sysnum, spec_flags, func, p));
1772 }
1773 
1774 int
1775 mdb_tgt_add_sysexit(mdb_tgt_t *t, int sysnum,
1776     int spec_flags, mdb_tgt_se_f *func, void *p)
1777 {
1778 	return (t->t_ops->t_add_sysexit(t, sysnum, spec_flags, func, p));
1779 }
1780 
1781 int
1782 mdb_tgt_add_signal(mdb_tgt_t *t, int sig,
1783     int spec_flags, mdb_tgt_se_f *func, void *p)
1784 {
1785 	return (t->t_ops->t_add_signal(t, sig, spec_flags, func, p));
1786 }
1787 
1788 int
1789 mdb_tgt_add_fault(mdb_tgt_t *t, int flt,
1790     int spec_flags, mdb_tgt_se_f *func, void *p)
1791 {
1792 	return (t->t_ops->t_add_fault(t, flt, spec_flags, func, p));
1793 }
1794 
1795 int
1796 mdb_tgt_getareg(mdb_tgt_t *t, mdb_tgt_tid_t tid,
1797     const char *rname, mdb_tgt_reg_t *rp)
1798 {
1799 	return (t->t_ops->t_getareg(t, tid, rname, rp));
1800 }
1801 
1802 int
1803 mdb_tgt_putareg(mdb_tgt_t *t, mdb_tgt_tid_t tid,
1804     const char *rname, mdb_tgt_reg_t r)
1805 {
1806 	return (t->t_ops->t_putareg(t, tid, rname, r));
1807 }
1808 
1809 int
1810 mdb_tgt_stack_iter(mdb_tgt_t *t, const mdb_tgt_gregset_t *gregs,
1811     mdb_tgt_stack_f *cb, void *p)
1812 {
1813 	return (t->t_ops->t_stack_iter(t, gregs, cb, p));
1814 }
1815 
1816 int
1817 mdb_tgt_xdata_iter(mdb_tgt_t *t, mdb_tgt_xdata_f *func, void *private)
1818 {
1819 	mdb_xdata_t *xdp;
1820 
1821 	for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1822 		if (func(private, xdp->xd_name, xdp->xd_desc,
1823 		    xdp->xd_copy(t, NULL, 0)) != 0)
1824 			break;
1825 	}
1826 
1827 	return (0);
1828 }
1829 
1830 ssize_t
1831 mdb_tgt_getxdata(mdb_tgt_t *t, const char *name, void *buf, size_t nbytes)
1832 {
1833 	mdb_xdata_t *xdp;
1834 
1835 	for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1836 		if (strcmp(xdp->xd_name, name) == 0)
1837 			return (xdp->xd_copy(t, buf, nbytes));
1838 	}
1839 
1840 	return (set_errno(ENODATA));
1841 }
1842 
1843 long
1844 mdb_tgt_notsup()
1845 {
1846 	return (set_errno(EMDB_TGTNOTSUP));
1847 }
1848 
1849 void *
1850 mdb_tgt_null()
1851 {
1852 	(void) set_errno(EMDB_TGTNOTSUP);
1853 	return (NULL);
1854 }
1855 
1856 long
1857 mdb_tgt_nop()
1858 {
1859 	return (0L);
1860 }
1861 
1862 int
1863 mdb_tgt_xdata_insert(mdb_tgt_t *t, const char *name, const char *desc,
1864 	ssize_t (*copy)(mdb_tgt_t *, void *, size_t))
1865 {
1866 	mdb_xdata_t *xdp;
1867 
1868 	for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1869 		if (strcmp(xdp->xd_name, name) == 0)
1870 			return (set_errno(EMDB_XDEXISTS));
1871 	}
1872 
1873 	xdp = mdb_alloc(sizeof (mdb_xdata_t), UM_SLEEP);
1874 	mdb_list_append(&t->t_xdlist, xdp);
1875 
1876 	xdp->xd_name = name;
1877 	xdp->xd_desc = desc;
1878 	xdp->xd_copy = copy;
1879 
1880 	return (0);
1881 }
1882 
1883 int
1884 mdb_tgt_xdata_delete(mdb_tgt_t *t, const char *name)
1885 {
1886 	mdb_xdata_t *xdp;
1887 
1888 	for (xdp = mdb_list_next(&t->t_xdlist); xdp; xdp = mdb_list_next(xdp)) {
1889 		if (strcmp(xdp->xd_name, name) == 0) {
1890 			mdb_list_delete(&t->t_xdlist, xdp);
1891 			mdb_free(xdp, sizeof (mdb_xdata_t));
1892 			return (0);
1893 		}
1894 	}
1895 
1896 	return (set_errno(EMDB_NOXD));
1897 }
1898 
1899 int
1900 mdb_tgt_sym_match(const GElf_Sym *sym, uint_t mask)
1901 {
1902 	uchar_t s_bind = GELF_ST_BIND(sym->st_info);
1903 	uchar_t s_type = GELF_ST_TYPE(sym->st_info);
1904 
	/*
	 * In case you haven't already guessed, this relies on the bitmask
	 * encoding of symbol type and binding used by <mdb/mdb_target.h> and
	 * <libproc.h> matching the order of the STB and STT constants in
	 * <sys/elf.h>.  ELF can't change without breaking binary
	 * compatibility, so I think this is reasonably fair game.
	 */
1912 	if (s_bind < STB_NUM && s_type < STT_NUM) {
1913 		uint_t type = (1 << (s_type + 8)) | (1 << s_bind);
1914 		return ((type & ~mask) == 0);
1915 	}
1916 
1917 	return (0); /* Unknown binding or type; fail to match */
1918 }
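
/*
 * Illustrative sketch of the encoding described above (the mask is built by
 * hand here; the actual mask macros live in <mdb/mdb_target.h>): a mask that
 * accepts global or local function symbols would be
 *
 *	uint_t mask = (1 << (STT_FUNC + 8)) |
 *	    (1 << STB_GLOBAL) | (1 << STB_LOCAL);
 *
 * and a symbol whose st_info is GELF_ST_INFO(STB_GLOBAL, STT_OBJECT) fails
 * to match it because the STT_OBJECT type bit falls outside the mask.
 */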
1919 
1920 void
1921 mdb_tgt_elf_export(mdb_gelf_file_t *gf)
1922 {
1923 	GElf_Xword d = 0, t = 0;
1924 	GElf_Addr b = 0, e = 0;
1925 	uint32_t m = 0;
1926 	mdb_var_t *v;
1927 
1928 	/*
1929 	 * Reset legacy adb variables based on the specified ELF object file
1930 	 * provided by the target.  We define these variables:
1931 	 *
1932 	 * b - the address of the data segment (first writeable Phdr)
1933 	 * d - the size of the data segment
1934 	 * e - the address of the entry point
1935 	 * m - the magic number identifying the file
	 * t - the size of the text segment (first executable Phdr)
1937 	 */
1938 	if (gf != NULL) {
1939 		const GElf_Phdr *text = NULL, *data = NULL;
1940 		size_t i;
1941 
1942 		e = gf->gf_ehdr.e_entry;
1943 		bcopy(&gf->gf_ehdr.e_ident[EI_MAG0], &m, sizeof (m));
1944 
1945 		for (i = 0; i < gf->gf_npload; i++) {
1946 			if (text == NULL && (gf->gf_phdrs[i].p_flags & PF_X))
1947 				text = &gf->gf_phdrs[i];
1948 			if (data == NULL && (gf->gf_phdrs[i].p_flags & PF_W))
1949 				data = &gf->gf_phdrs[i];
1950 		}
1951 
1952 		if (text != NULL)
1953 			t = text->p_memsz;
1954 		if (data != NULL) {
1955 			b = data->p_vaddr;
1956 			d = data->p_memsz;
1957 		}
1958 	}
1959 
1960 	if ((v = mdb_nv_lookup(&mdb.m_nv, "b")) != NULL)
1961 		mdb_nv_set_value(v, b);
1962 	if ((v = mdb_nv_lookup(&mdb.m_nv, "d")) != NULL)
1963 		mdb_nv_set_value(v, d);
1964 	if ((v = mdb_nv_lookup(&mdb.m_nv, "e")) != NULL)
1965 		mdb_nv_set_value(v, e);
1966 	if ((v = mdb_nv_lookup(&mdb.m_nv, "m")) != NULL)
1967 		mdb_nv_set_value(v, m);
1968 	if ((v = mdb_nv_lookup(&mdb.m_nv, "t")) != NULL)
1969 		mdb_nv_set_value(v, t);
1970 }
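
/*
 * For illustration, once exported these behave like ordinary named debugger
 * variables; e.g. the entry point can be printed from the prompt with a
 * command such as:
 *
 *	> <e=K
 */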
1971 
1972 /*ARGSUSED*/
1973 void
1974 mdb_tgt_sespec_hold(mdb_tgt_t *t, mdb_sespec_t *sep)
1975 {
1976 	sep->se_refs++;
1977 	ASSERT(sep->se_refs != 0);
1978 }
1979 
1980 void
1981 mdb_tgt_sespec_rele(mdb_tgt_t *t, mdb_sespec_t *sep)
1982 {
1983 	ASSERT(sep->se_refs != 0);
1984 
1985 	if (--sep->se_refs == 0) {
1986 		mdb_dprintf(MDB_DBG_TGT, "destroying sespec %p\n", (void *)sep);
1987 		ASSERT(mdb_list_next(&sep->se_velist) == NULL);
1988 
1989 		if (sep->se_state != MDB_TGT_SPEC_IDLE) {
1990 			sep->se_ops->se_dtor(t, sep);
1991 			mdb_list_delete(&t->t_active, sep);
1992 		} else
1993 			mdb_list_delete(&t->t_idle, sep);
1994 
1995 		mdb_free(sep, sizeof (mdb_sespec_t));
1996 	}
1997 }
1998 
1999 mdb_sespec_t *
2000 mdb_tgt_sespec_insert(mdb_tgt_t *t, const mdb_se_ops_t *ops, mdb_list_t *list)
2001 {
2002 	mdb_sespec_t *sep = mdb_zalloc(sizeof (mdb_sespec_t), UM_SLEEP);
2003 
2004 	if (list == &t->t_active)
2005 		sep->se_state = MDB_TGT_SPEC_ACTIVE;
2006 	else
2007 		sep->se_state = MDB_TGT_SPEC_IDLE;
2008 
2009 	mdb_list_append(list, sep);
2010 	sep->se_ops = ops;
2011 	return (sep);
2012 }
2013 
2014 mdb_sespec_t *
2015 mdb_tgt_sespec_lookup_active(mdb_tgt_t *t, const mdb_se_ops_t *ops, void *args)
2016 {
2017 	mdb_sespec_t *sep;
2018 
2019 	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
2020 		if (sep->se_ops == ops && sep->se_ops->se_secmp(t, sep, args))
2021 			break;
2022 	}
2023 
2024 	return (sep);
2025 }
2026 
2027 mdb_sespec_t *
2028 mdb_tgt_sespec_lookup_idle(mdb_tgt_t *t, const mdb_se_ops_t *ops, void *args)
2029 {
2030 	mdb_sespec_t *sep;
2031 
2032 	for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
2033 		if (sep->se_ops == ops && sep->se_ops->se_vecmp(t,
2034 		    mdb_list_next(&sep->se_velist), args))
2035 			break;
2036 	}
2037 
2038 	return (sep);
2039 }
2040 
2041 /*ARGSUSED*/
2042 void
2043 mdb_tgt_vespec_hold(mdb_tgt_t *t, mdb_vespec_t *vep)
2044 {
2045 	vep->ve_refs++;
2046 	ASSERT(vep->ve_refs != 0);
2047 }
2048 
2049 void
2050 mdb_tgt_vespec_rele(mdb_tgt_t *t, mdb_vespec_t *vep)
2051 {
2052 	ASSERT(vep->ve_refs != 0);
2053 
2054 	if (--vep->ve_refs == 0) {
2055 		/*
2056 		 * Remove this vespec from the sespec's velist and decrement
2057 		 * the reference count on the sespec.
2058 		 */
2059 		mdb_list_delete(&vep->ve_se->se_velist, vep);
2060 		mdb_tgt_sespec_rele(t, vep->ve_se);
2061 
2062 		/*
2063 		 * If we are deleting the most recently assigned VID, reset
2064 		 * t_vepos or t_veneg as appropriate to re-use that number.
2065 		 * This could be enhanced to re-use any free number by
2066 		 * maintaining a bitmap or hash of the allocated IDs.
2067 		 */
2068 		if (vep->ve_id > 0 && t->t_vepos == vep->ve_id + 1)
2069 			t->t_vepos = vep->ve_id;
2070 		else if (vep->ve_id < 0 && t->t_veneg == -vep->ve_id + 1)
2071 			t->t_veneg = -vep->ve_id;
2072 
2073 		/*
2074 		 * Call the destructor to clean up ve_args, and then free
2075 		 * the actual vespec structure.
2076 		 */
2077 		vep->ve_dtor(vep);
2078 		mdb_free(vep, sizeof (mdb_vespec_t));
2079 
2080 		ASSERT(t->t_vecnt != 0);
2081 		t->t_vecnt--;
2082 	}
2083 }
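
/*
 * Worked example of the VID re-use above (hypothetical sequence): with
 * visible vespecs 1, 2 and 3 allocated, t_vepos is 4.  Releasing vespec 3
 * resets t_vepos to 3 so the next visible insert re-uses that ID; releasing
 * vespec 2 instead leaves t_vepos at 4 and the gap unfilled.
 */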
2084 
2085 int
2086 mdb_tgt_vespec_insert(mdb_tgt_t *t, const mdb_se_ops_t *ops, int flags,
2087     mdb_tgt_se_f *func, void *data, void *args, void (*dtor)(mdb_vespec_t *))
2088 {
2089 	mdb_vespec_t *vep = mdb_zalloc(sizeof (mdb_vespec_t), UM_SLEEP);
2090 
2091 	int id, mult, *seqp;
2092 	mdb_sespec_t *sep;
2093 
	/*
	 * Make sure that only one MDB_TGT_SPEC_AUTO* bit is set in the new
	 * flags value: extra bits are cleared in order of precedence.
	 */
2098 	if (flags & MDB_TGT_SPEC_AUTOSTOP)
2099 		flags &= ~(MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS);
2100 	else if (flags & MDB_TGT_SPEC_AUTODEL)
2101 		flags &= ~MDB_TGT_SPEC_AUTODIS;
2102 
2103 	/*
2104 	 * The TEMPORARY property always takes precedence over STICKY.
2105 	 */
2106 	if (flags & MDB_TGT_SPEC_TEMPORARY)
2107 		flags &= ~MDB_TGT_SPEC_STICKY;
2108 
2109 	/*
2110 	 * Find a matching sespec or create a new one on the appropriate list.
2111 	 * We always create a new sespec if the vespec is created disabled.
2112 	 */
2113 	if (flags & MDB_TGT_SPEC_DISABLED)
2114 		sep = mdb_tgt_sespec_insert(t, ops, &t->t_idle);
2115 	else if ((sep = mdb_tgt_sespec_lookup_active(t, ops, args)) == NULL &&
2116 	    (sep = mdb_tgt_sespec_lookup_idle(t, ops, args)) == NULL)
2117 		sep = mdb_tgt_sespec_insert(t, ops, &t->t_active);
2118 
2119 	/*
2120 	 * Generate a new ID for the vespec.  Increasing positive integers are
2121 	 * assigned to visible vespecs; decreasing negative integers are
2122 	 * assigned to hidden vespecs.  The target saves our most recent choice.
2123 	 */
2124 	if (flags & MDB_TGT_SPEC_INTERNAL) {
2125 		seqp = &t->t_veneg;
2126 		mult = -1;
2127 	} else {
2128 		seqp = &t->t_vepos;
2129 		mult = 1;
2130 	}
2131 
2132 	id = *seqp;
2133 
2134 	while (mdb_tgt_vespec_lookup(t, id * mult) != NULL)
2135 		id = MAX(id + 1, 1);
2136 
2137 	*seqp = MAX(id + 1, 1);
2138 
2139 	vep->ve_id = id * mult;
2140 	vep->ve_flags = flags & ~(MDB_TGT_SPEC_MATCHED | MDB_TGT_SPEC_DELETED);
2141 	vep->ve_se = sep;
2142 	vep->ve_callback = func;
2143 	vep->ve_data = data;
2144 	vep->ve_args = args;
2145 	vep->ve_dtor = dtor;
2146 
2147 	mdb_list_append(&sep->se_velist, vep);
2148 	mdb_tgt_sespec_hold(t, sep);
2149 
2150 	mdb_tgt_vespec_hold(t, vep);
2151 	t->t_vecnt++;
2152 
2153 	/*
2154 	 * If this vespec is the first reference to the sespec and it's active,
2155 	 * then it is newly created and we should attempt to initialize it.
2156 	 * If se_ctor fails, then move the sespec back to the idle list.
2157 	 */
2158 	if (sep->se_refs == 1 && sep->se_state == MDB_TGT_SPEC_ACTIVE &&
2159 	    sep->se_ops->se_ctor(t, sep, vep->ve_args) == -1) {
2160 
2161 		mdb_list_delete(&t->t_active, sep);
2162 		mdb_list_append(&t->t_idle, sep);
2163 
2164 		sep->se_state = MDB_TGT_SPEC_IDLE;
2165 		sep->se_errno = errno;
2166 		sep->se_data = NULL;
2167 	}
2168 
2169 	/*
2170 	 * If the sespec is active and the target is currently running (because
2171 	 * we grabbed it using PGRAB_NOSTOP), then go ahead and attempt to arm
2172 	 * the sespec so it will take effect immediately.
2173 	 */
2174 	if (sep->se_state == MDB_TGT_SPEC_ACTIVE &&
2175 	    t->t_status.st_state == MDB_TGT_RUNNING)
2176 		mdb_tgt_sespec_arm_one(t, sep);
2177 
2178 	mdb_dprintf(MDB_DBG_TGT, "inserted [ %d ] sep=%p refs=%u state=%d\n",
2179 	    vep->ve_id, (void *)sep, sep->se_refs, sep->se_state);
2180 
2181 	return (vep->ve_id);
2182 }
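
/*
 * Illustrative sketch of the ID assignment above, assuming t_vepos and
 * t_veneg both start at 1 for a freshly created target:
 *
 *	1st visible insert               -> VID  1, t_vepos becomes 2
 *	2nd visible insert               -> VID  2, t_vepos becomes 3
 *	1st MDB_TGT_SPEC_INTERNAL insert -> VID -1, t_veneg becomes 2
 */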
2183 
/*
 * Search the target's active and idle lists (the latter also holds disabled
 * vespecs) for the vespec matching the specified VID, and return a pointer to
 * it, or NULL if no match is found.
 */
2188 mdb_vespec_t *
2189 mdb_tgt_vespec_lookup(mdb_tgt_t *t, int vid)
2190 {
2191 	mdb_sespec_t *sep;
2192 	mdb_vespec_t *vep;
2193 
2194 	if (vid == 0)
2195 		return (NULL); /* 0 is never a valid VID */
2196 
2197 	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
2198 		for (vep = mdb_list_next(&sep->se_velist); vep;
2199 		    vep = mdb_list_next(vep)) {
2200 			if (vep->ve_id == vid)
2201 				return (vep);
2202 		}
2203 	}
2204 
2205 	for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
2206 		for (vep = mdb_list_next(&sep->se_velist); vep;
2207 		    vep = mdb_list_next(vep)) {
2208 			if (vep->ve_id == vid)
2209 				return (vep);
2210 		}
2211 	}
2212 
2213 	return (NULL);
2214 }
2215 
2216 /*ARGSUSED*/
2217 void
2218 no_ve_dtor(mdb_vespec_t *vep)
2219 {
2220 	/* default destructor does nothing */
2221 }
2222 
2223 /*ARGSUSED*/
2224 void
2225 no_se_f(mdb_tgt_t *t, int vid, void *data)
2226 {
2227 	/* default callback does nothing */
2228 }
2229 
2230 /*ARGSUSED*/
2231 void
2232 no_se_dtor(mdb_tgt_t *t, mdb_sespec_t *sep)
2233 {
2234 	/* default destructor does nothing */
2235 }
2236 
2237 /*ARGSUSED*/
2238 int
2239 no_se_secmp(mdb_tgt_t *t, mdb_sespec_t *sep, void *args)
2240 {
2241 	return (sep->se_data == args);
2242 }
2243 
2244 /*ARGSUSED*/
2245 int
2246 no_se_vecmp(mdb_tgt_t *t, mdb_vespec_t *vep, void *args)
2247 {
2248 	return (vep->ve_args == args);
2249 }
2250 
2251 /*ARGSUSED*/
2252 int
2253 no_se_arm(mdb_tgt_t *t, mdb_sespec_t *sep)
2254 {
2255 	return (0); /* return success */
2256 }
2257 
2258 /*ARGSUSED*/
2259 int
2260 no_se_disarm(mdb_tgt_t *t, mdb_sespec_t *sep)
2261 {
2262 	return (0); /* return success */
2263 }
2264 
2265 /*ARGSUSED*/
2266 int
2267 no_se_cont(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp)
2268 {
2269 	if (tsp != &t->t_status)
2270 		bcopy(&t->t_status, tsp, sizeof (mdb_tgt_status_t));
2271 
2272 	return (0); /* return success */
2273 }
2274 
2275 int
2276 mdb_tgt_register_dcmds(mdb_tgt_t *t, const mdb_dcmd_t *dcp, int flags)
2277 {
2278 	int fail = 0;
2279 
2280 	for (; dcp->dc_name != NULL; dcp++) {
2281 		if (mdb_module_add_dcmd(t->t_module, dcp, flags) == -1) {
2282 			warn("failed to add dcmd %s", dcp->dc_name);
2283 			fail++;
2284 		}
2285 	}
2286 
2287 	return (fail > 0 ? -1 : 0);
2288 }
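
/*
 * Illustrative sketch (the dcmd table, callback and flag value shown are
 * hypothetical): targets pass a NULL-terminated array, and a failure to add
 * any one entry is reported but does not stop registration of the rest:
 *
 *	static const mdb_dcmd_t example_dcmds[] = {
 *		{ "example_status", NULL, "print example target status",
 *		    example_status_dcmd },
 *		{ NULL }
 *	};
 *
 *	(void) mdb_tgt_register_dcmds(t, example_dcmds, MDB_MOD_FORCE);
 */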
2289 
2290 int
2291 mdb_tgt_register_walkers(mdb_tgt_t *t, const mdb_walker_t *wp, int flags)
2292 {
2293 	int fail = 0;
2294 
2295 	for (; wp->walk_name != NULL; wp++) {
2296 		if (mdb_module_add_walker(t->t_module, wp, flags) == -1) {
2297 			warn("failed to add walk %s", wp->walk_name);
2298 			fail++;
2299 		}
2300 	}
2301 
2302 	return (fail > 0 ? -1 : 0);
2303 }
2304 
2305 void
2306 mdb_tgt_register_regvars(mdb_tgt_t *t, const mdb_tgt_regdesc_t *rdp,
2307     const mdb_nv_disc_t *disc, int flags)
2308 {
2309 	for (; rdp->rd_name != NULL; rdp++) {
2310 		if (!(rdp->rd_flags & MDB_TGT_R_EXPORT))
2311 			continue; /* Don't export register as a variable */
2312 
2313 		if (rdp->rd_flags & MDB_TGT_R_RDONLY)
2314 			flags |= MDB_NV_RDONLY;
2315 
2316 		(void) mdb_nv_insert(&mdb.m_nv, rdp->rd_name, disc,
2317 		    (uintptr_t)t, MDB_NV_PERSIST | flags);
2318 	}
2319 }
2320