xref: /illumos-gate/usr/src/cmd/sgs/rtld/common/dlfcns.c (revision 94c894bb)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  *	Copyright (c) 1988 AT&T
29  *	  All Rights Reserved
30  */
31 
32 /*
33  * Programmatic interface to the run_time linker.
34  */
35 
36 #include	<sys/debug.h>
37 #include	<stdio.h>
38 #include	<string.h>
39 #include	<dlfcn.h>
40 #include	<synch.h>
41 #include	<limits.h>
42 #include	<debug.h>
43 #include	<conv.h>
44 #include	"_rtld.h"
45 #include	"_audit.h"
46 #include	"_elf.h"
47 #include	"_inline.h"
48 #include	"msg.h"
49 
50 /*
51  * Determine who called us - given a pc determine in which object it resides.
52  *
53  * For dlopen() the link map of the caller must be passed to load_so() so that
54  * the appropriate search rules (4.x or 5.0) are used to locate any
55  * dependencies.  Also, if we've been called from a 4.x module it may be
56  * necessary to fix the specified pathname so that it conforms with the 5.0 elf
57  * rules.
58  *
59  * For dlsym() the link map of the caller is used to determine RTLD_NEXT
60  * requests, together with requests based off of a dlopen(0).
61  * For dladdr() this routines provides a generic means of scanning all loaded
62  * segments.
63  */
64 Rt_map *
65 _caller(caddr_t cpc, int flags)
66 {
67 	Lm_list	*lml;
68 	Aliste	idx1;
69 
70 	for (APLIST_TRAVERSE(dynlm_list, idx1, lml)) {
71 		Aliste	idx2;
72 		Lm_cntl	*lmc;
73 
74 		for (ALIST_TRAVERSE(lml->lm_lists, idx2, lmc)) {
75 			Rt_map	*lmp;
76 
77 			for (lmp = lmc->lc_head; lmp;
78 			    lmp = NEXT_RT_MAP(lmp)) {
79 
80 				if (find_segment(cpc, lmp))
81 					return (lmp);
82 			}
83 		}
84 	}
85 
86 	/*
87 	 * No mapping can be determined.  If asked for a default, assume this
88 	 * is from the executable.
89 	 */
90 	if (flags & CL_EXECDEF)
91 		return ((Rt_map *)lml_main.lm_head);
92 
93 	return (0);
94 }
95 
96 #pragma weak _dlerror = dlerror
97 
98 /*
99  * External entry for dlerror(3dl).  Returns a pointer to the string describing
100  * the last occurring error.  The last occurring error is cleared.
101  */
102 char *
103 dlerror()
104 {
105 	char	*error;
106 	Rt_map	*clmp;
107 	int	entry;
108 
109 	entry = enter(0);
110 
111 	clmp = _caller(caller(), CL_EXECDEF);
112 
113 	DBG_CALL(Dbg_dl_dlerror(clmp, lasterr));
114 
115 	error = lasterr;
116 	lasterr = NULL;
117 
118 	if (entry)
119 		leave(LIST(clmp), 0);
120 	return (error);
121 }
122 
123 /*
124  * Add a dependency as a group descriptor to a group handle.  Returns 0 on
125  * failure.  On success, returns the group descriptor, and if alep is non-NULL
126  * the *alep is set to ALE_EXISTS if the dependency already exists, or to
127  * ALE_CREATE if the dependency is newly created.
128  */
129 Grp_desc *
130 hdl_add(Grp_hdl *ghp, Rt_map *lmp, uint_t dflags, int *alep)
131 {
132 	Grp_desc	*gdp;
133 	Aliste		idx;
134 	int		ale = ALE_CREATE;
135 	uint_t		oflags;
136 
137 	/*
138 	 * Make sure this dependency hasn't already been recorded.
139 	 */
140 	for (ALIST_TRAVERSE(ghp->gh_depends, idx, gdp)) {
141 		if (gdp->gd_depend == lmp) {
142 			ale = ALE_EXISTS;
143 			break;
144 		}
145 	}
146 
147 	if (ale == ALE_CREATE) {
148 		Grp_desc	gd;
149 
150 		/*
151 		 * Create a new handle descriptor.
152 		 */
153 		gd.gd_depend = lmp;
154 		gd.gd_flags = 0;
155 
156 		/*
157 		 * Indicate this object is a part of this handles group.
158 		 */
159 		if (aplist_append(&GROUPS(lmp), ghp, AL_CNT_GROUPS) == NULL)
160 			return (NULL);
161 
162 		/*
163 		 * Append the new dependency to this handle.
164 		 */
165 		if ((gdp = alist_append(&ghp->gh_depends, &gd,
166 		    sizeof (Grp_desc), AL_CNT_DEPENDS)) == NULL)
167 			return (NULL);
168 	}
169 
170 	oflags = gdp->gd_flags;
171 	gdp->gd_flags |= dflags;
172 
173 	if (DBG_ENABLED) {
174 		if (ale == ALE_CREATE)
175 			DBG_CALL(Dbg_file_hdl_action(ghp, lmp, DBG_DEP_ADD,
176 			    gdp->gd_flags));
177 		else if (gdp->gd_flags != oflags)
178 			DBG_CALL(Dbg_file_hdl_action(ghp, lmp, DBG_DEP_UPDATE,
179 			    gdp->gd_flags));
180 	}
181 
182 	if (alep)
183 		*alep = ale;
184 	return (gdp);
185 }
186 
187 /*
188  * Create a handle.
189  *
190  *   rlmp -	represents the reference link-map for which the handle is being
191  *		created.
192  *   clmp -	represents the caller who is requesting the handle.
193  *   hflags -	provide group handle flags (GPH_*) that affect the use of the
194  *		handle, such as dlopen(0), or use or use of RTLD_FIRST.
195  *   rdflags -	provide group dependency flags for the reference link-map rlmp,
196  *		such as whether the dependency can be used for dlsym(), can be
197  *		relocated against, or whether this objects dependencies should
198  *		be processed.
199  *   cdflags -	provide group dependency flags for the caller.
200  */
201 Grp_hdl *
202 hdl_create(Lm_list *lml, Rt_map *rlmp, Rt_map *clmp, uint_t hflags,
203     uint_t rdflags, uint_t cdflags)
204 {
205 	Grp_hdl	*ghp = NULL, *aghp;
206 	APlist	**alpp;
207 	Aliste	idx;
208 
209 	/*
210 	 * For dlopen(0) the handle is maintained as part of the link-map list,
211 	 * otherwise the handle is associated with the reference link-map.
212 	 */
213 	if (hflags & GPH_ZERO)
214 		alpp = &(lml->lm_handle);
215 	else
216 		alpp = &(HANDLES(rlmp));
217 
218 	/*
219 	 * Objects can contain multiple handles depending on the handle flags
220 	 * supplied.  Most RTLD flags pertain to the object itself and the
221 	 * bindings that it can achieve.  Multiple handles for these flags
222 	 * don't make sense.  But if the flag determines how the handle might
223 	 * be used, then multiple handles may exist.  Presently this only makes
224 	 * sense for RTLD_FIRST.  Determine if an appropriate handle already
225 	 * exists.
226 	 */
227 	for (APLIST_TRAVERSE(*alpp, idx, aghp)) {
228 		if ((aghp->gh_flags & GPH_FIRST) == (hflags & GPH_FIRST)) {
229 			ghp = aghp;
230 			break;
231 		}
232 	}
233 
234 	if (ghp == NULL) {
235 		uint_t	ndx;
236 
237 		/*
238 		 * If this is the first request for this handle, allocate and
239 		 * initialize a new handle.
240 		 */
241 		DBG_CALL(Dbg_file_hdl_title(DBG_HDL_CREATE));
242 
243 		if ((ghp = malloc(sizeof (Grp_hdl))) == NULL)
244 			return (NULL);
245 
246 		/*
247 		 * Associate the handle with the link-map list or the reference
248 		 * link-map as appropriate.
249 		 */
250 		if (aplist_append(alpp, ghp, AL_CNT_GROUPS) == NULL) {
251 			free(ghp);
252 			return (NULL);
253 		}
254 
255 		/*
256 		 * Record the existence of this handle for future verification.
257 		 */
258 		/* LINTED */
259 		ndx = (uintptr_t)ghp % HDLIST_SZ;
260 
261 		if (aplist_append(&hdl_alp[ndx], ghp, AL_CNT_HANDLES) == NULL) {
262 			(void) aplist_delete_value(*alpp, ghp);
263 			free(ghp);
264 			return (NULL);
265 		}
266 
267 		ghp->gh_depends = NULL;
268 		ghp->gh_refcnt = 1;
269 		ghp->gh_flags = hflags;
270 
271 		/*
272 		 * A dlopen(0) handle is identified by the GPH_ZERO flag, the
273 		 * head of the link-map list is defined as the owner.  There is
274 		 * no need to maintain a list of dependencies, for when this
275 		 * handle is used (for dlsym()) a dynamic search through the
276 		 * entire link-map list provides for searching all objects with
277 		 * GLOBAL visibility.
278 		 */
279 		if (hflags & GPH_ZERO) {
280 			ghp->gh_ownlmp = lml->lm_head;
281 			ghp->gh_ownlml = lml;
282 		} else {
283 			ghp->gh_ownlmp = rlmp;
284 			ghp->gh_ownlml = LIST(rlmp);
285 
286 			if (hdl_add(ghp, rlmp, rdflags, NULL) == NULL)
287 				return (NULL);
288 
289 			/*
290 			 * If this new handle is a private handle, there's no
291 			 * need to track the caller, so we're done.
292 			 */
293 			if (hflags & GPH_PRIVATE)
294 				return (ghp);
295 
296 			/*
297 			 * If this new handle is public, and isn't a special
298 			 * handle representing ld.so.1, indicate that a local
299 			 * group now exists.  This state allows singleton
300 			 * searches to be optimized.
301 			 */
302 			if ((hflags & GPH_LDSO) == 0)
303 				LIST(rlmp)->lm_flags |= LML_FLG_GROUPSEXIST;
304 		}
305 	} else {
306 		/*
307 		 * If a handle already exists, bump its reference count.
308 		 *
309 		 * If the previous reference count was 0, then this is a handle
310 		 * that an earlier call to dlclose() was unable to remove.  Such
311 		 * handles are put on the orphan list.  As this handle is back
312 		 * in use, it must be removed from the orphan list.
313 		 *
314 		 * Note, handles associated with a link-map list itself (i.e.
315 		 * dlopen(0)) can have a reference count of 0.  However, these
316 		 * handles are never deleted, and therefore are never moved to
317 		 * the orphan list.
318 		 */
319 		if ((ghp->gh_refcnt++ == 0) &&
320 		    ((ghp->gh_flags & GPH_ZERO) == 0)) {
321 			uint_t	ndx;
322 
323 			/* LINTED */
324 			ndx = (uintptr_t)ghp % HDLIST_SZ;
325 
326 			(void) aplist_delete_value(hdl_alp[HDLIST_ORP], ghp);
327 			(void) aplist_append(&hdl_alp[ndx], ghp,
328 			    AL_CNT_HANDLES);
329 
330 			if (DBG_ENABLED) {
331 				Aliste		idx;
332 				Grp_desc	*gdp;
333 
334 				DBG_CALL(Dbg_file_hdl_title(DBG_HDL_REINST));
335 				for (ALIST_TRAVERSE(ghp->gh_depends, idx, gdp))
336 					DBG_CALL(Dbg_file_hdl_action(ghp,
337 					    gdp->gd_depend, DBG_DEP_REINST, 0));
338 			}
339 		}
340 
341 		/*
342 		 * If we've been asked to create a private handle, there's no
343 		 * need to track the caller.
344 		 */
345 		if (hflags & GPH_PRIVATE) {
346 			/*
347 			 * Negate the reference count increment.
348 			 */
349 			ghp->gh_refcnt--;
350 			return (ghp);
351 		} else {
352 			/*
353 			 * If a private handle already exists, promote this
354 			 * handle to public by initializing both the reference
355 			 * count and the handle flags.
356 			 */
357 			if (ghp->gh_flags & GPH_PRIVATE) {
358 				ghp->gh_refcnt = 1;
359 				ghp->gh_flags &= ~GPH_PRIVATE;
360 				ghp->gh_flags |= hflags;
361 			}
362 		}
363 	}
364 
365 	/*
366 	 * Keep track of the parent (caller).  As this object can be referenced
367 	 * by different parents, this processing is carried out every time a
368 	 * handle is requested.
369 	 */
370 	if (clmp && (hdl_add(ghp, clmp, cdflags, NULL) == NULL))
371 		return (NULL);
372 
373 	return (ghp);
374 }
375 
376 /*
377  * Initialize a handle that has been created for an object that is already
378  * loaded.  The handle is initialized with the present dependencies of that
379  * object.  Once this initialization has occurred, any new objects that might
380  * be loaded as dependencies (lazy-loading) are added to the handle as each new
381  * object is loaded.
382  */
383 int
384 hdl_initialize(Grp_hdl *ghp, Rt_map *nlmp, int mode, int promote)
385 {
386 	Aliste		idx;
387 	Grp_desc	*gdp;
388 
389 	/*
390 	 * If the handle has already been initialized, and the initial object's
391 	 * mode hasn't been promoted, there's no need to recompute the modes of
392 	 * any dependencies.  If the object we've added has just been opened,
393 	 * the objects dependencies will not yet have been processed.  These
394 	 * dependencies will be added on later calls to load_one().  Otherwise,
395 	 * this object already exists, so add all of its dependencies to the
396 	 * handle were operating on.
397 	 */
398 	if (((ghp->gh_flags & GPH_INITIAL) && (promote == 0)) ||
399 	    ((FLAGS(nlmp) & FLG_RT_ANALYZED) == 0)) {
400 		ghp->gh_flags |= GPH_INITIAL;
401 		return (1);
402 	}
403 
404 	DBG_CALL(Dbg_file_hdl_title(DBG_HDL_ADD));
405 	for (ALIST_TRAVERSE(ghp->gh_depends, idx, gdp)) {
406 		Rt_map		*lmp = gdp->gd_depend;
407 		Aliste		idx1;
408 		Bnd_desc	*bdp;
409 
410 		/*
411 		 * If this dependency doesn't indicate that its dependencies
412 		 * should be added to a handle, ignore it.  This case identifies
413 		 * a parent of a dlopen(RTLD_PARENT) request.
414 		 */
415 		if ((gdp->gd_flags & GPD_ADDEPS) == 0)
416 			continue;
417 
418 		for (APLIST_TRAVERSE(DEPENDS(lmp), idx1, bdp)) {
419 			Rt_map	*dlmp = bdp->b_depend;
420 
421 			if ((bdp->b_flags & BND_NEEDED) == 0)
422 				continue;
423 
424 			if (hdl_add(ghp, dlmp,
425 			    (GPD_DLSYM | GPD_RELOC | GPD_ADDEPS), NULL) == NULL)
426 				return (0);
427 
428 			(void) update_mode(dlmp, MODE(dlmp), mode);
429 		}
430 	}
431 	ghp->gh_flags |= GPH_INITIAL;
432 	return (1);
433 }
434 
435 /*
436  * Sanity check a program-provided handle.
437  */
438 static int
439 hdl_validate(Grp_hdl *ghp)
440 {
441 	Aliste		idx;
442 	Grp_hdl		*lghp;
443 	uint_t		ndx;
444 
445 	/* LINTED */
446 	ndx = (uintptr_t)ghp % HDLIST_SZ;
447 
448 	for (APLIST_TRAVERSE(hdl_alp[ndx], idx, lghp)) {
449 		if ((lghp == ghp) && (ghp->gh_refcnt != 0))
450 			return (1);
451 	}
452 	return (0);
453 }
454 
455 /*
456  * Core dlclose activity.
457  */
458 int
459 dlclose_core(Grp_hdl *ghp, Rt_map *clmp, Lm_list *lml)
460 {
461 	int	error;
462 
463 	/*
464 	 * If we're already at atexit() there's no point processing further,
465 	 * all objects have already been tsorted for fini processing.
466 	 */
467 	if (rtld_flags & RT_FL_ATEXIT)
468 		return (0);
469 
470 	/*
471 	 * Diagnose what we're up to.
472 	 */
473 	if (ghp->gh_flags & GPH_ZERO) {
474 		DBG_CALL(Dbg_dl_dlclose(clmp, MSG_ORIG(MSG_STR_ZERO),
475 		    DBG_DLCLOSE_IGNORE));
476 	} else {
477 		DBG_CALL(Dbg_dl_dlclose(clmp, NAME(ghp->gh_ownlmp),
478 		    DBG_DLCLOSE_NULL));
479 	}
480 
481 	/*
482 	 * Decrement reference count of this object.
483 	 */
484 	if (--(ghp->gh_refcnt))
485 		return (0);
486 
487 	/*
488 	 * If this handle is special (dlopen(0)), then leave it around - it
489 	 * has little overhead.
490 	 */
491 	if (ghp->gh_flags & GPH_ZERO)
492 		return (0);
493 
494 	/*
495 	 * This handle is no longer being referenced, remove it.  If this handle
496 	 * is part of an alternative link-map list, determine if the whole list
497 	 * can be removed also.
498 	 */
499 	error = remove_hdl(ghp, clmp, NULL);
500 
501 	if ((lml->lm_flags & (LML_FLG_BASELM | LML_FLG_RTLDLM)) == 0)
502 		remove_lml(lml);
503 
504 	return (error);
505 }
506 
507 /*
508  * Internal dlclose activity.  Called from user level or directly for internal
509  * error cleanup.
510  */
511 int
512 dlclose_intn(Grp_hdl *ghp, Rt_map *clmp)
513 {
514 	Rt_map	*nlmp = NULL;
515 	Lm_list	*olml = NULL;
516 	int	error;
517 
518 	/*
519 	 * Although we're deleting object(s) it's quite possible that additional
520 	 * objects get loaded from running the .fini section(s) of the objects
521 	 * being deleted.  These objects will have been added to the same
522 	 * link-map list as those objects being deleted.  Remember this list
523 	 * for later investigation.
524 	 */
525 	olml = ghp->gh_ownlml;
526 
527 	error = dlclose_core(ghp, clmp, olml);
528 
529 	/*
530 	 * Determine whether the original link-map list still exists.  In the
531 	 * case of a dlclose of an alternative (dlmopen) link-map the whole
532 	 * list may have been removed.
533 	 */
534 	if (olml) {
535 		Aliste	idx;
536 		Lm_list	*lml;
537 
538 		for (APLIST_TRAVERSE(dynlm_list, idx, lml)) {
539 			if (olml == lml) {
540 				nlmp = olml->lm_head;
541 				break;
542 			}
543 		}
544 	}
545 	load_completion(nlmp);
546 	return (error);
547 }
548 
549 /*
550  * Argument checking for dlclose.  Only called via external entry.
551  */
552 static int
553 dlclose_check(void *handle, Rt_map *clmp)
554 {
555 	Grp_hdl	*ghp = (Grp_hdl *)handle;
556 
557 	if (hdl_validate(ghp) == 0) {
558 		Conv_inv_buf_t	inv_buf;
559 
560 		(void) conv_invalid_val(&inv_buf, EC_NATPTR(ghp), 0);
561 		DBG_CALL(Dbg_dl_dlclose(clmp, inv_buf.buf, DBG_DLCLOSE_NULL));
562 
563 		eprintf(LIST(clmp), ERR_FATAL, MSG_INTL(MSG_ARG_INVHNDL),
564 		    EC_NATPTR(handle));
565 		return (1);
566 	}
567 	return (dlclose_intn(ghp, clmp));
568 }
569 
570 #pragma weak _dlclose = dlclose
571 
572 /*
573  * External entry for dlclose(3dl).  Returns 0 for success, non-zero otherwise.
574  */
575 int
576 dlclose(void *handle)
577 {
578 	int		error, entry;
579 	Rt_map		*clmp;
580 
581 	entry = enter(0);
582 
583 	clmp = _caller(caller(), CL_EXECDEF);
584 
585 	error = dlclose_check(handle, clmp);
586 
587 	if (entry)
588 		leave(LIST(clmp), 0);
589 	return (error);
590 }
591 
592 static uint_t	lmid = 0;
593 
594 /*
595  * The addition of new link-map lists is assumed to be in small quantities.
596  * Here, we assign a unique link-map id for diagnostic use.  Simply update the
597  * running link-map count until we max out.
598  */
599 int
600 newlmid(Lm_list *lml)
601 {
602 	char	buffer[MSG_LMID_ALT_SIZE + 12];
603 
604 	if (lmid == UINT_MAX) {
605 		lml->lm_lmid = UINT_MAX;
606 		(void) strncpy(buffer, MSG_ORIG(MSG_LMID_MAXED),
607 		    MSG_LMID_ALT_SIZE + 12);
608 	} else {
609 		lml->lm_lmid = lmid++;
610 		(void) snprintf(buffer, MSG_LMID_ALT_SIZE + 12,
611 		    MSG_ORIG(MSG_LMID_FMT), MSG_ORIG(MSG_LMID_ALT),
612 		    lml->lm_lmid);
613 	}
614 	if ((lml->lm_lmidstr = strdup(buffer)) == NULL)
615 		return (0);
616 
617 	return (1);
618 }
619 
620 /*
621  * Core dlopen activity.
622  */
623 static Grp_hdl *
624 dlmopen_core(Lm_list *lml, Lm_list *olml, const char *path, int mode,
625     Rt_map *clmp, uint_t flags, uint_t orig, int *in_nfavl)
626 {
627 	Alist		*palp = NULL;
628 	Rt_map		*nlmp;
629 	Grp_hdl		*ghp;
630 	Aliste		olmco, nlmco;
631 
632 	DBG_CALL(Dbg_dl_dlopen(clmp,
633 	    (path ? path : MSG_ORIG(MSG_STR_ZERO)), in_nfavl, mode));
634 
635 	/*
636 	 * Having diagnosed the originally defined modes, assign any defaults
637 	 * or corrections.
638 	 */
639 	if (((mode & (RTLD_GROUP | RTLD_WORLD)) == 0) &&
640 	    ((mode & RTLD_NOLOAD) == 0))
641 		mode |= (RTLD_GROUP | RTLD_WORLD);
642 	if ((mode & RTLD_NOW) && (rtld_flags2 & RT_FL2_BINDLAZY)) {
643 		mode &= ~RTLD_NOW;
644 		mode |= RTLD_LAZY;
645 	}
646 
647 	/*
648 	 * If the path specified is null then we're operating on global
649 	 * objects.  Associate a dummy handle with the link-map list.
650 	 */
651 	if (path == NULL) {
652 		Grp_hdl *ghp;
653 		uint_t	hflags, rdflags, cdflags;
654 		int	promote = 0;
655 
656 		/*
657 		 * Establish any flags for the handle (Grp_hdl).
658 		 *
659 		 *  -	This is a dummy, public, handle (0) that provides for a
660 		 *	dynamic	search of all global objects within the process.
661 		 *  -   Use of the RTLD_FIRST mode indicates that only the first
662 		 *	dependency on the handle (the referenced object) can be
663 		 *	used to satisfy dlsym() requests.
664 		 */
665 		hflags = (GPH_PUBLIC | GPH_ZERO);
666 		if (mode & RTLD_FIRST)
667 			hflags |= GPH_FIRST;
668 
669 		/*
670 		 * Establish the flags for the referenced dependency descriptor
671 		 * (Grp_desc).
672 		 *
673 		 *  -	The referenced object is available for dlsym().
674 		 *  -	The referenced object is available to relocate against.
675 		 *  -	The referenced object should have it's dependencies
676 		 *	added to this handle.
677 		 */
678 		rdflags = (GPD_DLSYM | GPD_RELOC | GPD_ADDEPS);
679 
680 		/*
681 		 * Establish the flags for this callers dependency descriptor
682 		 * (Grp_desc).
683 		 *
684 		 *  -	The explicit creation of a handle creates a descriptor
685 		 *	for the referenced object and the parent (caller).
686 		 *  -	Use of the RTLD_PARENT flag indicates that the parent
687 		 *	can be relocated against.
688 		 */
689 		cdflags = GPD_PARENT;
690 		if (mode & RTLD_PARENT)
691 			cdflags |= GPD_RELOC;
692 
693 		if ((ghp = hdl_create(lml, 0, clmp, hflags, rdflags,
694 		    cdflags)) == NULL)
695 			return (NULL);
696 
697 		/*
698 		 * Traverse the main link-map control list, updating the mode
699 		 * of any objects as necessary.  Call the relocation engine if
700 		 * this mode promotes the existing state of any relocations.
701 		 * crle()'s first pass loads all objects necessary for building
702 		 * a configuration file, however none of them are relocated.
703 		 * crle()'s second pass relocates objects in preparation for
704 		 * dldump()'ing using dlopen(0, RTLD_NOW).
705 		 */
706 		if ((mode & (RTLD_NOW | RTLD_CONFGEN)) == RTLD_CONFGEN)
707 			return (ghp);
708 
709 		for (nlmp = lml->lm_head; nlmp; nlmp = NEXT_RT_MAP(nlmp)) {
710 			if (((MODE(nlmp) & RTLD_GLOBAL) == 0) ||
711 			    (FLAGS(nlmp) & FLG_RT_DELETE))
712 				continue;
713 
714 			if (update_mode(nlmp, MODE(nlmp), mode))
715 				promote = 1;
716 		}
717 		if (promote)
718 			(void) relocate_lmc(lml, ALIST_OFF_DATA, clmp,
719 			    lml->lm_head, in_nfavl);
720 
721 		return (ghp);
722 	}
723 
724 	/*
725 	 * Fix the pathname.  If this object expands to multiple paths (ie.
726 	 * $ISALIST or $HWCAP have been used), then make sure the user has also
727 	 * furnished the RTLD_FIRST flag.  As yet, we don't support opening
728 	 * more than one object at a time, so enforcing the RTLD_FIRST flag
729 	 * provides flexibility should we be able to support dlopening more
730 	 * than one object in the future.
731 	 */
732 	if (LM_FIX_NAME(clmp)(path, clmp, &palp, AL_CNT_NEEDED, orig) == NULL)
733 		return (NULL);
734 
735 	if ((palp->al_arritems > 1) && ((mode & RTLD_FIRST) == 0)) {
736 		remove_plist(&palp, 1);
737 		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_ARG_ILLMODE_5));
738 		return (NULL);
739 	}
740 
741 	/*
742 	 * Establish a link-map control list for this request, and load the
743 	 * associated object.
744 	 */
745 	if ((nlmco = create_cntl(lml, 1)) == NULL) {
746 		remove_plist(&palp, 1);
747 		return (NULL);
748 	}
749 	olmco = nlmco;
750 
751 	nlmp = load_one(lml, nlmco, palp, clmp, mode, (flags | FLG_RT_PUBHDL),
752 	    &ghp, in_nfavl);
753 
754 	/*
755 	 * Remove any expanded pathname infrastructure, and if the dependency
756 	 * couldn't be loaded, cleanup.
757 	 */
758 	remove_plist(&palp, 1);
759 	if (nlmp == NULL) {
760 		remove_cntl(lml, olmco);
761 		return (NULL);
762 	}
763 
764 	/*
765 	 * If loading an auditor was requested, and the auditor already existed,
766 	 * then the link-map returned will be to the original auditor.  The new
767 	 * link-map list that was initially created, and the associated link-map
768 	 * control list are no longer needed.  As the auditor is already loaded,
769 	 * we're probably done, but fall through in case additional relocations
770 	 * would be triggered by the mode of the caller.
771 	 */
772 	if ((flags & FLG_RT_AUDIT) && (LIST(nlmp) != lml)) {
773 		remove_cntl(lml, olmco);
774 		lml = LIST(nlmp);
775 		olmco = 0;
776 		nlmco = ALIST_OFF_DATA;
777 	}
778 
779 	/*
780 	 * Finish processing the objects associated with this request.
781 	 */
782 	if (((nlmp = analyze_lmc(lml, nlmco, nlmp, in_nfavl)) == NULL) ||
783 	    (relocate_lmc(lml, nlmco, clmp, nlmp, in_nfavl) == 0)) {
784 		ghp = NULL;
785 		nlmp = NULL;
786 	}
787 
788 	/*
789 	 * If the dlopen has failed, clean up any objects that might have been
790 	 * loaded successfully on this new link-map control list.
791 	 */
792 	if (olmco && (nlmp == NULL))
793 		remove_lmc(lml, clmp, olmco, path);
794 
795 	/*
796 	 * Finally, remove any temporary link-map control list.  Note, if this
797 	 * operation successfully established a new link-map list, then a base
798 	 * link-map control list will have been created, which must remain.
799 	 */
800 	if (olmco && ((nlmp == NULL) || (olml != (Lm_list *)LM_ID_NEWLM)))
801 		remove_cntl(lml, olmco);
802 
803 	return (ghp);
804 }
805 
806 /*
807  * dlopen() and dlsym() operations are the means by which a process can
808  * test for the existence of required dependencies.  If the necessary
809  * dependencies don't exist, then associated functionality can't be used.
810  * However, the lack of dependencies can be fixed, and the dlopen() and
811  * dlsym() requests can be repeated.  As we use a "not-found" AVL tree to
812  * cache any failed full path loads, secondary dlopen() and dlsym() requests
813  * will fail, even if the dependencies have been installed.
814  *
815  * dlopen() and dlsym() retry any failures by removing the "not-found" AVL
816  * tree.  Should any dependencies be found, their names are added to the
817  * FullPath AVL tree.  This routine removes any new "not-found" AVL tree,
818  * so that the dlopen() or dlsym() can replace the original "not-found" tree.
819  */
820 inline static void
821 nfavl_remove(avl_tree_t *avlt)
822 {
823 	PathNode	*pnp;
824 	void		*cookie = NULL;
825 
826 	if (avlt) {
827 		while ((pnp = avl_destroy_nodes(avlt, &cookie)) != NULL)
828 			free(pnp);
829 
830 		avl_destroy(avlt);
831 		free(avlt);
832 	}
833 }
834 
835 /*
836  * Internal dlopen() activity.  Called from user level or directly for internal
837  * opens that require a handle.
838  */
839 Grp_hdl *
840 dlmopen_intn(Lm_list *lml, const char *path, int mode, Rt_map *clmp,
841     uint_t flags, uint_t orig)
842 {
843 	Lm_list	*olml = lml;
844 	Rt_map	*dlmp = NULL;
845 	Grp_hdl	*ghp;
846 	int	in_nfavl = 0;
847 
848 	/*
849 	 * Check for magic link-map list values:
850 	 *
851 	 *  LM_ID_BASE:		Operate on the PRIMARY (executables) link map
852 	 *  LM_ID_LDSO:		Operation on ld.so.1's link map
853 	 *  LM_ID_NEWLM: 	Create a new link-map.
854 	 */
855 	if (lml == (Lm_list *)LM_ID_NEWLM) {
856 		if ((lml = calloc(sizeof (Lm_list), 1)) == NULL)
857 			return (NULL);
858 
859 		/*
860 		 * Establish the new link-map flags from the callers and those
861 		 * explicitly provided.
862 		 */
863 		lml->lm_tflags = LIST(clmp)->lm_tflags;
864 		if (flags & FLG_RT_AUDIT) {
865 			/*
866 			 * Unset any auditing flags - an auditor shouldn't be
867 			 * audited.  Insure all audit dependencies are loaded.
868 			 */
869 			lml->lm_tflags &= ~LML_TFLG_AUD_MASK;
870 			lml->lm_tflags |=
871 			    (LML_TFLG_NOLAZYLD | LML_TFLG_LOADFLTR);
872 			lml->lm_flags |= LML_FLG_NOAUDIT;
873 		}
874 
875 		if (aplist_append(&dynlm_list, lml, AL_CNT_DYNLIST) == NULL) {
876 			free(lml);
877 			return (NULL);
878 		}
879 		if (newlmid(lml) == 0) {
880 			(void) aplist_delete_value(dynlm_list, lml);
881 			free(lml);
882 			return (NULL);
883 		}
884 	} else if ((uintptr_t)lml < LM_ID_NUM) {
885 		if ((uintptr_t)lml == LM_ID_BASE)
886 			lml = &lml_main;
887 		else if ((uintptr_t)lml == LM_ID_LDSO)
888 			lml = &lml_rtld;
889 	}
890 
891 	/*
892 	 * Open the required object on the associated link-map list.
893 	 */
894 	ghp = dlmopen_core(lml, olml, path, mode, clmp, flags, orig, &in_nfavl);
895 
896 	/*
897 	 * If the object could not be found it is possible that the "not-found"
898 	 * AVL tree had indicated that the file does not exist.  In case the
899 	 * file system has changed since this "not-found" recording was made,
900 	 * retry the dlopen() with a clean "not-found" AVL tree.
901 	 */
902 	if ((ghp == NULL) && in_nfavl) {
903 		avl_tree_t	*oavlt = nfavl;
904 
905 		nfavl = NULL;
906 		ghp = dlmopen_core(lml, olml, path, mode, clmp, flags, orig,
907 		    NULL);
908 
909 		/*
910 		 * If the file is found, then its full path name will have been
911 		 * registered in the FullPath AVL tree.  Remove any new
912 		 * "not-found" AVL information, and restore the former AVL tree.
913 		 */
914 		nfavl_remove(nfavl);
915 		nfavl = oavlt;
916 	}
917 
918 	/*
919 	 * Establish the new link-map from which .init processing will begin.
920 	 * Ignore .init firing when constructing a configuration file (crle(1)).
921 	 */
922 	if (ghp && ((mode & RTLD_CONFGEN) == 0))
923 		dlmp = ghp->gh_ownlmp;
924 
925 	/*
926 	 * If loading an auditor was requested, and the auditor already existed,
927 	 * then the link-map returned will be to the original auditor.  Remove
928 	 * the link-map control list that was created for this request.
929 	 */
930 	if (dlmp && (flags & FLG_RT_AUDIT) && (LIST(dlmp) != lml)) {
931 		remove_lml(lml);
932 		lml = LIST(dlmp);
933 	}
934 
935 	/*
936 	 * If this load failed, remove any alternative link-map list.
937 	 */
938 	if ((ghp == NULL) &&
939 	    ((lml->lm_flags & (LML_FLG_BASELM | LML_FLG_RTLDLM)) == 0)) {
940 		remove_lml(lml);
941 		lml = NULL;
942 	}
943 
944 	/*
945 	 * Finish this load request.  If objects were loaded, .init processing
946 	 * is computed.  Finally, the debuggers are informed of the link-map
947 	 * lists being stable.
948 	 */
949 	load_completion(dlmp);
950 
951 	return (ghp);
952 }
953 
954 /*
955  * Argument checking for dlopen.  Only called via external entry.
956  */
957 static Grp_hdl *
958 dlmopen_check(Lm_list *lml, const char *path, int mode, Rt_map *clmp)
959 {
960 	/*
961 	 * Verify that a valid pathname has been supplied.
962 	 */
963 	if (path && (*path == '\0')) {
964 		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_ARG_ILLPATH));
965 		return (0);
966 	}
967 
968 	/*
969 	 * Historically we've always verified the mode is either RTLD_NOW or
970 	 * RTLD_LAZY.  RTLD_NOLOAD is valid by itself.  Use of LM_ID_NEWLM
971 	 * requires a specific pathname, and use of RTLD_PARENT is meaningless.
972 	 */
973 	if ((mode & (RTLD_NOW | RTLD_LAZY | RTLD_NOLOAD)) == 0) {
974 		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_ARG_ILLMODE_1));
975 		return (0);
976 	}
977 	if ((mode & (RTLD_NOW | RTLD_LAZY)) == (RTLD_NOW | RTLD_LAZY)) {
978 		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_ARG_ILLMODE_2));
979 		return (0);
980 	}
981 	if ((lml == (Lm_list *)LM_ID_NEWLM) && (path == NULL)) {
982 		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_ARG_ILLMODE_3));
983 		return (0);
984 	}
985 	if ((lml == (Lm_list *)LM_ID_NEWLM) && (mode & RTLD_PARENT)) {
986 		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_ARG_ILLMODE_4));
987 		return (0);
988 	}
989 
990 	return (dlmopen_intn(lml, path, mode, clmp, 0, 0));
991 }
992 
993 #pragma weak _dlopen = dlopen
994 
995 /*
996  * External entry for dlopen(3dl).  On success, returns a pointer (handle) to
997  * the structure containing information about the newly added object, ie. can
998  * be used by dlsym(). On failure, returns a null pointer.
999  */
1000 void *
1001 dlopen(const char *path, int mode)
1002 {
1003 	int	entry;
1004 	Rt_map	*clmp;
1005 	Grp_hdl	*ghp;
1006 	Lm_list	*lml;
1007 
1008 	entry = enter(0);
1009 
1010 	clmp = _caller(caller(), CL_EXECDEF);
1011 	lml = LIST(clmp);
1012 
1013 	ghp = dlmopen_check(lml, path, mode, clmp);
1014 
1015 	if (entry)
1016 		leave(lml, 0);
1017 	return ((void *)ghp);
1018 }
1019 
1020 #pragma weak _dlmopen = dlmopen
1021 
1022 /*
1023  * External entry for dlmopen(3dl).
1024  */
1025 void *
1026 dlmopen(Lmid_t lmid, const char *path, int mode)
1027 {
1028 	int	entry;
1029 	Rt_map	*clmp;
1030 	Grp_hdl	*ghp;
1031 
1032 	entry = enter(0);
1033 
1034 	clmp = _caller(caller(), CL_EXECDEF);
1035 
1036 	ghp = dlmopen_check((Lm_list *)lmid, path, mode, clmp);
1037 
1038 	if (entry)
1039 		leave(LIST(clmp), 0);
1040 	return ((void *)ghp);
1041 }
1042 
1043 /*
1044  * Handle processing for dlsym.
1045  */
1046 int
1047 dlsym_handle(Grp_hdl *ghp, Slookup *slp, Sresult *srp, uint_t *binfo,
1048     int *in_nfavl)
1049 {
1050 	Rt_map		*nlmp, * lmp = ghp->gh_ownlmp;
1051 	Rt_map		*clmp = slp->sl_cmap;
1052 	const char	*name = slp->sl_name;
1053 	Slookup		sl = *slp;
1054 
1055 	sl.sl_flags = (LKUP_FIRST | LKUP_DLSYM | LKUP_SPEC);
1056 
1057 	/*
1058 	 * Continue processing a dlsym request.  Lookup the required symbol in
1059 	 * each link-map specified by the handle.
1060 	 *
1061 	 * To leverage off of lazy loading, dlsym() requests can result in two
1062 	 * passes.  The first descends the link-maps of any objects already in
1063 	 * the address space.  If the symbol isn't located, and lazy
1064 	 * dependencies still exist, then a second pass is made to load these
1065 	 * dependencies if applicable.  This model means that in the case where
1066 	 * a symbol exists in more than one object, the one located may not be
1067 	 * constant - this is the standard issue with lazy loading. In addition,
1068 	 * attempting to locate a symbol that doesn't exist will result in the
1069 	 * loading of all lazy dependencies on the given handle, which can
1070 	 * defeat some of the advantages of lazy loading (look out JVM).
1071 	 */
1072 	if (ghp->gh_flags & GPH_ZERO) {
1073 		Lm_list	*lml;
1074 		uint_t	lazy = 0;
1075 
1076 		/*
1077 		 * If this symbol lookup is triggered from a dlopen(0) handle,
1078 		 * traverse the present link-map list looking for promiscuous
1079 		 * entries.
1080 		 */
1081 		for (nlmp = lmp; nlmp; nlmp = NEXT_RT_MAP(nlmp)) {
1082 			/*
1083 			 * If this handle indicates we're only to look in the
1084 			 * first object check whether we're done.
1085 			 */
1086 			if ((nlmp != lmp) && (ghp->gh_flags & GPH_FIRST))
1087 				return (0);
1088 
1089 			if (!(MODE(nlmp) & RTLD_GLOBAL))
1090 				continue;
1091 			if ((FLAGS(nlmp) & FLG_RT_DELETE) &&
1092 			    ((FLAGS(clmp) & FLG_RT_DELETE) == 0))
1093 				continue;
1094 
1095 			sl.sl_imap = nlmp;
1096 			if (LM_LOOKUP_SYM(clmp)(&sl, srp, binfo, in_nfavl))
1097 				return (1);
1098 
1099 			/*
1100 			 * Keep track of any global pending lazy loads.
1101 			 */
1102 			lazy += LAZY(nlmp);
1103 		}
1104 
1105 		/*
1106 		 * If we're unable to locate the symbol and this link-map list
1107 		 * still has pending lazy dependencies, start loading them in an
1108 		 * attempt to exhaust the search.  Note that as we're already
1109 		 * traversing a dynamic linked list of link-maps there's no
1110 		 * need for elf_lazy_find_sym() to descend the link-maps itself.
1111 		 */
1112 		lml = LIST(lmp);
1113 		if (lazy) {
1114 			DBG_CALL(Dbg_syms_lazy_rescan(lml, name));
1115 
1116 			sl.sl_flags |= LKUP_NODESCENT;
1117 
1118 			for (nlmp = lmp; nlmp; nlmp = NEXT_RT_MAP(nlmp)) {
1119 
1120 				if (!(MODE(nlmp) & RTLD_GLOBAL) || !LAZY(nlmp))
1121 					continue;
1122 				if ((FLAGS(nlmp) & FLG_RT_DELETE) &&
1123 				    ((FLAGS(clmp) & FLG_RT_DELETE) == 0))
1124 					continue;
1125 
1126 				sl.sl_imap = nlmp;
1127 				if (elf_lazy_find_sym(&sl, srp, binfo,
1128 				    in_nfavl))
1129 					return (1);
1130 			}
1131 		}
1132 	} else {
1133 		/*
1134 		 * Traverse the dlopen() handle searching all presently loaded
1135 		 * link-maps.
1136 		 */
1137 		Grp_desc	*gdp;
1138 		Aliste		idx;
1139 		uint_t		lazy = 0;
1140 
1141 		for (ALIST_TRAVERSE(ghp->gh_depends, idx, gdp)) {
1142 			nlmp = gdp->gd_depend;
1143 
1144 			if ((gdp->gd_flags & GPD_DLSYM) == 0)
1145 				continue;
1146 
1147 			sl.sl_imap = nlmp;
1148 			if (LM_LOOKUP_SYM(clmp)(&sl, srp, binfo, in_nfavl))
1149 				return (1);
1150 
1151 			if (ghp->gh_flags & GPH_FIRST)
1152 				return (0);
1153 
1154 			/*
1155 			 * Keep track of any pending lazy loads associated
1156 			 * with this handle.
1157 			 */
1158 			lazy += LAZY(nlmp);
1159 		}
1160 
1161 		/*
1162 		 * If we're unable to locate the symbol and this handle still
1163 		 * has pending lazy dependencies, start loading the lazy
1164 		 * dependencies, in an attempt to exhaust the search.
1165 		 */
1166 		if (lazy) {
1167 			DBG_CALL(Dbg_syms_lazy_rescan(LIST(lmp), name));
1168 
1169 			for (ALIST_TRAVERSE(ghp->gh_depends, idx, gdp)) {
1170 				nlmp = gdp->gd_depend;
1171 
1172 				if (((gdp->gd_flags & GPD_DLSYM) == 0) ||
1173 				    (LAZY(nlmp) == 0))
1174 					continue;
1175 
1176 				sl.sl_imap = nlmp;
1177 				if (elf_lazy_find_sym(&sl, srp, binfo,
1178 				    in_nfavl))
1179 					return (1);
1180 			}
1181 		}
1182 	}
1183 	return (0);
1184 }
1185 
1186 /*
1187  * Core dlsym activity.  Selects symbol lookup method from handle.
1188  */
1189 static void *
1190 dlsym_core(void *handle, const char *name, Rt_map *clmp, Rt_map **dlmp,
1191     int *in_nfavl)
1192 {
1193 	Sym		*sym = NULL;
1194 	int		ret = 0;
1195 	Syminfo		*sip;
1196 	Slookup		sl;
1197 	Sresult		sr;
1198 	uint_t		binfo;
1199 
1200 	/*
1201 	 * Initialize the symbol lookup data structure.
1202 	 *
1203 	 * Standard relocations are evaluated using the symbol index of the
1204 	 * associated relocation symbol.  This index provides for loading
1205 	 * any lazy dependency and establishing a direct binding if necessary.
1206 	 * If a dlsym() operation originates from an object that contains a
1207 	 * symbol table entry for the same name, then we need to establish the
1208 	 * symbol index so that any dependency requirements can be triggered.
1209 	 *
1210 	 * Therefore, the first symbol lookup that is carried out is for the
1211 	 * symbol name within the calling object.  If this symbol exists, the
1212 	 * symbols index is computed, added to the Slookup data, and thus used
1213 	 * to seed the real symbol lookup.
1214 	 */
1215 	SLOOKUP_INIT(sl, name, clmp, clmp, ld_entry_cnt, elf_hash(name),
1216 	    0, 0, 0, LKUP_SYMNDX);
1217 	SRESULT_INIT(sr, name);
1218 
1219 	if (THIS_IS_ELF(clmp) && SYMINTP(clmp)(&sl, &sr, &binfo, NULL)) {
1220 		sym = sr.sr_sym;
1221 
1222 		sl.sl_rsymndx = (((ulong_t)sym -
1223 		    (ulong_t)SYMTAB(clmp)) / SYMENT(clmp));
1224 		sl.sl_rsym = sym;
1225 	}
1226 
1227 	SRESULT_INIT(sr, name);
1228 
1229 	if (sym && (ELF_ST_VISIBILITY(sym->st_other) == STV_SINGLETON)) {
1230 		Rt_map	*hlmp = LIST(clmp)->lm_head;
1231 
1232 		/*
1233 		 * If a symbol reference is known, and that reference indicates
1234 		 * that the symbol is a singleton, then the search for the
1235 		 * symbol must follow the default search path.
1236 		 */
1237 		DBG_CALL(Dbg_dl_dlsym(clmp, name, in_nfavl, 0,
1238 		    DBG_DLSYM_SINGLETON));
1239 
1240 		sl.sl_imap = hlmp;
1241 		sl.sl_flags = LKUP_SPEC;
1242 		if (handle == RTLD_PROBE)
1243 			sl.sl_flags |= LKUP_NOFALLBACK;
1244 		ret = LM_LOOKUP_SYM(clmp)(&sl, &sr, &binfo, in_nfavl);
1245 
1246 	} else if (handle == RTLD_NEXT) {
1247 		Rt_map	*nlmp;
1248 
1249 		/*
1250 		 * If this handle is RTLD_NEXT determine whether a lazy load
1251 		 * from the caller might provide the next object.  This mimics
1252 		 * the lazy loading initialization normally carried out by
1253 		 * lookup_sym(), however here, we must do this up-front, as
1254 		 * lookup_sym() will be used to inspect the next object.
1255 		 */
1256 		if ((sl.sl_rsymndx) && ((sip = SYMINFO(clmp)) != NULL)) {
1257 			/* LINTED */
1258 			sip = (Syminfo *)((char *)sip +
1259 			    (sl.sl_rsymndx * SYMINENT(clmp)));
1260 
1261 			if ((sip->si_flags & SYMINFO_FLG_DIRECT) &&
1262 			    (sip->si_boundto < SYMINFO_BT_LOWRESERVE))
1263 				(void) elf_lazy_load(clmp, &sl,
1264 				    sip->si_boundto, name, 0, NULL, in_nfavl);
1265 
1266 			/*
1267 			 * Clear the symbol index, so as not to confuse
1268 			 * lookup_sym() of the next object.
1269 			 */
1270 			sl.sl_rsymndx = 0;
1271 			sl.sl_rsym = NULL;
1272 		}
1273 
1274 		/*
1275 		 * If the handle is RTLD_NEXT start searching in the next link
1276 		 * map from the callers.  Determine permissions from the
1277 		 * present link map.  Indicate to lookup_sym() that we're on an
1278 		 * RTLD_NEXT request so that it will use the callers link map to
1279 		 * start any possible lazy dependency loading.
1280 		 */
1281 		sl.sl_imap = nlmp = NEXT_RT_MAP(clmp);
1282 
1283 		DBG_CALL(Dbg_dl_dlsym(clmp, name, in_nfavl,
1284 		    (nlmp ? NAME(nlmp) : MSG_INTL(MSG_STR_NULL)),
1285 		    DBG_DLSYM_NEXT));
1286 
1287 		if (nlmp == NULL)
1288 			return (0);
1289 
1290 		sl.sl_flags = LKUP_NEXT;
1291 		ret = LM_LOOKUP_SYM(clmp)(&sl, &sr, &binfo, in_nfavl);
1292 
1293 	} else if (handle == RTLD_SELF) {
1294 		/*
1295 		 * If the handle is RTLD_SELF start searching from the caller.
1296 		 */
1297 		DBG_CALL(Dbg_dl_dlsym(clmp, name, in_nfavl, NAME(clmp),
1298 		    DBG_DLSYM_SELF));
1299 
1300 		sl.sl_imap = clmp;
1301 		sl.sl_flags = (LKUP_SPEC | LKUP_SELF);
1302 		ret = LM_LOOKUP_SYM(clmp)(&sl, &sr, &binfo, in_nfavl);
1303 
1304 	} else if (handle == RTLD_DEFAULT) {
1305 		Rt_map	*hlmp = LIST(clmp)->lm_head;
1306 
1307 		/*
1308 		 * If the handle is RTLD_DEFAULT mimic the standard symbol
1309 		 * lookup as would be triggered by a relocation.
1310 		 */
1311 		DBG_CALL(Dbg_dl_dlsym(clmp, name, in_nfavl, 0,
1312 		    DBG_DLSYM_DEFAULT));
1313 
1314 		sl.sl_imap = hlmp;
1315 		sl.sl_flags = LKUP_SPEC;
1316 		ret = LM_LOOKUP_SYM(clmp)(&sl, &sr, &binfo, in_nfavl);
1317 
1318 	} else if (handle == RTLD_PROBE) {
1319 		Rt_map	*hlmp = LIST(clmp)->lm_head;
1320 
1321 		/*
1322 		 * If the handle is RTLD_PROBE, mimic the standard symbol
1323 		 * lookup as would be triggered by a relocation, however do
1324 		 * not fall back to a lazy loading rescan if the symbol can't be
1325 		 * found within the currently loaded objects.  Note, a lazy
1326 		 * loaded dependency required by the caller might still get
1327 		 * loaded to satisfy this request, but no exhaustive lazy load
1328 		 * rescan is carried out.
1329 		 */
1330 		DBG_CALL(Dbg_dl_dlsym(clmp, name, in_nfavl, 0,
1331 		    DBG_DLSYM_PROBE));
1332 
1333 		sl.sl_imap = hlmp;
1334 		sl.sl_flags = (LKUP_SPEC | LKUP_NOFALLBACK);
1335 		ret = LM_LOOKUP_SYM(clmp)(&sl, &sr, &binfo, in_nfavl);
1336 
1337 	} else {
1338 		Grp_hdl *ghp = (Grp_hdl *)handle;
1339 
1340 		/*
1341 		 * Look in the shared object specified by the handle and in all
1342 		 * of its dependencies.
1343 		 */
1344 		DBG_CALL(Dbg_dl_dlsym(clmp, name, in_nfavl,
1345 		    NAME(ghp->gh_ownlmp), DBG_DLSYM_DEF));
1346 
1347 		ret = LM_DLSYM(clmp)(ghp, &sl, &sr, &binfo, in_nfavl);
1348 	}
1349 
1350 	if (ret && ((sym = sr.sr_sym) != NULL)) {
1351 		Lm_list	*lml = LIST(clmp);
1352 		Addr	addr = sym->st_value;
1353 
1354 		*dlmp = sr.sr_dmap;
1355 		if (!(FLAGS(*dlmp) & FLG_RT_FIXED))
1356 			addr += ADDR(*dlmp);
1357 
1358 		/*
1359 		 * Indicate that the defining object is now used.
1360 		 */
1361 		if (*dlmp != clmp)
1362 			FLAGS1(*dlmp) |= FL1_RT_USED;
1363 
1364 		DBG_CALL(Dbg_bind_global(clmp, 0, 0, (Xword)-1, PLT_T_NONE,
1365 		    *dlmp, addr, sym->st_value, sr.sr_name, binfo));
1366 
1367 		if ((lml->lm_tflags | AFLAGS(clmp)) & LML_TFLG_AUD_SYMBIND) {
1368 			uint_t	sb_flags = LA_SYMB_DLSYM;
1369 			/* LINTED */
1370 			uint_t	symndx = (uint_t)(((Xword)sym -
1371 			    (Xword)SYMTAB(*dlmp)) / SYMENT(*dlmp));
1372 			addr = audit_symbind(clmp, *dlmp, sym, symndx, addr,
1373 			    &sb_flags);
1374 		}
1375 		return ((void *)addr);
1376 	}
1377 
1378 	return (NULL);
1379 }
1380 
1381 /*
1382  * Internal dlsym activity.  Called from user level or directly for internal
1383  * symbol lookup.
1384  */
1385 void *
1386 dlsym_intn(void *handle, const char *name, Rt_map *clmp, Rt_map **dlmp)
1387 {
1388 	Rt_map		*llmp = NULL;
1389 	void		*error;
1390 	Aliste		idx;
1391 	Grp_desc	*gdp;
1392 	int		in_nfavl = 0;
1393 
1394 	/*
1395 	 * While looking for symbols it's quite possible that additional objects
1396 	 * get loaded from lazy loading.  These objects will have been added to
1397 	 * the same link-map list as those objects on the handle.  Remember this
1398 	 * list for later investigation.
1399 	 */
1400 	if ((handle == RTLD_NEXT) || (handle == RTLD_DEFAULT) ||
1401 	    (handle == RTLD_SELF) || (handle == RTLD_PROBE))
1402 		llmp = LIST(clmp)->lm_tail;
1403 	else {
1404 		Grp_hdl	*ghp = (Grp_hdl *)handle;
1405 
1406 		if (ghp->gh_ownlmp)
1407 			llmp = LIST(ghp->gh_ownlmp)->lm_tail;
1408 		else {
1409 			for (ALIST_TRAVERSE(ghp->gh_depends, idx, gdp)) {
1410 				if ((llmp =
1411 				    LIST(gdp->gd_depend)->lm_tail) != NULL)
1412 					break;
1413 			}
1414 		}
1415 	}
1416 
1417 	error = dlsym_core(handle, name, clmp, dlmp, &in_nfavl);
1418 
1419 	/*
1420 	 * If the symbol could not be found it is possible that the "not-found"
1421 	 * AVL tree had indicated that a required file does not exist.  In case
1422 	 * the file system has changed since this "not-found" recording was
1423 	 * made, retry the dlsym() with a clean "not-found" AVL tree.
1424 	 */
1425 	if ((error == NULL) && in_nfavl) {
1426 		avl_tree_t	*oavlt = nfavl;
1427 
1428 		nfavl = NULL;
1429 		error = dlsym_core(handle, name, clmp, dlmp, NULL);
1430 
1431 		/*
1432 		 * If the symbol is found, then any file that was loaded will
1433 		 * have had its full path name registered in the FullPath AVL
1434 		 * tree.  Remove any new "not-found" AVL information, and
1435 		 * restore the former AVL tree.
1436 		 */
1437 		nfavl_remove(nfavl);
1438 		nfavl = oavlt;
1439 	}
1440 
1441 	if (error == NULL) {
1442 		/*
1443 		 * Cache the error message, as Java tends to fall through this
1444 		 * code many times.
1445 		 */
1446 		if (nosym_str == NULL)
1447 			nosym_str = MSG_INTL(MSG_GEN_NOSYM);
1448 		eprintf(LIST(clmp), ERR_FATAL, nosym_str, name);
1449 	}
1450 
1451 	load_completion(llmp);
1452 	return (error);
1453 }
1454 
1455 /*
1456  * Argument checking for dlsym.  Only called via external entry.
1457  */
1458 static void *
1459 dlsym_check(void *handle, const char *name, Rt_map *clmp, Rt_map **dlmp)
1460 {
1461 	/*
1462 	 * Verify the arguments.
1463 	 */
1464 	if (name == NULL) {
1465 		eprintf(LIST(clmp), ERR_FATAL, MSG_INTL(MSG_ARG_ILLSYM));
1466 		return (NULL);
1467 	}
1468 	if ((handle != RTLD_NEXT) && (handle != RTLD_DEFAULT) &&
1469 	    (handle != RTLD_SELF) && (handle != RTLD_PROBE) &&
1470 	    (hdl_validate((Grp_hdl *)handle) == 0)) {
1471 		eprintf(LIST(clmp), ERR_FATAL, MSG_INTL(MSG_ARG_INVHNDL),
1472 		    EC_NATPTR(handle));
1473 		return (NULL);
1474 	}
1475 	return (dlsym_intn(handle, name, clmp, dlmp));
1476 }
1477 
1478 
1479 #pragma weak _dlsym = dlsym
1480 
1481 /*
1482  * External entry for dlsym().  On success, returns the address of the specified
1483  * symbol.  On error returns a null.
1484  */
1485 void *
1486 dlsym(void *handle, const char *name)
1487 {
1488 	int	entry;
1489 	Rt_map	*clmp, *dlmp = NULL;
1490 	void	*addr;
1491 
1492 	entry = enter(0);
1493 
1494 	clmp = _caller(caller(), CL_EXECDEF);
1495 
1496 	addr = dlsym_check(handle, name, clmp, &dlmp);
1497 
1498 	if (entry) {
1499 		if (dlmp)
1500 			is_dep_init(dlmp, clmp);
1501 		leave(LIST(clmp), 0);
1502 	}
1503 	return (addr);
1504 }
1505 
1506 /*
1507  * Core dladdr activity.
1508  */
1509 static void
1510 dladdr_core(Rt_map *almp, void *addr, Dl_info_t *dlip, void **info, int flags)
1511 {
1512 	/*
1513 	 * Set up generic information and any defaults.
1514 	 */
1515 	dlip->dli_fname = PATHNAME(almp);
1516 
1517 	dlip->dli_fbase = (void *)ADDR(almp);
1518 	dlip->dli_sname = NULL;
1519 	dlip->dli_saddr = NULL;
1520 
1521 	/*
1522 	 * Determine the nearest symbol to this address.
1523 	 */
1524 	LM_DLADDR(almp)((ulong_t)addr, almp, dlip, info, flags);
1525 }
1526 
1527 #pragma weak _dladdr = dladdr
1528 
1529 /*
1530  * External entry for dladdr(3dl) and dladdr1(3dl).  Returns an information
1531  * structure that reflects the symbol closest to the address specified.
1532  */
1533 int
1534 dladdr(void *addr, Dl_info_t *dlip)
1535 {
1536 	int	entry, error;
1537 	Rt_map	*clmp, *almp;
1538 
1539 	entry = enter(0);
1540 
1541 	clmp = _caller(caller(), CL_EXECDEF);
1542 
1543 	DBG_CALL(Dbg_dl_dladdr(clmp, addr));
1544 
1545 	/*
1546 	 * Use our calling technique to determine what object is associated
1547 	 * with the supplied address.  If a caller can't be determined,
1548 	 * indicate the failure.
1549 	 */
1550 	if ((almp = _caller(addr, CL_NONE)) == NULL) {
1551 		eprintf(LIST(clmp), ERR_FATAL, MSG_INTL(MSG_ARG_INVADDR),
1552 		    EC_NATPTR(addr));
1553 		error = 0;
1554 	} else {
1555 		dladdr_core(almp, addr, dlip, 0, 0);
1556 		error = 1;
1557 	}
1558 
1559 	if (entry)
1560 		leave(0, 0);
1561 	return (error);
1562 }
1563 
1564 #pragma weak _dladdr1 = dladdr1
1565 
1566 int
1567 dladdr1(void *addr, Dl_info_t *dlip, void **info, int flags)
1568 {
1569 	int	entry, ret = 1;
1570 	Rt_map	*clmp, *almp;
1571 	Lm_list	*clml;
1572 
1573 	entry = enter(0);
1574 
1575 	clmp = _caller(caller(), CL_EXECDEF);
1576 	clml = LIST(clmp);
1577 
1578 	DBG_CALL(Dbg_dl_dladdr(clmp, addr));
1579 
1580 	/*
1581 	 * Validate any flags.
1582 	 */
1583 	if (flags) {
1584 		int	request;
1585 
1586 		if (((request = (flags & RTLD_DL_MASK)) != RTLD_DL_SYMENT) &&
1587 		    (request != RTLD_DL_LINKMAP)) {
1588 			eprintf(clml, ERR_FATAL, MSG_INTL(MSG_ARG_ILLFLAGS),
1589 			    flags);
1590 			ret = 0;
1591 
1592 		} else if (info == NULL) {
1593 			eprintf(clml, ERR_FATAL, MSG_INTL(MSG_ARG_ILLINFO),
1594 			    flags);
1595 			ret = 0;
1596 		}
1597 	}
1598 
1599 	/*
1600 	 * Use our calling technique to determine what object is associated
1601 	 * with the supplied address.  If a caller can't be determined,
1602 	 * indicate the failure.
1603 	 */
1604 	if (ret) {
1605 		if ((almp = _caller(addr, CL_NONE)) == NULL) {
1606 			eprintf(clml, ERR_FATAL, MSG_INTL(MSG_ARG_INVADDR),
1607 			    EC_NATPTR(addr));
1608 			ret = 0;
1609 		} else
1610 			dladdr_core(almp, addr, dlip, info, flags);
1611 	}
1612 
1613 	if (entry)
1614 		leave(clml, 0);
1615 	return (ret);
1616 }
1617 
1618 /*
1619  * Core dldump activity.
1620  */
1621 static int
1622 dldump_core(Rt_map *clmp, Rt_map *lmp, const char *ipath, const char *opath,
1623     int flags)
1624 {
1625 	Lm_list	*lml = LIST(clmp);
1626 	Addr	addr = 0;
1627 
1628 	/*
1629 	 * Verify any arguments first.
1630 	 */
1631 	if ((opath == NULL) || (opath[0] == '\0') ||
1632 	    ((lmp == NULL) && (ipath[0] == '\0'))) {
1633 		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_ARG_ILLPATH));
1634 		return (1);
1635 	}
1636 
1637 	/*
1638 	 * If an input file is specified make sure its one of our dependencies
1639 	 * on the main link-map list.  Note, this has really all evolved for
1640 	 * crle(), which uses libcrle.so on an alternative link-map to trigger
1641 	 * dumping objects from the main link-map list.   If we ever want to
1642 	 * dump objects from alternative link-maps, this model is going to
1643 	 * have to be revisited.
1644 	 */
1645 	if (lmp == NULL) {
1646 		if ((lmp = is_so_loaded(&lml_main, ipath, NULL)) == NULL) {
1647 			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_NOFILE),
1648 			    ipath);
1649 			return (1);
1650 		}
1651 		if (FLAGS(lmp) & FLG_RT_ALTER) {
1652 			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_ALTER), ipath);
1653 			return (1);
1654 		}
1655 		if (FLAGS(lmp) & FLG_RT_NODUMP) {
1656 			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_GEN_NODUMP),
1657 			    ipath);
1658 			return (1);
1659 		}
1660 	}
1661 
1662 	/*
1663 	 * If the object being dump'ed isn't fixed identify its mapping.
1664 	 */
1665 	if (!(FLAGS(lmp) & FLG_RT_FIXED))
1666 		addr = ADDR(lmp);
1667 
1668 	/*
1669 	 * As rt_dldump() will effectively lazy load the necessary support
1670 	 * libraries, make sure ld.so.1 is initialized for plt relocations.
1671 	 */
1672 	if (elf_rtld_load() == 0)
1673 		return (0);
1674 
1675 	/*
1676 	 * Dump the required image.
1677 	 */
1678 	return (rt_dldump(lmp, opath, flags, addr));
1679 }
1680 
1681 #pragma weak _dldump = dldump
1682 
1683 /*
1684  * External entry for dldump(3c).  Returns 0 on success, non-zero otherwise.
1685  */
1686 int
1687 dldump(const char *ipath, const char *opath, int flags)
1688 {
1689 	int	error, entry;
1690 	Rt_map	*clmp, *lmp;
1691 
1692 	entry = enter(0);
1693 
1694 	clmp = _caller(caller(), CL_EXECDEF);
1695 
1696 	if (ipath) {
1697 		lmp = NULL;
1698 	} else {
1699 		lmp = lml_main.lm_head;
1700 		ipath = NAME(lmp);
1701 	}
1702 
1703 	DBG_CALL(Dbg_dl_dldump(clmp, ipath, opath, flags));
1704 
1705 	error = dldump_core(clmp, lmp, ipath, opath, flags);
1706 
1707 	if (entry)
1708 		leave(LIST(clmp), 0);
1709 	return (error);
1710 }
1711 
1712 /*
1713  * get_linkmap_id() translates Lm_list * pointers to the Link_map id as used by
1714  * the rtld_db and dlmopen() interfaces.  It checks to see if the Link_map is
1715  * one of the primary ones and if so returns it's special token:
1716  *		LM_ID_BASE
1717  *		LM_ID_LDSO
1718  *
1719  * If it's not one of the primary link_map id's it will instead returns a
1720  * pointer to the Lm_list structure which uniquely identifies the Link_map.
1721  */
1722 Lmid_t
1723 get_linkmap_id(Lm_list *lml)
1724 {
1725 	if (lml->lm_flags & LML_FLG_BASELM)
1726 		return (LM_ID_BASE);
1727 	if (lml->lm_flags & LML_FLG_RTLDLM)
1728 		return (LM_ID_LDSO);
1729 
1730 	return ((Lmid_t)lml);
1731 }
1732 
1733 /*
1734  * Extract information for a dlopen() handle.
1735  */
1736 static int
1737 dlinfo_core(void *handle, int request, void *p, Rt_map *clmp)
1738 {
1739 	Conv_inv_buf_t	inv_buf;
1740 	char		*handlename;
1741 	Lm_list		*lml = LIST(clmp);
1742 	Rt_map		*lmp = NULL;
1743 
1744 	/*
1745 	 * Determine whether a handle is provided.  A handle isn't needed for
1746 	 * all operations, but it is validated here for the initial diagnostic.
1747 	 */
1748 	if (handle == RTLD_SELF) {
1749 		lmp = clmp;
1750 	} else {
1751 		Grp_hdl	*ghp = (Grp_hdl *)handle;
1752 
1753 		if (hdl_validate(ghp))
1754 			lmp = ghp->gh_ownlmp;
1755 	}
1756 	if (lmp) {
1757 		handlename = NAME(lmp);
1758 	} else {
1759 		(void) conv_invalid_val(&inv_buf, EC_NATPTR(handle), 0);
1760 		handlename = inv_buf.buf;
1761 	}
1762 
1763 	DBG_CALL(Dbg_dl_dlinfo(clmp, handlename, request, p));
1764 
1765 	/*
1766 	 * Validate the request and return buffer.
1767 	 */
1768 	if ((request > RTLD_DI_MAX) || (p == NULL)) {
1769 		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_ARG_ILLVAL));
1770 		return (-1);
1771 	}
1772 
1773 	/*
1774 	 * Return configuration cache name and address.
1775 	 */
1776 	if (request == RTLD_DI_CONFIGADDR) {
1777 		Dl_info_t	*dlip = (Dl_info_t *)p;
1778 
1779 		if ((config->c_name == NULL) || (config->c_bgn == 0) ||
1780 		    (config->c_end == 0)) {
1781 			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_ARG_NOCONFIG));
1782 			return (-1);
1783 		}
1784 		dlip->dli_fname = config->c_name;
1785 		dlip->dli_fbase = (void *)config->c_bgn;
1786 		return (0);
1787 	}
1788 
1789 	/*
1790 	 * Return profiled object name (used by ldprof audit library).
1791 	 */
1792 	if (request == RTLD_DI_PROFILENAME) {
1793 		if (profile_name == NULL) {
1794 			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_ARG_NOPROFNAME));
1795 			return (-1);
1796 		}
1797 
1798 		*(const char **)p = profile_name;
1799 		return (0);
1800 	}
1801 	if (request == RTLD_DI_PROFILEOUT) {
1802 		/*
1803 		 * If a profile destination directory hasn't been specified
1804 		 * provide a default.
1805 		 */
1806 		if (profile_out == NULL)
1807 			profile_out = MSG_ORIG(MSG_PTH_VARTMP);
1808 
1809 		*(const char **)p = profile_out;
1810 		return (0);
1811 	}
1812 
1813 	/*
1814 	 * Obtain or establish a termination signal.
1815 	 */
1816 	if (request == RTLD_DI_GETSIGNAL) {
1817 		*(int *)p = killsig;
1818 		return (0);
1819 	}
1820 
1821 	if (request == RTLD_DI_SETSIGNAL) {
1822 		sigset_t	set;
1823 		int		sig = *(int *)p;
1824 
1825 		/*
1826 		 * Determine whether the signal is in range.
1827 		 */
1828 		(void) sigfillset(&set);
1829 		if (sigismember(&set, sig) != 1) {
1830 			eprintf(lml, ERR_FATAL, MSG_INTL(MSG_ARG_INVSIG), sig);
1831 			return (-1);
1832 		}
1833 
1834 		killsig = sig;
1835 		return (0);
1836 	}
1837 
1838 	/*
1839 	 * For any other request a link-map is required.  Verify the handle.
1840 	 */
1841 	if (lmp == NULL) {
1842 		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_ARG_INVHNDL),
1843 		    EC_NATPTR(handle));
1844 		return (-1);
1845 	}
1846 
1847 	/*
1848 	 * Obtain the process arguments, environment and auxv.  Note, as the
1849 	 * environment can be modified by the user (putenv(3c)), reinitialize
1850 	 * the environment pointer on each request.
1851 	 */
1852 	if (request == RTLD_DI_ARGSINFO) {
1853 		Dl_argsinfo_t	*aip = (Dl_argsinfo_t *)p;
1854 		Lm_list		*lml = LIST(lmp);
1855 
1856 		*aip = argsinfo;
1857 		if (lml->lm_flags & LML_FLG_ENVIRON)
1858 			aip->dla_envp = *(lml->lm_environ);
1859 
1860 		return (0);
1861 	}
1862 
1863 	/*
1864 	 * Return Lmid_t of the Link-Map list that the specified object is
1865 	 * loaded on.
1866 	 */
1867 	if (request == RTLD_DI_LMID) {
1868 		*(Lmid_t *)p = get_linkmap_id(LIST(lmp));
1869 		return (0);
1870 	}
1871 
1872 	/*
1873 	 * Return a pointer to the Link-Map structure associated with the
1874 	 * specified object.
1875 	 */
1876 	if (request == RTLD_DI_LINKMAP) {
1877 		*(Link_map **)p = (Link_map *)lmp;
1878 		return (0);
1879 	}
1880 
1881 	/*
1882 	 * Return search path information, or the size of the buffer required
1883 	 * to store the information.
1884 	 */
1885 	if ((request == RTLD_DI_SERINFO) || (request == RTLD_DI_SERINFOSIZE)) {
1886 		Spath_desc	sd = { search_rules, NULL, 0 };
1887 		Pdesc		*pdp;
1888 		Dl_serinfo_t	*info;
1889 		Dl_serpath_t	*path;
1890 		char		*strs;
1891 		size_t		size = sizeof (Dl_serinfo_t);
1892 		uint_t		cnt = 0;
1893 
1894 		info = (Dl_serinfo_t *)p;
1895 		path = &info->dls_serpath[0];
1896 		strs = (char *)&info->dls_serpath[info->dls_cnt];
1897 
1898 		/*
1899 		 * Traverse search path entries for this object.
1900 		 */
1901 		while ((pdp = get_next_dir(&sd, lmp, 0)) != NULL) {
1902 			size_t	_size;
1903 
1904 			if (pdp->pd_pname == NULL)
1905 				continue;
1906 
1907 			/*
1908 			 * If configuration information exists, it's possible
1909 			 * this path has been identified as non-existent, if so
1910 			 * ignore it.
1911 			 */
1912 			if (pdp->pd_info) {
1913 				Rtc_obj	*dobj = (Rtc_obj *)pdp->pd_info;
1914 				if (dobj->co_flags & RTC_OBJ_NOEXIST)
1915 					continue;
1916 			}
1917 
1918 			/*
1919 			 * Keep track of search path count and total info size.
1920 			 */
1921 			if (cnt++)
1922 				size += sizeof (Dl_serpath_t);
1923 			_size = pdp->pd_plen + 1;
1924 			size += _size;
1925 
1926 			if (request == RTLD_DI_SERINFOSIZE)
1927 				continue;
1928 
1929 			/*
1930 			 * If we're filling in search path information, confirm
1931 			 * there's sufficient space.
1932 			 */
1933 			if (size > info->dls_size) {
1934 				eprintf(lml, ERR_FATAL,
1935 				    MSG_INTL(MSG_ARG_SERSIZE),
1936 				    EC_OFF(info->dls_size));
1937 				return (-1);
1938 			}
1939 			if (cnt > info->dls_cnt) {
1940 				eprintf(lml, ERR_FATAL,
1941 				    MSG_INTL(MSG_ARG_SERCNT), info->dls_cnt);
1942 				return (-1);
1943 			}
1944 
1945 			/*
1946 			 * Append the path to the information buffer.
1947 			 */
1948 			(void) strcpy(strs, pdp->pd_pname);
1949 			path->dls_name = strs;
1950 			path->dls_flags = pdp->pd_flags;
1951 
1952 			strs = strs + _size;
1953 			path++;
1954 		}
1955 
1956 		/*
1957 		 * If we're here to size the search buffer fill it in.
1958 		 */
1959 		if (request == RTLD_DI_SERINFOSIZE) {
1960 			info->dls_size = size;
1961 			info->dls_cnt = cnt;
1962 		}
1963 
1964 		return (0);
1965 	}
1966 
1967 	/*
1968 	 * Return the origin of the object associated with this link-map.
1969 	 * Basically return the dirname(1) of the objects fullpath.
1970 	 */
1971 	if (request == RTLD_DI_ORIGIN) {
1972 		char	*str = (char *)p;
1973 
1974 		(void) strncpy(str, ORIGNAME(lmp), DIRSZ(lmp));
1975 		str += DIRSZ(lmp);
1976 		*str = '\0';
1977 
1978 		return (0);
1979 	}
1980 
1981 	/*
1982 	 * Return the number of object mappings, or the mapping information for
1983 	 * this object.
1984 	 */
1985 	if (request == RTLD_DI_MMAPCNT) {
1986 		uint_t	*cnt = (uint_t *)p;
1987 
1988 		*cnt = MMAPCNT(lmp);
1989 		return (0);
1990 	}
1991 	if (request == RTLD_DI_MMAPS) {
1992 		Dl_mapinfo_t	*mip = (Dl_mapinfo_t *)p;
1993 
1994 		if (mip->dlm_acnt && mip->dlm_maps) {
1995 			uint_t	cnt = 0;
1996 
1997 			while ((cnt < mip->dlm_acnt) && (cnt < MMAPCNT(lmp))) {
1998 				mip->dlm_maps[cnt] = MMAPS(lmp)[cnt];
1999 				cnt++;
2000 			}
2001 			mip->dlm_rcnt = cnt;
2002 		}
2003 		return (0);
2004 	}
2005 
2006 	return (0);
2007 }
2008 
2009 #pragma weak _dlinfo = dlinfo
2010 
2011 /*
2012  * External entry for dlinfo(3dl).
2013  */
2014 int
2015 dlinfo(void *handle, int request, void *p)
2016 {
2017 	int	error, entry;
2018 	Rt_map	*clmp;
2019 
2020 	entry = enter(0);
2021 
2022 	clmp = _caller(caller(), CL_EXECDEF);
2023 
2024 	error = dlinfo_core(handle, request, p, clmp);
2025 
2026 	if (entry)
2027 		leave(LIST(clmp), 0);
2028 	return (error);
2029 }
2030 
2031 
2032 /*
2033  * GNU defined function to iterate through the program headers for all
2034  * currently loaded dynamic objects. The caller supplies a callback function
2035  * which is called for each object.
2036  *
2037  * entry:
2038  *	callback - Callback function to call. The arguments to the callback
2039  *		function are:
2040  *		info - Address of dl_phdr_info structure
2041  *		size - sizeof (struct dl_phdr_info)
2042  *		data - Caller supplied value.
2043  *	data - Value supplied by caller, which is passed to callback without
2044  *		examination.
2045  *
2046  * exit:
2047  *	callback is called for each dynamic ELF object in the process address
2048  *	space, halting when a non-zero value is returned, or when the last
2049  *	object has been processed. The return value from the last call
2050  *	to callback is returned.
2051  *
2052  * note:
2053  *	The Linux implementation has added additional fields to the
2054  *	dl_phdr_info structure over time. The callback function is
2055  *	supposed to use the size field to determine which fields are
2056  *	present, and to avoid attempts to access non-existent fields.
2057  *	We have added those fields that are compatible with Solaris, and
2058  *	which are used by GNU C++ (g++) runtime exception handling support.
2059  *
2060  * note:
2061  *	We issue a callback for every ELF object mapped into the process
2062  *	address space at the time this routine is entered. These callbacks
2063  *	are arbitrary functions that can do anything, including possibly
2064  *	causing new objects to be mapped into the process, or unmapped.
2065  *	This complicates matters:
2066  *
2067  *	-	Adding new objects can cause the alists to be reallocated
2068  *		or for contents to move. This can happen explicitly via
2069  *		dlopen(), or implicitly via lazy loading. One might consider
2070  *		simply banning dlopen from a callback, but lazy loading must
2071  *		be allowed, in which case there's no reason to ban dlopen().
2072  *
2073  *	-	Removing objects can leave us holding references to freed
2074  *		memory that must not be accessed, and can cause the list
2075  *		items to move in a way that would cause us to miss reporting
2076  *		one, or double report others.
2077  *
2078  *	-	We cannot allocate memory to build a separate data structure,
2079  *		because the interface to dl_iterate_phdr() does not have a
2080  *		way to communicate allocation errors back to the caller.
2081  *		Even if we could, it would be difficult to do so efficiently.
2082  *
2083  *	-	It is possible for dl_iterate_phdr() to be called recursively
2084  *		from a callback, and there is no way for us to detect or manage
2085  *		this effectively, particularly as the user might use longjmp()
2086  *		to skip past us on return. Hence, we must be reentrant
2087  *		(stateless), further precluding the option of building a
2088  *		separate data structure.
2089  *
2090  *	Despite these constraints, we are able to traverse the link-map
2091  *	lists safely:
2092  *
2093  *	-	Once interposer (preload) objects have been processed at
2094  *		startup, we know that new objects are always placed at the
2095  *		end of the list. Hence, if we are reading a list when that
2096  *		happens, the new object will not alter the part of the list
2097  *		that we've already processed.
2098  *
2099  *	-	The alist _TRAVERSE macros recalculate the address of the
2100  *		current item from scratch on each iteration, rather than
2101  *		incrementing a pointer. Hence, alist additions that occur
2102  *		in mid-traverse will not cause confusion.
2103  *
2104  * 	There is one limitation: We cannot continue operation if an object
2105  *	is removed from the process from within a callback. We detect when
2106  *	this happens and return immediately with a -1 return value.
2107  *
2108  * note:
2109  *	As currently implemented, if a callback causes an object to be loaded,
2110  *	that object may or may not be reported by the current invocation of
2111  *	dl_iterate_phdr(), based on whether or not we have already processed
2112  *	the link-map list that receives it. If we want to prevent this, it
2113  *	can be done efficiently by associating the current value of cnt_map
2114  *	with each new Rt_map entered into the system. Then this function can
2115  *	use that to detect and skip new objects that enter the system in
2116  *	mid-iteration. However, the Linux documentation is ambiguous on whether
2117  *	this is necessary, and it does not appear to matter in practice.
2118  *	We have therefore chosen not to do so at this time.
2119  */
2120 int
2121 dl_iterate_phdr(int (*callback)(struct dl_phdr_info *, size_t, void *),
2122     void *data)
2123 {
2124 	struct dl_phdr_info	info;
2125 	u_longlong_t		l_cnt_map = cnt_map;
2126 	u_longlong_t		l_cnt_unmap = cnt_unmap;
2127 	Lm_list			*lml, *clml;
2128 	Lm_cntl			*lmc;
2129 	Rt_map			*lmp, *clmp;
2130 	Aliste			idx1, idx2;
2131 	Ehdr			*ehdr;
2132 	int			ret = 0;
2133 	int			entry;
2134 
2135 	entry = enter(0);
2136 	clmp = _caller(caller(), CL_EXECDEF);
2137 	clml = LIST(clmp);
2138 
2139 	DBG_CALL(Dbg_dl_iphdr_enter(clmp, cnt_map, cnt_unmap));
2140 
2141 	/* Issue a callback for each ELF object in the process */
2142 	for (APLIST_TRAVERSE(dynlm_list, idx1, lml)) {
2143 		for (ALIST_TRAVERSE(lml->lm_lists, idx2, lmc)) {
2144 			for (lmp = lmc->lc_head; lmp; lmp = NEXT_RT_MAP(lmp)) {
2145 #if defined(_sparc) && !defined(_LP64)
2146 				/*
2147 				 * On 32-bit sparc, the possibility exists that
2148 				 * this object is not ELF.
2149 				 */
2150 				if (THIS_IS_NOT_ELF(lmp))
2151 					continue;
2152 #endif
2153 				/* Prepare the object information structure */
2154 				ehdr = (Ehdr *) ADDR(lmp);
2155 				info.dlpi_addr = (ehdr->e_type == ET_EXEC) ?
2156 				    0 : ADDR(lmp);
2157 				info.dlpi_name = lmp->rt_pathname;
2158 				info.dlpi_phdr = (Phdr *)
2159 				    (ADDR(lmp) + ehdr->e_phoff);
2160 				info.dlpi_phnum = ehdr->e_phnum;
2161 				info.dlpi_adds = cnt_map;
2162 				info.dlpi_subs = cnt_unmap;
2163 
2164 				/* Issue the callback */
2165 				DBG_CALL(Dbg_dl_iphdr_callback(clml, &info));
2166 				leave(clml, thr_flg_reenter);
2167 				ret = (* callback)(&info, sizeof (info), data);
2168 				(void) enter(thr_flg_reenter);
2169 
2170 				/* Return immediately on non-zero result */
2171 				if (ret != 0)
2172 					goto done;
2173 
2174 				/* Adapt to object mapping changes */
2175 				if ((cnt_map == l_cnt_map) &&
2176 				    (cnt_unmap == l_cnt_unmap))
2177 					continue;
2178 
2179 				DBG_CALL(Dbg_dl_iphdr_mapchange(clml, cnt_map,
2180 				    cnt_unmap));
2181 
2182 				/* Stop if an object was unmapped */
2183 				if (cnt_unmap == l_cnt_unmap) {
2184 					l_cnt_map = cnt_map;
2185 					continue;
2186 				}
2187 
2188 				ret = -1;
2189 				DBG_CALL(Dbg_dl_iphdr_unmap_ret(clml));
2190 				goto done;
2191 			}
2192 		}
2193 	}
2194 
2195 done:
2196 	if (entry)
2197 		leave(LIST(clmp), 0);
2198 	return (ret);
2199 }
2200