1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * Utilities to handle shared object dependency graph. 29 * 30 * The algorithms used in this file are taken from the following book: 31 * Algorithms in C 32 * Robert Sedgewick 33 * Addison-Wesley Publishing company 34 * ISBN 0-201-51425-7 35 * From the following chapters: 36 * Chapter 29 Elementary Graph Algorithms 37 * Chapter 32 Directed Graph 38 */ 39 40 #include <sys/types.h> 41 #include <stdarg.h> 42 #include <stdio.h> 43 #include <dlfcn.h> 44 #include <signal.h> 45 #include <locale.h> 46 #include <string.h> 47 #include <libintl.h> 48 #include <debug.h> 49 #include "_rtld.h" 50 #include "msg.h" 51 52 /* 53 * Structure for maintaining sorting state. 
54 */ 55 typedef struct { 56 Rt_map **s_lmpa; /* link-map[] (returned to caller) */ 57 Rt_map *s_lmp; /* originating link-map */ 58 Rt_map **s_stack; /* strongly connected component stack */ 59 APlist *s_scc; /* cyclic list */ 60 APlist *s_queue; /* depth queue for cyclic components */ 61 int s_sndx; /* present stack index */ 62 int s_lndx; /* present link-map index */ 63 int s_num; /* number of objects to sort */ 64 int s_initfirst; /* no. of INITFIRST entries */ 65 } Sort; 66 67 #define AL_CNT_SCC 10 68 69 /* 70 * qsort(3c) comparison function. 71 */ 72 static int 73 compare(const void *lmpp1, const void *lmpp2) 74 { 75 Rt_map *lmp1 = *((Rt_map **)lmpp1); 76 Rt_map *lmp2 = *((Rt_map **)lmpp2); 77 78 if (IDX(lmp1) > IDX(lmp2)) 79 return (-1); 80 if (IDX(lmp1) < IDX(lmp2)) 81 return (1); 82 return (0); 83 } 84 85 /* 86 * This routine is called when a cyclic dependency is detected between strongly 87 * connected components. The nodes within the cycle are reverse breadth-first 88 * sorted. 89 */ 90 static int 91 sort_scc(Sort *sort, int fndx, int flag) 92 { 93 static const char *tfmt = 0, *ffmt; 94 static int cnt = 1; 95 int ndx; 96 Rt_map *lmp; 97 Lm_list *lml = LIST(sort->s_lmp); 98 Word lmflags = lml->lm_flags; 99 Word init, unref; 100 101 /* 102 * If this is the first cyclic dependency traverse the new objects that 103 * have been added to the link-map list and for each object establish 104 * a unique depth index. We build this dynamically as we have no idea 105 * of the number of objects that will be inspected (logic matches that 106 * used by dlsym() to traverse lazy dependencies). 
107 */ 108 if (sort->s_queue == NULL) { 109 Aliste idx1; 110 Rt_map *lmp, *lmp2; 111 112 lmp = sort->s_lmp; 113 ndx = 1; 114 115 if (aplist_append(&sort->s_queue, lmp, sort->s_num) == NULL) 116 return (0); 117 118 IDX(lmp) = ndx++; 119 120 for (APLIST_TRAVERSE(sort->s_queue, idx1, lmp2)) { 121 Bnd_desc *bdp; 122 Aliste idx2; 123 124 for (APLIST_TRAVERSE(DEPENDS(lmp2), idx2, bdp)) { 125 Rt_map *lmp3 = bdp->b_depend; 126 127 if (IDX(lmp3) || (LIST(lmp3) != lml)) 128 continue; 129 130 /* 131 * If we're .init processing and this depend- 132 * encies .init has been called, skip it. 133 */ 134 if ((flag & RT_SORT_REV) && 135 (FLAGS(lmp3) & FLG_RT_INITCALL)) 136 continue; 137 138 if (aplist_append(&sort->s_queue, lmp3, 139 sort->s_num) == NULL) 140 return (0); 141 142 IDX(lmp3) = ndx++; 143 } 144 } 145 } 146 147 /* 148 * Sort the cyclics. 149 */ 150 qsort(&(sort->s_lmpa[fndx]), sort->s_lndx - fndx, sizeof (Rt_map *), 151 compare); 152 153 /* 154 * Under ldd -i, or debugging, print this cycle. Under ldd -i/-U assign 155 * each object a group identifier so that cyclic dependent callers can 156 * be better traced (see trace_sort()), or analyzed for non-use. 157 */ 158 if (((init = (lmflags & LML_FLG_TRC_INIT)) == 0) && 159 ((unref = (lmflags & LML_FLG_TRC_UNREF)) == 0) && 160 (DBG_ENABLED == 0)) 161 return (1); 162 163 if (init) { 164 if (tfmt == 0) { 165 tfmt = MSG_INTL(MSG_LDD_INIT_FMT_01); 166 ffmt = MSG_ORIG(MSG_LDD_INIT_FMT_FILE); 167 } 168 (void) printf(tfmt, cnt); 169 } 170 DBG_CALL(Dbg_util_scc_title(lml, (flag & RT_SORT_REV))); 171 172 /* 173 * Identify this cyclic group, and under ldd -i print the cycle in the 174 * order its components will be run. 
175 */ 176 if (flag & RT_SORT_REV) { 177 for (ndx = fndx; ndx < sort->s_lndx; ndx++) { 178 lmp = sort->s_lmpa[ndx]; 179 CYCGROUP(lmp) = cnt; 180 181 if (init) 182 (void) printf(ffmt, NAME(lmp)); 183 DBG_CALL(Dbg_util_scc_entry(lmp, ndx)); 184 } 185 cnt++; 186 187 } else if (DBG_ENABLED) { 188 for (ndx = sort->s_lndx - 1; ndx >= fndx; ndx--) { 189 lmp = sort->s_lmpa[ndx]; 190 DBG_CALL(Dbg_util_scc_entry(lmp, ndx)); 191 } 192 } 193 194 /* 195 * If we're looking for unused dependencies determine if any of these 196 * cyclic components are referenced from outside of the cycle. 197 */ 198 if (unref || DBG_ENABLED) { 199 for (ndx = fndx; ndx < sort->s_lndx; ndx++) { 200 Bnd_desc *bdp; 201 Aliste idx; 202 203 lmp = sort->s_lmpa[ndx]; 204 205 /* 206 * If this object has a handle then it can be in use by 207 * anyone. 208 */ 209 if (HANDLES(lmp)) 210 return (1); 211 212 /* 213 * Traverse this objects callers looking for outside 214 * references. 215 */ 216 for (APLIST_TRAVERSE(CALLERS(lmp), idx, bdp)) { 217 Rt_map *clmp = bdp->b_caller; 218 219 if ((bdp->b_flags & BND_REFER) == 0) 220 continue; 221 222 if (CYCGROUP(lmp) != CYCGROUP(clmp)) 223 return (1); 224 } 225 } 226 227 /* 228 * If we're here then none of the cyclic dependents have been 229 * referenced from outside of the cycle, mark them as unused. 230 */ 231 for (ndx = fndx; ndx < sort->s_lndx; ndx++) { 232 lmp = sort->s_lmpa[ndx]; 233 FLAGS1(lmp) &= ~FL1_RT_USED; 234 } 235 } 236 return (1); 237 } 238 239 /* 240 * Take elements off of the stack and move them to the link-map array. Typically 241 * this routine just pops one strongly connected component (individual link-map) 242 * at a time. When a cyclic dependency has been detected the stack will contain 243 * more than just the present object to process, and will trigger the later call 244 * to sort_scc() to sort these elements. 
 */
static int
visit(Lm_list *lml, Rt_map *lmp, Sort *sort, int flag)
{
	APlist		*alp = NULL;
	int		num = sort->s_lndx;
	Word		tracing = lml->lm_flags & LML_FLG_TRC_ENABLE;
	Rt_map		*tlmp;

	/*
	 * Pop stack entries down to, and including, the originating object
	 * lmp.  Everything popped belongs to the same strongly connected
	 * component and is appended to the output link-map array.
	 */
	do {
		tlmp = sort->s_stack[--(sort->s_sndx)];
		/*
		 * Mark the object as collected by giving it a SORTVAL beyond
		 * any stack index (s_num), so later dep_visit() passes treat
		 * it as already processed.
		 */
		SORTVAL(tlmp) = sort->s_num;
		DBG_CALL(Dbg_util_collect(tlmp, sort->s_lndx, flag));
		sort->s_lmpa[(sort->s_lndx)++] = tlmp;

		if (flag & RT_SORT_REV) {
			/*
			 * Indicate the object has had its .init collected.
			 * Note, that regardless of the object having a .init
			 * the object is added to the tsort list, as it's from
			 * this list that any post-init flags are established.
			 */
			FLAGS(tlmp) |= FLG_RT_INITCLCT;
			lml->lm_init--;
		} else {
			/*
			 * Indicate the object has had its .fini collected.
			 * Note, that regardless of the object having a .fini,
			 * the object is added to the tsort list, as it's from
			 * this list that any audit_objclose() activity is
			 * triggered.
			 */
			FLAGS(tlmp) |= FLG_RT_FINICLCT;
		}

		/*
		 * If tracing, save the strongly connected component.
		 */
		if (tracing && (aplist_append(&alp, tlmp,
		    AL_CNT_SCC) == NULL))
			return (0);
	} while (tlmp != lmp);

	/*
	 * Determine if there are cyclic dependencies to process. If so, sort
	 * the components, and retain them for tracing output.
	 */
	if (sort->s_lndx > (num + 1)) {
		if (sort_scc(sort, num, flag) == 0)
			return (0);

		if (tracing && (aplist_append(&sort->s_scc, alp,
		    AL_CNT_SCC) == NULL))
			return (0);
	} else if (alp)
		free(alp);

	return (1);
}

static int
dep_visit(Lm_list *, Rt_map *, uint_t, Rt_map *, Sort *, int);

/*
 * Inspect a single dependency edge from clmp to dlmp.  Returns the smallest
 * SORTVAL reachable through this edge (the cycle-detection "low" value), or
 * -1 on allocation failure propagated up from dep_visit().  Objects that are
 * not eligible for collection simply return the caller's current minimum.
 */
static int
_dep_visit(Lm_list *lml, int min, Rt_map *clmp, Rt_map *dlmp, uint_t bflags,
    Sort *sort, int flag)
{
	int	_min;

	/*
	 * Only collect objects that belong to the callers link-map.  Catches
	 * cross dependencies (filtering) to ld.so.1.
	 */
	if (LIST(dlmp) != lml)
		return (min);

	/*
	 * Determine if this object hasn't been inspected.  A SORTVAL of -1
	 * indicates an object not yet visited on this tsort() pass.
	 */
	if ((_min = SORTVAL(dlmp)) == -1) {
		if (flag & RT_SORT_REV) {
			/*
			 * For .init processing, only collect objects that have
			 * been relocated and haven't already been collected.
			 */
			if ((FLAGS(dlmp) & (FLG_RT_RELOCED |
			    FLG_RT_INITCLCT)) != FLG_RT_RELOCED)
				return (min);

			/*
			 * If this object contains no .init, there's no need to
			 * establish a dependency.
			 */
			if ((INIT(dlmp) == 0) && (INITARRAY(dlmp) == 0))
				return (min);
		} else {
			/*
			 * For .fini processing only collect objects that have
			 * had their .init collected, and haven't already been
			 * .fini collected.
			 */
			if ((FLAGS(dlmp) & (FLG_RT_INITCLCT |
			    FLG_RT_FINICLCT)) != FLG_RT_INITCLCT)
				return (min);

			/*
			 * If we're deleting a subset of objects, only collect
			 * those marked for deletion.
			 */
			if ((flag & RT_SORT_DELETE) &&
			    ((FLAGS(dlmp) & FLG_RT_DELETE) == 0))
				return (min);

			/*
			 * If this object contains no .fini, there's no need to
			 * establish a dependency.
			 */
			if ((FINI(dlmp) == 0) && (FINIARRAY(dlmp) == 0))
				return (min);
		}

		/*
		 * Inspect this new dependency.
		 */
		if ((_min = dep_visit(lml, clmp, bflags, dlmp,
		    sort, flag)) == -1)
			return (-1);
	}

	/*
	 * Keep track of the smallest SORTVAL that has been encountered.  If
	 * this value is smaller than the present object, then the dependency
	 * edge has cycled back to objects that have been processed earlier
	 * along this dependency edge.
	 */
	if (_min < min) {
		DBG_CALL(Dbg_util_edge_out(clmp, sort->s_stack[_min]));
		return (_min);
	} else
		return (min);
}

/*
 * Visit the dependencies of each object.  Depth-first traversal in the style
 * of the Sedgewick strongly-connected-components algorithm cited in the file
 * header: each object is pushed on the sort stack, its explicit/implicit and
 * filtee dependencies are visited, and when the minimum reachable SORTVAL
 * equals the object's own value, a complete component is popped via visit().
 * Returns the minimum SORTVAL reached, or -1 on allocation failure.
 */
static int
dep_visit(Lm_list *lml, Rt_map *clmp, uint_t cbflags, Rt_map *lmp, Sort *sort,
    int flag)
{
	int		min;
	Aliste		idx1;
	Bnd_desc	*bdp;
	Dyninfo		*dip;

	min = SORTVAL(lmp) = sort->s_sndx;
	sort->s_stack[(sort->s_sndx)++] = lmp;

	if (FLAGS(lmp) & FLG_RT_INITFRST)
		sort->s_initfirst++;

	DBG_CALL(Dbg_util_edge_in(lml, clmp, cbflags, lmp, min, flag));

	/*
	 * Traverse both explicit and implicit dependencies.
	 */
	for (APLIST_TRAVERSE(DEPENDS(lmp), idx1, bdp)) {
		if ((min = _dep_visit(lml, min, lmp, bdp->b_depend,
		    bdp->b_flags, sort, flag)) == -1)
			return (-1);
	}

	/*
	 * Traverse any filtee dependencies.
	 */
	if (((dip = DYNINFO(lmp)) != NULL) && (FLAGS1(lmp) & MSK_RT_FILTER)) {
		uint_t	cnt, max = DYNINFOCNT(lmp);

		for (cnt = 0; cnt < max; cnt++, dip++) {
			Alist	*falp;
			Pdesc	*pdp;

			if (((falp = (Alist *)dip->di_info) == NULL) ||
			    ((dip->di_flags & MSK_DI_FILTER) == 0))
				continue;

			for (ALIST_TRAVERSE(falp, idx1, pdp)) {
				Aliste		idx2;
				Grp_hdl		*ghp;
				Grp_desc	*gdp;

				if ((pdp->pd_plen == 0) ||
				    ((ghp = (Grp_hdl *)pdp->pd_info) == NULL))
					continue;

				for (ALIST_TRAVERSE(ghp->gh_depends, idx2,
				    gdp)) {

					if (gdp->gd_depend == lmp)
						continue;
					if ((min = _dep_visit(lml, min, lmp,
					    gdp->gd_depend, BND_FILTER,
					    sort, flag)) == -1)
						return (-1);
				}
			}
		}
	}

	/*
	 * Having returned to where the minimum SORTVAL is equivalent to the
	 * object that has just been processed, collect any dependencies that
	 * are available on the sorting stack.
	 */
	if (min == SORTVAL(lmp)) {
		if (visit(lml, lmp, sort, flag) == 0)
			return (-1);
	}
	return (min);
}

/*
 * Find corresponding strongly connected component structure.
 */
static APlist *
trace_find_scc(Sort *sort, Rt_map *lmp)
{
	APlist		*alp;
	Aliste		idx1;

	/*
	 * Each element of s_scc is itself an APlist holding the members of
	 * one strongly connected component; return the component containing
	 * lmp, or NULL if lmp belongs to no retained cycle.
	 */
	for (APLIST_TRAVERSE(sort->s_scc, idx1, alp)) {
		Rt_map	*lmp2;
		Aliste	idx2;

		for (APLIST_TRAVERSE(alp, idx2, lmp2)) {
			if (lmp == lmp2)
				return (alp);
		}
	}
	return (NULL);
}

/*
 * Print out the .init dependency information (ldd).
 */
static void
trace_sort(Sort *sort)
{
	int		ndx = 0;
	APlist		*alp;
	Rt_map		*lmp1;

	(void) printf(MSG_ORIG(MSG_STR_NL));

	while ((lmp1 = sort->s_lmpa[ndx++]) != NULL) {
		/*
		 * Message formats are fetched lazily, once, on first use
		 * (static storage is zero-initialized, so the == 0 checks
		 * are well-defined).
		 */
		static const char	*ffmt, *cfmt = 0, *sfmt = 0;
		Bnd_desc		*bdp;
		Aliste			idx1;

		/* Skip objects with no .init, or whose .init already ran. */
		if ((INIT(lmp1) == 0) || (FLAGS(lmp1) & FLG_RT_INITCALL))
			continue;

		if (sfmt == 0)
			sfmt = MSG_INTL(MSG_LDD_INIT_FMT_02);

		/*
		 * If the only component on the strongly connected list is
		 * this link-map, then there are no dependencies.
		 */
		if ((alp = trace_find_scc(sort, lmp1)) == NULL) {
			(void) printf(sfmt, NAME(lmp1));
			continue;
		}

		/*
		 * Establish message formats for cyclic dependencies.
		 */
		if (cfmt == 0) {
			cfmt = MSG_INTL(MSG_LDD_INIT_FMT_03);
			ffmt = MSG_ORIG(MSG_LDD_INIT_FMT_FILE);
		}

		(void) printf(cfmt, NAME(lmp1), CYCGROUP(lmp1));

		/*
		 * Print each caller of lmp1 that is a member of the same
		 * strongly connected component.
		 */
		for (APLIST_TRAVERSE(CALLERS(lmp1), idx1, bdp)) {
			Rt_map	*lmp3, *lmp2 = bdp->b_caller;
			Aliste	idx2;

			for (APLIST_TRAVERSE(alp, idx2, lmp3)) {
				if (lmp2 != lmp3)
					continue;

				(void) printf(ffmt, NAME(lmp3));
			}
		}
	}
}

/*
 * A reverse ordered list (for .init's) contains INITFIRST elements. Move each
 * of these elements to the front of the list.
 */
static void
r_initfirst(Sort * sort, int end)
{
	Rt_map	*tlmp;
	int	bgn, ifst, lifst = 0;

	/*
	 * One pass per INITFIRST object counted during dep_visit(); each pass
	 * rotates the next INITFIRST element into position bgn, preserving
	 * the relative order of everything else.
	 */
	for (bgn = 0; bgn < sort->s_initfirst; bgn++) {
		for (ifst = lifst; ifst <= end; ifst++) {
			tlmp = sort->s_lmpa[ifst];

			if (!(FLAGS(tlmp) & FLG_RT_INITFRST))
				continue;

			/*
			 * If the INITFIRST element is already at the front of
			 * the list leave it there.
			 */
			if (ifst == bgn) {
				lifst = ifst + 1;
				break;
			}

			/*
			 * Move the elements from the front of the list up to
			 * the INITFIRST element, back one position.
			 */
			(void) memmove(&sort->s_lmpa[bgn + 1],
			    &sort->s_lmpa[bgn],
			    ((ifst - bgn) * sizeof (Rt_map *)));

			/*
			 * Insert INITFIRST element at the front of the list.
			 */
			sort->s_lmpa[bgn] = tlmp;
			lifst = ifst + 1;
			break;
		}
	}
}

/*
 * A forward ordered list (for .fini's) contains INITFIRST elements. Move each
 * of these elements to the end of the list, so that their .fini's fire last.
 * (The code inserts at the tail - the former comment saying "front" here was
 * incorrect.)
 */
static void
f_initfirst(Sort *sort, int end)
{
	Rt_map	*tlmp;
	int	bgn, ifst, lifst = 0;

	for (bgn = 0; bgn < sort->s_initfirst; bgn++) {
		for (ifst = lifst; ifst <= end; ifst++) {
			tlmp = sort->s_lmpa[ifst];

			if (!(FLAGS(tlmp) & FLG_RT_INITFRST))
				continue;

			/*
			 * If the INITFIRST element is already at the end of
			 * the list leave it there.
			 */
			if (ifst == end)
				break;

			/*
			 * Move the elements from after the INITFIRST element
			 * up to the back of the list, up one position.
			 */
			(void) memmove(&sort->s_lmpa[ifst],
			    &sort->s_lmpa[ifst + 1],
			    ((end - ifst) * sizeof (Rt_map *)));

			/*
			 * Insert INITFIRST element at the back of the list.
			 * end shrinks so already-placed INITFIRST elements
			 * keep their relative order.
			 */
			sort->s_lmpa[end--] = tlmp;
			lifst = ifst;
			break;
		}
	}
}

/*
 * Determine whether .init or .fini processing is required.
 */
static int
initorfini(Lm_list *lml, Rt_map *lmp, int flag, Sort *sort)
{
	/*
	 * Returns non-zero only when dep_visit() fails (allocation error);
	 * objects that are simply ineligible for collection return 0.
	 */
	if (flag & RT_SORT_REV) {
		/*
		 * For .init processing, only collect objects that have been
		 * relocated and haven't already been collected.
		 */
		if ((FLAGS(lmp) & (FLG_RT_RELOCED | FLG_RT_INITCLCT)) !=
		    FLG_RT_RELOCED)
			return (0);

		if (dep_visit(lml, 0, 0, lmp, sort, flag) == -1)
			return (1);

	} else if (!(flag & RT_SORT_DELETE) || (FLAGS(lmp) & FLG_RT_DELETE)) {
		/*
		 * Only collect objects that have had their .init collected,
		 * and haven't already been .fini collected.
		 */
		if (!((FLAGS(lmp) & (FLG_RT_INITCLCT | FLG_RT_FINICLCT)) ==
		    (FLG_RT_INITCLCT)))
			return (0);

		if (dep_visit(lml, 0, 0, lmp, sort, flag) == -1)
			return (1);
	}
	return (0);
}

/*
 * Sort the dependency graph rooted at lmp, returning a NULL-terminated,
 * caller-freed array of link-maps in the order their .init (RT_SORT_REV)
 * or .fini (RT_SORT_FWD) sections should fire.  Returns 0 when there is
 * nothing to do, or S_ERROR on allocation failure.
 *
 * NOTE(review): sort.s_lmpa (and any s_queue/s_scc state) is not freed on
 * the S_ERROR paths - presumably these failures are fatal to the caller;
 * confirm before relying on this in a non-fatal context.
 */
Rt_map **
tsort(Rt_map *lmp, int num, int flag)
{
	Rt_map		*_lmp;
	Lm_list		*lml = LIST(lmp);
	Word		init = lml->lm_flags & LML_FLG_TRC_INIT;
	Sort		sort = { 0 };
	size_t		size;

	if (num == 0)
		return (0);

	/*
	 * Prior to tsorting any .init sections, insure that the `environ'
	 * symbol is initialized for this link-map list.
	 */
	if ((flag & RT_SORT_REV) && ((lml->lm_flags &
	    (LML_FLG_TRC_ENABLE | LML_FLG_ENVIRON)) == 0))
		set_environ(lml);

	/*
	 * Allocate memory for link-map list array. Calloc the array to insure
	 * all elements are zero, we might find that no objects need processing.
	 * At the same time, allocate a stack for any topological sorting that
	 * might be necessary.  A single calloc(2, size) provides both: the
	 * array at the front, the stack immediately after it.
	 */
	sort.s_lmp = lmp;
	sort.s_num = num + 1;
	size = sort.s_num * sizeof (Rt_map *);
	if ((sort.s_lmpa = calloc(2, size)) == NULL)
		return ((Rt_map **)S_ERROR);
	sort.s_stack = (Rt_map **)((uintptr_t)sort.s_lmpa + size);

	/*
	 * Determine where to start searching for tsort() candidates.  Any call
	 * to tsort() for .init processing is passed the link-map from which to
	 * start searching.  However, if new objects have dependencies on
	 * existing objects, or existing objects have been promoted (RTLD_LAZY
	 * to RTLD_NOW), then start searching at the head of the link-map list.
	 * These previously loaded objects will have been tagged for inclusion
	 * in this tsort() pass.  They still remain on an existing tsort() list,
	 * which must have been prempted for control to have arrived here.
	 * However, they will be ignored when encountered on any previous
	 * tsort() list if their .init has already been called.
	 */
	if (lml->lm_flags & LML_FLG_OBJREEVAL)
		_lmp = lml->lm_head;
	else
		_lmp = lmp;

	DBG_CALL(Dbg_file_bindings(_lmp, flag));
	lml->lm_flags &=
	    ~(LML_FLG_OBJREEVAL | LML_FLG_OBJADDED | LML_FLG_OBJDELETED);

	/*
	 * If interposers exist, inspect these objects first.
	 *
	 * Interposers can provide implicit dependencies - for example, an
	 * application that has a dependency on libumem will caused any other
	 * dependencies of the application that use the malloc family, to
	 * have an implicit dependency on libumem. However, under the default
	 * condition of lazy binding, these dependency relationships on libumem
	 * are unknown to the tsorting process (ie. a call to one of the malloc
	 * family has not occurred to establish the dependency). This lack of
	 * dependency information makes the interposer look "standalone",
	 * whereas the interposers .init/.fini should be analyzed with respect
	 * to the dependency relationship libumem will eventually create.
	 *
	 * By inspecting interposing objects first, we are able to trigger
	 * their .init sections to be accounted for before any others.
	 * Selecting these .init sections first is important for the malloc
	 * libraries, as these libraries need to prepare for pthread_atfork().
	 * However, handling interposer libraries in this generic fashion
	 * should help provide for other dependency relationships that may
	 * exist.
	 */
	if ((lml->lm_flags & (LML_FLG_INTRPOSE | LML_FLG_INTRPOSETSORT)) ==
	    LML_FLG_INTRPOSE) {
		Rt_map	*ilmp = _lmp;

		/*
		 * Unless the executable is tagged as an interposer, skip to
		 * the next object.
		 */
		if ((FLAGS(ilmp) & MSK_RT_INTPOSE) == 0)
			ilmp = NEXT_RT_MAP(ilmp);

		for (; ilmp; ilmp = NEXT_RT_MAP(ilmp)) {
			/*
			 * Interposers are grouped at the front of the list;
			 * the first non-interposer ends the scan.
			 */
			if ((FLAGS(ilmp) & MSK_RT_INTPOSE) == 0)
				break;

			if (initorfini(lml, ilmp, (flag | RT_SORT_INTPOSE),
			    &sort) != 0)
				return ((Rt_map **)S_ERROR);
		}

		/*
		 * Once all interposers are processed, there is no need to
		 * look for interposers again.  An interposer can only
		 * be introduced before any relocation takes place, thus
		 * interposer .init's will be grabbed during the first tsort
		 * starting at the head of the link-map list.
		 *
		 * Interposers can't be unloaded.  Thus interposer .fini's can
		 * only be called during atexit() processing.  The interposer
		 * tsort flag is removed from each link-map list during
		 * atexit_fini() so that the interposers .fini sections are
		 * processed appropriately.
		 */
		lml->lm_flags |= LML_FLG_INTRPOSETSORT;
	}

	/*
	 * Inspect any standard objects.
	 */
	for (; _lmp; _lmp = NEXT_RT_MAP(_lmp)) {
		if (FLAGS(_lmp) & MSK_RT_INTPOSE)
			continue;

		if (initorfini(lml, _lmp, flag, &sort) != 0)
			return ((Rt_map **)S_ERROR);
	}

	/*
	 * The dependencies have been collected such that they are appropriate
	 * for an .init order, for .fini order reverse them.
	 */
	if (flag & RT_SORT_FWD) {
		int	bgn = 0, end = sort.s_lndx - 1;

		while (bgn < end) {
			Rt_map	*tlmp = sort.s_lmpa[end];

			sort.s_lmpa[end] = sort.s_lmpa[bgn];
			sort.s_lmpa[bgn] = tlmp;

			bgn++, end--;
		}
	}

	/*
	 * If INITFIRST objects have been collected then move them to the front
	 * or end of the list as appropriate.
	 */
	if (sort.s_initfirst) {
		if (flag & RT_SORT_REV)
			r_initfirst(&sort, sort.s_lndx - 1);
		else
			f_initfirst(&sort, sort.s_lndx - 1);
	}

	/*
	 * If tracing .init sections (only meaningful for RT_SORT_REV), print
	 * out the sorted dependencies.
	 */
	if (init)
		trace_sort(&sort);

	/*
	 * Clean any temporary structures prior to return.
	 */
	if (sort.s_queue) {
		Aliste idx;
		Rt_map	*lmp2;

		/*
		 * Traverse the link-maps collected on the sort queue and
		 * delete the depth index.  These link-maps may be traversed
		 * again to sort other components either for inits, and almost
		 * certainly for .finis.
		 */
		for (APLIST_TRAVERSE(sort.s_queue, idx, lmp2))
			IDX(lmp2) = 0;

		free(sort.s_queue);
	}

	if (sort.s_scc) {
		Aliste	idx;
		APlist	*alp;

		for (APLIST_TRAVERSE(sort.s_scc, idx, alp))
			free(alp);
		free(sort.s_scc);
	}

	/*
	 * The caller is responsible for freeing the sorted link-map list once
	 * the associated .init/.fini's have been fired.
	 */
	DBG_CALL(Dbg_util_nl(lml, DBG_NL_STD));
	return (sort.s_lmpa);
}