/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997-2018 Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "libgcc_tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#else
#if (defined(__GTHREAD_MUTEX_INIT) || defined(__GTHREAD_MUTEX_INIT_FUNCTION)) \
    && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
#define ATOMIC_FDE_FAST_PATH 1
#endif
#endif

/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;
#ifdef ATOMIC_FDE_FAST_PATH
static int any_objects_registered;
#endif

#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#define init_object_mutex_once()
#else
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static __gthread_mutex_t object_mutex;

static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
/* ???  Several targets include this file after stubbing out parts of
   gthr.h, and expect no locking to be done.  */
#define init_object_mutex_once()
static __gthread_mutex_t object_mutex;
#endif
#endif

/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
			     void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;
#ifdef ATOMIC_FDE_FAST_PATH
  /* Set flag that at least one library has registered FDEs.
     Use relaxed MO here, it is up to the app to ensure that the library
     loading/initialization happens-before using that library in other
     threads (in particular unwinding with that library's functions
     appearing in the backtraces).  Calling that library's functions
     without waiting for the library to initialize would be racy.  */
  if (!any_objects_registered)
    __atomic_store_n (&any_objects_registered, 1, __ATOMIC_RELAXED);
#endif

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}

void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}
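
/* As a usage sketch (illustrative, not part of this file): a JIT that
   emits its own .eh_frame data can hand it to the unwinder through the
   entry points above.  BUF and the jit_* helpers below are hypothetical
   names; the externs match the declarations in unwind-dw2-fde.h.

     extern void __register_frame (void *);
     extern void __deregister_frame (void *);

     void jit_install_eh (void *buf)    // buf: complete .eh_frame data,
     {                                  // terminated by a zero length word
       __register_frame (buf);
     }

     void jit_uninstall_eh (void *buf)  // must be called before freeing buf
     {
       __deregister_frame (buf);
     }

   __register_frame mallocs the struct object on the caller's behalf,
   and __deregister_frame frees it again (see below).  */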

/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
				   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;
#ifdef ATOMIC_FDE_FAST_PATH
  /* Set flag that at least one library has registered FDEs.
     Use relaxed MO here, it is up to the app to ensure that the library
     loading/initialization happens-before using that library in other
     threads (in particular unwinding with that library's functions
     appearing in the backtraces).  Calling that library's functions
     without waiting for the library to initialize would be racy.  */
  if (!any_objects_registered)
    __atomic_store_n (&any_objects_registered, 1, __ATOMIC_RELAXED);
#endif

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}

void
__register_frame_table (void *begin)
{
  struct object *ob = malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}

/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
	ob = *p;
	*p = ob->next;
	goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
	if ((*p)->u.sort->orig_data == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    free (ob->u.sort);
	    goto out;
	  }
      }
    else
      {
	if ((*p)->u.single == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    goto out;
	  }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
  gcc_assert (ob);
  return (void *) ob;
}

void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}

void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}


/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      gcc_unreachable ();
    }
}
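
/* For example (an illustrative decoding, not code from this file): a
   pc_begin field encoded as (DW_EH_PE_datarel | DW_EH_PE_sdata4) holds
   a signed 32-bit offset, and the absolute address is recovered as

     pc = (_Unwind_Ptr) ob->dbase + offset;

   pcrel values are instead relative to their own storage location, and
   absptr/aligned values need no base at all, hence the zeros above.  */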

/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  if (__builtin_expect (cie->version >= 4, 0))
    {
      if (p[0] != sizeof (void *) || p[1] != 0)
	return DW_EH_PE_omit;		/* We are not prepared to handle unexpected
					   address sizes or segment selectors.  */
      p += 2;				/* Skip address size and segment size.  */
    }

  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = read_uleb128 (p, &utmp);		/* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);		/* Skip data alignment.  */
  if (cie->version == 1)		/* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;				/* Skip 'z' */
  p = read_uleb128 (p, &utmp);		/* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
	return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
	{
	  /* ??? Avoid dereferencing indirect pointers, since we're
	     faking the base address.  Gotta keep DW_EH_PE_aligned
	     intact, however.  */
	  p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
	}
      /* LSDA encoding.  */
      else if (*aug == 'L')
	p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
	return DW_EH_PE_absptr;
      aug++;
    }
}
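
/* To illustrate the walk above (a typical layout produced by GCC, not
   a normative one): a CIE with augmentation string "zPLR" is followed
   by the augmentation length, then for 'P' one byte of personality
   encoding plus the encoded personality pointer, for 'L' one byte of
   LSDA encoding, and for 'R' the one-byte FDE pointer encoding that
   the loop finally returns.  The operands must be skipped in exactly
   that order to land on the 'R' datum.  */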

static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}


/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time. But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
		       const fde *x, const fde *y)
{
  _Unwind_Ptr x_ptr, y_ptr;
  memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
  memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
				x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
				y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);


/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur. They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */

struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
	accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}

static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}
/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within
   the incoming LINEAR array can be interspersed with (high valued)
   erratic entries, we construct a chain indicating the sequenced
   entries.  To avoid having to allocate this chain, we overlay it onto
   the space of the ERRATIC array during construction.  A final pass
   iterates over the chain to determine what should be placed in the
   ERRATIC array, and what is the linear sequence.  This overlay is
   safe from aliasing.  */

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *linear, struct fde_vector *erratic)
{
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct. Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      for (probe = chain_end;
	   probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
	   probe = chain_end)
	{
	  chain_end = (const fde *const*) erratic->array[probe - linear->array];
	  erratic->array[probe - linear->array] = NULL;
	}
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}
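
/* A worked example of the overlay (illustrative, writing FDEs as small
   integers): splitting LINEAR = [10, 20, 90, 30, 40], the first loop
   threads a chain through ERRATIC that links each kept entry to its
   predecessor.  On reaching 30, the chain head 90 compares greater, so
   90 is popped (its ERRATIC slot is reset to NULL) and 30 is linked to
   20 instead.  The second loop then reads the NULL/non-NULL pattern
   back off ERRATIC, leaving LINEAR = [10, 20, 30, 40] with count 4 and
   ERRATIC = [90] with count 1.  */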

#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
		int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
	++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
	{
	  SWAP (a[i], a[j]);
	  i = j;
	}
      else
	break;
    }
}

/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
		struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}
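
/* A short trace (illustrative, again writing FDEs as integers):
   sorting [3, 1, 4, 1, 5].  The build phase starts at m = n/2-1 = 1
   and sifts down, producing the max-heap [5, 3, 4, 1, 1].  The shrink
   phase swaps a[0] with a[m] and re-sifts the root within a[0..m):
   after m = 4 the array is [4, 3, 1, 1, 5], after m = 3 it is
   [3, 1, 1, 4, 5], and after m = 2 and m = 1 it settles into the
   sorted [1, 1, 3, 4, 5].  */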

/* Merge V1 and V2, both sorted, and put the result into V1.  */
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
	{
	  i2--;
	  fde2 = v2->array[i2];
	  while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
	    {
	      v1->array[i1+i2] = v1->array[i1-1];
	      i1--;
	    }
	  v1->array[i1+i2] = fde2;
	}
      while (i2 > 0);
      v1->count += v2->count;
    }
}
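
/* The merge runs backwards so it can work in place: start_fde_sort
   allocated V1 large enough for all COUNT entries.  Illustrative run
   with V1 = [10, 30, 50] and V2 = [20, 40]: placing 40 shifts 50 to
   index 4 and stores 40 at index 3; placing 20 shifts 30 to index 2
   and stores 20 at index 1, yielding V1 = [10, 20, 30, 40, 50].  */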

static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
	 so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}


/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
	 objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
	{
	  last_cie = this_cie;
	  encoding = get_cie_encoding (this_cie);
	  if (encoding == DW_EH_PE_omit)
	    return -1;
	  base = base_from_object (encoding, ob);
	  if (ob->s.b.encoding == DW_EH_PE_omit)
	    ob->s.b.encoding = encoding;
	  else if (ob->s.b.encoding != encoding)
	    ob->s.b.mixed_encoding = 1;
	}

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
				    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
	 In these cases, the function address will be NULL, but if
	 the encoding is smaller than a pointer a true NULL may not
	 be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
	mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
      else
	mask = -1;

      if ((pc_begin & mask) == 0)
	continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
	ob->pc_begin = (void *) pc_begin;
    }

  return count;
}
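
/* Concretely (an illustrative case): for a 4-byte encoding on an LP64
   target, size_of_encoded_value returns 4 and MASK becomes
   ((_Unwind_Ptr) 1 << 32) - 1 = 0xffffffff.  Only the low 32 bits of
   the reconstructed PC_BEGIN are meaningful, so the FDE is dropped
   when those bits are all zero even if the upper half of the
   reconstruction contains sign-extension or base-address noise.  */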

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  _Unwind_Ptr ptr;
	  memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
	  if (ptr == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr pc_begin, mask;

	  read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
					&pc_begin);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      fde_insert (accu, this_fde);
    }
}

/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
	{
	  fde **p = ob->u.array;
	  for (count = 0; *p; ++p)
	    {
	      size_t cur_count = classify_object_over_fdes (ob, *p);
	      if (cur_count == (size_t) -1)
		goto unhandled_fdes;
	      count += cur_count;
	    }
	}
      else
	{
	  count = classify_object_over_fdes (ob, ob->u.single);
	  if (count == (size_t) -1)
	    {
	      static const fde terminator;
	    unhandled_fdes:
	      ob->s.i = 0;
	      ob->s.b.encoding = DW_EH_PE_omit;
	      ob->u.single = &terminator;
	      return;
	    }
	}

      /* The count field we have in the main struct object is somewhat
	 limited, but should suffice for virtually all cases.  If the
	 counted value doesn't fit, re-write a zero.  The worst that
	 happens is that we re-count next time -- admittedly non-trivial
	 in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
	ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
	add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}

/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
	  pc_begin = pc_array[0];
	  pc_range = pc_array[1];
	  if (pc_begin == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr mask;
	  const unsigned char *p;

	  p = read_encoded_value_with_base (encoding, base,
					    this_fde->pc_begin, &pc_begin);
	  read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
	return this_fde;
    }

  return NULL;
}
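
/* Note the containment test above: the subtraction is performed in the
   unsigned _Unwind_Ptr type, so a PC below PC_BEGIN wraps to a huge
   value and fails the < PC_RANGE test.  One compare thus checks both
   interval ends.  With (illustrative numbers) pc_begin = 0x1000 and
   pc_range = 0x100, pc = 0x0fff yields (_Unwind_Ptr) -1 and is
   rejected, while pc = 0x10ff yields 0xff and is accepted.  */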

/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;
      memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
      memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));

      if (pc < pc_begin)
	hi = i;
      else if (pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}

static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
					&pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}

static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
					base_from_object (encoding, ob),
					f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}

static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
	 that we've not processed this object before.  A quick range
	 check is in order.  */
      if (pc < ob->pc_begin)
	return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
	return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
	return binary_search_unencoded_fdes (ob, pc);
      else
	return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, because we've no memory.  */
      if (ob->s.b.from_array)
	{
	  fde **p;
	  for (p = ob->u.array; *p ; p++)
	    {
	      const fde *f = linear_search_fdes (ob, *p, pc);
	      if (f)
		return f;
	    }
	  return NULL;
	}
      else
	return linear_search_fdes (ob, ob->u.single, pc);
    }
}

const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

#ifdef ATOMIC_FDE_FAST_PATH
  /* For targets where unwind info is usually not registered through these
     APIs anymore, avoid taking a global lock.
     Use relaxed MO here, it is up to the app to ensure that the library
     loading/initialization happens-before using that library in other
     threads (in particular unwinding with that library's functions
     appearing in the backtraces).  Calling that library's functions
     without waiting for the library to initialize would be racy.  */
  if (__builtin_expect (!__atomic_load_n (&any_objects_registered,
					  __ATOMIC_RELAXED), 1))
    return NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
	f = search_object (ob, pc);
	if (f)
	  goto fini;
	break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
	if ((*p)->pc_begin < ob->pc_begin)
	  break;
      ob->next = *p;
      *p = ob;

      if (f)
	goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
	encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
				    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}
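
/* A sketch of a typical caller (local names here are illustrative; the
   in-tree consumer is uw_frame_state_for in unwind-dw2.c):

     struct dwarf_eh_bases bases;
     const fde *f = _Unwind_Find_FDE ((void *) (ra - 1), &bases);
     if (f)
       ... decode the FDE's CFI using bases.tbase/dbase/func ...

   Callers pass RA-1 rather than the return address itself so that a
   call ending one function is not attributed to the function that
   happens to start at the next address.  */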