/* Common unwinding code for ARM EABI and C6X.
   Copyright (C) 2004, 2005, 2009, 2011 Free Software Foundation, Inc.
   Contributed by Paul Brook

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "tconfig.h"
#include "tsystem.h"
#include "unwind.h"

/* Used for SystemTap unwinder probe.  */
#ifdef HAVE_SYS_SDT_H
#include <sys/sdt.h>
#endif

/* We add a prototype for abort here to avoid creating a dependency on
   target headers.  */
extern void abort (void);

/* Definitions for C++ runtime support routines.  We make these weak
   declarations to avoid pulling in libsupc++ unnecessarily.  */
typedef unsigned char bool;

typedef struct _ZSt9type_info type_info; /* This names C++ type_info type */
enum __cxa_type_match_result
  {
    ctm_failed = 0,
    ctm_succeeded = 1,
    ctm_succeeded_with_ptr_to_base = 2
  };

void __attribute__((weak)) __cxa_call_unexpected(_Unwind_Control_Block *ucbp);
bool __attribute__((weak)) __cxa_begin_cleanup(_Unwind_Control_Block *ucbp);
enum __cxa_type_match_result __attribute__((weak)) __cxa_type_match
  (_Unwind_Control_Block *ucbp, const type_info *rttip,
   bool is_reference, void **matched_object);

_Unwind_Ptr __attribute__((weak))
__gnu_Unwind_Find_exidx (_Unwind_Ptr, int *);

#define EXIDX_CANTUNWIND 1
#define uint32_highbit (((_uw) 1) << 31)

#define UCB_FORCED_STOP_FN(ucbp) ((ucbp)->unwinder_cache.reserved1)
#define UCB_PR_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved2)
#define UCB_SAVED_CALLSITE_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved3)
#define UCB_FORCED_STOP_ARG(ucbp) ((ucbp)->unwinder_cache.reserved4)

/* Unwind descriptors.  */

typedef struct
{
  _uw16 length;
  _uw16 offset;
} EHT16;

typedef struct
{
  _uw length;
  _uw offset;
} EHT32;

/* An exception index table entry.  */

typedef struct __EIT_entry
{
  _uw fnoffset;
  _uw content;
} __EIT_entry;
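
/* The CONTENT word of an index entry is interpreted by get_eit_entry below:
   the value EXIDX_CANTUNWIND marks a frame that cannot be unwound; if the
   high bit is set the word itself holds an inline (compact) exception
   handling table; otherwise it is a 31-bit self-relative offset to an
   out-of-line _Unwind_EHT_Header.  FNOFFSET is always a 31-bit self-relative
   offset to the start of the function the entry describes.  */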

/* Assembly helper functions.  */

/* Restore core register state.  Never returns.  */
void __attribute__((noreturn)) restore_core_regs (struct core_regs *);


/* Restore coprocessor state after phase1 unwinding.  */
static void restore_non_core_regs (phase1_vrs * vrs);
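
/* Note: neither helper is defined in this file.  The target-specific code
   that includes it (the ARM or C6X unwinder) is expected to supply
   restore_core_regs (typically in assembly) and restore_non_core_regs.  */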

/* A better way to do this would probably be to compare the absolute address
   with a segment relative relocation of the same symbol.  */

extern int __text_start;
extern int __data_start;

/* The exception index table location.  */
extern __EIT_entry __exidx_start;
extern __EIT_entry __exidx_end;

/* Core unwinding functions.  */

/* Calculate the address encoded by a 31-bit self-relative offset at address
   P.  */
static inline _uw selfrel_offset31 (const _uw *p);
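
/* Only the declaration lives here; the definition comes from the including
   target file.  A minimal sketch of what that definition is assumed to look
   like (sign-extend the 31-bit field, then make it absolute by adding the
   field's own address):

     static inline _uw
     selfrel_offset31 (const _uw *p)
     {
       _uw offset = *p;
       // Sign-extend from 31 to 32 bits.
       if (offset & (1u << 30))
	 offset |= 1u << 31;
       else
	 offset &= ~(1u << 31);
       return offset + (_uw) p;
     }
*/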

static _uw __gnu_unwind_get_pr_addr (int idx);

static void _Unwind_DebugHook (void *, void *)
  __attribute__ ((__noinline__, __used__, __noclone__));

/* This function is called during unwinding.  It is intended as a hook
   for a debugger to intercept exceptions.  CFA is the CFA of the
   target frame.  HANDLER is the PC to which control will be
   transferred.  */

static void
_Unwind_DebugHook (void *cfa __attribute__ ((__unused__)),
		   void *handler __attribute__ ((__unused__)))
{
  /* We only want to use stap probes starting with v3.  Earlier
     versions added too much startup cost.  */
#if defined (HAVE_SYS_SDT_H) && defined (STAP_PROBE2) && _SDT_NOTE_TYPE >= 3
  STAP_PROBE2 (libgcc, unwind, cfa, handler);
#else
  asm ("");
#endif
}

/* This is a wrapper to be called when we need to restore core registers.
   It will call `_Unwind_DebugHook' before restoring the registers, thus
   making it possible to intercept and debug exceptions.

   When calling `_Unwind_DebugHook', the first argument (the CFA) is zero
   because we are not interested in it.  However, it must still be passed
   (even as zero) because GDB expects to find it when using the probe.  */

#define uw_restore_core_regs(TARGET, CORE)				      \
  do									      \
    {									      \
      void *handler = __builtin_frob_return_addr ((void *) VRS_PC (TARGET));  \
      _Unwind_DebugHook (0, handler);					      \
      restore_core_regs (CORE);						      \
    }									      \
  while (0)

/* Perform a binary search for RETURN_ADDRESS in TABLE.  The table contains
   NREC entries.  */

static const __EIT_entry *
search_EIT_table (const __EIT_entry * table, int nrec, _uw return_address)
{
  _uw next_fn;
  _uw this_fn;
  int n, left, right;

  if (nrec == 0)
    return (__EIT_entry *) 0;

  left = 0;
  right = nrec - 1;

  while (1)
    {
      n = (left + right) / 2;
      this_fn = selfrel_offset31 (&table[n].fnoffset);
      if (n != nrec - 1)
	next_fn = selfrel_offset31 (&table[n + 1].fnoffset) - 1;
      else
	next_fn = (_uw)0 - 1;

      if (return_address < this_fn)
	{
	  if (n == left)
	    return (__EIT_entry *) 0;
	  right = n - 1;
	}
      else if (return_address <= next_fn)
	return &table[n];
      else
	left = n + 1;
    }
}

/* Find the exception index table entry for the given address.
   Fill in the relevant fields of the UCB.
   Returns _URC_OK on success, _URC_END_OF_STACK if the frame cannot be
   unwound, or _URC_FAILURE if an error occurred.  */

static _Unwind_Reason_Code
get_eit_entry (_Unwind_Control_Block *ucbp, _uw return_address)
{
  const __EIT_entry * eitp;
  int nrec;

  /* The return address is the address of the instruction following the
     call instruction (plus one in thumb mode).  If this was the last
     instruction in the function the address will lie in the following
     function.  Subtract 2 from the address so that it points within the call
     instruction itself.  */
  return_address -= 2;

  if (__gnu_Unwind_Find_exidx)
    {
      eitp = (const __EIT_entry *) __gnu_Unwind_Find_exidx (return_address,
							    &nrec);
      if (!eitp)
	{
	  UCB_PR_ADDR (ucbp) = 0;
	  return _URC_FAILURE;
	}
    }
  else
    {
      eitp = &__exidx_start;
      nrec = &__exidx_end - &__exidx_start;
    }

  eitp = search_EIT_table (eitp, nrec, return_address);

  if (!eitp)
    {
      UCB_PR_ADDR (ucbp) = 0;
      return _URC_FAILURE;
    }
  ucbp->pr_cache.fnstart = selfrel_offset31 (&eitp->fnoffset);

  /* Can this frame be unwound at all?  */
  if (eitp->content == EXIDX_CANTUNWIND)
    {
      UCB_PR_ADDR (ucbp) = 0;
      return _URC_END_OF_STACK;
    }

  /* Obtain the address of the "real" __EHT_Header word.  */

  if (eitp->content & uint32_highbit)
    {
      /* It is immediate data.  */
      ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)&eitp->content;
      ucbp->pr_cache.additional = 1;
    }
  else
    {
      /* The low 31 bits of the content field are a self-relative
	 offset to an _Unwind_EHT_Entry structure.  */
      ucbp->pr_cache.ehtp =
	(_Unwind_EHT_Header *) selfrel_offset31 (&eitp->content);
      ucbp->pr_cache.additional = 0;
    }

  /* Discover the personality routine address.  */
  if (*ucbp->pr_cache.ehtp & (1u << 31))
    {
      /* One of the predefined standard routines.  */
      _uw idx = (*(_uw *) ucbp->pr_cache.ehtp >> 24) & 0xf;
      UCB_PR_ADDR (ucbp) = __gnu_unwind_get_pr_addr (idx);
      if (UCB_PR_ADDR (ucbp) == 0)
	{
	  /* Failed */
	  return _URC_FAILURE;
	}
    }
  else
    {
      /* The first word is a self-relative offset to the personality
	 routine.  */
      UCB_PR_ADDR (ucbp) = selfrel_offset31 (ucbp->pr_cache.ehtp);
    }
  return _URC_OK;
}
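
/* For reference: when the high bit of the first EHT word is set (the compact
   form handled above), bits 24-27 select one of the predefined
   __aeabi_unwind_cpp_prN personality routines, and the remaining bytes of
   the table are interpreted by that routine; see __gnu_unwind_pr_common
   below for how the unwind opcodes are decoded.  */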


/* Perform phase2 unwinding.  VRS is the initial virtual register state.  */

static void __attribute__((noreturn))
unwind_phase2 (_Unwind_Control_Block * ucbp, phase2_vrs * vrs)
{
  _Unwind_Reason_Code pr_result;

  do
    {
      /* Find the entry for this routine.  */
      if (get_eit_entry (ucbp, VRS_PC(vrs)) != _URC_OK)
	abort ();

      UCB_SAVED_CALLSITE_ADDR (ucbp) = VRS_PC(vrs);

      /* Call the pr to decide what to do.  */
      pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
	(_US_UNWIND_FRAME_STARTING, ucbp, (_Unwind_Context *) vrs);
    }
  while (pr_result == _URC_CONTINUE_UNWIND);

  if (pr_result != _URC_INSTALL_CONTEXT)
    abort ();

  uw_restore_core_regs (vrs, &vrs->core);
}

/* Perform phase2 forced unwinding.  */

static _Unwind_Reason_Code
unwind_phase2_forced (_Unwind_Control_Block *ucbp, phase2_vrs *entry_vrs,
		      int resuming)
{
  _Unwind_Stop_Fn stop_fn = (_Unwind_Stop_Fn) UCB_FORCED_STOP_FN (ucbp);
  void *stop_arg = (void *)UCB_FORCED_STOP_ARG (ucbp);
  _Unwind_Reason_Code pr_result = 0;
  /* We use a phase1_vrs here, even though we do not demand-save, because
     it has the prev_sp field.  */
  phase1_vrs saved_vrs, next_vrs;

  /* Save the core registers.  */
  saved_vrs.core = entry_vrs->core;
  /* We don't need to demand-save the non-core registers, because we
     unwind in a single pass.  */
  saved_vrs.demand_save_flags = 0;

  /* Unwind until we reach a propagation barrier.  */
  do
    {
      _Unwind_State action;
      _Unwind_Reason_Code entry_code;
      _Unwind_Reason_Code stop_code;

      /* Find the entry for this routine.  */
      entry_code = get_eit_entry (ucbp, VRS_PC (&saved_vrs));

      if (resuming)
	{
	  action = _US_UNWIND_FRAME_RESUME | _US_FORCE_UNWIND;
	  resuming = 0;
	}
      else
	action = _US_UNWIND_FRAME_STARTING | _US_FORCE_UNWIND;

      if (entry_code == _URC_OK)
	{
	  UCB_SAVED_CALLSITE_ADDR (ucbp) = VRS_PC (&saved_vrs);

	  next_vrs = saved_vrs;

	  /* Call the pr to decide what to do.  */
	  pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
	    (action, ucbp, (void *) &next_vrs);

	  saved_vrs.prev_sp = VRS_SP (&next_vrs);
	}
      else
	{
	  /* Treat any failure as the end of unwinding, to cope more
	     gracefully with missing EH information.  Mixed EH and
	     non-EH within one object will usually result in failure,
	     because the .ARM.exidx tables do not indicate the end
	     of the code to which they apply; but mixed EH and non-EH
	     shared objects should return an unwind failure at the
	     entry of a non-EH shared object.  */
	  action |= _US_END_OF_STACK;

	  saved_vrs.prev_sp = VRS_SP (&saved_vrs);
	}

      stop_code = stop_fn (1, action, ucbp->exception_class, ucbp,
			   (void *)&saved_vrs, stop_arg);
      if (stop_code != _URC_NO_REASON)
	return _URC_FAILURE;

      if (entry_code != _URC_OK)
	return entry_code;

      saved_vrs = next_vrs;
    }
  while (pr_result == _URC_CONTINUE_UNWIND);

  if (pr_result != _URC_INSTALL_CONTEXT)
    {
      /* Some sort of failure has occurred in the pr and probably the
	 pr returned _URC_FAILURE.  */
      return _URC_FAILURE;
    }

  uw_restore_core_regs (&saved_vrs, &saved_vrs.core);
}
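
/* Summary of the stop-function protocol implemented above: STOP_FN is called
   once per frame with _US_FORCE_UNWIND set (and with _US_END_OF_STACK added
   once the index-table lookup fails), and any return value other than
   _URC_NO_REASON ends the unwind with _URC_FAILURE.  */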

/* This is a very limited implementation of _Unwind_GetCFA.  It returns
   the stack pointer as it is about to be unwound, and is only valid
   while calling the stop function during forced unwinding.  If the
   current personality routine result is going to run a cleanup, this
   will not be the CFA; but when the frame is really unwound, it will
   be.  */

_Unwind_Word
_Unwind_GetCFA (_Unwind_Context *context)
{
  return ((phase1_vrs *) context)->prev_sp;
}

/* Perform phase1 unwinding.  UCBP is the exception being thrown, and
   ENTRY_VRS is the register state on entry to _Unwind_RaiseException.  */

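/* Note that _Unwind_RaiseException, _Unwind_ForcedUnwind, _Unwind_Resume and
   related entry points are not defined in this file; they are assumed to be
   small target-specific (typically assembly) veneers that capture the
   caller's core registers into a phase2_vrs and then call the corresponding
   __gnu_Unwind_* routine below.  */
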
_Unwind_Reason_Code
__gnu_Unwind_RaiseException (_Unwind_Control_Block *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_RaiseException (_Unwind_Control_Block * ucbp,
			     phase2_vrs * entry_vrs)
{
  phase1_vrs saved_vrs;
  _Unwind_Reason_Code pr_result;

  /* Set the pc to the call site.  */
  VRS_PC (entry_vrs) = VRS_RETURN(entry_vrs);

  /* Save the core registers.  */
  saved_vrs.core = entry_vrs->core;
  /* Set demand-save flags.  */
  saved_vrs.demand_save_flags = ~(_uw) 0;

  /* Unwind until we reach a propagation barrier.  */
  do
    {
      /* Find the entry for this routine.  */
      if (get_eit_entry (ucbp, VRS_PC (&saved_vrs)) != _URC_OK)
	return _URC_FAILURE;

      /* Call the pr to decide what to do.  */
      pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
	(_US_VIRTUAL_UNWIND_FRAME, ucbp, (void *) &saved_vrs);
    }
  while (pr_result == _URC_CONTINUE_UNWIND);

  /* We've unwound as far as we want to go, so restore the original
     register state.  */
  restore_non_core_regs (&saved_vrs);
  if (pr_result != _URC_HANDLER_FOUND)
    {
      /* Some sort of failure has occurred in the pr and probably the
	 pr returned _URC_FAILURE.  */
      return _URC_FAILURE;
    }

  unwind_phase2 (ucbp, entry_vrs);
}

/* Perform forced unwinding.  UCBP is the exception being propagated,
   STOP_FN and STOP_ARG are the stop function and its argument, and
   ENTRY_VRS is the register state on entry to _Unwind_ForcedUnwind.  */
_Unwind_Reason_Code
__gnu_Unwind_ForcedUnwind (_Unwind_Control_Block *,
			   _Unwind_Stop_Fn, void *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_ForcedUnwind (_Unwind_Control_Block *ucbp,
			   _Unwind_Stop_Fn stop_fn, void *stop_arg,
			   phase2_vrs *entry_vrs)
{
  UCB_FORCED_STOP_FN (ucbp) = (_uw) stop_fn;
  UCB_FORCED_STOP_ARG (ucbp) = (_uw) stop_arg;

  /* Set the pc to the call site.  */
  VRS_PC (entry_vrs) = VRS_RETURN(entry_vrs);

  return unwind_phase2_forced (ucbp, entry_vrs, 0);
}

/* Resume unwinding after a cleanup has been run.  UCBP is the exception
   being thrown and ENTRY_VRS is the register state on entry to
   _Unwind_Resume.  */
_Unwind_Reason_Code
__gnu_Unwind_Resume (_Unwind_Control_Block *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_Resume (_Unwind_Control_Block * ucbp, phase2_vrs * entry_vrs)
{
  _Unwind_Reason_Code pr_result;

  /* Recover the saved address.  */
  VRS_PC (entry_vrs) = UCB_SAVED_CALLSITE_ADDR (ucbp);

  if (UCB_FORCED_STOP_FN (ucbp))
    {
      unwind_phase2_forced (ucbp, entry_vrs, 1);

      /* We can't return failure at this point.  */
      abort ();
    }

  /* Call the cached PR.  */
  pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
	(_US_UNWIND_FRAME_RESUME, ucbp, (_Unwind_Context *) entry_vrs);

  switch (pr_result)
    {
    case _URC_INSTALL_CONTEXT:
      /* Upload the registers to enter the landing pad.  */
      uw_restore_core_regs (entry_vrs, &entry_vrs->core);

    case _URC_CONTINUE_UNWIND:
      /* Continue unwinding the next frame.  */
      unwind_phase2 (ucbp, entry_vrs);

    default:
      abort ();
    }
}

_Unwind_Reason_Code
__gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block * ucbp,
				phase2_vrs * entry_vrs)
{
  if (!UCB_FORCED_STOP_FN (ucbp))
    return __gnu_Unwind_RaiseException (ucbp, entry_vrs);

  /* Set the pc to the call site.  */
  VRS_PC (entry_vrs) = VRS_RETURN (entry_vrs);
  /* Continue unwinding the next frame.  */
  return unwind_phase2_forced (ucbp, entry_vrs, 0);
}

/* Clean up an exception object when unwinding is complete.  */
void
_Unwind_Complete (_Unwind_Control_Block * ucbp __attribute__((unused)))
{
}


/* Free an exception.  */

void
_Unwind_DeleteException (_Unwind_Exception * exc)
{
  if (exc->exception_cleanup)
    (*exc->exception_cleanup) (_URC_FOREIGN_EXCEPTION_CAUGHT, exc);
}


/* Perform stack backtrace through unwind data.  */
_Unwind_Reason_Code
__gnu_Unwind_Backtrace(_Unwind_Trace_Fn trace, void * trace_argument,
		       phase2_vrs * entry_vrs);
_Unwind_Reason_Code
__gnu_Unwind_Backtrace(_Unwind_Trace_Fn trace, void * trace_argument,
		       phase2_vrs * entry_vrs)
{
  phase1_vrs saved_vrs;
  _Unwind_Reason_Code code;

  _Unwind_Control_Block ucb;
  _Unwind_Control_Block *ucbp = &ucb;

  /* Set the pc to the call site.  */
  VRS_PC (entry_vrs) = VRS_RETURN (entry_vrs);

  /* Save the core registers.  */
  saved_vrs.core = entry_vrs->core;
  /* Set demand-save flags.  */
  saved_vrs.demand_save_flags = ~(_uw) 0;

  do
    {
      /* Find the entry for this routine.  */
      if (get_eit_entry (ucbp, VRS_PC (&saved_vrs)) != _URC_OK)
	{
	  code = _URC_FAILURE;
	  break;
	}

      /* The dwarf unwinder assumes the context structure holds things
	 like the function and LSDA pointers.  The ARM implementation
	 caches these in the exception header (UCB).  To avoid
	 rewriting everything we make the virtual IP register point at
	 the UCB.  */
      _Unwind_SetGR((_Unwind_Context *)&saved_vrs, UNWIND_POINTER_REG, (_Unwind_Ptr) ucbp);

      /* Call trace function.  */
      if ((*trace) ((_Unwind_Context *) &saved_vrs, trace_argument)
	  != _URC_NO_REASON)
	{
	  code = _URC_FAILURE;
	  break;
	}

      /* Call the pr to decide what to do.  */
      code = ((personality_routine) UCB_PR_ADDR (ucbp))
	(_US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND,
	 ucbp, (void *) &saved_vrs);
    }
  while (code != _URC_END_OF_STACK
	 && code != _URC_FAILURE);

  restore_non_core_regs (&saved_vrs);
  return code;
}
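
/* A client walks the stack through the standard _Unwind_Backtrace entry
   point, which is assumed to forward to __gnu_Unwind_Backtrace above.  A
   minimal sketch of a trace callback (the printf call, frame counter and
   call to _Unwind_Backtrace below are illustrative, not part of this file):

     static _Unwind_Reason_Code
     print_frame (_Unwind_Context *context, void *arg)
     {
       int *depth = (int *) arg;
       // _Unwind_GetIP recovers the code address recorded for this frame.
       printf ("#%d pc=%p\n", (*depth)++, (void *) _Unwind_GetIP (context));
       return _URC_NO_REASON;   // keep unwinding
     }

     int depth = 0;
     _Unwind_Backtrace (print_frame, &depth);
*/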


/* Common implementation for ARM ABI defined personality routines.
   ID is the index of the personality routine, other arguments are as defined
   by __aeabi_unwind_cpp_pr{0,1,2}.  */

static _Unwind_Reason_Code
__gnu_unwind_pr_common (_Unwind_State state,
			_Unwind_Control_Block *ucbp,
			_Unwind_Context *context,
			int id)
{
  __gnu_unwind_state uws;
  _uw *data;
  _uw offset;
  _uw len;
  _uw rtti_count;
  int phase2_call_unexpected_after_unwind = 0;
  int in_range = 0;
  int forced_unwind = state & _US_FORCE_UNWIND;

  state &= _US_ACTION_MASK;

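  /* The first word of the EHT holds the personality selector and the start
     of the unwind data: for __aeabi_unwind_cpp_pr0 (ID 0) its low three
     bytes are unwind opcodes; for pr1 and pr2, bits 16-23 give the number
     of additional opcode words and the low two bytes begin the opcodes.
     The decoding below mirrors that layout.  */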
  data = (_uw *) ucbp->pr_cache.ehtp;
  uws.data = *(data++);
  uws.next = data;
  if (id == 0)
    {
      uws.data <<= 8;
      uws.words_left = 0;
      uws.bytes_left = 3;
    }
  else if (id < 3)
    {
      uws.words_left = (uws.data >> 16) & 0xff;
      uws.data <<= 16;
      uws.bytes_left = 2;
      data += uws.words_left;
    }

  /* Restore the saved pointer.  */
  if (state == _US_UNWIND_FRAME_RESUME)
    data = (_uw *) ucbp->cleanup_cache.bitpattern[0];

  if ((ucbp->pr_cache.additional & 1) == 0)
    {
      /* Process descriptors.  */
      while (*data)
	{
	  _uw addr;
	  _uw fnstart;

	  if (id == 2)
	    {
	      len = ((EHT32 *) data)->length;
	      offset = ((EHT32 *) data)->offset;
	      data += 2;
	    }
	  else
	    {
	      len = ((EHT16 *) data)->length;
	      offset = ((EHT16 *) data)->offset;
	      data++;
	    }

	  fnstart = ucbp->pr_cache.fnstart + (offset & ~1);
	  addr = _Unwind_GetGR (context, R_PC);
	  in_range = (fnstart <= addr && addr < fnstart + (len & ~1));

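	  /* The low bits of OFFSET and LEN encode the descriptor kind: the
	     switch below dispatches on ((offset & 1) << 1) | (len & 1),
	     where 0 is a cleanup, 1 a catch handler and 2 an exception
	     specification.  */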
	  switch (((offset & 1) << 1) | (len & 1))
	    {
	    case 0:
	      /* Cleanup.  */
	      if (state != _US_VIRTUAL_UNWIND_FRAME
		  && in_range)
		{
		  /* Cleanup in range, and we are running cleanups.  */
		  _uw lp;

		  /* Landing pad address is 31-bit pc-relative offset.  */
		  lp = selfrel_offset31 (data);
		  data++;
		  /* Save the exception data pointer.  */
		  ucbp->cleanup_cache.bitpattern[0] = (_uw) data;
		  if (!__cxa_begin_cleanup (ucbp))
		    return _URC_FAILURE;
		  /* Setup the VRS to enter the landing pad.  */
		  _Unwind_SetGR (context, R_PC, lp);
		  return _URC_INSTALL_CONTEXT;
		}
	      /* Cleanup not in range, or we are in stage 1.  */
	      data++;
	      break;

	    case 1:
	      /* Catch handler.  */
	      if (state == _US_VIRTUAL_UNWIND_FRAME)
		{
		  if (in_range)
		    {
		      /* Check for a barrier.  */
		      _uw rtti;
		      bool is_reference = (data[0] & uint32_highbit) != 0;
		      void *matched;
		      enum __cxa_type_match_result match_type;

		      /* Check for no-throw areas.  */
		      if (data[1] == (_uw) -2)
			return _URC_FAILURE;

		      /* The thrown object immediately follows the ECB.  */
		      matched = (void *)(ucbp + 1);
		      if (data[1] != (_uw) -1)
			{
			  /* Match a catch specification.  */
			  rtti = _Unwind_decode_typeinfo_ptr (0,
							      (_uw) &data[1]);
			  match_type = __cxa_type_match (ucbp,
							 (type_info *) rtti,
							 is_reference,
							 &matched);
			}
		      else
			match_type = ctm_succeeded;

		      if (match_type)
			{
			  ucbp->barrier_cache.sp =
			    _Unwind_GetGR (context, R_SP);
			  // ctm_succeeded_with_ptr_to_base really
			  // means _c_t_m indirected the pointer
			  // object.  We have to reconstruct the
			  // additional pointer layer by using a temporary.
			  if (match_type == ctm_succeeded_with_ptr_to_base)
			    {
			      ucbp->barrier_cache.bitpattern[2]
				= (_uw) matched;
			      ucbp->barrier_cache.bitpattern[0]
				= (_uw) &ucbp->barrier_cache.bitpattern[2];
			    }
			  else
			    ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
			  ucbp->barrier_cache.bitpattern[1] = (_uw) data;
			  return _URC_HANDLER_FOUND;
			}
		    }
		  /* Handler out of range, or not matched.  */
		}
	      else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
		       && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
		{
		  /* Matched a previous propagation barrier.  */
		  _uw lp;

		  /* Setup for entry to the handler.  */
		  lp = selfrel_offset31 (data);
		  _Unwind_SetGR (context, R_PC, lp);
		  _Unwind_SetGR (context, 0, (_uw) ucbp);
		  return _URC_INSTALL_CONTEXT;
		}
	      /* Catch handler not matched.  Advance to the next descriptor.  */
	      data += 2;
	      break;

	    case 2:
	      rtti_count = data[0] & 0x7fffffff;
	      /* Exception specification.  */
	      if (state == _US_VIRTUAL_UNWIND_FRAME)
		{
		  if (in_range && (!forced_unwind || !rtti_count))
		    {
		      /* Match against the exception specification.  */
		      _uw i;
		      _uw rtti;
		      void *matched;

		      for (i = 0; i < rtti_count; i++)
			{
			  matched = (void *)(ucbp + 1);
			  rtti = _Unwind_decode_typeinfo_ptr (0,
			      (_uw) &data[i + 1]);
			  if (__cxa_type_match (ucbp, (type_info *) rtti, 0,
						&matched))
			    break;
			}

		      if (i == rtti_count)
			{
			  /* Exception does not match the spec.  */
			  ucbp->barrier_cache.sp =
			    _Unwind_GetGR (context, R_SP);
			  ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
			  ucbp->barrier_cache.bitpattern[1] = (_uw) data;
			  return _URC_HANDLER_FOUND;
			}
		    }
		  /* Handler out of range, or exception is permitted.  */
		}
	      else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
		       && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
		{
		  /* Matched a previous propagation barrier.  */
		  _uw lp;
		  /* Record the RTTI list for __cxa_call_unexpected.  */
		  ucbp->barrier_cache.bitpattern[1] = rtti_count;
		  ucbp->barrier_cache.bitpattern[2] = 0;
		  ucbp->barrier_cache.bitpattern[3] = 4;
		  ucbp->barrier_cache.bitpattern[4] = (_uw) &data[1];

		  if (data[0] & uint32_highbit)
		    {
		      data += rtti_count + 1;
		      /* Setup for entry to the handler.  */
		      lp = selfrel_offset31 (data);
		      data++;
		      _Unwind_SetGR (context, R_PC, lp);
		      _Unwind_SetGR (context, 0, (_uw) ucbp);
		      return _URC_INSTALL_CONTEXT;
		    }
		  else
		    phase2_call_unexpected_after_unwind = 1;
		}
	      if (data[0] & uint32_highbit)
		data++;
	      data += rtti_count + 1;
	      break;

	    default:
	      /* Should never happen.  */
	      return _URC_FAILURE;
	    }
	  /* Finished processing this descriptor.  */
	}
    }

  if (id >= 3)
    {
      /* 24-bit encoding.  */
      if (__gnu_unwind_24bit (context, uws.data, id == 4) != _URC_OK)
	return _URC_FAILURE;
    }
  else
    {
      if (__gnu_unwind_execute (context, &uws) != _URC_OK)
	return _URC_FAILURE;
    }

  if (phase2_call_unexpected_after_unwind)
    {
      /* Enter __cxa_call_unexpected as if called from the call site.  */
      _Unwind_SetGR (context, R_LR, _Unwind_GetGR (context, R_PC));
      _Unwind_SetGR (context, R_PC, (_uw) &__cxa_call_unexpected);
      return _URC_INSTALL_CONTEXT;
    }

  return _URC_CONTINUE_UNWIND;
}