xref: /netbsd/external/gpl3/gcc/dist/gcc/dwarf2cfi.cc (revision f0fbc68b)
1 /* Dwarf2 Call Frame Information helper routines.
2    Copyright (C) 1992-2022 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "target.h"
24 #include "function.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tree-pass.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "emit-rtl.h"
31 #include "stor-layout.h"
32 #include "cfgbuild.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "common/common-target.h"
36 
37 #include "except.h"		/* expand_builtin_dwarf_sp_column */
38 #include "profile-count.h"	/* For expr.h */
39 #include "expr.h"		/* init_return_column_size */
40 #include "output.h"		/* asm_out_file */
41 #include "debug.h"		/* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
42 #include "flags.h"		/* dwarf_debuginfo_p */
43 
44 /* ??? Poison these here until it can be done generically.  They've been
45    totally replaced in this file; make sure it stays that way.  */
46 #undef DWARF2_UNWIND_INFO
47 #undef DWARF2_FRAME_INFO
48 #if (GCC_VERSION >= 3000)
49  #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
50 #endif
51 
52 #ifndef INCOMING_RETURN_ADDR_RTX
53 #define INCOMING_RETURN_ADDR_RTX  (gcc_unreachable (), NULL_RTX)
54 #endif
55 
56 #ifndef DEFAULT_INCOMING_FRAME_SP_OFFSET
57 #define DEFAULT_INCOMING_FRAME_SP_OFFSET INCOMING_FRAME_SP_OFFSET
58 #endif
59 
60 /* A collected description of an entire row of the abstract CFI table.  */
61 struct GTY(()) dw_cfi_row
62 {
63   /* The expression that computes the CFA, expressed in two different ways.
64      The CFA member for the simple cases, and the full CFI expression for
65      the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
66   dw_cfa_location cfa;
67   dw_cfi_ref cfa_cfi;
68 
69   /* The expressions for any register column that is saved.  */
70   cfi_vec reg_save;
71 
72   /* True if the register window is saved.  */
73   bool window_save;
74 
75   /* True if the return address is in a mangled state.  */
76   bool ra_mangled;
77 };
78 
79 /* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
80 struct GTY(()) reg_saved_in_data {
  /* The register as the caller knows it.  */
81   rtx orig_reg;
  /* The register in which its value has been saved.  */
82   rtx saved_in_reg;
83 };
84 
85 
86 /* Since we no longer have a proper CFG, we're going to create a facsimile
87    of one on the fly while processing the frame-related insns.
88 
89    We create dw_trace_info structures for each extended basic block beginning
90    and ending at a "save point".  Save points are labels, barriers, certain
91    notes, and of course the beginning and end of the function.
92 
93    As we encounter control transfer insns, we propagate the "current"
94    row state across the edges to the starts of traces.  When checking is
95    enabled, we validate that we propagate the same data from all sources.
96 
97    All traces are members of the TRACE_INFO array, in the order in which
98    they appear in the instruction stream.
99 
100    All save points are present in the TRACE_INDEX hash, mapping the insn
101    starting a trace to the dw_trace_info describing the trace.  */
102 
103 struct dw_trace_info
104 {
105   /* The insn that begins the trace.  */
106   rtx_insn *head;
107 
108   /* The row state at the beginning and end of the trace.  */
109   dw_cfi_row *beg_row, *end_row;
110 
111   /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
112      while scanning insns.  However, the args_size value is irrelevant at
113      any point except can_throw_internal_p insns.  Therefore the "delay"
114      sizes are the values that must actually be emitted for this trace.  */
115   poly_int64_pod beg_true_args_size, end_true_args_size;
116   poly_int64_pod beg_delay_args_size, end_delay_args_size;
117 
118   /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
119   rtx_insn *eh_head;
120 
121   /* The following variables contain data used in interpreting frame related
122      expressions.  These are not part of the "real" row state as defined by
123      Dwarf, but it seems like they need to be propagated into a trace in case
124      frame related expressions have been sunk.  */
125   /* ??? This seems fragile.  These variables are fragments of a larger
126      expression.  If we do not keep the entire expression together, we risk
127      not being able to put it together properly.  Consider forcing targets
128      to generate self-contained expressions and dropping all of the magic
129      interpretation code in this file.  Or at least refusing to shrink wrap
130      any frame related insn that doesn't contain a complete expression.  */
131 
132   /* The register used for saving registers to the stack, and its offset
133      from the CFA.  */
134   dw_cfa_location cfa_store;
135 
136   /* A temporary register holding an integral value used in adjusting SP
137      or setting up the store_reg.  The "offset" field holds the integer
138      value, not an offset.  */
139   dw_cfa_location cfa_temp;
140 
141   /* A set of registers saved in other registers.  This is the inverse of
142      the row->reg_save info, if the entry is a DW_CFA_register.  This is
143      implemented as a flat array because it normally contains zero or 1
144      entry, depending on the target.  IA-64 is the big spender here, using
145      a maximum of 5 entries.  */
146   vec<reg_saved_in_data> regs_saved_in_regs;
147 
148   /* An identifier for this trace.  Used only for debugging dumps.  */
149   unsigned id;
150 
151   /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
152   bool switch_sections;
153 
154   /* True if we've seen different values incoming to beg_true_args_size.  */
155   bool args_size_undefined;
156 
157   /* True if we've seen an insn with a REG_ARGS_SIZE note before EH_HEAD.  */
158   bool args_size_defined_for_eh;
159 };
160 
161 
162 /* Hashtable helpers.  */
163 
164 struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
165 {
  /* Traces are hashed and compared by the insn that starts them.  */
166   static inline hashval_t hash (const dw_trace_info *);
167   static inline bool equal (const dw_trace_info *, const dw_trace_info *);
168 };
169 
170 inline hashval_t
hash(const dw_trace_info * ti)171 trace_info_hasher::hash (const dw_trace_info *ti)
172 {
173   return INSN_UID (ti->head);
174 }
175 
176 inline bool
equal(const dw_trace_info * a,const dw_trace_info * b)177 trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
178 {
179   return a->head == b->head;
180 }
181 
182 
183 /* The variables making up the pseudo-cfg, as described above.  */
184 static vec<dw_trace_info> trace_info;
185 static vec<dw_trace_info *> trace_work_list;
186 static hash_table<trace_info_hasher> *trace_index;
187 
188 /* A vector of call frame insns for the CIE.  */
189 cfi_vec cie_cfi_vec;
190 
191 /* The state of the first row of the FDE table, which includes the
192    state provided by the CIE.  */
193 static GTY(()) dw_cfi_row *cie_cfi_row;
194 
/* The return-address save recorded by the CIE, if any — presumably used
   to recognize/restore the return column; confirm against later users.  */
195 static GTY(()) reg_saved_in_data *cie_return_save;
196 
/* Source of numbers for the LCFI labels made by dwarf2out_cfi_label.  */
197 static GTY(()) unsigned long dwarf2out_cfi_label_num;
198 
199 /* The insn after which a new CFI note should be emitted.  */
200 static rtx_insn *add_cfi_insn;
201 
202 /* When non-null, add_cfi will add the CFI to this vector.  */
203 static cfi_vec *add_cfi_vec;
204 
205 /* The current instruction trace.  */
206 static dw_trace_info *cur_trace;
207 
208 /* The current, i.e. most recently generated, row of the CFI table.  */
209 static dw_cfi_row *cur_row;
210 
211 /* A copy of the current CFA, for use during the processing of a
212    single insn.  */
213 static dw_cfa_location *cur_cfa;
214 
215 /* We delay emitting a register save until either (a) we reach the end
216    of the prologue or (b) the register is clobbered.  This clusters
217    register saves so that there are fewer pc advances.  */
218 
219 struct queued_reg_save {
  /* The register being saved.  */
220   rtx reg;
  /* If non-null, the register it is saved into; otherwise the save is
     to memory at CFA_OFFSET (see reg_save below) — TODO confirm.  */
221   rtx saved_reg;
222   poly_int64_pod cfa_offset;
223 };
224 
225 
226 static vec<queued_reg_save> queued_reg_saves;
227 
228 /* True if any CFI directives were emitted at the current insn.  */
229 static bool any_cfis_emitted;
230 
231 /* Short-hand for commonly used register numbers.  */
232 static struct cfa_reg dw_stack_pointer_regnum;
233 static struct cfa_reg dw_frame_pointer_regnum;
234 
235 /* Hook used by __throw.  */
236 
237 rtx
expand_builtin_dwarf_sp_column(void)238 expand_builtin_dwarf_sp_column (void)
239 {
240   unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
241   return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
242 }
243 
244 /* MEM is a memory reference for the register size table, each element of
245    which has mode MODE.  Initialize column C as a return address column.  */
246 
247 static void
init_return_column_size(scalar_int_mode mode,rtx mem,unsigned int c)248 init_return_column_size (scalar_int_mode mode, rtx mem, unsigned int c)
249 {
250   HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
251   HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
252   emit_move_insn (adjust_address (mem, mode, offset),
253 		  gen_int_mode (size, mode));
254 }
255 
256 /* Data structure used by expand_builtin_init_dwarf_reg_sizes and
257    init_one_dwarf_reg_size to communicate on what has been done by the
258    latter.  */
259 
260 struct init_one_dwarf_reg_state
261 {
262   /* Whether the dwarf return column was initialized.  */
263   bool wrote_return_column;
264 
265   /* For each hard register REGNO, whether init_one_dwarf_reg_size
266      was given REGNO to process already.  */
267   bool processed_regno [FIRST_PSEUDO_REGISTER];
268 
269 };
270 
271 /* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
272    initialize the dwarf register size table entry corresponding to register
273    REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
274    use for the size entry to initialize, and INIT_STATE is the communication
275    datastructure conveying what we're doing to our caller.  */
276 
277 static
init_one_dwarf_reg_size(int regno,machine_mode regmode,rtx table,machine_mode slotmode,init_one_dwarf_reg_state * init_state)278 void init_one_dwarf_reg_size (int regno, machine_mode regmode,
279 			      rtx table, machine_mode slotmode,
280 			      init_one_dwarf_reg_state *init_state)
281 {
282   const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
283   const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
284   const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);
285 
286   poly_int64 slotoffset = dcol * GET_MODE_SIZE (slotmode);
287   poly_int64 regsize = GET_MODE_SIZE (regmode);
288 
289   init_state->processed_regno[regno] = true;
290 
291   if (rnum >= DWARF_FRAME_REGISTERS)
292     return;
293 
294   if (dnum == DWARF_FRAME_RETURN_COLUMN)
295     {
296       if (regmode == VOIDmode)
297 	return;
298       init_state->wrote_return_column = true;
299     }
300 
301   /* ??? When is this true?  Should it be a test based on DCOL instead?  */
302   if (maybe_lt (slotoffset, 0))
303     return;
304 
305   emit_move_insn (adjust_address (table, slotmode, slotoffset),
306 		  gen_int_mode (regsize, slotmode));
307 }
308 
309 /* Generate code to initialize the dwarf register size table located
310    at the provided ADDRESS.  */
311 
312 void
expand_builtin_init_dwarf_reg_sizes(tree address)313 expand_builtin_init_dwarf_reg_sizes (tree address)
314 {
315   unsigned int i;
316   scalar_int_mode mode = SCALAR_INT_TYPE_MODE (char_type_node);
317   rtx addr = expand_normal (address);
318   rtx mem = gen_rtx_MEM (BLKmode, addr);
319 
320   init_one_dwarf_reg_state init_state;
321 
322   memset ((char *)&init_state, 0, sizeof (init_state));
323 
324   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
325     {
326       machine_mode save_mode;
327       rtx span;
328 
329       /* No point in processing a register multiple times.  This could happen
330 	 with register spans, e.g. when a reg is first processed as a piece of
331 	 a span, then as a register on its own later on.  */
332 
333       if (init_state.processed_regno[i])
334 	continue;
335 
336       save_mode = targetm.dwarf_frame_reg_mode (i);
337       span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));
338 
339       if (!span)
340 	init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
341       else
342 	{
343 	  for (int si = 0; si < XVECLEN (span, 0); si++)
344 	    {
345 	      rtx reg = XVECEXP (span, 0, si);
346 
347 	      init_one_dwarf_reg_size
348 		(REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
349 	    }
350 	}
351     }
352 
353   if (!init_state.wrote_return_column)
354     init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
355 
356 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
357   init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
358 #endif
359 
360   targetm.init_dwarf_reg_sizes_extra (address);
361 }
362 
363 
364 static dw_trace_info *
get_trace_info(rtx_insn * insn)365 get_trace_info (rtx_insn *insn)
366 {
367   dw_trace_info dummy;
368   dummy.head = insn;
369   return trace_index->find_with_hash (&dummy, INSN_UID (insn));
370 }
371 
372 static bool
save_point_p(rtx_insn * insn)373 save_point_p (rtx_insn *insn)
374 {
375   /* Labels, except those that are really jump tables.  */
376   if (LABEL_P (insn))
377     return inside_basic_block_p (insn);
378 
379   /* We split traces at the prologue/epilogue notes because those
380      are points at which the unwind info is usually stable.  This
381      makes it easier to find spots with identical unwind info so
382      that we can use remember/restore_state opcodes.  */
383   if (NOTE_P (insn))
384     switch (NOTE_KIND (insn))
385       {
386       case NOTE_INSN_PROLOGUE_END:
387       case NOTE_INSN_EPILOGUE_BEG:
388 	return true;
389       }
390 
391   return false;
392 }
393 
394 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder.  */
395 
396 static inline HOST_WIDE_INT
div_data_align(HOST_WIDE_INT off)397 div_data_align (HOST_WIDE_INT off)
398 {
399   HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
400   gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
401   return r;
402 }
403 
404 /* Return true if we need a signed version of a given opcode
405    (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended).  */
406 
407 static inline bool
need_data_align_sf_opcode(HOST_WIDE_INT off)408 need_data_align_sf_opcode (HOST_WIDE_INT off)
409 {
410   return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
411 }
412 
413 /* Return a pointer to a newly allocated Call Frame Instruction.  */
414 
415 static inline dw_cfi_ref
new_cfi(void)416 new_cfi (void)
417 {
418   dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();
419 
420   cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
421   cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
422 
423   return cfi;
424 }
425 
426 /* Return a newly allocated CFI row, with no defined data.  */
427 
428 static dw_cfi_row *
new_cfi_row(void)429 new_cfi_row (void)
430 {
431   dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();
432 
433   row->cfa.reg.set_by_dwreg (INVALID_REGNUM);
434 
435   return row;
436 }
437 
438 /* Return a copy of an existing CFI row.  */
439 
440 static dw_cfi_row *
copy_cfi_row(dw_cfi_row * src)441 copy_cfi_row (dw_cfi_row *src)
442 {
443   dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
444 
445   *dst = *src;
446   dst->reg_save = vec_safe_copy (src->reg_save);
447 
448   return dst;
449 }
450 
451 /* Return a copy of an existing CFA location.  */
452 
453 static dw_cfa_location *
copy_cfa(dw_cfa_location * src)454 copy_cfa (dw_cfa_location *src)
455 {
456   dw_cfa_location *dst = ggc_alloc<dw_cfa_location> ();
457   *dst = *src;
458   return dst;
459 }
460 
461 /* Generate a new label for the CFI info to refer to.  */
462 
463 static char *
dwarf2out_cfi_label(void)464 dwarf2out_cfi_label (void)
465 {
466   int num = dwarf2out_cfi_label_num++;
467   char label[20];
468 
469   ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
470 
471   return xstrdup (label);
472 }
473 
474 /* Add CFI either to the current insn stream or to a vector, or both.  */
475 
476 static void
add_cfi(dw_cfi_ref cfi)477 add_cfi (dw_cfi_ref cfi)
478 {
479   any_cfis_emitted = true;
480 
481   if (add_cfi_insn != NULL)
482     {
483       add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
484       NOTE_CFI (add_cfi_insn) = cfi;
485     }
486 
487   if (add_cfi_vec != NULL)
488     vec_safe_push (*add_cfi_vec, cfi);
489 }
490 
491 static void
add_cfi_args_size(poly_int64 size)492 add_cfi_args_size (poly_int64 size)
493 {
494   /* We don't yet have a representation for polynomial sizes.  */
495   HOST_WIDE_INT const_size = size.to_constant ();
496 
497   dw_cfi_ref cfi = new_cfi ();
498 
499   /* While we can occasionally have args_size < 0 internally, this state
500      should not persist at a point we actually need an opcode.  */
501   gcc_assert (const_size >= 0);
502 
503   cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
504   cfi->dw_cfi_oprnd1.dw_cfi_offset = const_size;
505 
506   add_cfi (cfi);
507 }
508 
509 static void
add_cfi_restore(unsigned reg)510 add_cfi_restore (unsigned reg)
511 {
512   dw_cfi_ref cfi = new_cfi ();
513 
514   cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
515   cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
516 
517   add_cfi (cfi);
518 }
519 
520 /* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
521    that the register column is no longer saved.  */
522 
523 static void
update_row_reg_save(dw_cfi_row * row,unsigned column,dw_cfi_ref cfi)524 update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
525 {
526   if (vec_safe_length (row->reg_save) <= column)
527     vec_safe_grow_cleared (row->reg_save, column + 1, true);
528   (*row->reg_save)[column] = cfi;
529 }
530 
531 /* This function fills in aa dw_cfa_location structure from a dwarf location
532    descriptor sequence.  */
533 
534 static void
get_cfa_from_loc_descr(dw_cfa_location * cfa,struct dw_loc_descr_node * loc)535 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
536 {
537   struct dw_loc_descr_node *ptr;
538   cfa->offset = 0;
539   cfa->base_offset = 0;
540   cfa->indirect = 0;
541   cfa->reg.set_by_dwreg (INVALID_REGNUM);
542 
543   for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
544     {
545       enum dwarf_location_atom op = ptr->dw_loc_opc;
546 
547       switch (op)
548 	{
549 	case DW_OP_reg0:
550 	case DW_OP_reg1:
551 	case DW_OP_reg2:
552 	case DW_OP_reg3:
553 	case DW_OP_reg4:
554 	case DW_OP_reg5:
555 	case DW_OP_reg6:
556 	case DW_OP_reg7:
557 	case DW_OP_reg8:
558 	case DW_OP_reg9:
559 	case DW_OP_reg10:
560 	case DW_OP_reg11:
561 	case DW_OP_reg12:
562 	case DW_OP_reg13:
563 	case DW_OP_reg14:
564 	case DW_OP_reg15:
565 	case DW_OP_reg16:
566 	case DW_OP_reg17:
567 	case DW_OP_reg18:
568 	case DW_OP_reg19:
569 	case DW_OP_reg20:
570 	case DW_OP_reg21:
571 	case DW_OP_reg22:
572 	case DW_OP_reg23:
573 	case DW_OP_reg24:
574 	case DW_OP_reg25:
575 	case DW_OP_reg26:
576 	case DW_OP_reg27:
577 	case DW_OP_reg28:
578 	case DW_OP_reg29:
579 	case DW_OP_reg30:
580 	case DW_OP_reg31:
581 	  cfa->reg.set_by_dwreg (op - DW_OP_reg0);
582 	  break;
583 	case DW_OP_regx:
584 	  cfa->reg.set_by_dwreg (ptr->dw_loc_oprnd1.v.val_int);
585 	  break;
586 	case DW_OP_breg0:
587 	case DW_OP_breg1:
588 	case DW_OP_breg2:
589 	case DW_OP_breg3:
590 	case DW_OP_breg4:
591 	case DW_OP_breg5:
592 	case DW_OP_breg6:
593 	case DW_OP_breg7:
594 	case DW_OP_breg8:
595 	case DW_OP_breg9:
596 	case DW_OP_breg10:
597 	case DW_OP_breg11:
598 	case DW_OP_breg12:
599 	case DW_OP_breg13:
600 	case DW_OP_breg14:
601 	case DW_OP_breg15:
602 	case DW_OP_breg16:
603 	case DW_OP_breg17:
604 	case DW_OP_breg18:
605 	case DW_OP_breg19:
606 	case DW_OP_breg20:
607 	case DW_OP_breg21:
608 	case DW_OP_breg22:
609 	case DW_OP_breg23:
610 	case DW_OP_breg24:
611 	case DW_OP_breg25:
612 	case DW_OP_breg26:
613 	case DW_OP_breg27:
614 	case DW_OP_breg28:
615 	case DW_OP_breg29:
616 	case DW_OP_breg30:
617 	case DW_OP_breg31:
618 	case DW_OP_bregx:
619 	  if (cfa->reg.reg == INVALID_REGNUM)
620 	    {
621 	      unsigned regno
622 		= (op == DW_OP_bregx
623 		   ? ptr->dw_loc_oprnd1.v.val_int : op - DW_OP_breg0);
624 	      cfa->reg.set_by_dwreg (regno);
625 	      cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
626 	    }
627 	  else
628 	    {
629 	      /* Handle case when span can cover multiple registers.  We
630 		 only support the simple case of consecutive registers
631 		 all with the same size.  DWARF that we are dealing with
632 		 will look something like:
633 		 <DW_OP_bregx: (r49) 0; DW_OP_const1u: 32; DW_OP_shl;
634 		  DW_OP_bregx: (r48) 0; DW_OP_plus> */
635 
636 	      unsigned regno
637 		= (op == DW_OP_bregx
638 		   ? ptr->dw_loc_oprnd1.v.val_int : op - DW_OP_breg0);
639 	      gcc_assert (regno == cfa->reg.reg - 1);
640 	      cfa->reg.span++;
641 	      /* From all the consecutive registers used, we want to set
642 		 cfa->reg.reg to lower number register.  */
643 	      cfa->reg.reg = regno;
644 	      /* The offset was the shift value.  Use it to get the
645 		 span_width and then set it to 0.  */
646 	      cfa->reg.span_width = cfa->offset.to_constant () / 8;
647 	      cfa->offset = 0;
648 	    }
649 	  break;
650 	case DW_OP_deref:
651 	  cfa->indirect = 1;
652 	  break;
653 	case DW_OP_shl:
654 	  break;
655 	case DW_OP_lit0:
656 	case DW_OP_lit1:
657 	case DW_OP_lit2:
658 	case DW_OP_lit3:
659 	case DW_OP_lit4:
660 	case DW_OP_lit5:
661 	case DW_OP_lit6:
662 	case DW_OP_lit7:
663 	case DW_OP_lit8:
664 	case DW_OP_lit9:
665 	case DW_OP_lit10:
666 	case DW_OP_lit11:
667 	case DW_OP_lit12:
668 	case DW_OP_lit13:
669 	case DW_OP_lit14:
670 	case DW_OP_lit15:
671 	case DW_OP_lit16:
672 	case DW_OP_lit17:
673 	case DW_OP_lit18:
674 	case DW_OP_lit19:
675 	case DW_OP_lit20:
676 	case DW_OP_lit21:
677 	case DW_OP_lit22:
678 	case DW_OP_lit23:
679 	case DW_OP_lit24:
680 	case DW_OP_lit25:
681 	case DW_OP_lit26:
682 	case DW_OP_lit27:
683 	case DW_OP_lit28:
684 	case DW_OP_lit29:
685 	case DW_OP_lit30:
686 	case DW_OP_lit31:
687 	  gcc_assert (known_eq (cfa->offset, 0));
688 	  cfa->offset = op - DW_OP_lit0;
689 	  break;
690 	case DW_OP_const1u:
691 	case DW_OP_const1s:
692 	case DW_OP_const2u:
693 	case DW_OP_const2s:
694 	case DW_OP_const4s:
695 	case DW_OP_const8s:
696 	case DW_OP_constu:
697 	case DW_OP_consts:
698 	  gcc_assert (known_eq (cfa->offset, 0));
699 	  cfa->offset = ptr->dw_loc_oprnd1.v.val_int;
700 	  break;
701 	case DW_OP_minus:
702 	  cfa->offset = -cfa->offset;
703 	  break;
704 	case DW_OP_plus:
705 	  /* The offset is already in place.  */
706 	  break;
707 	case DW_OP_plus_uconst:
708 	  cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
709 	  break;
710 	default:
711 	  gcc_unreachable ();
712 	}
713     }
714 }
715 
716 /* Find the previous value for the CFA, iteratively.  CFI is the opcode
717    to interpret, *LOC will be updated as necessary, *REMEMBER is used for
718    one level of remember/restore state processing.  */
719 
720 void
lookup_cfa_1(dw_cfi_ref cfi,dw_cfa_location * loc,dw_cfa_location * remember)721 lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
722 {
723   switch (cfi->dw_cfi_opc)
724     {
725     case DW_CFA_def_cfa_offset:
726     case DW_CFA_def_cfa_offset_sf:
727       loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
728       break;
729     case DW_CFA_def_cfa_register:
730       loc->reg.set_by_dwreg (cfi->dw_cfi_oprnd1.dw_cfi_reg_num);
731       break;
732     case DW_CFA_def_cfa:
733     case DW_CFA_def_cfa_sf:
734       loc->reg.set_by_dwreg (cfi->dw_cfi_oprnd1.dw_cfi_reg_num);
735       loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
736       break;
737     case DW_CFA_def_cfa_expression:
738       if (cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc)
739 	*loc = *cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc;
740       else
741 	get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
742       break;
743 
744     case DW_CFA_remember_state:
745       gcc_assert (!remember->in_use);
746       *remember = *loc;
747       remember->in_use = 1;
748       break;
749     case DW_CFA_restore_state:
750       gcc_assert (remember->in_use);
751       *loc = *remember;
752       remember->in_use = 0;
753       break;
754 
755     default:
756       break;
757     }
758 }
759 
760 /* Determine if two dw_cfa_location structures define the same data.  */
761 
762 bool
cfa_equal_p(const dw_cfa_location * loc1,const dw_cfa_location * loc2)763 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
764 {
765   return (loc1->reg == loc2->reg
766 	  && known_eq (loc1->offset, loc2->offset)
767 	  && loc1->indirect == loc2->indirect
768 	  && (loc1->indirect == 0
769 	      || known_eq (loc1->base_offset, loc2->base_offset)));
770 }
771 
772 /* Determine if two CFI operands are identical.  */
773 
774 static bool
cfi_oprnd_equal_p(enum dw_cfi_oprnd_type t,dw_cfi_oprnd * a,dw_cfi_oprnd * b)775 cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
776 {
777   switch (t)
778     {
779     case dw_cfi_oprnd_unused:
780       return true;
781     case dw_cfi_oprnd_reg_num:
782       return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
783     case dw_cfi_oprnd_offset:
784       return a->dw_cfi_offset == b->dw_cfi_offset;
785     case dw_cfi_oprnd_addr:
786       return (a->dw_cfi_addr == b->dw_cfi_addr
787 	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
788     case dw_cfi_oprnd_loc:
789       return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
790     case dw_cfi_oprnd_cfa_loc:
791       /* If any of them is NULL, don't dereference either.  */
792       if (!a->dw_cfi_cfa_loc || !b->dw_cfi_cfa_loc)
793 	return a->dw_cfi_cfa_loc == b->dw_cfi_cfa_loc;
794       return cfa_equal_p (a->dw_cfi_cfa_loc, b->dw_cfi_cfa_loc);
795     }
796   gcc_unreachable ();
797 }
798 
799 /* Determine if two CFI entries are identical.  */
800 
801 static bool
cfi_equal_p(dw_cfi_ref a,dw_cfi_ref b)802 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
803 {
804   enum dwarf_call_frame_info opc;
805 
806   /* Make things easier for our callers, including missing operands.  */
807   if (a == b)
808     return true;
809   if (a == NULL || b == NULL)
810     return false;
811 
812   /* Obviously, the opcodes must match.  */
813   opc = a->dw_cfi_opc;
814   if (opc != b->dw_cfi_opc)
815     return false;
816 
817   /* Compare the two operands, re-using the type of the operands as
818      already exposed elsewhere.  */
819   return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
820 			     &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
821 	  && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
822 				&a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
823 }
824 
825 /* Determine if two CFI_ROW structures are identical.  */
826 
827 static bool
cfi_row_equal_p(dw_cfi_row * a,dw_cfi_row * b)828 cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
829 {
830   size_t i, n_a, n_b, n_max;
831 
832   if (a->cfa_cfi)
833     {
834       if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
835 	return false;
836     }
837   else if (!cfa_equal_p (&a->cfa, &b->cfa))
838     return false;
839 
840   n_a = vec_safe_length (a->reg_save);
841   n_b = vec_safe_length (b->reg_save);
842   n_max = MAX (n_a, n_b);
843 
844   for (i = 0; i < n_max; ++i)
845     {
846       dw_cfi_ref r_a = NULL, r_b = NULL;
847 
848       if (i < n_a)
849 	r_a = (*a->reg_save)[i];
850       if (i < n_b)
851 	r_b = (*b->reg_save)[i];
852 
853       if (!cfi_equal_p (r_a, r_b))
854         return false;
855     }
856 
857   if (a->window_save != b->window_save)
858     return false;
859 
860   if (a->ra_mangled != b->ra_mangled)
861     return false;
862 
863   return true;
864 }
865 
866 /* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
867    what opcode to emit.  Returns the CFI opcode to effect the change, or
868    NULL if NEW_CFA == OLD_CFA.  */
869 
870 static dw_cfi_ref
def_cfa_0(dw_cfa_location * old_cfa,dw_cfa_location * new_cfa)871 def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
872 {
873   dw_cfi_ref cfi;
874 
875   /* If nothing changed, no need to issue any call frame instructions.  */
876   if (cfa_equal_p (old_cfa, new_cfa))
877     return NULL;
878 
879   cfi = new_cfi ();
880 
881   HOST_WIDE_INT const_offset;
882   if (new_cfa->reg == old_cfa->reg
883       && new_cfa->reg.span == 1
884       && !new_cfa->indirect
885       && !old_cfa->indirect
886       && new_cfa->offset.is_constant (&const_offset))
887     {
888       /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
889 	 the CFA register did not change but the offset did.  The data
890 	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
891 	 in the assembler via the .cfi_def_cfa_offset directive.  */
892       if (const_offset < 0)
893 	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
894       else
895 	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
896       cfi->dw_cfi_oprnd1.dw_cfi_offset = const_offset;
897     }
898   else if (new_cfa->offset.is_constant ()
899 	   && known_eq (new_cfa->offset, old_cfa->offset)
900 	   && old_cfa->reg.reg != INVALID_REGNUM
901 	   && new_cfa->reg.span == 1
902 	   && !new_cfa->indirect
903 	   && !old_cfa->indirect)
904     {
905       /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
906 	 indicating the CFA register has changed to <register> but the
907 	 offset has not changed.  This requires the old CFA to have
908 	 been set as a register plus offset rather than a general
909 	 DW_CFA_def_cfa_expression.  */
910       cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
911       cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg.reg;
912     }
913   else if (new_cfa->indirect == 0
914 	   && new_cfa->offset.is_constant (&const_offset)
915 	   && new_cfa->reg.span == 1)
916     {
917       /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
918 	 indicating the CFA register has changed to <register> with
919 	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
920 	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
921 	 directive.  */
922       if (const_offset < 0)
923 	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
924       else
925 	cfi->dw_cfi_opc = DW_CFA_def_cfa;
926       cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg.reg;
927       cfi->dw_cfi_oprnd2.dw_cfi_offset = const_offset;
928     }
929   else
930     {
931       /* Construct a DW_CFA_def_cfa_expression instruction to
932 	 calculate the CFA using a full location expression since no
933 	 register-offset pair is available.  */
934       struct dw_loc_descr_node *loc_list;
935 
936       cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
937       loc_list = build_cfa_loc (new_cfa, 0);
938       cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
939       if (!new_cfa->offset.is_constant ()
940 	  || !new_cfa->base_offset.is_constant ())
941 	/* It's hard to reconstruct the CFA location for a polynomial
942 	   expression, so just cache it instead.  */
943 	cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc = copy_cfa (new_cfa);
944       else
945 	cfi->dw_cfi_oprnd2.dw_cfi_cfa_loc = NULL;
946     }
947 
948   return cfi;
949 }
950 
951 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */
952 
static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If register saves are currently tracked relative to this same
     register, keep cfa_store's offset in sync with the new CFA.  */
  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  /* def_cfa_0 returns NULL when NEW_CFA matches the row's current CFA,
     in which case there is nothing to emit or record.  */
  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      /* Cache the CFI only for a full expression CFA, which cannot be
	 reconstructed later from the row's reg/offset fields alone.  */
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
			  ? cfi : NULL);

      add_cfi (cfi);
    }
}
971 
972 /* Add the CFI for saving a register.  REG is the CFA column number.
973    If SREG is INVALID_REGISTER, the register is saved at OFFSET from the CFA;
974    otherwise it is saved in SREG.  */
975 
static void
reg_save (unsigned int reg, struct cfa_reg sreg, poly_int64 offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  if (sreg.reg == INVALID_REGNUM)
    {
      HOST_WIDE_INT const_offset;
      /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
      if (fde && fde->stack_realign)
	{
	  cfi->dw_cfi_opc = DW_CFA_expression;
	  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
	  cfi->dw_cfi_oprnd2.dw_cfi_loc
	    = build_cfa_aligned_loc (&cur_row->cfa, offset,
				     fde->stack_realignment);
	}
      else if (offset.is_constant (&const_offset))
	{
	  /* Constant offset from the CFA: pick the most compact opcode.
	     DW_CFA_offset packs the column into the opcode byte and so
	     only works for columns 0..63; the _sf form is needed when
	     the data-aligned offset would be negative.  */
	  if (need_data_align_sf_opcode (const_offset))
	    cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
	  else if (reg & ~0x3f)
	    cfi->dw_cfi_opc = DW_CFA_offset_extended;
	  else
	    cfi->dw_cfi_opc = DW_CFA_offset;
	  cfi->dw_cfi_oprnd2.dw_cfi_offset = const_offset;
	}
      else
	{
	  /* Polynomial offset: no simple opcode can express it, so fall
	     back to a full location expression.  */
	  cfi->dw_cfi_opc = DW_CFA_expression;
	  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
	  cfi->dw_cfi_oprnd2.dw_cfi_loc
	    = build_cfa_loc (&cur_row->cfa, offset);
	}
    }
  else if (sreg.reg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else if (sreg.span > 1)
    {
      /* The save register spans multiple DWARF columns; describe the
	 pieces with an expression.  */
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc = build_span_loc (sreg);
    }
  else
    {
      /* Plain register-saved-in-register case.  */
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg.reg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
1037 
1038 /* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
1039    and adjust data structures to match.  */
1040 
static void
notice_args_size (rtx_insn *insn)
{
  poly_int64 args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  /* A REG_ARGS_SIZE note seen before any EH insn in this trace means
     the args_size is well-defined for EH purposes at the trace head.  */
  if (!cur_trace->eh_head)
    cur_trace->args_size_defined_for_eh = true;

  args_size = get_args_size (note);
  delta = args_size - cur_trace->end_true_args_size;
  /* Nothing to do if the args_size did not actually change.  */
  if (known_eq (delta, 0))
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_cfa->offset += delta;
    }
}
1075 
1076 /* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
1077    data within the trace related to EH insns and args_size.  */
1078 
static void
notice_eh_throw (rtx_insn *insn)
{
  poly_int64 args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      /* First EH insn of the trace: record it along with the args_size
	 in effect at this point.  */
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (maybe_ne (cur_trace->end_delay_args_size, args_size))
    {
      /* The args_size changed since the previous EH insn; emit a CFI
	 so the unwinder sees the correct value here.  */
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
	 CFI note and insert there.  Given that the stack changed for the
	 args_size change, there *must* be such a note in between here and
	 the last eh insn.  */
      add_cfi_args_size (args_size);
    }
}
1100 
1101 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation.  */
1102 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
1103    used in places where rtl is prohibited.  */
1104 
1105 static inline unsigned
dwf_regno(const_rtx reg)1106 dwf_regno (const_rtx reg)
1107 {
1108   gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
1109   return DWARF_FRAME_REGNUM (REGNO (reg));
1110 }
1111 
1112 /* Like dwf_regno, but when the value can span multiple registers.  */
1113 
static struct cfa_reg
dwf_cfa_reg (rtx reg)
{
  struct cfa_reg result;

  /* Default: a single-column register with no span information.  */
  result.reg = dwf_regno (reg);
  result.span = 1;
  result.span_width = 0;

  /* Ask the target whether REG occupies several DWARF columns.  */
  rtx span = targetm.dwarf_register_span (reg);
  if (span)
    {
      /* We only support the simple case of consecutive registers all with the
	 same size.  */
      result.span = XVECLEN (span, 0);
      result.span_width = GET_MODE_SIZE (GET_MODE (XVECEXP (span, 0, 0)))
			  .to_constant ();

      if (CHECKING_P)
	{
	  /* Ensure that the above assumption is accurate.  */
	  for (unsigned int i = 0; i < result.span; i++)
	    {
	      gcc_assert (GET_MODE_SIZE (GET_MODE (XVECEXP (span, 0, i)))
			  .to_constant ()  == result.span_width);
	      gcc_assert (REG_P (XVECEXP (span, 0, i)));
	      gcc_assert (dwf_regno (XVECEXP (span, 0, i)) == result.reg + i);
	    }
	}
    }

  return result;
}
1147 
1148 /* More efficient comparisons that don't call targetm.dwarf_register_span
1149    unnecessarily.  These cfa_reg vs. rtx comparisons should be done at
1150    least for call-saved REGs that might not be CFA related (like stack
1151    pointer, hard frame pointer or DRAP registers are), in other cases it is
1152    just a compile time and memory optimization.  */
1153 
1154 static bool
operator ==(cfa_reg & cfa,rtx reg)1155 operator== (cfa_reg &cfa, rtx reg)
1156 {
1157   unsigned int regno = dwf_regno (reg);
1158   if (cfa.reg != regno)
1159     return false;
1160   struct cfa_reg other = dwf_cfa_reg (reg);
1161   return cfa == other;
1162 }
1163 
1164 static inline bool
operator !=(cfa_reg & cfa,rtx reg)1165 operator!= (cfa_reg &cfa, rtx reg)
1166 {
1167   return !(cfa == reg);
1168 }
1169 
1170 /* Compare X and Y for equivalence.  The inputs may be REGs or PC_RTX.  */
1171 
1172 static bool
compare_reg_or_pc(rtx x,rtx y)1173 compare_reg_or_pc (rtx x, rtx y)
1174 {
1175   if (REG_P (x) && REG_P (y))
1176     return REGNO (x) == REGNO (y);
1177   return x == y;
1178 }
1179 
1180 /* Record SRC as being saved in DEST.  DEST may be null to delete an
1181    existing entry.  SRC may be a register or PC_RTX.  */
1182 
static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* If SRC already has an entry, update it in place (or remove it when
     DEST is null).  unordered_remove is safe because we return
     immediately and never continue iterating.  */
  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  cur_trace->regs_saved_in_regs.unordered_remove (i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* Deleting an entry that does not exist is a no-op.  */
  if (dest == NULL)
    return;

  reg_saved_in_data e = {src, dest};
  cur_trace->regs_saved_in_regs.safe_push (e);
}
1205 
1206 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1207    SREG, or if SREG is NULL then it is saved at OFFSET to the CFA.  */
1208 
static void
queue_reg_save (rtx reg, rtx sreg, poly_int64 offset)
{
  queued_reg_save *q;
  queued_reg_save e = {reg, sreg, offset};
  size_t i;

  /* Duplicates waste space, but it's also necessary to remove them
     for correctness, since the queue gets output in reverse order.  */
  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    if (compare_reg_or_pc (q->reg, reg))
      {
	/* Overwrite the older entry so only the latest save wins.  */
	*q = e;
	return;
      }

  queued_reg_saves.safe_push (e);
}
1227 
1228 /* Output all the entries in QUEUED_REG_SAVES.  */
1229 
static void
dwarf2out_flush_queued_reg_saves (void)
{
  queued_reg_save *q;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    {
      unsigned int reg;
      struct cfa_reg sreg;

      /* Keep the register-saved-in-register map current first.  */
      record_reg_saved_in_reg (q->saved_reg, q->reg);

      /* PC_RTX stands for the return address column.  */
      if (q->reg == pc_rtx)
	reg = DWARF_FRAME_RETURN_COLUMN;
      else
        reg = dwf_regno (q->reg);
      /* A null saved_reg means the save is at OFFSET from the CFA.  */
      if (q->saved_reg)
	sreg = dwf_cfa_reg (q->saved_reg);
      else
	sreg.set_by_dwreg (INVALID_REGNUM);
      reg_save (reg, sreg, q->cfa_offset);
    }

  queued_reg_saves.truncate (0);
}
1256 
1257 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1258    location for?  Or, does it clobber a register which we've previously
1259    said that some other register is saved in, and for which we now
1260    have a new location for?  */
1261 
static bool
clobbers_queued_reg_save (const_rtx insn)
{
  queued_reg_save *q;
  size_t iq;

  FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
    {
      size_t ir;
      reg_saved_in_data *rir;

      /* INSN overwrites a register whose save is still queued.  */
      if (modified_in_p (q->reg, insn))
	return true;

      /* Or INSN overwrites the register that currently holds the
	 saved value of a queued register.  */
      FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
	if (compare_reg_or_pc (q->reg, rir->orig_reg)
	    && modified_in_p (rir->saved_in_reg, insn))
	  return true;
    }

  return false;
}
1284 
1285 /* What register, if any, is currently saved in REG?  */
1286 
1287 static rtx
reg_saved_in(rtx reg)1288 reg_saved_in (rtx reg)
1289 {
1290   unsigned int regn = REGNO (reg);
1291   queued_reg_save *q;
1292   reg_saved_in_data *rir;
1293   size_t i;
1294 
1295   FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1296     if (q->saved_reg && regn == REGNO (q->saved_reg))
1297       return q->reg;
1298 
1299   FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1300     if (regn == REGNO (rir->saved_in_reg))
1301       return rir->orig_reg;
1302 
1303   return NULL_RTX;
1304 }
1305 
1306 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.  */
1307 
static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  memset (cur_cfa, 0, sizeof (*cur_cfa));

  /* Peel constant offsets off PAT; a MEM wrapper indicates an
     indirect CFA with its own base offset.  */
  pat = strip_offset (pat, &cur_cfa->offset);
  if (MEM_P (pat))
    {
      cur_cfa->indirect = 1;
      pat = strip_offset (XEXP (pat, 0), &cur_cfa->base_offset);
    }
  /* ??? If this fails, we could be calling into the _loc functions to
     define a full expression.  So far no port does that.  */
  gcc_assert (REG_P (pat));
  cur_cfa->reg = dwf_cfa_reg (pat);
}
1324 
1325 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.  */
1326 
static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      /* DEST = CFA-reg + const: the new register is that much closer
	 to the CFA, so the recorded offset shrinks accordingly.  */
      gcc_assert (cur_cfa->reg == XEXP (src, 0));
      cur_cfa->offset -= rtx_to_poly_int64 (XEXP (src, 1));
      break;

    case REG:
      /* Plain copy: only the CFA register changes, the offset stays.  */
      break;

    default:
      gcc_unreachable ();
    }

  cur_cfa->reg = dwf_cfa_reg (dest);
  gcc_assert (cur_cfa->indirect == 0);
}
1353 
1354 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note.  */
1355 
1356 static void
dwarf2out_frame_debug_cfa_offset(rtx set)1357 dwarf2out_frame_debug_cfa_offset (rtx set)
1358 {
1359   poly_int64 offset;
1360   rtx src, addr, span;
1361   unsigned int sregno;
1362 
1363   src = XEXP (set, 1);
1364   addr = XEXP (set, 0);
1365   gcc_assert (MEM_P (addr));
1366   addr = XEXP (addr, 0);
1367 
1368   /* As documented, only consider extremely simple addresses.  */
1369   switch (GET_CODE (addr))
1370     {
1371     case REG:
1372       gcc_assert (cur_cfa->reg == addr);
1373       offset = -cur_cfa->offset;
1374       break;
1375     case PLUS:
1376       gcc_assert (cur_cfa->reg == XEXP (addr, 0));
1377       offset = rtx_to_poly_int64 (XEXP (addr, 1)) - cur_cfa->offset;
1378       break;
1379     default:
1380       gcc_unreachable ();
1381     }
1382 
1383   if (src == pc_rtx)
1384     {
1385       span = NULL;
1386       sregno = DWARF_FRAME_RETURN_COLUMN;
1387     }
1388   else
1389     {
1390       span = targetm.dwarf_register_span (src);
1391       sregno = dwf_regno (src);
1392     }
1393 
1394   /* ??? We'd like to use queue_reg_save, but we need to come up with
1395      a different flushing heuristic for epilogues.  */
1396   struct cfa_reg invalid;
1397   invalid.set_by_dwreg (INVALID_REGNUM);
1398   if (!span)
1399     reg_save (sregno, invalid, offset);
1400   else
1401     {
1402       /* We have a PARALLEL describing where the contents of SRC live.
1403    	 Adjust the offset for each piece of the PARALLEL.  */
1404       poly_int64 span_offset = offset;
1405 
1406       gcc_assert (GET_CODE (span) == PARALLEL);
1407 
1408       const int par_len = XVECLEN (span, 0);
1409       for (int par_index = 0; par_index < par_len; par_index++)
1410 	{
1411 	  rtx elem = XVECEXP (span, 0, par_index);
1412 	  sregno = dwf_regno (src);
1413 	  reg_save (sregno, invalid, span_offset);
1414 	  span_offset += GET_MODE_SIZE (GET_MODE (elem));
1415 	}
1416     }
1417 }
1418 
1419 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note.  */
1420 
static void
dwarf2out_frame_debug_cfa_register (rtx set)
{
  rtx src, dest;
  unsigned sregno;
  struct cfa_reg dregno;

  src = XEXP (set, 1);
  dest = XEXP (set, 0);

  /* Remember SRC's new home so later references find it there.  */
  record_reg_saved_in_reg (dest, src);
  /* PC_RTX stands for the return address column.  */
  if (src == pc_rtx)
    sregno = DWARF_FRAME_RETURN_COLUMN;
  else
    sregno = dwf_regno (src);

  dregno = dwf_cfa_reg (dest);

  /* ??? We'd like to use queue_reg_save, but we need to come up with
     a different flushing heuristic for epilogues.  */
  reg_save (sregno, dregno, 0);
}
1443 
1444 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note.  */
1445 
static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();
  unsigned regno;

  dest = SET_DEST (set);
  src = SET_SRC (set);

  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  /* Registers spanning multiple DWARF columns are not supported with
     this note.  */
  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  regno = dwf_regno (src);

  /* Describe the save location as a full DWARF expression built from
     the address of DEST.  */
  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
  update_row_reg_save (cur_row, regno, cfi);
}
1475 
1476 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
1477    note.  */
1478 
static void
dwarf2out_frame_debug_cfa_val_expression (rtx set)
{
  rtx dest = SET_DEST (set);
  gcc_assert (REG_P (dest));

  /* Registers spanning multiple DWARF columns are not supported with
     this note.  */
  rtx span = targetm.dwarf_register_span (dest);
  gcc_assert (!span);

  /* DW_CFA_val_expression records the register's saved *value* (rather
     than a save location) as a DWARF expression computed from SRC.  */
  rtx src = SET_SRC (set);
  dw_cfi_ref cfi = new_cfi ();
  cfi->dw_cfi_opc = DW_CFA_val_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (dest);
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (src, GET_MODE (src),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
  add_cfi (cfi);
  update_row_reg_save (cur_row, dwf_regno (dest), cfi);
}
1498 
1499 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note.  */
1500 
static void
dwarf2out_frame_debug_cfa_restore (rtx reg)
{
  gcc_assert (REG_P (reg));

  rtx span = targetm.dwarf_register_span (reg);
  if (!span)
    {
      unsigned int regno = dwf_regno (reg);
      add_cfi_restore (regno);
      /* A NULL entry marks the register as restored in the row.  */
      update_row_reg_save (cur_row, regno, NULL);
    }
  else
    {
      /* We have a PARALLEL describing where the contents of REG live.
	 Restore the register for each piece of the PARALLEL.  */
      gcc_assert (GET_CODE (span) == PARALLEL);

      const int par_len = XVECLEN (span, 0);
      for (int par_index = 0; par_index < par_len; par_index++)
	{
	  reg = XVECEXP (span, 0, par_index);
	  gcc_assert (REG_P (reg));
	  unsigned int regno = dwf_regno (reg);
	  add_cfi_restore (regno);
	  update_row_reg_save (cur_row, regno, NULL);
	}
    }
}
1530 
1531 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1532 
1533    ??? Perhaps we should note in the CIE where windows are saved (instead
1534    of assuming 0(cfa)) and what registers are in the window.  */
1535 
1536 static void
dwarf2out_frame_debug_cfa_window_save(void)1537 dwarf2out_frame_debug_cfa_window_save (void)
1538 {
1539   dw_cfi_ref cfi = new_cfi ();
1540 
1541   cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1542   add_cfi (cfi);
1543   cur_row->window_save = true;
1544 }
1545 
1546 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_TOGGLE_RA_MANGLE.
1547    Note: DW_CFA_GNU_window_save dwarf opcode is reused for toggling RA mangle
1548    state, this is a target specific operation on AArch64 and can only be used
1549    on other targets if they don't use the window save operation otherwise.  */
1550 
1551 static void
dwarf2out_frame_debug_cfa_toggle_ra_mangle(void)1552 dwarf2out_frame_debug_cfa_toggle_ra_mangle (void)
1553 {
1554   dw_cfi_ref cfi = new_cfi ();
1555 
1556   cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1557   add_cfi (cfi);
1558   cur_row->ra_mangled = !cur_row->ra_mangled;
1559 }
1560 
1561 /* Record call frame debugging information for an expression EXPR,
1562    which either sets SP or FP (adjusting how we calculate the frame
1563    address) or saves a register to the stack or another register.
1564    LABEL indicates the address of EXPR.
1565 
1566    This function encodes a state machine mapping rtxes to actions on
1567    cfa, cfa_store, and cfa_temp.reg.  We describe these rules so
1568    users need not read the source code.
1569 
1570   The High-Level Picture
1571 
1572   Changes in the register we use to calculate the CFA: Currently we
1573   assume that if you copy the CFA register into another register, we
1574   should take the other one as the new CFA register; this seems to
1575   work pretty well.  If it's wrong for some target, it's simple
1576   enough not to set RTX_FRAME_RELATED_P on the insn in question.
1577 
1578   Changes in the register we use for saving registers to the stack:
1579   This is usually SP, but not always.  Again, we deduce that if you
1580   copy SP into another register (and SP is not the CFA register),
1581   then the new register is the one we will be using for register
1582   saves.  This also seems to work.
1583 
1584   Register saves: There's not much guesswork about this one; if
1585   RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1586   register save, and the register used to calculate the destination
1587   had better be the one we think we're using for this purpose.
1588   It's also assumed that a copy from a call-saved register to another
1589   register is saving that register if RTX_FRAME_RELATED_P is set on
1590   that instruction.  If the copy is from a call-saved register to
1591   the *same* register, that means that the register is now the same
1592   value as in the caller.
1593 
1594   Except: If the register being saved is the CFA register, and the
1595   offset is nonzero, we are saving the CFA, so we assume we have to
1596   use DW_CFA_def_cfa_expression.  If the offset is 0, we assume that
1597   the intent is to save the value of SP from the previous frame.
1598 
  In addition, if a register has previously been saved to a different
  register, later frame-related copies from the original register are
  treated as copies from its saved location (see reg_saved_in).
1601 
1602   Invariants / Summaries of Rules
1603 
1604   cfa	       current rule for calculating the CFA.  It usually
1605 	       consists of a register and an offset.  This is
1606 	       actually stored in *cur_cfa, but abbreviated
1607 	       for the purposes of this documentation.
1608   cfa_store    register used by prologue code to save things to the stack
1609 	       cfa_store.offset is the offset from the value of
1610 	       cfa_store.reg to the actual CFA
1611   cfa_temp     register holding an integral value.  cfa_temp.offset
1612 	       stores the value, which will be used to adjust the
1613 	       stack pointer.  cfa_temp is also used like cfa_store,
1614 	       to track stores to the stack via fp or a temp reg.
1615 
1616   Rules  1- 4: Setting a register's value to cfa.reg or an expression
1617 	       with cfa.reg as the first operand changes the cfa.reg and its
1618 	       cfa.offset.  Rule 1 and 4 also set cfa_temp.reg and
1619 	       cfa_temp.offset.
1620 
1621   Rules  6- 9: Set a non-cfa.reg register value to a constant or an
1622 	       expression yielding a constant.  This sets cfa_temp.reg
1623 	       and cfa_temp.offset.
1624 
1625   Rule 5:      Create a new register cfa_store used to save items to the
1626 	       stack.
1627 
1628   Rules 10-14: Save a register to the stack.  Define offset as the
1629 	       difference of the original location and cfa_store's
1630 	       location (or cfa_temp's location if cfa_temp is used).
1631 
1632   Rules 16-20: If AND operation happens on sp in prologue, we assume
1633 	       stack is realigned.  We will use a group of DW_OP_XXX
1634 	       expressions to represent the location of the stored
1635 	       register instead of CFA+offset.
1636 
1637   The Rules
1638 
1639   "{a,b}" indicates a choice of a xor b.
1640   "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1641 
1642   Rule 1:
1643   (set <reg1> <reg2>:cfa.reg)
1644   effects: cfa.reg = <reg1>
1645 	   cfa.offset unchanged
1646 	   cfa_temp.reg = <reg1>
1647 	   cfa_temp.offset = cfa.offset
1648 
1649   Rule 2:
1650   (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1651 			      {<const_int>,<reg>:cfa_temp.reg}))
1652   effects: cfa.reg = sp if fp used
1653 	   cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1654 	   cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1655 	     if cfa_store.reg==sp
1656 
1657   Rule 3:
1658   (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1659   effects: cfa.reg = fp
1660 	   cfa_offset += +/- <const_int>
1661 
1662   Rule 4:
1663   (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1664   constraints: <reg1> != fp
1665 	       <reg1> != sp
1666   effects: cfa.reg = <reg1>
1667 	   cfa_temp.reg = <reg1>
1668 	   cfa_temp.offset = cfa.offset
1669 
1670   Rule 5:
1671   (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1672   constraints: <reg1> != fp
1673 	       <reg1> != sp
1674   effects: cfa_store.reg = <reg1>
1675 	   cfa_store.offset = cfa.offset - cfa_temp.offset
1676 
1677   Rule 6:
1678   (set <reg> <const_int>)
1679   effects: cfa_temp.reg = <reg>
1680 	   cfa_temp.offset = <const_int>
1681 
1682   Rule 7:
1683   (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1684   effects: cfa_temp.reg = <reg1>
1685 	   cfa_temp.offset |= <const_int>
1686 
1687   Rule 8:
1688   (set <reg> (high <exp>))
1689   effects: none
1690 
1691   Rule 9:
1692   (set <reg> (lo_sum <exp> <const_int>))
1693   effects: cfa_temp.reg = <reg>
1694 	   cfa_temp.offset = <const_int>
1695 
1696   Rule 10:
1697   (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1698   effects: cfa_store.offset -= <const_int>
1699 	   cfa.offset = cfa_store.offset if cfa.reg == sp
1700 	   cfa.reg = sp
1701 	   cfa.base_offset = -cfa_store.offset
1702 
1703   Rule 11:
1704   (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1705   effects: cfa_store.offset += -/+ mode_size(mem)
1706 	   cfa.offset = cfa_store.offset if cfa.reg == sp
1707 	   cfa.reg = sp
1708 	   cfa.base_offset = -cfa_store.offset
1709 
1710   Rule 12:
1711   (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1712 
1713        <reg2>)
1714   effects: cfa.reg = <reg1>
1715 	   cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1716 
1717   Rule 13:
1718   (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1719   effects: cfa.reg = <reg1>
1720 	   cfa.base_offset = -{cfa_store,cfa_temp}.offset
1721 
1722   Rule 14:
1723   (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1724   effects: cfa.reg = <reg1>
1725 	   cfa.base_offset = -cfa_temp.offset
1726 	   cfa_temp.offset -= mode_size(mem)
1727 
1728   Rule 15:
1729   (set <reg> {unspec, unspec_volatile})
1730   effects: target-dependent
1731 
1732   Rule 16:
1733   (set sp (and: sp <const_int>))
1734   constraints: cfa_store.reg == sp
1735   effects: cfun->fde.stack_realign = 1
1736            cfa_store.offset = 0
1737 	   fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1738 
1739   Rule 17:
1740   (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1741   effects: cfa_store.offset += -/+ mode_size(mem)
1742 
1743   Rule 18:
1744   (set (mem ({pre_inc, pre_dec} sp)) fp)
1745   constraints: fde->stack_realign == 1
1746   effects: cfa_store.offset = 0
1747 	   cfa.reg != HARD_FRAME_POINTER_REGNUM
1748 
1749   Rule 19:
1750   (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1751   constraints: fde->stack_realign == 1
1752                && cfa.offset == 0
1753                && cfa.indirect == 0
1754                && cfa.reg != HARD_FRAME_POINTER_REGNUM
1755   effects: Use DW_CFA_def_cfa_expression to define cfa
1756   	   cfa.reg == fde->drap_reg  */
1757 
1758 static void
dwarf2out_frame_debug_expr(rtx expr)1759 dwarf2out_frame_debug_expr (rtx expr)
1760 {
1761   rtx src, dest, span;
1762   poly_int64 offset;
1763   dw_fde_ref fde;
1764 
1765   /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1766      the PARALLEL independently. The first element is always processed if
1767      it is a SET. This is for backward compatibility.   Other elements
1768      are processed only if they are SETs and the RTX_FRAME_RELATED_P
1769      flag is set in them.  */
1770   if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
1771     {
1772       int par_index;
1773       int limit = XVECLEN (expr, 0);
1774       rtx elem;
1775 
1776       /* PARALLELs have strict read-modify-write semantics, so we
1777 	 ought to evaluate every rvalue before changing any lvalue.
1778 	 It's cumbersome to do that in general, but there's an
1779 	 easy approximation that is enough for all current users:
1780 	 handle register saves before register assignments.  */
1781       if (GET_CODE (expr) == PARALLEL)
1782 	for (par_index = 0; par_index < limit; par_index++)
1783 	  {
1784 	    elem = XVECEXP (expr, 0, par_index);
1785 	    if (GET_CODE (elem) == SET
1786 		&& MEM_P (SET_DEST (elem))
1787 		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1788 	      dwarf2out_frame_debug_expr (elem);
1789 	  }
1790 
1791       for (par_index = 0; par_index < limit; par_index++)
1792 	{
1793 	  elem = XVECEXP (expr, 0, par_index);
1794 	  if (GET_CODE (elem) == SET
1795 	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
1796 	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
1797 	    dwarf2out_frame_debug_expr (elem);
1798 	}
1799       return;
1800     }
1801 
1802   gcc_assert (GET_CODE (expr) == SET);
1803 
1804   src = SET_SRC (expr);
1805   dest = SET_DEST (expr);
1806 
1807   if (REG_P (src))
1808     {
1809       rtx rsi = reg_saved_in (src);
1810       if (rsi)
1811 	src = rsi;
1812     }
1813 
1814   fde = cfun->fde;
1815 
1816   switch (GET_CODE (dest))
1817     {
1818     case REG:
1819       switch (GET_CODE (src))
1820 	{
1821 	  /* Setting FP from SP.  */
1822 	case REG:
1823 	  if (cur_cfa->reg == src)
1824 	    {
1825 	      /* Rule 1 */
1826 	      /* Update the CFA rule wrt SP or FP.  Make sure src is
1827 		 relative to the current CFA register.
1828 
1829 		 We used to require that dest be either SP or FP, but the
1830 		 ARM copies SP to a temporary register, and from there to
1831 		 FP.  So we just rely on the backends to only set
1832 		 RTX_FRAME_RELATED_P on appropriate insns.  */
1833 	      cur_cfa->reg = dwf_cfa_reg (dest);
1834 	      cur_trace->cfa_temp.reg = cur_cfa->reg;
1835 	      cur_trace->cfa_temp.offset = cur_cfa->offset;
1836 	    }
1837 	  else
1838 	    {
1839 	      /* Saving a register in a register.  */
1840 	      gcc_assert (!fixed_regs [REGNO (dest)]
1841 			  /* For the SPARC and its register window.  */
1842 			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));
1843 
1844               /* After stack is aligned, we can only save SP in FP
1845 		 if drap register is used.  In this case, we have
1846 		 to restore stack pointer with the CFA value and we
1847 		 don't generate this DWARF information.  */
1848 	      if (fde
1849 		  && fde->stack_realign
1850 		  && REGNO (src) == STACK_POINTER_REGNUM)
1851 		{
1852 		  gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
1853 			      && fde->drap_reg != INVALID_REGNUM
1854 			      && cur_cfa->reg != src
1855 			      && fde->rule18);
1856 		  fde->rule18 = 0;
1857 		  /* The save of hard frame pointer has been deferred
1858 		     until this point when Rule 18 applied.  Emit it now.  */
1859 		  queue_reg_save (dest, NULL_RTX, 0);
1860 		  /* And as the instruction modifies the hard frame pointer,
1861 		     flush the queue as well.  */
1862 		  dwarf2out_flush_queued_reg_saves ();
1863 		}
1864 	      else
1865 		queue_reg_save (src, dest, 0);
1866 	    }
1867 	  break;
1868 
1869 	case PLUS:
1870 	case MINUS:
1871 	case LO_SUM:
1872 	  if (dest == stack_pointer_rtx)
1873 	    {
1874 	      /* Rule 2 */
1875 	      /* Adjusting SP.  */
1876 	      if (REG_P (XEXP (src, 1)))
1877 		{
1878 		  gcc_assert (cur_trace->cfa_temp.reg == XEXP (src, 1));
1879 		  offset = cur_trace->cfa_temp.offset;
1880 		}
1881 	      else if (!poly_int_rtx_p (XEXP (src, 1), &offset))
1882 		gcc_unreachable ();
1883 
1884 	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
1885 		{
1886 		  /* Restoring SP from FP in the epilogue.  */
1887 		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
1888 		  cur_cfa->reg = dw_stack_pointer_regnum;
1889 		}
1890 	      else if (GET_CODE (src) == LO_SUM)
1891 		/* Assume we've set the source reg of the LO_SUM from sp.  */
1892 		;
1893 	      else
1894 		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);
1895 
1896 	      if (GET_CODE (src) != MINUS)
1897 		offset = -offset;
1898 	      if (cur_cfa->reg == dw_stack_pointer_regnum)
1899 		cur_cfa->offset += offset;
1900 	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
1901 		cur_trace->cfa_store.offset += offset;
1902 	    }
1903 	  else if (dest == hard_frame_pointer_rtx)
1904 	    {
1905 	      /* Rule 3 */
1906 	      /* Either setting the FP from an offset of the SP,
1907 		 or adjusting the FP */
1908 	      gcc_assert (frame_pointer_needed);
1909 
1910 	      gcc_assert (REG_P (XEXP (src, 0))
1911 			  && cur_cfa->reg == XEXP (src, 0));
1912 	      offset = rtx_to_poly_int64 (XEXP (src, 1));
1913 	      if (GET_CODE (src) != MINUS)
1914 		offset = -offset;
1915 	      cur_cfa->offset += offset;
1916 	      cur_cfa->reg = dw_frame_pointer_regnum;
1917 	    }
1918 	  else
1919 	    {
1920 	      gcc_assert (GET_CODE (src) != MINUS);
1921 
1922 	      /* Rule 4 */
1923 	      if (REG_P (XEXP (src, 0))
1924 		  && cur_cfa->reg == XEXP (src, 0)
1925 		  && poly_int_rtx_p (XEXP (src, 1), &offset))
1926 		{
1927 		  /* Setting a temporary CFA register that will be copied
1928 		     into the FP later on.  */
1929 		  offset = -offset;
1930 		  cur_cfa->offset += offset;
1931 		  cur_cfa->reg = dwf_cfa_reg (dest);
1932 		  /* Or used to save regs to the stack.  */
1933 		  cur_trace->cfa_temp.reg = cur_cfa->reg;
1934 		  cur_trace->cfa_temp.offset = cur_cfa->offset;
1935 		}
1936 
1937 	      /* Rule 5 */
1938 	      else if (REG_P (XEXP (src, 0))
1939 		       && cur_trace->cfa_temp.reg == XEXP (src, 0)
1940 		       && XEXP (src, 1) == stack_pointer_rtx)
1941 		{
1942 		  /* Setting a scratch register that we will use instead
1943 		     of SP for saving registers to the stack.  */
1944 		  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
1945 		  cur_trace->cfa_store.reg = dwf_cfa_reg (dest);
1946 		  cur_trace->cfa_store.offset
1947 		    = cur_cfa->offset - cur_trace->cfa_temp.offset;
1948 		}
1949 
1950 	      /* Rule 9 */
1951 	      else if (GET_CODE (src) == LO_SUM
1952 		       && poly_int_rtx_p (XEXP (src, 1),
1953 					  &cur_trace->cfa_temp.offset))
1954 		cur_trace->cfa_temp.reg = dwf_cfa_reg (dest);
1955 	      else
1956 		gcc_unreachable ();
1957 	    }
1958 	  break;
1959 
1960 	  /* Rule 6 */
1961 	case CONST_INT:
1962 	case CONST_POLY_INT:
1963 	  cur_trace->cfa_temp.reg = dwf_cfa_reg (dest);
1964 	  cur_trace->cfa_temp.offset = rtx_to_poly_int64 (src);
1965 	  break;
1966 
1967 	  /* Rule 7 */
1968 	case IOR:
1969 	  gcc_assert (REG_P (XEXP (src, 0))
1970 		      && cur_trace->cfa_temp.reg == XEXP (src, 0)
1971 		      && CONST_INT_P (XEXP (src, 1)));
1972 
1973 	  cur_trace->cfa_temp.reg = dwf_cfa_reg (dest);
1974 	  if (!can_ior_p (cur_trace->cfa_temp.offset, INTVAL (XEXP (src, 1)),
1975 			  &cur_trace->cfa_temp.offset))
1976 	    /* The target shouldn't generate this kind of CFI note if we
1977 	       can't represent it.  */
1978 	    gcc_unreachable ();
1979 	  break;
1980 
1981 	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1982 	     which will fill in all of the bits.  */
1983 	  /* Rule 8 */
1984 	case HIGH:
1985 	  break;
1986 
1987 	  /* Rule 15 */
1988 	case UNSPEC:
1989 	case UNSPEC_VOLATILE:
1990 	  /* All unspecs should be represented by REG_CFA_* notes.  */
1991 	  gcc_unreachable ();
1992 	  return;
1993 
1994 	  /* Rule 16 */
1995 	case AND:
1996           /* If this AND operation happens on stack pointer in prologue,
1997 	     we assume the stack is realigned and we extract the
1998 	     alignment.  */
1999           if (fde && XEXP (src, 0) == stack_pointer_rtx)
2000             {
2001 	      /* We interpret reg_save differently with stack_realign set.
2002 		 Thus we must flush whatever we have queued first.  */
2003 	      dwarf2out_flush_queued_reg_saves ();
2004 
2005               gcc_assert (cur_trace->cfa_store.reg
2006 			  == XEXP (src, 0));
2007               fde->stack_realign = 1;
2008               fde->stack_realignment = INTVAL (XEXP (src, 1));
2009               cur_trace->cfa_store.offset = 0;
2010 
2011 	      if (cur_cfa->reg != dw_stack_pointer_regnum
2012 		  && cur_cfa->reg != dw_frame_pointer_regnum)
2013 		{
2014 		  gcc_assert (cur_cfa->reg.span == 1);
2015 		  fde->drap_reg = cur_cfa->reg.reg;
2016 		}
2017             }
2018           return;
2019 
2020 	default:
2021 	  gcc_unreachable ();
2022 	}
2023       break;
2024 
2025     case MEM:
2026 
2027       /* Saving a register to the stack.  Make sure dest is relative to the
2028 	 CFA register.  */
2029       switch (GET_CODE (XEXP (dest, 0)))
2030 	{
2031 	  /* Rule 10 */
2032 	  /* With a push.  */
2033 	case PRE_MODIFY:
2034 	case POST_MODIFY:
2035 	  /* We can't handle variable size modifications.  */
2036 	  offset = -rtx_to_poly_int64 (XEXP (XEXP (XEXP (dest, 0), 1), 1));
2037 
2038 	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
2039 		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
2040 
2041 	  cur_trace->cfa_store.offset += offset;
2042 	  if (cur_cfa->reg == dw_stack_pointer_regnum)
2043 	    cur_cfa->offset = cur_trace->cfa_store.offset;
2044 
2045 	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
2046 	    offset -= cur_trace->cfa_store.offset;
2047 	  else
2048 	    offset = -cur_trace->cfa_store.offset;
2049 	  break;
2050 
2051 	  /* Rule 11 */
2052 	case PRE_INC:
2053 	case PRE_DEC:
2054 	case POST_DEC:
2055 	  offset = GET_MODE_SIZE (GET_MODE (dest));
2056 	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
2057 	    offset = -offset;
2058 
2059 	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
2060 		       == STACK_POINTER_REGNUM)
2061 		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);
2062 
2063 	  cur_trace->cfa_store.offset += offset;
2064 
2065           /* Rule 18: If stack is aligned, we will use FP as a
2066 	     reference to represent the address of the stored
2067 	     regiser.  */
2068           if (fde
2069               && fde->stack_realign
2070 	      && REG_P (src)
2071 	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
2072 	    {
2073 	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
2074 	      cur_trace->cfa_store.offset = 0;
2075 	      fde->rule18 = 1;
2076 	    }
2077 
2078 	  if (cur_cfa->reg == dw_stack_pointer_regnum)
2079 	    cur_cfa->offset = cur_trace->cfa_store.offset;
2080 
2081 	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
2082 	    offset += -cur_trace->cfa_store.offset;
2083 	  else
2084 	    offset = -cur_trace->cfa_store.offset;
2085 	  break;
2086 
2087 	  /* Rule 12 */
2088 	  /* With an offset.  */
2089 	case PLUS:
2090 	case MINUS:
2091 	case LO_SUM:
2092 	  {
2093 	    struct cfa_reg regno;
2094 
2095 	    gcc_assert (REG_P (XEXP (XEXP (dest, 0), 0)));
2096 	    offset = rtx_to_poly_int64 (XEXP (XEXP (dest, 0), 1));
2097 	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
2098 	      offset = -offset;
2099 
2100 	    regno = dwf_cfa_reg (XEXP (XEXP (dest, 0), 0));
2101 
2102 	    if (cur_cfa->reg == regno)
2103 	      offset -= cur_cfa->offset;
2104 	    else if (cur_trace->cfa_store.reg == regno)
2105 	      offset -= cur_trace->cfa_store.offset;
2106 	    else
2107 	      {
2108 		gcc_assert (cur_trace->cfa_temp.reg == regno);
2109 		offset -= cur_trace->cfa_temp.offset;
2110 	      }
2111 	  }
2112 	  break;
2113 
2114 	  /* Rule 13 */
2115 	  /* Without an offset.  */
2116 	case REG:
2117 	  {
2118 	    struct cfa_reg regno = dwf_cfa_reg (XEXP (dest, 0));
2119 
2120 	    if (cur_cfa->reg == regno)
2121 	      offset = -cur_cfa->offset;
2122 	    else if (cur_trace->cfa_store.reg == regno)
2123 	      offset = -cur_trace->cfa_store.offset;
2124 	    else
2125 	      {
2126 		gcc_assert (cur_trace->cfa_temp.reg == regno);
2127 		offset = -cur_trace->cfa_temp.offset;
2128 	      }
2129 	  }
2130 	  break;
2131 
2132 	  /* Rule 14 */
2133 	case POST_INC:
2134 	  gcc_assert (cur_trace->cfa_temp.reg == XEXP (XEXP (dest, 0), 0));
2135 	  offset = -cur_trace->cfa_temp.offset;
2136 	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
2137 	  break;
2138 
2139 	default:
2140 	  gcc_unreachable ();
2141 	}
2142 
2143       /* Rule 17 */
2144       /* If the source operand of this MEM operation is a memory,
2145 	 we only care how much stack grew.  */
2146       if (MEM_P (src))
2147         break;
2148 
2149       if (REG_P (src)
2150 	  && REGNO (src) != STACK_POINTER_REGNUM
2151 	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
2152 	  && cur_cfa->reg == src)
2153 	{
2154 	  /* We're storing the current CFA reg into the stack.  */
2155 
2156 	  if (known_eq (cur_cfa->offset, 0))
2157 	    {
2158               /* Rule 19 */
2159               /* If stack is aligned, putting CFA reg into stack means
2160 		 we can no longer use reg + offset to represent CFA.
2161 		 Here we use DW_CFA_def_cfa_expression instead.  The
2162 		 result of this expression equals to the original CFA
2163 		 value.  */
2164               if (fde
2165                   && fde->stack_realign
2166                   && cur_cfa->indirect == 0
2167                   && cur_cfa->reg != dw_frame_pointer_regnum)
2168                 {
2169 		  gcc_assert (fde->drap_reg == cur_cfa->reg.reg);
2170 
2171 		  cur_cfa->indirect = 1;
2172 		  cur_cfa->reg = dw_frame_pointer_regnum;
2173 		  cur_cfa->base_offset = offset;
2174 		  cur_cfa->offset = 0;
2175 
2176 		  fde->drap_reg_saved = 1;
2177 		  break;
2178                 }
2179 
2180 	      /* If the source register is exactly the CFA, assume
2181 		 we're saving SP like any other register; this happens
2182 		 on the ARM.  */
2183 	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
2184 	      break;
2185 	    }
2186 	  else
2187 	    {
2188 	      /* Otherwise, we'll need to look in the stack to
2189 		 calculate the CFA.  */
2190 	      rtx x = XEXP (dest, 0);
2191 
2192 	      if (!REG_P (x))
2193 		x = XEXP (x, 0);
2194 	      gcc_assert (REG_P (x));
2195 
2196 	      cur_cfa->reg = dwf_cfa_reg (x);
2197 	      cur_cfa->base_offset = offset;
2198 	      cur_cfa->indirect = 1;
2199 	      break;
2200 	    }
2201 	}
2202 
2203       if (REG_P (src))
2204 	span = targetm.dwarf_register_span (src);
2205       else
2206 	span = NULL;
2207 
2208       if (!span)
2209 	{
2210 	  if (fde->rule18)
2211 	    /* Just verify the hard frame pointer save when doing dynamic
2212 	       realignment uses expected offset.  The actual queue_reg_save
2213 	       needs to be deferred until the instruction that sets
2214 	       hard frame pointer to stack pointer, see PR99334 for
2215 	       details.  */
2216 	    gcc_assert (known_eq (offset, 0));
2217 	  else
2218 	    queue_reg_save (src, NULL_RTX, offset);
2219 	}
2220       else
2221 	{
2222 	  /* We have a PARALLEL describing where the contents of SRC live.
2223 	     Queue register saves for each piece of the PARALLEL.  */
2224 	  poly_int64 span_offset = offset;
2225 
2226 	  gcc_assert (GET_CODE (span) == PARALLEL);
2227 
2228 	  const int par_len = XVECLEN (span, 0);
2229 	  for (int par_index = 0; par_index < par_len; par_index++)
2230 	    {
2231 	      rtx elem = XVECEXP (span, 0, par_index);
2232 	      queue_reg_save (elem, NULL_RTX, span_offset);
2233 	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
2234 	    }
2235 	}
2236       break;
2237 
2238     default:
2239       gcc_unreachable ();
2240     }
2241 }
2242 
2243 /* Record call frame debugging information for INSN, which either sets
2244    SP or FP (adjusting how we calculate the frame address) or saves a
2245    register to the stack.  */
2246 
2247 static void
dwarf2out_frame_debug(rtx_insn * insn)2248 dwarf2out_frame_debug (rtx_insn *insn)
2249 {
2250   rtx note, n, pat;
2251   bool handled_one = false;
2252 
2253   for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2254     switch (REG_NOTE_KIND (note))
2255       {
2256       case REG_FRAME_RELATED_EXPR:
2257 	pat = XEXP (note, 0);
2258 	goto do_frame_expr;
2259 
2260       case REG_CFA_DEF_CFA:
2261 	dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2262 	handled_one = true;
2263 	break;
2264 
2265       case REG_CFA_ADJUST_CFA:
2266 	n = XEXP (note, 0);
2267 	if (n == NULL)
2268 	  {
2269 	    n = PATTERN (insn);
2270 	    if (GET_CODE (n) == PARALLEL)
2271 	      n = XVECEXP (n, 0, 0);
2272 	  }
2273 	dwarf2out_frame_debug_adjust_cfa (n);
2274 	handled_one = true;
2275 	break;
2276 
2277       case REG_CFA_OFFSET:
2278 	n = XEXP (note, 0);
2279 	if (n == NULL)
2280 	  n = single_set (insn);
2281 	dwarf2out_frame_debug_cfa_offset (n);
2282 	handled_one = true;
2283 	break;
2284 
2285       case REG_CFA_REGISTER:
2286 	n = XEXP (note, 0);
2287 	if (n == NULL)
2288 	  {
2289 	    n = PATTERN (insn);
2290 	    if (GET_CODE (n) == PARALLEL)
2291 	      n = XVECEXP (n, 0, 0);
2292 	  }
2293 	dwarf2out_frame_debug_cfa_register (n);
2294 	handled_one = true;
2295 	break;
2296 
2297       case REG_CFA_EXPRESSION:
2298       case REG_CFA_VAL_EXPRESSION:
2299 	n = XEXP (note, 0);
2300 	if (n == NULL)
2301 	  n = single_set (insn);
2302 
2303 	if (REG_NOTE_KIND (note) == REG_CFA_EXPRESSION)
2304 	  dwarf2out_frame_debug_cfa_expression (n);
2305 	else
2306 	  dwarf2out_frame_debug_cfa_val_expression (n);
2307 
2308 	handled_one = true;
2309 	break;
2310 
2311       case REG_CFA_RESTORE:
2312 	n = XEXP (note, 0);
2313 	if (n == NULL)
2314 	  {
2315 	    n = PATTERN (insn);
2316 	    if (GET_CODE (n) == PARALLEL)
2317 	      n = XVECEXP (n, 0, 0);
2318 	    n = XEXP (n, 0);
2319 	  }
2320 	dwarf2out_frame_debug_cfa_restore (n);
2321 	handled_one = true;
2322 	break;
2323 
2324       case REG_CFA_SET_VDRAP:
2325 	n = XEXP (note, 0);
2326 	if (REG_P (n))
2327 	  {
2328 	    dw_fde_ref fde = cfun->fde;
2329 	    if (fde)
2330 	      {
2331 		gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2332 		if (REG_P (n))
2333 		  fde->vdrap_reg = dwf_regno (n);
2334 	      }
2335 	  }
2336 	handled_one = true;
2337 	break;
2338 
2339       case REG_CFA_TOGGLE_RA_MANGLE:
2340 	dwarf2out_frame_debug_cfa_toggle_ra_mangle ();
2341 	handled_one = true;
2342 	break;
2343 
2344       case REG_CFA_WINDOW_SAVE:
2345 	dwarf2out_frame_debug_cfa_window_save ();
2346 	handled_one = true;
2347 	break;
2348 
2349       case REG_CFA_FLUSH_QUEUE:
2350 	/* The actual flush happens elsewhere.  */
2351 	handled_one = true;
2352 	break;
2353 
2354       default:
2355 	break;
2356       }
2357 
2358   if (!handled_one)
2359     {
2360       pat = PATTERN (insn);
2361     do_frame_expr:
2362       dwarf2out_frame_debug_expr (pat);
2363 
2364       /* Check again.  A parallel can save and update the same register.
2365          We could probably check just once, here, but this is safer than
2366          removing the check at the start of the function.  */
2367       if (clobbers_queued_reg_save (pat))
2368 	dwarf2out_flush_queued_reg_saves ();
2369     }
2370 }
2371 
2372 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW.  */
2373 
2374 static void
change_cfi_row(dw_cfi_row * old_row,dw_cfi_row * new_row)2375 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2376 {
2377   size_t i, n_old, n_new, n_max;
2378   dw_cfi_ref cfi;
2379 
2380   if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2381     add_cfi (new_row->cfa_cfi);
2382   else
2383     {
2384       cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2385       if (cfi)
2386 	add_cfi (cfi);
2387     }
2388 
2389   n_old = vec_safe_length (old_row->reg_save);
2390   n_new = vec_safe_length (new_row->reg_save);
2391   n_max = MAX (n_old, n_new);
2392 
2393   for (i = 0; i < n_max; ++i)
2394     {
2395       dw_cfi_ref r_old = NULL, r_new = NULL;
2396 
2397       if (i < n_old)
2398 	r_old = (*old_row->reg_save)[i];
2399       if (i < n_new)
2400 	r_new = (*new_row->reg_save)[i];
2401 
2402       if (r_old == r_new)
2403 	;
2404       else if (r_new == NULL)
2405 	add_cfi_restore (i);
2406       else if (!cfi_equal_p (r_old, r_new))
2407         add_cfi (r_new);
2408     }
2409 
2410   if (!old_row->window_save && new_row->window_save)
2411     {
2412       dw_cfi_ref cfi = new_cfi ();
2413 
2414       gcc_assert (!old_row->ra_mangled && !new_row->ra_mangled);
2415       cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
2416       add_cfi (cfi);
2417     }
2418 
2419   if (old_row->ra_mangled != new_row->ra_mangled)
2420     {
2421       dw_cfi_ref cfi = new_cfi ();
2422 
2423       gcc_assert (!old_row->window_save && !new_row->window_save);
2424       /* DW_CFA_GNU_window_save is reused for toggling RA mangle state.  */
2425       cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
2426       add_cfi (cfi);
2427     }
2428 }
2429 
2430 /* Examine CFI and return true if a cfi label and set_loc is needed
2431    beforehand.  Even when generating CFI assembler instructions, we
2432    still have to add the cfi to the list so that lookup_cfa_1 works
2433    later on.  When -g2 and above we even need to force emitting of
2434    CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2435    purposes.  If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2436    and so don't use convert_cfa_to_fb_loc_list.  */
2437 
2438 static bool
cfi_label_required_p(dw_cfi_ref cfi)2439 cfi_label_required_p (dw_cfi_ref cfi)
2440 {
2441   if (!dwarf2out_do_cfi_asm ())
2442     return true;
2443 
2444   if (dwarf_version == 2
2445       && debug_info_level > DINFO_LEVEL_TERSE
2446       && dwarf_debuginfo_p ())
2447     {
2448       switch (cfi->dw_cfi_opc)
2449 	{
2450 	case DW_CFA_def_cfa_offset:
2451 	case DW_CFA_def_cfa_offset_sf:
2452 	case DW_CFA_def_cfa_register:
2453 	case DW_CFA_def_cfa:
2454 	case DW_CFA_def_cfa_sf:
2455 	case DW_CFA_def_cfa_expression:
2456 	case DW_CFA_restore_state:
2457 	  return true;
2458 	default:
2459 	  return false;
2460 	}
2461     }
2462   return false;
2463 }
2464 
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      /* Remember where the text-section switch happens so the FDE can
	 be split there when emitted.  */
      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  /* Gather the run of consecutive CFI notes starting at INSN:
	     advance NEXT past further CFI notes (and any inactive,
	     non-section-switch notes between them), OR-ing together
	     whether any CFI in the run needs an explicit label.  */
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      /* Capture the label number BEFORE dwarf2out_cfi_label
		 bumps it, so the emitted note matches LABEL.  */
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = DW_CFA_advance_loc4;
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Now append every CFI in the run [INSN, NEXT) to the FDE.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	}
    }
}
2522 
2523 static void dump_cfi_row (FILE *f, dw_cfi_row *row);
2524 
/* If LABEL is the start of a trace, then initialize the state of that
   trace from CUR_TRACE and CUR_ROW.

   START is the first insn of the target trace; ORIGIN is the branch
   insn that jumps there, or NULL for a fallthru edge (used only for
   dump output).  */

static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;

  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  poly_int64 args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {

      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
#if CHECKING_P
      if (!cfi_row_equal_p (cur_row, ti->beg_row))
	{
	  /* Dump both rows before aborting, to aid debugging.  */
	  if (dump_file)
	    {
	      fprintf (dump_file, "Inconsistent CFI state!\n");
	      fprintf (dump_file, "SHOULD have:\n");
	      dump_cfi_row (dump_file, ti->beg_row);
	      fprintf (dump_file, "DO have:\n");
	      dump_cfi_row (dump_file, cur_row);
	    }

	  gcc_unreachable ();
	}
#endif

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (maybe_ne (ti->beg_true_args_size, args_size))
	ti->args_size_undefined = true;
    }
}
2588 
/* Similarly, but handle the args_size and CFA reset across EH
   and non-local goto edges.

   On such edges the pushed-argument area is discarded, so the target
   trace must see args_size of zero; temporarily zero it (and adjust a
   stack-pointer-based CFA to match) around the recording, restoring
   the current trace's state afterwards.  */

static void
maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
{
  poly_int64 save_args_size, delta;
  dw_cfa_location save_cfa;

  save_args_size = cur_trace->end_true_args_size;
  /* Nothing pending on the stack: the ordinary path suffices.  */
  if (known_eq (save_args_size, 0))
    {
      maybe_record_trace_start (start, origin);
      return;
    }

  delta = -save_args_size;
  cur_trace->end_true_args_size = 0;

  save_cfa = cur_row->cfa;
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
    {
      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_row->cfa.offset += delta;
    }

  /* Record with the adjusted state, then restore it for the
     continuation of the current trace.  */
  maybe_record_trace_start (start, origin);

  cur_trace->end_true_args_size = save_args_size;
  cur_row->cfa = save_cfa;
}
2624 
/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
/* ??? Sadly, this is in large part a duplicate of make_edges.  */

static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* Non-local gotos are handled from the CALL_P branch below via
	 the nonlocal_goto_handler_labels list, not here.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* A jump table: every label in the table is a possible target.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }

	  /* Handle casesi dispatch insns.  */
	  if ((tmp = tablejump_casesi_pattern (insn)) != NULL_RTX)
	    {
	      /* Operand 2 of the casesi pattern is the default label.  */
	      rtx_insn * lab = label_ref_label (XEXP (SET_SRC (tmp), 2));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump could target any label whose address was
	     taken; record an edge to each forced label.  */
	  rtx_insn *temp;
	  unsigned int i;
	  FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
	    maybe_record_trace_start (temp, insn);
	}
      else if (returnjump_p (insn))
	/* A return leaves the function: no intra-function edge.  */
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: each label operand is a potential target.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* An ordinary direct jump with a single target label.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* Delay-slot sequence: recurse into each member insn.  The early
	 return skips the EH check below; it is done per member instead.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
2714 
/* A subroutine of scan_trace.  Do what needs to be done "after" INSN.  */

static void
scan_insn_after (rtx_insn *insn)
{
  /* Apply any frame-related effects of INSN to the CFI row state.  */
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  /* Update the running args_size state for INSN (presumably from its
     REG_ARGS_SIZE note -- the helper's body is not visible here).  */
  notice_args_size (insn);
}
2724 
/* Scan the trace beginning at INSN and create the CFI notes for the
   instructions therein.

   TRACE is the trace to process; ENTRY is true for the function's
   entry trace.  Sets up and tears down the cur_trace/cur_row/cur_cfa
   globals used by the scanning helpers.  */

static void
scan_trace (dw_trace_info *trace, bool entry)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  /* The end state starts as a copy of the incoming state and is
     mutated as we scan forward.  */
  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  cur_trace = trace;
  cur_row = trace->end_row;

  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  /* If the current function starts with a non-standard incoming frame
     sp offset, emit a note before the first instruction.  */
  if (entry
      && DEFAULT_INCOMING_FRAME_SP_OFFSET != INCOMING_FRAME_SP_OFFSET)
    {
      add_cfi_insn = insn;
      gcc_assert (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED);
      this_cfa.offset = INCOMING_FRAME_SP_OFFSET;
      def_cfa_1 (&this_cfa);
    }

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      /* Debug insns and insns outside any basic block contribute
	 nothing to the CFI state.  */
      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  /* Element 0 of a SEQUENCE is the branch/call itself; the
	     remaining elements are its delay-slot insns.  */
	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  poly_int64 restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Undo ELT's effects along the fallthru path.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

          /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  /* The deferred rule-18 hard-frame-pointer save must have been
     resolved by the end of the trace.  */
  gcc_assert (!cfun->fde || !cfun->fde->rule18);
  /* Clear the scanning globals so stale state cannot leak into the
     processing of the next trace.  */
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2915 
2916 /* Scan the function and create the initial set of CFI notes.  */
2917 
2918 static void
create_cfi_notes(void)2919 create_cfi_notes (void)
2920 {
2921   dw_trace_info *ti;
2922 
2923   gcc_checking_assert (!queued_reg_saves.exists ());
2924   gcc_checking_assert (!trace_work_list.exists ());
2925 
2926   /* Always begin at the entry trace.  */
2927   ti = &trace_info[0];
2928   scan_trace (ti, true);
2929 
2930   while (!trace_work_list.is_empty ())
2931     {
2932       ti = trace_work_list.pop ();
2933       scan_trace (ti, false);
2934     }
2935 
2936   queued_reg_saves.release ();
2937   trace_work_list.release ();
2938 }
2939 
2940 /* Return the insn before the first NOTE_INSN_CFI after START.  */
2941 
2942 static rtx_insn *
before_next_cfi_note(rtx_insn * start)2943 before_next_cfi_note (rtx_insn *start)
2944 {
2945   rtx_insn *prev = start;
2946   while (start)
2947     {
2948       if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2949 	return prev;
2950       prev = start;
2951       start = NEXT_INSN (start);
2952     }
2953   gcc_unreachable ();
2954 }
2955 
/* Insert CFI notes between traces to properly change state between them.  */

static void
connect_traces (void)
{
  unsigned i, n;
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  unsigned ix, ix2;
  VEC_ORDERED_REMOVE_IF_FROM_TO (trace_info, ix, ix2, ti, 1,
				 trace_info.length (), ti->beg_row == NULL);
  FOR_EACH_VEC_ELT (trace_info, ix, ti)
    gcc_assert (ti->end_row != NULL);

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  n = trace_info.length ();
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      /* New CFI notes are inserted after the head insn of trace TI.  */
      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      /* The matching restore goes at the head of trace TI.  */
	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      /* If the target unwinder does not save the CFA as part of the
		 register state, we need to restore it separately.  */
	      if (targetm.asm_out.should_restore_cfa_state ()
		  && (cfi = def_cfa_0 (&old_row->cfa, &ti->beg_row->cfa)))
		add_cfi (cfi);

	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      /* Emit whatever opcodes take the row state OLD_ROW to the state
	 required at the beginning of trace TI.  */
      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  /* Dump each CFI note just emitted, through ADD_CFI_INSN.  */
	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      poly_int64 prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  /* A section switch starts a new FDE, whose args_size is 0.  */
	  if (ti->switch_sections)
	    prev_args_size = 0;

	  /* Only traces containing EH insns need args_size fixups.  */
	  if (ti->eh_head == NULL)
	    continue;

	  /* We require either the incoming args_size values to match or the
	     presence of an insn setting it before the first EH insn.  */
	  gcc_assert (!ti->args_size_undefined || ti->args_size_defined_for_eh);

	  /* In the latter case, we force the creation of a CFI note.  */
	  if (ti->args_size_undefined
	      || maybe_ne (ti->beg_delay_args_size, prev_args_size))
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
3089 
/* Set up the pseudo-cfg of instruction traces, as described at the
   block comment at the top of the file.  */

static void
create_pseudo_cfg (void)
{
  bool saw_barrier, switch_sections;
  dw_trace_info ti;
  rtx_insn *insn;
  unsigned i;

  /* The first trace begins at the start of the function,
     and begins with the CIE row state.  */
  trace_info.create (16);
  memset (&ti, 0, sizeof (ti));
  ti.head = get_insns ();
  ti.beg_row = cie_cfi_row;
  ti.cfa_store = cie_cfi_row->cfa;
  ti.cfa_temp.reg.set_by_dwreg (INVALID_REGNUM);
  trace_info.quick_push (ti);

  /* NOTE(review): this pushes onto the local TI *after* TI was copied
     into trace_info above, so trace_info[0].regs_saved_in_regs appears
     to stay empty and the vector allocated here is never released by
     the cleanup loop in execute_dwarf2_frame.  Presumably the push was
     meant to precede quick_push so the entry trace starts with the CIE
     return-register save — confirm against upstream history.  */
  if (cie_return_save)
    ti.regs_saved_in_regs.safe_push (*cie_return_save);

  /* Walk all the insns, collecting start of trace locations.  */
  saw_barrier = false;
  switch_sections = false;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (BARRIER_P (insn))
	saw_barrier = true;
      else if (NOTE_P (insn)
	       && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* We should have just seen a barrier.  */
	  gcc_assert (saw_barrier);
	  switch_sections = true;
	}
      /* Watch out for save_point notes between basic blocks.
	 In particular, a note after a barrier.  Do not record these,
	 delaying trace creation until the label.  */
      else if (save_point_p (insn)
	       && (LABEL_P (insn) || !saw_barrier))
	{
	  memset (&ti, 0, sizeof (ti));
	  ti.head = insn;
	  ti.switch_sections = switch_sections;
	  ti.id = trace_info.length ();
	  trace_info.safe_push (ti);

	  saw_barrier = false;
	  switch_sections = false;
	}
    }

  /* Create the trace index after we've finished building trace_info,
     avoiding stale pointer problems due to reallocation.  */
  trace_index
    = new hash_table<trace_info_hasher> (trace_info.length ());
  dw_trace_info *tp;
  FOR_EACH_VEC_ELT (trace_info, i, tp)
    {
      dw_trace_info **slot;

      if (dump_file)
	fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
		 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
		 tp->switch_sections ? " (section switch)" : "");

      /* Traces are keyed by the UID of their head insn; two traces can
	 never begin at the same insn.  */
      slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
      gcc_assert (*slot == NULL);
      *slot = tp;
    }
}
3164 
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.  */

static void
initial_return_save (rtx rtl)
{
  /* Register holding the incoming return address; INVALID_REGNUM means
     the return address is in memory instead.  */
  struct cfa_reg reg;
  reg.set_by_dwreg (INVALID_REGNUM);
  /* Offset from the stack pointer, for the in-memory case.  */
  poly_int64 offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_cfa_reg (rtl);
      break;

    case MEM:
      /* RA is on the stack.  The address must be SP, SP+const or
	 SP-const.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = rtx_to_poly_int64 (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -rtx_to_poly_int64 (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8. Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg.reg != DWARF_FRAME_RETURN_COLUMN)
    {
      /* REG is valid only in the REG case above, where RTL is still the
	 register rtx itself.  */
      if (reg.reg != INVALID_REGNUM)
        record_reg_saved_in_reg (rtl, pc_rtx);
      /* OFFSET is SP-relative; convert it to be CFA-relative.  */
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
3227 
/* Compute the CFI state implied by the CIE: the incoming CFA location
   and the initial position of the return address.  The resulting CFIs
   are accumulated into cie_cfi_vec and the row into cie_cfi_row.
   Called once per translation unit (see execute_dwarf2_frame).  */

static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = dwf_cfa_reg (stack_pointer_rtx);

  /* Scan with a dummy trace so reg_save and friends have somewhere to
     record state.  */
  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  /* create_cie_data is called just once per TU, and when using .cfi_startproc
     is even done by the assembler rather than the compiler.  If the target
     has different incoming frame sp offsets depending on what kind of
     function it is, use a single constant offset for the target and
     if needed, adjust before the first instruction in insn stream.  */
  loc.offset = DEFAULT_INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Reset the global scan state.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
3284 
3285 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
3286    state at each location within the function.  These notes will be
3287    emitted during pass_final.  */
3288 
3289 static unsigned int
execute_dwarf2_frame(void)3290 execute_dwarf2_frame (void)
3291 {
3292   /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file.  */
3293   dw_frame_pointer_regnum = dwf_cfa_reg (hard_frame_pointer_rtx);
3294 
3295   /* The first time we're called, compute the incoming frame state.  */
3296   if (cie_cfi_vec == NULL)
3297     create_cie_data ();
3298 
3299   dwarf2out_alloc_current_fde ();
3300 
3301   create_pseudo_cfg ();
3302 
3303   /* Do the work.  */
3304   create_cfi_notes ();
3305   connect_traces ();
3306   add_cfis_to_fde ();
3307 
3308   /* Free all the data we allocated.  */
3309   {
3310     size_t i;
3311     dw_trace_info *ti;
3312 
3313     FOR_EACH_VEC_ELT (trace_info, i, ti)
3314       ti->regs_saved_in_regs.release ();
3315   }
3316   trace_info.release ();
3317 
3318   delete trace_index;
3319   trace_index = NULL;
3320 
3321   return 0;
3322 }
3323 
3324 /* Convert a DWARF call frame info. operation to its string name */
3325 
3326 static const char *
dwarf_cfi_name(unsigned int cfi_opc)3327 dwarf_cfi_name (unsigned int cfi_opc)
3328 {
3329   const char *name = get_DW_CFA_name (cfi_opc);
3330 
3331   if (name != NULL)
3332     return name;
3333 
3334   return "DW_CFA_<unknown>";
3335 }
3336 
3337 /* This routine will generate the correct assembly data for a location
3338    description based on a cfi entry with a complex address.  */
3339 
3340 static void
output_cfa_loc(dw_cfi_ref cfi,int for_eh)3341 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3342 {
3343   dw_loc_descr_ref loc;
3344   unsigned long size;
3345 
3346   if (cfi->dw_cfi_opc == DW_CFA_expression
3347       || cfi->dw_cfi_opc == DW_CFA_val_expression)
3348     {
3349       unsigned r =
3350 	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3351       dw2_asm_output_data (1, r, NULL);
3352       loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3353     }
3354   else
3355     loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3356 
3357   /* Output the size of the block.  */
3358   size = size_of_locs (loc);
3359   dw2_asm_output_data_uleb128 (size, NULL);
3360 
3361   /* Now output the operations themselves.  */
3362   output_loc_sequence (loc, for_eh);
3363 }
3364 
3365 /* Similar, but used for .cfi_escape.  */
3366 
3367 static void
output_cfa_loc_raw(dw_cfi_ref cfi)3368 output_cfa_loc_raw (dw_cfi_ref cfi)
3369 {
3370   dw_loc_descr_ref loc;
3371   unsigned long size;
3372 
3373   if (cfi->dw_cfi_opc == DW_CFA_expression
3374       || cfi->dw_cfi_opc == DW_CFA_val_expression)
3375     {
3376       unsigned r =
3377 	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3378       fprintf (asm_out_file, "%#x,", r);
3379       loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3380     }
3381   else
3382     loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3383 
3384   /* Output the size of the block.  */
3385   size = size_of_locs (loc);
3386   dw2_asm_output_data_uleb128_raw (size);
3387   fputc (',', asm_out_file);
3388 
3389   /* Now output the operations themselves.  */
3390   output_loc_sequence_raw (loc);
3391 }
3392 
/* Output a Call Frame Information opcode and its operand(s).
   FOR_EH selects the EH-frame (vs .debug_frame) register numbering via
   DWARF2_FRAME_REG_OUT; FDE supplies dw_fde_current_label, the base
   label that the advance_loc variants emit their delta against.  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    /* Small advance: the delta is packed into the low 6 bits of the
       opcode byte itself.  */
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      /* Register number is packed into the opcode byte; the offset
	 operand is scaled by the data alignment factor.  */
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      /* Register number packed into the opcode byte, as above.  */
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      /* All remaining opcodes occupy a full byte, followed by their
	 operands.  */
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* Each advance_loc variant emits a delta of the given width from
	   the FDE's current label, then makes this location the new
	   current label.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  /* Register as uleb128, offset scaled by data alignment.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  /* Register and *unscaled* offset, both uleb128.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  /* As offset_extended, but the scaled offset is signed.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* Opcodes whose only operand is a register number.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	case DW_CFA_val_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
3534 
/* Similar, but do it via assembler directives instead.
   F is either asm_out_file (real emission) or a dump/debug stream;
   a few cases print differently when F is not asm_out_file.
   Register numbers always use the EH mapping here.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routines is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* The assembler has no dedicated directive for this opcode, so
	 emit it through .cfi_escape as raw bytes.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
    case DW_CFA_expression:
    case DW_CFA_val_expression:
      /* For dump files, don't bother expanding the location list.  */
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_%scfa_%sexpression ...\n",
		   cfi->dw_cfi_opc == DW_CFA_def_cfa_expression ? "def_" : "",
		   cfi->dw_cfi_opc == DW_CFA_val_expression ? "val_" : "");
	  break;
	}
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3654 
3655 void
dwarf2out_emit_cfi(dw_cfi_ref cfi)3656 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3657 {
3658   if (dwarf2out_do_cfi_asm ())
3659     output_cfi_directive (asm_out_file, cfi);
3660 }
3661 
3662 static void
dump_cfi_row(FILE * f,dw_cfi_row * row)3663 dump_cfi_row (FILE *f, dw_cfi_row *row)
3664 {
3665   dw_cfi_ref cfi;
3666   unsigned i;
3667 
3668   cfi = row->cfa_cfi;
3669   if (!cfi)
3670     {
3671       dw_cfa_location dummy;
3672       memset (&dummy, 0, sizeof (dummy));
3673       dummy.reg.set_by_dwreg (INVALID_REGNUM);
3674       cfi = def_cfa_0 (&dummy, &row->cfa);
3675     }
3676   output_cfi_directive (f, cfi);
3677 
3678   FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3679     if (cfi)
3680       output_cfi_directive (f, cfi);
3681 }
3682 
void debug_cfi_row (dw_cfi_row *row);

/* Dump ROW to stderr; intended to be called from the debugger.  */

void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
3690 
3691 
/* Save the result of dwarf2out_do_frame across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.
   It lives in GC memory (GTY) so the cached decision survives
   precompiled-header save/restore.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3695 
3696 /* Decide whether to emit EH frame unwind information for the current
3697    translation unit.  */
3698 
3699 bool
dwarf2out_do_eh_frame(void)3700 dwarf2out_do_eh_frame (void)
3701 {
3702   return
3703     (flag_unwind_tables || flag_exceptions)
3704     && targetm_common.except_unwind_info (&global_options) == UI_DWARF2;
3705 }
3706 
3707 /* Decide whether we want to emit frame unwind information for the current
3708    translation unit.  */
3709 
3710 bool
dwarf2out_do_frame(void)3711 dwarf2out_do_frame (void)
3712 {
3713   /* We want to emit correct CFA location expressions or lists, so we
3714      have to return true if we're going to generate debug info, even if
3715      we're not going to output frame or unwind info.  */
3716   if (dwarf_debuginfo_p () || dwarf_based_debuginfo_p ())
3717     return true;
3718 
3719   if (saved_do_cfi_asm > 0)
3720     return true;
3721 
3722   if (targetm.debug_unwind_info () == UI_DWARF2)
3723     return true;
3724 
3725   if (dwarf2out_do_eh_frame ())
3726     return true;
3727 
3728   return false;
3729 }
3730 
/* Decide whether to emit frame unwind via assembler directives.
   The answer is computed once and cached (across PCH) in
   saved_do_cfi_asm: 0 undecided, >0 yes, <0 no.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE && !dwarf2out_do_eh_frame ())
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
3767 
namespace {

/* Pass descriptor for the dwarf2 frame annotation pass; runs as part
   of TV_FINAL with no extra properties or todo flags.  */
const pass_data pass_data_dwarf2_frame =
{
  RTL_PASS, /* type */
  "dwarf2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_FINAL, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* RTL pass that annotates the insn stream with NOTE_INSN_CFI notes,
   to be emitted during pass_final.  */
class pass_dwarf2_frame : public rtl_opt_pass
{
public:
  pass_dwarf2_frame (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }

}; // class pass_dwarf2_frame

bool
pass_dwarf2_frame::gate (function *)
{
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  if (!targetm.have_prologue ())
    return false;

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}

} // anon namespace
3811 
/* Pass-manager entry point: construct a new instance of the dwarf2
   frame pass in context CTXT.  */

rtl_opt_pass *
make_pass_dwarf2_frame (gcc::context *ctxt)
{
  return new pass_dwarf2_frame (ctxt);
}
3817 
3818 #include "gt-dwarf2cfi.h"
3819