/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2021 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "gdbsupport/event-loop.h"
#include "inf-loop.h"
#include "inferior.h"
#include <algorithm>
#include "gdbarch.h"
#include "cli/cli-style.h"
#include "async-event.h"
#include <forward_list>

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;
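
/* For example, "set record btrace replay-memory-access read-write" replaces
   the default read-only setting; xfer_partial below consults this variable
   to decide whether to allow memory writes during replay.  */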

/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};
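
/* These correspond to the "set record btrace cpu" arguments "auto", "none",
   and a user-specified cpu identifier (e.g. "intel:6/158"), respectively.  */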

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
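
/* For example, with "set debug record 1" in effect, DEBUG ("open") prints
   "[record-btrace] open" to gdb_stdlog.  */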


/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through.  */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  /* Ignore this thread if its inferior is not recorded by us.  */
  target_ops *rec = tp->inf->target_at (record_stratum);
  if (rec != &record_btrace_ops)
    return;

  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      warning ("%s", error.what ());
    }
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
				     record_btrace_thread_observer_token,
				     "record-btrace");
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  current_inferior ()->push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL, "record-btrace");
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace",
					 format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};
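
/* Typical usage: add each thread right after btrace_enable succeeds, then
   call discard once the whole operation can no longer fail, as in
   record_btrace_target_open below.  */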

/* Open target record-btrace.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution ())
    error (_("The program is not being run."));

  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
				  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  current_inferior ()->unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace.  */

void
record_btrace_target::async (int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}

/* Adjust *SIZE and return a human-readable size suffix.  */
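
/* For example, a size of 4194304 is adjusted to 4 with suffix "MB", while a
   size that is not a multiple of 1024 is returned unchanged with an empty
   suffix.  */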

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The info_record method of target record-btrace.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp),
		     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}
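
/* For example, adding line 42 to an empty range yields [42, 43); adding
   line 40 to that range then extends it to [40, 43).  */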

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      /* The test of is_stmt here was added when the is_stmt field was
	 introduced to the 'struct linetable_entry' structure.  This
	 ensured that this loop maintained the same behaviour as before we
	 introduced is_stmt.  That said, it might be that we would be
	 better off not checking is_stmt here, this would lead to us
	 possibly adding more line numbers to the range.  At the time this
	 change was made I was unsure how to test this so chose to go with
	 maintaining the existing experience.  */
      if ((lines[i].pc == pc) && (lines[i].line != 0)
	  && (lines[i].is_stmt == 1))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST hold the emitters for the last printed
   source line and the instructions corresponding to that source line.  When
   printing a new source line, we close the open emitters and open new ones
   for the new source line.  If the source line range in LINES is not empty,
   this function leaves the emitters for the last printed source line open so
   instructions can be added to them.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    gdb::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
	 btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
				      flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (&dinsn, flags);
	}
    }
}

/* The insn_history method of target record-btrace.  */
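/* A positive SIZE browses forward from the end of the previously printed
   range, a negative SIZE browses backward from its beginning; cf. the
   "record instruction-history" CLI command.  */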

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_range method of target record-btrace.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
					  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_from method of target record-btrace.  */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
					 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }
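
  /* For example, FROM = 100 with SIZE = -10 requests the inclusive range
     [91; 100], while FROM = 100 with SIZE = 10 requests [100; 109].  */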

  insn_history_range (begin, end, flags);
}

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)),
		       file_name_style.style ());

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_signed ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_signed ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();
  else if (msym != NULL)
    return msym->print_name ();
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", sym->print_name (),
			     function_name_style.style ());
      else if (msym != NULL)
	uiout->field_string ("function", msym->print_name (),
			     function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??",
			     function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}

/* The call_history method of target record-btrace.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_range method of target record-btrace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_from method of target record-btrace.  */

void
record_btrace_target::call_history_from (ULONGEST from, int size,
					 record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  call_history_range (begin, end, flags);
}
1387 
1388 /* The record_method method of target record-btrace.  */
1389 
1390 enum record_method
record_method(ptid_t ptid)1391 record_btrace_target::record_method (ptid_t ptid)
1392 {
1393   process_stratum_target *proc_target = current_inferior ()->process_target ();
1394   thread_info *const tp = find_thread_ptid (proc_target, ptid);
1395 
1396   if (tp == NULL)
1397     error (_("No thread."));
1398 
1399   if (tp->btrace.target == NULL)
1400     return RECORD_METHOD_NONE;
1401 
1402   return RECORD_METHOD_BTRACE;
1403 }
1404 
1405 /* The record_is_replaying method of target record-btrace.  */
1406 
1407 bool
record_is_replaying(ptid_t ptid)1408 record_btrace_target::record_is_replaying (ptid_t ptid)
1409 {
1410   process_stratum_target *proc_target = current_inferior ()->process_target ();
1411   for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
1412     if (btrace_is_replaying (tp))
1413       return true;
1414 
1415   return false;
1416 }
1417 
1418 /* The record_will_replay method of target record-btrace.  */
1419 
1420 bool
record_will_replay(ptid_t ptid,int dir)1421 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1422 {
1423   return dir == EXEC_REVERSE || record_is_replaying (ptid);
1424 }
1425 
1426 /* The xfer_partial method of target record-btrace.  */
1427 
1428 enum target_xfer_status
xfer_partial(enum target_object object,const char * annex,gdb_byte * readbuf,const gdb_byte * writebuf,ULONGEST offset,ULONGEST len,ULONGEST * xfered_len)1429 record_btrace_target::xfer_partial (enum target_object object,
1430 				    const char *annex, gdb_byte *readbuf,
1431 				    const gdb_byte *writebuf, ULONGEST offset,
1432 				    ULONGEST len, ULONGEST *xfered_len)
1433 {
1434   /* Filter out requests that don't make sense during replay.  */
1435   if (replay_memory_access == replay_memory_access_read_only
1436       && !record_btrace_generating_corefile
1437       && record_is_replaying (inferior_ptid))
1438     {
1439       switch (object)
1440 	{
1441 	case TARGET_OBJECT_MEMORY:
1442 	  {
1443 	    const struct target_section *section;
1444 
1445 	    /* We do not allow writing memory in general.  */
1446 	    if (writebuf != NULL)
1447 	      {
1448 		*xfered_len = len;
1449 		return TARGET_XFER_UNAVAILABLE;
1450 	      }
1451 
1452 	    /* We allow reading readonly memory.  */
1453 	    section = target_section_by_addr (this, offset);
1454 	    if (section != NULL)
1455 	      {
1456 		/* Check if the section we found is readonly.  */
1457 		if ((bfd_section_flags (section->the_bfd_section)
1458 		     & SEC_READONLY) != 0)
1459 		  {
1460 		    /* Truncate the request to fit into this section.  */
1461 		    len = std::min (len, section->endaddr - offset);
1462 		    break;
1463 		  }
1464 	      }
1465 
1466 	    *xfered_len = len;
1467 	    return TARGET_XFER_UNAVAILABLE;
1468 	  }
1469 	}
1470     }
1471 
1472   /* Forward the request.  */
1473   return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1474 					 offset, len, xfered_len);
1475 }
1476 
1477 /* The insert_breakpoint method of target record-btrace.  */
1478 
1479 int
insert_breakpoint(struct gdbarch * gdbarch,struct bp_target_info * bp_tgt)1480 record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1481 					 struct bp_target_info *bp_tgt)
1482 {
1483   const char *old;
1484   int ret;
1485 
1486   /* Inserting breakpoints requires accessing memory.  Allow it for the
1487      duration of this function.  */
1488   old = replay_memory_access;
1489   replay_memory_access = replay_memory_access_read_write;
1490 
1491   ret = 0;
1492   try
1493     {
1494       ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
1495     }
1496   catch (const gdb_exception &except)
1497     {
1498       replay_memory_access = old;
1499       throw;
1500     }
1501   replay_memory_access = old;
1502 
1503   return ret;
1504 }
1505 
1506 /* The remove_breakpoint method of target record-btrace.  */
1507 
1508 int
remove_breakpoint(struct gdbarch * gdbarch,struct bp_target_info * bp_tgt,enum remove_bp_reason reason)1509 record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1510 					 struct bp_target_info *bp_tgt,
1511 					 enum remove_bp_reason reason)
1512 {
1513   const char *old;
1514   int ret;
1515 
1516   /* Removing breakpoints requires accessing memory.  Allow it for the
1517      duration of this function.  */
1518   old = replay_memory_access;
1519   replay_memory_access = replay_memory_access_read_write;
1520 
1521   ret = 0;
1522   try
1523     {
1524       ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1525     }
1526   catch (const gdb_exception &except)
1527     {
1528       replay_memory_access = old;
1529       throw;
1530     }
1531   replay_memory_access = old;
1532 
1533   return ret;
1534 }
1535 
1536 /* The fetch_registers method of target record-btrace.  */
1537 
1538 void
fetch_registers(struct regcache * regcache,int regno)1539 record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1540 {
1541   btrace_insn_iterator *replay = nullptr;
1542 
1543   /* Thread-db may ask for a thread's registers before GDB knows about the
1544      thread.  We forward the request to the target beneath in this
1545      case.  */
1546   thread_info *tp = find_thread_ptid (regcache->target (), regcache->ptid ());
1547   if (tp != nullptr)
1548     replay =  tp->btrace.replay;
1549 
1550   if (replay != nullptr && !record_btrace_generating_corefile)
1551     {
1552       const struct btrace_insn *insn;
1553       struct gdbarch *gdbarch;
1554       int pcreg;
1555 
1556       gdbarch = regcache->arch ();
1557       pcreg = gdbarch_pc_regnum (gdbarch);
1558       if (pcreg < 0)
1559 	return;
1560 
1561       /* We can only provide the PC register.  */
1562       if (regno >= 0 && regno != pcreg)
1563 	return;
1564 
1565       insn = btrace_insn_get (replay);
1566       gdb_assert (insn != NULL);
1567 
1568       regcache->raw_supply (regno, &insn->pc);
1569     }
1570   else
1571     this->beneath ()->fetch_registers (regcache, regno);
1572 }
1573 
1574 /* The store_registers method of target record-btrace.  */
1575 
1576 void
store_registers(struct regcache * regcache,int regno)1577 record_btrace_target::store_registers (struct regcache *regcache, int regno)
1578 {
1579   if (!record_btrace_generating_corefile
1580       && record_is_replaying (regcache->ptid ()))
1581     error (_("Cannot write registers while replaying."));
1582 
1583   gdb_assert (may_write_registers);
1584 
1585   this->beneath ()->store_registers (regcache, regno);
1586 }
1587 
1588 /* The prepare_to_store method of target record-btrace.  */
1589 
1590 void
prepare_to_store(struct regcache * regcache)1591 record_btrace_target::prepare_to_store (struct regcache *regcache)
1592 {
1593   if (!record_btrace_generating_corefile
1594       && record_is_replaying (regcache->ptid ()))
1595     return;
1596 
1597   this->beneath ()->prepare_to_store (regcache);
1598 }
1599 
1600 /* The branch trace frame cache.  */
1601 
1602 struct btrace_frame_cache
1603 {
1604   /* The thread.  */
1605   struct thread_info *tp;
1606 
1607   /* The frame info.  */
1608   struct frame_info *frame;
1609 
1610   /* The branch trace function segment.  */
1611   const struct btrace_function *bfun;
1612 };
1613 
1614 /* A struct btrace_frame_cache hash table indexed by NEXT.  */
1615 
1616 static htab_t bfcache;
1617 
1618 /* hash_f for htab_create_alloc of bfcache.  */
1619 
1620 static hashval_t
bfcache_hash(const void * arg)1621 bfcache_hash (const void *arg)
1622 {
1623   const struct btrace_frame_cache *cache
1624     = (const struct btrace_frame_cache *) arg;
1625 
1626   return htab_hash_pointer (cache->frame);
1627 }
1628 
1629 /* eq_f for htab_create_alloc of bfcache.  */
1630 
1631 static int
bfcache_eq(const void * arg1,const void * arg2)1632 bfcache_eq (const void *arg1, const void *arg2)
1633 {
1634   const struct btrace_frame_cache *cache1
1635     = (const struct btrace_frame_cache *) arg1;
1636   const struct btrace_frame_cache *cache2
1637     = (const struct btrace_frame_cache *) arg2;
1638 
1639   return cache1->frame == cache2->frame;
1640 }
1641 
1642 /* Create a new btrace frame cache.  */
1643 
1644 static struct btrace_frame_cache *
bfcache_new(struct frame_info * frame)1645 bfcache_new (struct frame_info *frame)
1646 {
1647   struct btrace_frame_cache *cache;
1648   void **slot;
1649 
1650   cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1651   cache->frame = frame;
1652 
1653   slot = htab_find_slot (bfcache, cache, INSERT);
1654   gdb_assert (*slot == NULL);
1655   *slot = cache;
1656 
1657   return cache;
1658 }
1659 
1660 /* Extract the branch trace function from a branch trace frame.  */
1661 
1662 static const struct btrace_function *
btrace_get_frame_function(struct frame_info * frame)1663 btrace_get_frame_function (struct frame_info *frame)
1664 {
1665   const struct btrace_frame_cache *cache;
1666   struct btrace_frame_cache pattern;
1667   void **slot;
1668 
1669   pattern.frame = frame;
1670 
1671   slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1672   if (slot == NULL)
1673     return NULL;
1674 
1675   cache = (const struct btrace_frame_cache *) *slot;
1676   return cache->bfun;
1677 }
1678 
1679 /* Implement stop_reason method for record_btrace_frame_unwind.  */
1680 
1681 static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason(struct frame_info * this_frame,void ** this_cache)1682 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1683 					void **this_cache)
1684 {
1685   const struct btrace_frame_cache *cache;
1686   const struct btrace_function *bfun;
1687 
1688   cache = (const struct btrace_frame_cache *) *this_cache;
1689   bfun = cache->bfun;
1690   gdb_assert (bfun != NULL);
1691 
1692   if (bfun->up == 0)
1693     return UNWIND_UNAVAILABLE;
1694 
1695   return UNWIND_NO_REASON;
1696 }
1697 
1698 /* Implement this_id method for record_btrace_frame_unwind.  */
1699 
1700 static void
record_btrace_frame_this_id(struct frame_info * this_frame,void ** this_cache,struct frame_id * this_id)1701 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1702 			     struct frame_id *this_id)
1703 {
1704   const struct btrace_frame_cache *cache;
1705   const struct btrace_function *bfun;
1706   struct btrace_call_iterator it;
1707   CORE_ADDR code, special;
1708 
1709   cache = (const struct btrace_frame_cache *) *this_cache;
1710 
1711   bfun = cache->bfun;
1712   gdb_assert (bfun != NULL);
1713 
1714   while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1715     bfun = btrace_call_get (&it);
1716 
1717   code = get_frame_func (this_frame);
1718   special = bfun->number;
1719 
1720   *this_id = frame_id_build_unavailable_stack_special (code, special);
1721 
1722   DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1723 	 btrace_get_bfun_name (cache->bfun),
1724 	 core_addr_to_string_nz (this_id->code_addr),
1725 	 core_addr_to_string_nz (this_id->special_addr));
1726 }
1727 
1728 /* Implement prev_register method for record_btrace_frame_unwind.  */
1729 
1730 static struct value *
record_btrace_frame_prev_register(struct frame_info * this_frame,void ** this_cache,int regnum)1731 record_btrace_frame_prev_register (struct frame_info *this_frame,
1732 				   void **this_cache,
1733 				   int regnum)
1734 {
1735   const struct btrace_frame_cache *cache;
1736   const struct btrace_function *bfun, *caller;
1737   struct btrace_call_iterator it;
1738   struct gdbarch *gdbarch;
1739   CORE_ADDR pc;
1740   int pcreg;
1741 
1742   gdbarch = get_frame_arch (this_frame);
1743   pcreg = gdbarch_pc_regnum (gdbarch);
1744   if (pcreg < 0 || regnum != pcreg)
1745     throw_error (NOT_AVAILABLE_ERROR,
1746 		 _("Registers are not available in btrace record history"));
1747 
1748   cache = (const struct btrace_frame_cache *) *this_cache;
1749   bfun = cache->bfun;
1750   gdb_assert (bfun != NULL);
1751 
1752   if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1753     throw_error (NOT_AVAILABLE_ERROR,
1754 		 _("No caller in btrace record history"));
1755 
1756   caller = btrace_call_get (&it);
1757 
1758   if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1759     pc = caller->insn.front ().pc;
1760   else
1761     {
1762       pc = caller->insn.back ().pc;
1763       pc += gdb_insn_length (gdbarch, pc);
1764     }
1765 
1766   DEBUG ("[frame] unwound PC in %s on level %d: %s",
1767 	 btrace_get_bfun_name (bfun), bfun->level,
1768 	 core_addr_to_string_nz (pc));
1769 
1770   return frame_unwind_got_address (this_frame, regnum, pc);
1771 }
1772 
1773 /* Implement sniffer method for record_btrace_frame_unwind.  */
1774 
1775 static int
record_btrace_frame_sniffer(const struct frame_unwind * self,struct frame_info * this_frame,void ** this_cache)1776 record_btrace_frame_sniffer (const struct frame_unwind *self,
1777 			     struct frame_info *this_frame,
1778 			     void **this_cache)
1779 {
1780   const struct btrace_function *bfun;
1781   struct btrace_frame_cache *cache;
1782   struct thread_info *tp;
1783   struct frame_info *next;
1784 
1785   /* THIS_FRAME does not contain a reference to its thread.  */
1786   tp = inferior_thread ();
1787 
1788   bfun = NULL;
1789   next = get_next_frame (this_frame);
1790   if (next == NULL)
1791     {
1792       const struct btrace_insn_iterator *replay;
1793 
1794       replay = tp->btrace.replay;
1795       if (replay != NULL)
1796 	bfun = &replay->btinfo->functions[replay->call_index];
1797     }
1798   else
1799     {
1800       const struct btrace_function *callee;
1801       struct btrace_call_iterator it;
1802 
1803       callee = btrace_get_frame_function (next);
1804       if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1805 	return 0;
1806 
1807       if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1808 	return 0;
1809 
1810       bfun = btrace_call_get (&it);
1811     }
1812 
1813   if (bfun == NULL)
1814     return 0;
1815 
1816   DEBUG ("[frame] sniffed frame for %s on level %d",
1817 	 btrace_get_bfun_name (bfun), bfun->level);
1818 
1819   /* This is our frame.  Initialize the frame cache.  */
1820   cache = bfcache_new (this_frame);
1821   cache->tp = tp;
1822   cache->bfun = bfun;
1823 
1824   *this_cache = cache;
1825   return 1;
1826 }
1827 
1828 /* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */
1829 
1830 static int
1831 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1832 				      struct frame_info *this_frame,
1833 				      void **this_cache)
1834 {
1835   const struct btrace_function *bfun, *callee;
1836   struct btrace_frame_cache *cache;
1837   struct btrace_call_iterator it;
1838   struct frame_info *next;
1839   struct thread_info *tinfo;
1840 
1841   next = get_next_frame (this_frame);
1842   if (next == NULL)
1843     return 0;
1844 
1845   callee = btrace_get_frame_function (next);
1846   if (callee == NULL)
1847     return 0;
1848 
1849   if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1850     return 0;
1851 
1852   tinfo = inferior_thread ();
1853   if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1854     return 0;
1855 
1856   bfun = btrace_call_get (&it);
1857 
1858   DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1859 	 btrace_get_bfun_name (bfun), bfun->level);
1860 
1861   /* This is our frame.  Initialize the frame cache.  */
1862   cache = bfcache_new (this_frame);
1863   cache->tp = tinfo;
1864   cache->bfun = bfun;
1865 
1866   *this_cache = cache;
1867   return 1;
1868 }
1869 
1870 static void
1871 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1872 {
1873   struct btrace_frame_cache *cache;
1874   void **slot;
1875 
1876   cache = (struct btrace_frame_cache *) this_cache;
1877 
1878   slot = htab_find_slot (bfcache, cache, NO_INSERT);
1879   gdb_assert (slot != NULL);
1880 
1881   htab_remove_elt (bfcache, cache);
1882 }
1883 
1884 /* btrace recording does not store previous memory content, nor the
1885    content of stack frames.  Any unwinding would return erroneous results
1886    as the stack contents no longer match the changed PC value restored
1887    from history.  Therefore this unwinder reports any possibly unwound
1888    registers as <unavailable>.  */
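
/* For illustration only: during replay, backtraces still work because frame
   IDs and PC values are reconstructed from the recorded branch trace, while
   anything that needs saved stack or register contents is unavailable.  A
   hypothetical session (names and addresses invented):

     (gdb) record goto 1
     (gdb) backtrace
     #0  foo () at foo.c:42
     #1  bar () at bar.c:23

   Reading a non-PC register of an outer frame fails with "Registers are
   not available in btrace record history"; see
   record_btrace_frame_prev_register above.  */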
1889 
1890 const struct frame_unwind record_btrace_frame_unwind =
1891 {
1892   "record-btrace",
1893   NORMAL_FRAME,
1894   record_btrace_frame_unwind_stop_reason,
1895   record_btrace_frame_this_id,
1896   record_btrace_frame_prev_register,
1897   NULL,
1898   record_btrace_frame_sniffer,
1899   record_btrace_frame_dealloc_cache
1900 };
1901 
1902 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1903 {
1904   "record-btrace tailcall",
1905   TAILCALL_FRAME,
1906   record_btrace_frame_unwind_stop_reason,
1907   record_btrace_frame_this_id,
1908   record_btrace_frame_prev_register,
1909   NULL,
1910   record_btrace_tailcall_frame_sniffer,
1911   record_btrace_frame_dealloc_cache
1912 };
1913 
1914 /* Implement the get_unwinder method.  */
1915 
1916 const struct frame_unwind *
1917 record_btrace_target::get_unwinder ()
1918 {
1919   return &record_btrace_frame_unwind;
1920 }
1921 
1922 /* Implement the get_tailcall_unwinder method.  */
1923 
1924 const struct frame_unwind *
1925 record_btrace_target::get_tailcall_unwinder ()
1926 {
1927   return &record_btrace_tailcall_frame_unwind;
1928 }
1929 
1930 /* Return a human-readable string for FLAG.  */
1931 
1932 static const char *
1933 btrace_thread_flag_to_str (btrace_thread_flags flag)
1934 {
1935   switch (flag)
1936     {
1937     case BTHR_STEP:
1938       return "step";
1939 
1940     case BTHR_RSTEP:
1941       return "reverse-step";
1942 
1943     case BTHR_CONT:
1944       return "cont";
1945 
1946     case BTHR_RCONT:
1947       return "reverse-cont";
1948 
1949     case BTHR_STOP:
1950       return "stop";
1951     }
1952 
1953   return "<invalid>";
1954 }
1955 
1956 /* Indicate that TP should be resumed according to FLAG.  */
1957 
1958 static void
1959 record_btrace_resume_thread (struct thread_info *tp,
1960 			     enum btrace_thread_flag flag)
1961 {
1962   struct btrace_thread_info *btinfo;
1963 
1964   DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1965 	 target_pid_to_str (tp->ptid).c_str (), flag,
1966 	 btrace_thread_flag_to_str (flag));
1967 
1968   btinfo = &tp->btrace;
1969 
1970   /* Fetch the latest branch trace.  */
1971   btrace_fetch (tp, record_btrace_get_cpu ());
1972 
1973   /* A resume request overwrites a preceding resume or stop request.  */
1974   btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1975   btinfo->flags |= flag;
1976 }
1977 
1978 /* Get the current frame for TP.  */
1979 
1980 static struct frame_id
1981 get_thread_current_frame_id (struct thread_info *tp)
1982 {
1983   struct frame_id id;
1984   bool executing;
1985 
1986   /* Set current thread, which is implicitly used by
1987      get_current_frame.  */
1988   scoped_restore_current_thread restore_thread;
1989 
1990   switch_to_thread (tp);
1991 
1992   process_stratum_target *proc_target = tp->inf->process_target ();
1993 
1994   /* Clear the executing flag to allow changes to the current frame.
1995      We are not actually running, yet.  We just started a reverse execution
1996      command or a record goto command.
1997      For the latter, EXECUTING is false and this has no effect.
1998      For the former, EXECUTING is true and we're in wait, about to
1999      move the thread.  Since we need to recompute the stack, we temporarily
2000      set EXECUTING to false.  */
2001   executing = tp->executing;
2002   set_executing (proc_target, inferior_ptid, false);
2003 
2004   id = null_frame_id;
2005   try
2006     {
2007       id = get_frame_id (get_current_frame ());
2008     }
2009   catch (const gdb_exception &except)
2010     {
2011       /* Restore the previous execution state.  */
2012       set_executing (proc_target, inferior_ptid, executing);
2013 
2014       throw;
2015     }
2016 
2017   /* Restore the previous execution state.  */
2018   set_executing (proc_target, inferior_ptid, executing);
2019 
2020   return id;
2021 }
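
/* A minimal RAII sketch of the save/restore above (illustrative only, not
   a GDB helper; assumes nothing else toggles the flag concurrently):

     class scoped_clear_executing
     {
     public:
       explicit scoped_clear_executing (thread_info *tp)
         : m_target (tp->inf->process_target ()), m_ptid (tp->ptid),
           m_executing (tp->executing)
       { set_executing (m_target, m_ptid, false); }

       ~scoped_clear_executing ()
       { set_executing (m_target, m_ptid, m_executing); }

     private:
       process_stratum_target *m_target;
       ptid_t m_ptid;
       bool m_executing;
     };

   With such a guard, the destructor restores the flag on both the normal
   and the exception path, and the explicit catch block above would not be
   needed.  */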
2022 
2023 /* Start replaying a thread.  */
2024 
2025 static struct btrace_insn_iterator *
2026 record_btrace_start_replaying (struct thread_info *tp)
2027 {
2028   struct btrace_insn_iterator *replay;
2029   struct btrace_thread_info *btinfo;
2030 
2031   btinfo = &tp->btrace;
2032   replay = NULL;
2033 
2034   /* We can't start replaying without trace.  */
2035   if (btinfo->functions.empty ())
2036     return NULL;
2037 
2038   /* GDB stores the current frame_id when stepping in order to detect steps
2039      into subroutines.
2040      Since frames are computed differently when we're replaying, we need to
2041      recompute those stored frames and fix them up so we can still detect
2042      subroutines after we started replaying.  */
2043   try
2044     {
2045       struct frame_id frame_id;
2046       int upd_step_frame_id, upd_step_stack_frame_id;
2047 
2048       /* The current frame without replaying - computed via normal unwind.  */
2049       frame_id = get_thread_current_frame_id (tp);
2050 
2051       /* Check if we need to update any stepping-related frame id's.  */
2052       upd_step_frame_id = frame_id_eq (frame_id,
2053 				       tp->control.step_frame_id);
2054       upd_step_stack_frame_id = frame_id_eq (frame_id,
2055 					     tp->control.step_stack_frame_id);
2056 
2057       /* We start replaying at the end of the branch trace.  This corresponds
2058 	 to the current instruction.  */
2059       replay = XNEW (struct btrace_insn_iterator);
2060       btrace_insn_end (replay, btinfo);
2061 
2062       /* Skip gaps at the end of the trace.  */
2063       while (btrace_insn_get (replay) == NULL)
2064 	{
2065 	  unsigned int steps;
2066 
2067 	  steps = btrace_insn_prev (replay, 1);
2068 	  if (steps == 0)
2069 	    error (_("No trace."));
2070 	}
2071 
2072       /* We're not replaying, yet.  */
2073       gdb_assert (btinfo->replay == NULL);
2074       btinfo->replay = replay;
2075 
2076       /* Make sure we're not using any stale registers.  */
2077       registers_changed_thread (tp);
2078 
2079       /* The current frame with replaying - computed via btrace unwind.  */
2080       frame_id = get_thread_current_frame_id (tp);
2081 
2082       /* Replace stepping related frames where necessary.  */
2083       if (upd_step_frame_id)
2084 	tp->control.step_frame_id = frame_id;
2085       if (upd_step_stack_frame_id)
2086 	tp->control.step_stack_frame_id = frame_id;
2087     }
2088   catch (const gdb_exception &except)
2089     {
2090       xfree (btinfo->replay);
2091       btinfo->replay = NULL;
2092 
2093       registers_changed_thread (tp);
2094 
2095       throw;
2096     }
2097 
2098   return replay;
2099 }
2100 
2101 /* Stop replaying a thread.  */
2102 
2103 static void
2104 record_btrace_stop_replaying (struct thread_info *tp)
2105 {
2106   struct btrace_thread_info *btinfo;
2107 
2108   btinfo = &tp->btrace;
2109 
2110   xfree (btinfo->replay);
2111   btinfo->replay = NULL;
2112 
2113   /* Make sure we're not leaving any stale registers.  */
2114   registers_changed_thread (tp);
2115 }
2116 
2117 /* Stop replaying TP if it is at the end of its execution history.  */
2118 
2119 static void
2120 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2121 {
2122   struct btrace_insn_iterator *replay, end;
2123   struct btrace_thread_info *btinfo;
2124 
2125   btinfo = &tp->btrace;
2126   replay = btinfo->replay;
2127 
2128   if (replay == NULL)
2129     return;
2130 
2131   btrace_insn_end (&end, btinfo);
2132 
2133   if (btrace_insn_cmp (replay, &end) == 0)
2134     record_btrace_stop_replaying (tp);
2135 }
2136 
2137 /* The resume method of target record-btrace.  */
2138 
2139 void
2140 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2141 {
2142   enum btrace_thread_flag flag, cflag;
2143 
2144   DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
2145 	 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2146 	 step ? "step" : "cont");
2147 
2148   /* Store the execution direction of the last resume.
2149 
2150      If there is more than one resume call, we have to rely on infrun
2151      to not change the execution direction in-between.  */
2152   record_btrace_resume_exec_dir = ::execution_direction;
2153 
2154   /* As long as we're not replaying, just forward the request.
2155 
2156      For non-stop targets this means that no thread is replaying.  In order to
2157      make progress, we may need to explicitly move replaying threads to the end
2158      of their execution history.  */
2159   if ((::execution_direction != EXEC_REVERSE)
2160       && !record_is_replaying (minus_one_ptid))
2161     {
2162       this->beneath ()->resume (ptid, step, signal);
2163       return;
2164     }
2165 
2166   /* Compute the btrace thread flag for the requested move.  */
2167   if (::execution_direction == EXEC_REVERSE)
2168     {
2169       flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2170       cflag = BTHR_RCONT;
2171     }
2172   else
2173     {
2174       flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2175       cflag = BTHR_CONT;
2176     }
2177 
2178   /* We just indicate the resume intent here.  The actual stepping happens in
2179      record_btrace_wait below.
2180 
2181      For all-stop targets, we only step INFERIOR_PTID and continue others.  */
2182 
2183   process_stratum_target *proc_target = current_inferior ()->process_target ();
2184 
2185   if (!target_is_non_stop_p ())
2186     {
2187       gdb_assert (inferior_ptid.matches (ptid));
2188 
2189       for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2190 	{
2191 	  if (tp->ptid.matches (inferior_ptid))
2192 	    record_btrace_resume_thread (tp, flag);
2193 	  else
2194 	    record_btrace_resume_thread (tp, cflag);
2195 	}
2196     }
2197   else
2198     {
2199       for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2200 	record_btrace_resume_thread (tp, flag);
2201     }
2202 
2203   /* Async support.  */
2204   if (target_can_async_p ())
2205     {
2206       target_async (1);
2207       mark_async_event_handler (record_btrace_async_inferior_event_handler);
2208     }
2209 }
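
/* Example: in all-stop mode, "reverse-stepi" marks the selected thread
   with BTHR_RSTEP and all other resumed threads with BTHR_RCONT; a reverse
   "continue" marks every thread with BTHR_RCONT.  */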
2210 
2211 /* Cancel resuming TP.  */
2212 
2213 static void
2214 record_btrace_cancel_resume (struct thread_info *tp)
2215 {
2216   btrace_thread_flags flags;
2217 
2218   flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2219   if (flags == 0)
2220     return;
2221 
2222   DEBUG ("cancel resume thread %s (%s): %x (%s)",
2223 	 print_thread_id (tp),
2224 	 target_pid_to_str (tp->ptid).c_str (), flags.raw (),
2225 	 btrace_thread_flag_to_str (flags));
2226 
2227   tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2228   record_btrace_stop_replaying_at_end (tp);
2229 }
2230 
2231 /* Return a target_waitstatus indicating that we ran out of history.  */
2232 
2233 static struct target_waitstatus
2234 btrace_step_no_history (void)
2235 {
2236   struct target_waitstatus status;
2237 
2238   status.kind = TARGET_WAITKIND_NO_HISTORY;
2239 
2240   return status;
2241 }
2242 
2243 /* Return a target_waitstatus indicating that a step finished.  */
2244 
2245 static struct target_waitstatus
2246 btrace_step_stopped (void)
2247 {
2248   struct target_waitstatus status;
2249 
2250   status.kind = TARGET_WAITKIND_STOPPED;
2251   status.value.sig = GDB_SIGNAL_TRAP;
2252 
2253   return status;
2254 }
2255 
2256 /* Return a target_waitstatus indicating that a thread was stopped as
2257    requested.  */
2258 
2259 static struct target_waitstatus
2260 btrace_step_stopped_on_request (void)
2261 {
2262   struct target_waitstatus status;
2263 
2264   status.kind = TARGET_WAITKIND_STOPPED;
2265   status.value.sig = GDB_SIGNAL_0;
2266 
2267   return status;
2268 }
2269 
2270 /* Return a target_waitstatus indicating a spurious stop.  */
2271 
2272 static struct target_waitstatus
2273 btrace_step_spurious (void)
2274 {
2275   struct target_waitstatus status;
2276 
2277   status.kind = TARGET_WAITKIND_SPURIOUS;
2278 
2279   return status;
2280 }
2281 
2282 /* Return a target_waitstatus indicating that the thread was not resumed.  */
2283 
2284 static struct target_waitstatus
2285 btrace_step_no_resumed (void)
2286 {
2287   struct target_waitstatus status;
2288 
2289   status.kind = TARGET_WAITKIND_NO_RESUMED;
2290 
2291   return status;
2292 }
2293 
2294 /* Return a target_waitstatus indicating that we should wait again.  */
2295 
2296 static struct target_waitstatus
2297 btrace_step_again (void)
2298 {
2299   struct target_waitstatus status;
2300 
2301   status.kind = TARGET_WAITKIND_IGNORE;
2302 
2303   return status;
2304 }
2305 
2306 /* Clear the record histories.  */
2307 
2308 static void
2309 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2310 {
2311   xfree (btinfo->insn_history);
2312   xfree (btinfo->call_history);
2313 
2314   btinfo->insn_history = NULL;
2315   btinfo->call_history = NULL;
2316 }
2317 
2318 /* Check whether TP's current replay position is at a breakpoint.  */
2319 
2320 static int
2321 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2322 {
2323   struct btrace_insn_iterator *replay;
2324   struct btrace_thread_info *btinfo;
2325   const struct btrace_insn *insn;
2326 
2327   btinfo = &tp->btrace;
2328   replay = btinfo->replay;
2329 
2330   if (replay == NULL)
2331     return 0;
2332 
2333   insn = btrace_insn_get (replay);
2334   if (insn == NULL)
2335     return 0;
2336 
2337   return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2338 					     &btinfo->stop_reason);
2339 }
2340 
2341 /* Step one instruction in forward direction.  */
2342 
2343 static struct target_waitstatus
2344 record_btrace_single_step_forward (struct thread_info *tp)
2345 {
2346   struct btrace_insn_iterator *replay, end, start;
2347   struct btrace_thread_info *btinfo;
2348 
2349   btinfo = &tp->btrace;
2350   replay = btinfo->replay;
2351 
2352   /* We're done if we're not replaying.  */
2353   if (replay == NULL)
2354     return btrace_step_no_history ();
2355 
2356   /* Check if we're stepping a breakpoint.  */
2357   if (record_btrace_replay_at_breakpoint (tp))
2358     return btrace_step_stopped ();
2359 
2360   /* Skip gaps during replay.  If we end up at a gap (at the end of the trace),
2361      jump back to the instruction at which we started.  */
2362   start = *replay;
2363   do
2364     {
2365       unsigned int steps;
2366 
2367       /* We will bail out here if we continue stepping after reaching the end
2368 	 of the execution history.  */
2369       steps = btrace_insn_next (replay, 1);
2370       if (steps == 0)
2371 	{
2372 	  *replay = start;
2373 	  return btrace_step_no_history ();
2374 	}
2375     }
2376   while (btrace_insn_get (replay) == NULL);
2377 
2378   /* Determine the end of the instruction trace.  */
2379   btrace_insn_end (&end, btinfo);
2380 
2381   /* The execution trace contains (and ends with) the current instruction.
2382      This instruction has not been executed, yet, so the trace really ends
2383      one instruction earlier.  */
2384   if (btrace_insn_cmp (replay, &end) == 0)
2385     return btrace_step_no_history ();
2386 
2387   return btrace_step_spurious ();
2388 }
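
/* Example: if the trace contains instructions 1..N, instruction N is the
   current, not-yet-executed one.  A forward step that reaches N therefore
   reports TARGET_WAITKIND_NO_HISTORY instead of moving past the recorded
   history.  */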
2389 
2390 /* Step one instruction in backward direction.  */
2391 
2392 static struct target_waitstatus
2393 record_btrace_single_step_backward (struct thread_info *tp)
2394 {
2395   struct btrace_insn_iterator *replay, start;
2396   struct btrace_thread_info *btinfo;
2397 
2398   btinfo = &tp->btrace;
2399   replay = btinfo->replay;
2400 
2401   /* Start replaying if we're not already doing so.  */
2402   if (replay == NULL)
2403     replay = record_btrace_start_replaying (tp);
2404 
2405   /* If we can't step any further, we reached the end of the history.
2406      Skip gaps during replay.  If we end up at a gap (at the beginning of
2407      the trace), jump back to the instruction at which we started.  */
2408   start = *replay;
2409   do
2410     {
2411       unsigned int steps;
2412 
2413       steps = btrace_insn_prev (replay, 1);
2414       if (steps == 0)
2415 	{
2416 	  *replay = start;
2417 	  return btrace_step_no_history ();
2418 	}
2419     }
2420   while (btrace_insn_get (replay) == NULL);
2421 
2422   /* Check if we're stepping a breakpoint.
2423 
2424      For reverse-stepping, this check is after the step.  There is logic in
2425      infrun.c that handles reverse-stepping separately.  See, for example,
2426      proceed and adjust_pc_after_break.
2427 
2428      This code assumes that for reverse-stepping, PC points to the last
2429      de-executed instruction, whereas for forward-stepping PC points to the
2430      next to-be-executed instruction.  */
2431   if (record_btrace_replay_at_breakpoint (tp))
2432     return btrace_step_stopped ();
2433 
2434   return btrace_step_spurious ();
2435 }
2436 
2437 /* Step a single thread.  */
2438 
2439 static struct target_waitstatus
2440 record_btrace_step_thread (struct thread_info *tp)
2441 {
2442   struct btrace_thread_info *btinfo;
2443   struct target_waitstatus status;
2444   btrace_thread_flags flags;
2445 
2446   btinfo = &tp->btrace;
2447 
2448   flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2449   btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2450 
2451   DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2452 	 target_pid_to_str (tp->ptid).c_str (), flags.raw (),
2453 	 btrace_thread_flag_to_str (flags));
2454 
2455   /* We can't step without an execution history.  */
2456   if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2457     return btrace_step_no_history ();
2458 
2459   switch (flags)
2460     {
2461     default:
2462       internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2463 
2464     case BTHR_STOP:
2465       return btrace_step_stopped_on_request ();
2466 
2467     case BTHR_STEP:
2468       status = record_btrace_single_step_forward (tp);
2469       if (status.kind != TARGET_WAITKIND_SPURIOUS)
2470 	break;
2471 
2472       return btrace_step_stopped ();
2473 
2474     case BTHR_RSTEP:
2475       status = record_btrace_single_step_backward (tp);
2476       if (status.kind != TARGET_WAITKIND_SPURIOUS)
2477 	break;
2478 
2479       return btrace_step_stopped ();
2480 
2481     case BTHR_CONT:
2482       status = record_btrace_single_step_forward (tp);
2483       if (status.kind != TARGET_WAITKIND_SPURIOUS)
2484 	break;
2485 
2486       btinfo->flags |= flags;
2487       return btrace_step_again ();
2488 
2489     case BTHR_RCONT:
2490       status = record_btrace_single_step_backward (tp);
2491       if (status.kind != TARGET_WAITKIND_SPURIOUS)
2492 	break;
2493 
2494       btinfo->flags |= flags;
2495       return btrace_step_again ();
2496     }
2497 
2498   /* We keep threads moving at the end of their execution history.  The wait
2499      method will stop the thread for whom the event is reported.  */
2500   if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2501     btinfo->flags |= flags;
2502 
2503   return status;
2504 }
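
/* Example flow for a "continue" during replay: record_btrace_target::wait
   calls record_btrace_step_thread repeatedly.  Each BTHR_CONT iteration
   moves one instruction and returns TARGET_WAITKIND_IGNORE (via
   btrace_step_again) until a breakpoint yields TARGET_WAITKIND_STOPPED or
   the history ends with TARGET_WAITKIND_NO_HISTORY.  */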
2505 
2506 /* Announce further events if necessary.  */
2507 
2508 static void
2509 record_btrace_maybe_mark_async_event
2510   (const std::vector<thread_info *> &moving,
2511    const std::vector<thread_info *> &no_history)
2512 {
2513   bool more_moving = !moving.empty ();
2514   bool more_no_history = !no_history.empty ();
2515 
2516   if (!more_moving && !more_no_history)
2517     return;
2518 
2519   if (more_moving)
2520     DEBUG ("movers pending");
2521 
2522   if (more_no_history)
2523     DEBUG ("no-history pending");
2524 
2525   mark_async_event_handler (record_btrace_async_inferior_event_handler);
2526 }
2527 
2528 /* The wait method of target record-btrace.  */
2529 
2530 ptid_t
2531 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2532 			    target_wait_flags options)
2533 {
2534   std::vector<thread_info *> moving;
2535   std::vector<thread_info *> no_history;
2536 
2537   /* Clear this, if needed we'll re-mark it below.  */
2538   clear_async_event_handler (record_btrace_async_inferior_event_handler);
2539 
2540   DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (),
2541 	 (unsigned) options);
2542 
2543   /* As long as we're not replaying, just forward the request.  */
2544   if ((::execution_direction != EXEC_REVERSE)
2545       && !record_is_replaying (minus_one_ptid))
2546     {
2547       return this->beneath ()->wait (ptid, status, options);
2548     }
2549 
2550   /* Keep a work list of moving threads.  */
2551   process_stratum_target *proc_target = current_inferior ()->process_target ();
2552   for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2553     if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2554       moving.push_back (tp);
2555 
2556   if (moving.empty ())
2557     {
2558       *status = btrace_step_no_resumed ();
2559 
2560       DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
2561 	     target_waitstatus_to_string (status).c_str ());
2562 
2563       return null_ptid;
2564     }
2565 
2566   /* Step moving threads one by one, one step each, until either one thread
2567      reports an event or we run out of threads to step.
2568 
2569      When stepping more than one thread, chances are that some threads reach
2570      the end of their execution history earlier than others.  If we reported
2571      this immediately, all-stop on top of non-stop would stop all threads and
2572      resume the same threads next time.  And we would report the same thread
2573      having reached the end of its execution history again.
2574 
2575      In the worst case, this would starve the other threads.  But even if other
2576      threads would be allowed to make progress, this would result in far too
2577      many intermediate stops.
2578 
2579      We therefore delay the reporting of "no execution history" until we have
2580      nothing else to report.  By this time, all threads should have moved to
2581      either the beginning or the end of their execution history.  There will
2582      be a single user-visible stop.  */
2583   struct thread_info *eventing = NULL;
2584   while ((eventing == NULL) && !moving.empty ())
2585     {
2586       for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2587 	{
2588 	  thread_info *tp = moving[ix];
2589 
2590 	  *status = record_btrace_step_thread (tp);
2591 
2592 	  switch (status->kind)
2593 	    {
2594 	    case TARGET_WAITKIND_IGNORE:
2595 	      ix++;
2596 	      break;
2597 
2598 	    case TARGET_WAITKIND_NO_HISTORY:
2599 	      no_history.push_back (ordered_remove (moving, ix));
2600 	      break;
2601 
2602 	    default:
2603 	      eventing = unordered_remove (moving, ix);
2604 	      break;
2605 	    }
2606 	}
2607     }
2608 
2609   if (eventing == NULL)
2610     {
2611       /* We started with at least one moving thread.  This thread must have
2612 	 either stopped or reached the end of its execution history.
2613 
2614 	 In the former case, EVENTING must not be NULL.
2615 	 In the latter case, NO_HISTORY must not be empty.  */
2616       gdb_assert (!no_history.empty ());
2617 
2618       /* We kept threads moving at the end of their execution history.  Stop
2619 	 EVENTING now that we are going to report its stop.  */
2620       eventing = unordered_remove (no_history, 0);
2621       eventing->btrace.flags &= ~BTHR_MOVE;
2622 
2623       *status = btrace_step_no_history ();
2624     }
2625 
2626   gdb_assert (eventing != NULL);
2627 
2628   /* We kept threads replaying at the end of their execution history.  Stop
2629      replaying EVENTING now that we are going to report its stop.  */
2630   record_btrace_stop_replaying_at_end (eventing);
2631 
2632   /* Stop all other threads.  */
2633   if (!target_is_non_stop_p ())
2634     {
2635       for (thread_info *tp : current_inferior ()->non_exited_threads ())
2636 	record_btrace_cancel_resume (tp);
2637     }
2638 
2639   /* In async mode, we need to announce further events.  */
2640   if (target_is_async_p ())
2641     record_btrace_maybe_mark_async_event (moving, no_history);
2642 
2643   /* Start record histories anew from the current position.  */
2644   record_btrace_clear_histories (&eventing->btrace);
2645 
2646   /* We moved the replay position but did not update registers.  */
2647   registers_changed_thread (eventing);
2648 
2649   DEBUG ("wait ended by thread %s (%s): %s",
2650 	 print_thread_id (eventing),
2651 	 target_pid_to_str (eventing->ptid).c_str (),
2652 	 target_waitstatus_to_string (status).c_str ());
2653 
2654   return eventing->ptid;
2655 }
2656 
2657 /* The stop method of target record-btrace.  */
2658 
2659 void
2660 record_btrace_target::stop (ptid_t ptid)
2661 {
2662   DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
2663 
2664   /* As long as we're not replaying, just forward the request.  */
2665   if ((::execution_direction != EXEC_REVERSE)
2666       && !record_is_replaying (minus_one_ptid))
2667     {
2668       this->beneath ()->stop (ptid);
2669     }
2670   else
2671     {
2672       process_stratum_target *proc_target
2673 	= current_inferior ()->process_target ();
2674 
2675       for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2676 	{
2677 	  tp->btrace.flags &= ~BTHR_MOVE;
2678 	  tp->btrace.flags |= BTHR_STOP;
2679 	}
2680     }
2681 }
2682 
2683 /* The can_execute_reverse method of target record-btrace.  */
2684 
2685 bool
2686 record_btrace_target::can_execute_reverse ()
2687 {
2688   return true;
2689 }
2690 
2691 /* The stopped_by_sw_breakpoint method of target record-btrace.  */
2692 
2693 bool
2694 record_btrace_target::stopped_by_sw_breakpoint ()
2695 {
2696   if (record_is_replaying (minus_one_ptid))
2697     {
2698       struct thread_info *tp = inferior_thread ();
2699 
2700       return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2701     }
2702 
2703   return this->beneath ()->stopped_by_sw_breakpoint ();
2704 }
2705 
2706 /* The supports_stopped_by_sw_breakpoint method of target
2707    record-btrace.  */
2708 
2709 bool
2710 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2711 {
2712   if (record_is_replaying (minus_one_ptid))
2713     return true;
2714 
2715   return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2716 }
2717 
2718 /* The stopped_by_sw_breakpoint method of target record-btrace.  */
2719 
2720 bool
2721 record_btrace_target::stopped_by_hw_breakpoint ()
2722 {
2723   if (record_is_replaying (minus_one_ptid))
2724     {
2725       struct thread_info *tp = inferior_thread ();
2726 
2727       return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2728     }
2729 
2730   return this->beneath ()->stopped_by_hw_breakpoint ();
2731 }
2732 
2733 /* The supports_stopped_by_hw_breakpoint method of target
2734    record-btrace.  */
2735 
2736 bool
2737 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2738 {
2739   if (record_is_replaying (minus_one_ptid))
2740     return true;
2741 
2742   return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2743 }
2744 
2745 /* The update_thread_list method of target record-btrace.  */
2746 
2747 void
2748 record_btrace_target::update_thread_list ()
2749 {
2750   /* We don't add or remove threads during replay.  */
2751   if (record_is_replaying (minus_one_ptid))
2752     return;
2753 
2754   /* Forward the request.  */
2755   this->beneath ()->update_thread_list ();
2756 }
2757 
2758 /* The thread_alive method of target record-btrace.  */
2759 
2760 bool
2761 record_btrace_target::thread_alive (ptid_t ptid)
2762 {
2763   /* We don't add or remove threads during replay.  */
2764   if (record_is_replaying (minus_one_ptid))
2765     return true;
2766 
2767   /* Forward the request.  */
2768   return this->beneath ()->thread_alive (ptid);
2769 }
2770 
2771 /* Set the replay branch trace instruction iterator.  If IT is NULL, replay
2772    is stopped.  */
2773 
2774 static void
2775 record_btrace_set_replay (struct thread_info *tp,
2776 			  const struct btrace_insn_iterator *it)
2777 {
2778   struct btrace_thread_info *btinfo;
2779 
2780   btinfo = &tp->btrace;
2781 
2782   if (it == NULL)
2783     record_btrace_stop_replaying (tp);
2784   else
2785     {
2786       if (btinfo->replay == NULL)
2787 	record_btrace_start_replaying (tp);
2788       else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2789 	return;
2790 
2791       *btinfo->replay = *it;
2792       registers_changed_thread (tp);
2793     }
2794 
2795   /* Start anew from the new replay position.  */
2796   record_btrace_clear_histories (btinfo);
2797 
2798   inferior_thread ()->suspend.stop_pc
2799     = regcache_read_pc (get_current_regcache ());
2800   print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2801 }
2802 
2803 /* The goto_record_begin method of target record-btrace.  */
2804 
2805 void
2806 record_btrace_target::goto_record_begin ()
2807 {
2808   struct thread_info *tp;
2809   struct btrace_insn_iterator begin;
2810 
2811   tp = require_btrace_thread ();
2812 
2813   btrace_insn_begin (&begin, &tp->btrace);
2814 
2815   /* Skip gaps at the beginning of the trace.  */
2816   while (btrace_insn_get (&begin) == NULL)
2817     {
2818       unsigned int steps;
2819 
2820       steps = btrace_insn_next (&begin, 1);
2821       if (steps == 0)
2822 	error (_("No trace."));
2823     }
2824 
2825   record_btrace_set_replay (tp, &begin);
2826 }
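
/* Hypothetical session: "record goto begin" rewinds the replay position to
   the first recorded instruction:

     (gdb) record goto begin
     (gdb) info record
     Active record target: record-btrace
     ...
     Replay in progress.  At instruction 1.  */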
2827 
2828 /* The goto_record_end method of target record-btrace.  */
2829 
2830 void
2831 record_btrace_target::goto_record_end ()
2832 {
2833   struct thread_info *tp;
2834 
2835   tp = require_btrace_thread ();
2836 
2837   record_btrace_set_replay (tp, NULL);
2838 }
2839 
2840 /* The goto_record method of target record-btrace.  */
2841 
2842 void
2843 record_btrace_target::goto_record (ULONGEST insn)
2844 {
2845   struct thread_info *tp;
2846   struct btrace_insn_iterator it;
2847   unsigned int number;
2848   int found;
2849 
2850   number = insn;
2851 
2852   /* Check for wrap-arounds.  */
2853   if (number != insn)
2854     error (_("Instruction number out of range."));
2855 
2856   tp = require_btrace_thread ();
2857 
2858   found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2859 
2860   /* Check if the instruction could not be found or is a gap.  */
2861   if (found == 0 || btrace_insn_get (&it) == NULL)
2862     error (_("No such instruction."));
2863 
2864   record_btrace_set_replay (tp, &it);
2865 }
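
/* The NUMBER != INSN comparison above rejects instruction numbers that do
   not fit in unsigned int: with a 32-bit unsigned int, for example,
   "record goto 4294967297" (2^32 + 1) would otherwise silently truncate to
   instruction 1.  */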
2866 
2867 /* The record_stop_replaying method of target record-btrace.  */
2868 
2869 void
2870 record_btrace_target::record_stop_replaying ()
2871 {
2872   for (thread_info *tp : current_inferior ()->non_exited_threads ())
2873     record_btrace_stop_replaying (tp);
2874 }
2875 
2876 /* The execution_direction target method.  */
2877 
2878 enum exec_direction_kind
2879 record_btrace_target::execution_direction ()
2880 {
2881   return record_btrace_resume_exec_dir;
2882 }
2883 
2884 /* The prepare_to_generate_core target method.  */
2885 
2886 void
2887 record_btrace_target::prepare_to_generate_core ()
2888 {
2889   record_btrace_generating_corefile = 1;
2890 }
2891 
2892 /* The done_generating_core target method.  */
2893 
2894 void
2895 record_btrace_target::done_generating_core ()
2896 {
2897   record_btrace_generating_corefile = 0;
2898 }
2899 
2900 /* Start recording in BTS format.  */
2901 
2902 static void
2903 cmd_record_btrace_bts_start (const char *args, int from_tty)
2904 {
2905   if (args != NULL && *args != 0)
2906     error (_("Invalid argument."));
2907 
2908   record_btrace_conf.format = BTRACE_FORMAT_BTS;
2909 
2910   try
2911     {
2912       execute_command ("target record-btrace", from_tty);
2913     }
2914   catch (const gdb_exception &exception)
2915     {
2916       record_btrace_conf.format = BTRACE_FORMAT_NONE;
2917       throw;
2918     }
2919 }
2920 
2921 /* Start recording in Intel Processor Trace format.  */
2922 
2923 static void
2924 cmd_record_btrace_pt_start (const char *args, int from_tty)
2925 {
2926   if (args != NULL && *args != 0)
2927     error (_("Invalid argument."));
2928 
2929   record_btrace_conf.format = BTRACE_FORMAT_PT;
2930 
2931   try
2932     {
2933       execute_command ("target record-btrace", from_tty);
2934     }
2935   catch (const gdb_exception &exception)
2936     {
2937       record_btrace_conf.format = BTRACE_FORMAT_NONE;
2938       throw;
2939     }
2940 }
2941 
2942 /* Alias for "target record".  */
2943 
2944 static void
2945 cmd_record_btrace_start (const char *args, int from_tty)
2946 {
2947   if (args != NULL && *args != 0)
2948     error (_("Invalid argument."));
2949 
2950   record_btrace_conf.format = BTRACE_FORMAT_PT;
2951 
2952   try
2953     {
2954       execute_command ("target record-btrace", from_tty);
2955     }
2956   catch (const gdb_exception &exception)
2957     {
2958       record_btrace_conf.format = BTRACE_FORMAT_BTS;
2959 
2960       try
2961 	{
2962 	  execute_command ("target record-btrace", from_tty);
2963 	}
2964       catch (const gdb_exception &ex)
2965 	{
2966 	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
2967 	  throw;
2968 	}
2969     }
2970 }
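
/* A plain "record btrace" thus prefers Intel Processor Trace and
   transparently falls back to BTS:

     (gdb) record btrace        <- try pt, fall back to bts
     (gdb) record btrace pt     <- force Intel Processor Trace
     (gdb) record btrace bts    <- force Branch Trace Store  */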
2971 
2972 /* The "show record btrace replay-memory-access" command.  */
2973 
2974 static void
2975 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2976 			       struct cmd_list_element *c, const char *value)
2977 {
2978   fprintf_filtered (file, _("Replay memory access is %s.\n"),
2979 		    replay_memory_access);
2980 }
2981 
2982 /* The "set record btrace cpu none" command.  */
2983 
2984 static void
2985 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2986 {
2987   if (args != nullptr && *args != 0)
2988     error (_("Trailing junk: '%s'."), args);
2989 
2990   record_btrace_cpu_state = CS_NONE;
2991 }
2992 
2993 /* The "set record btrace cpu auto" command.  */
2994 
2995 static void
2996 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2997 {
2998   if (args != nullptr && *args != 0)
2999     error (_("Trailing junk: '%s'."), args);
3000 
3001   record_btrace_cpu_state = CS_AUTO;
3002 }
3003 
3004 /* The "set record btrace cpu" command.  */
3005 
3006 static void
3007 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3008 {
3009   if (args == nullptr)
3010     args = "";
3011 
3012   /* We use a hard-coded vendor string for now.  */
3013   unsigned int family, model, stepping;
3014   int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3015 				&model, &l1, &stepping, &l2);
3016   if (matches == 3)
3017     {
3018       if (strlen (args) != l2)
3019 	error (_("Trailing junk: '%s'."), args + l2);
3020     }
3021   else if (matches == 2)
3022     {
3023       if (strlen (args) != l1)
3024 	error (_("Trailing junk: '%s'."), args + l1);
3025 
3026       stepping = 0;
3027     }
3028   else
3029     error (_("Bad format.  See \"help set record btrace cpu\"."));
3030 
3031   if (USHRT_MAX < family)
3032     error (_("Cpu family too big."));
3033 
3034   if (UCHAR_MAX < model)
3035     error (_("Cpu model too big."));
3036 
3037   if (UCHAR_MAX < stepping)
3038     error (_("Cpu stepping too big."));
3039 
3040   record_btrace_cpu.vendor = CV_INTEL;
3041   record_btrace_cpu.family = family;
3042   record_btrace_cpu.model = model;
3043   record_btrace_cpu.stepping = stepping;
3044 
3045   record_btrace_cpu_state = CS_CPU;
3046 }
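
/* Example uses, matching the format documented in the help text installed
   by _initialize_record_btrace below:

     (gdb) set record btrace cpu intel: 6/158      <- family/model
     (gdb) set record btrace cpu intel: 6/158/9    <- family/model/stepping
     (gdb) set record btrace cpu auto
     (gdb) set record btrace cpu none  */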
3047 
3048 /* The "show record btrace cpu" command.  */
3049 
3050 static void
3051 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3052 {
3053   if (args != nullptr && *args != 0)
3054     error (_("Trailing junk: '%s'."), args);
3055 
3056   switch (record_btrace_cpu_state)
3057     {
3058     case CS_AUTO:
3059       printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3060       return;
3061 
3062     case CS_NONE:
3063       printf_unfiltered (_("btrace cpu is 'none'.\n"));
3064       return;
3065 
3066     case CS_CPU:
3067       switch (record_btrace_cpu.vendor)
3068 	{
3069 	case CV_INTEL:
3070 	  if (record_btrace_cpu.stepping == 0)
3071 	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3072 			       record_btrace_cpu.family,
3073 			       record_btrace_cpu.model);
3074 	  else
3075 	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3076 			       record_btrace_cpu.family,
3077 			       record_btrace_cpu.model,
3078 			       record_btrace_cpu.stepping);
3079 	  return;
3080 	}
3081     }
3082 
3083   error (_("Internal error: bad cpu state."));
3084 }
3085 
3086 /* The "record bts buffer-size" show value function.  */
3087 
3088 static void
3089 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3090 				   struct cmd_list_element *c,
3091 				   const char *value)
3092 {
3093   fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3094 		    value);
3095 }
3096 
3097 /* The "record pt buffer-size" show value function.  */
3098 
3099 static void
3100 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3101 				  struct cmd_list_element *c,
3102 				  const char *value)
3103 {
3104   fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3105 		    value);
3106 }
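
/* Example: "set record btrace pt buffer-size 65536" requests a 64 KiB
   trace buffer for subsequent recordings.  The defaults installed at the
   end of _initialize_record_btrace below are 64 KiB for bts and 16 KiB
   for pt.  */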
3107 
3108 /* Initialize btrace commands.  */
3109 
3110 void _initialize_record_btrace ();
3111 void
3112 _initialize_record_btrace ()
3113 {
3114   cmd_list_element *record_btrace_cmd
3115     = add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3116 		      _("Start branch trace recording."),
3117 		      &record_btrace_cmdlist, 0, &record_cmdlist);
3118   add_alias_cmd ("b", record_btrace_cmd, class_obscure, 1, &record_cmdlist);
3119 
3120   cmd_list_element *record_btrace_bts_cmd
3121     = add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3122 	       _("\
3123 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3124 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3125 This format may not be available on all processors."),
3126 	     &record_btrace_cmdlist);
3127   add_alias_cmd ("bts", record_btrace_bts_cmd, class_obscure, 1,
3128 		 &record_cmdlist);
3129 
3130   cmd_list_element *record_btrace_pt_cmd
3131     = add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3132 	       _("\
3133 Start branch trace recording in Intel Processor Trace format.\n\n\
3134 This format may not be available on all processors."),
3135 	     &record_btrace_cmdlist);
3136   add_alias_cmd ("pt", record_btrace_pt_cmd, class_obscure, 1, &record_cmdlist);
3137 
3138   add_basic_prefix_cmd ("btrace", class_support,
3139 			_("Set record options."), &set_record_btrace_cmdlist,
3140 			0, &set_record_cmdlist);
3141 
3142   add_show_prefix_cmd ("btrace", class_support,
3143 		       _("Show record options."), &show_record_btrace_cmdlist,
3144 		       0, &show_record_cmdlist);
3145 
3146   add_setshow_enum_cmd ("replay-memory-access", no_class,
3147 			replay_memory_access_types, &replay_memory_access, _("\
3148 Set what memory accesses are allowed during replay."), _("\
3149 Show what memory accesses are allowed during replay."),
3150 			   _("Default is READ-ONLY.\n\n\
3151 The btrace record target does not trace data.\n\
3152 The memory therefore corresponds to the live target and not \
3153 to the current replay position.\n\n\
3154 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3155 When READ-WRITE, allow accesses to read-only and read-write memory during \
3156 replay."),
3157 			   NULL, cmd_show_replay_memory_access,
3158 			   &set_record_btrace_cmdlist,
3159 			   &show_record_btrace_cmdlist);
3160 
3161   add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3162 		  _("\
3163 Set the cpu to be used for trace decode.\n\n\
3164 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3165 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3166 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3167 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3168 When GDB does not support that cpu, this option can be used to enable\n\
3169 workarounds for a similar cpu that GDB supports.\n\n\
3170 When set to \"none\", errata workarounds are disabled."),
3171 		  &set_record_btrace_cpu_cmdlist,
3172 		  1,
3173 		  &set_record_btrace_cmdlist);
3174 
3175   add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3176 Automatically determine the cpu to be used for trace decode."),
3177 	   &set_record_btrace_cpu_cmdlist);
3178 
3179   add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3180 Do not enable errata workarounds for trace decode."),
3181 	   &set_record_btrace_cpu_cmdlist);
3182 
3183   add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3184 Show the cpu to be used for trace decode."),
3185 	   &show_record_btrace_cmdlist);
3186 
3187   add_basic_prefix_cmd ("bts", class_support,
3188 			_("Set record btrace bts options."),
3189 			&set_record_btrace_bts_cmdlist,
3190 			0,
3191 			&set_record_btrace_cmdlist);
3192 
3193   add_show_prefix_cmd ("bts", class_support,
3194 		       _("Show record btrace bts options."),
3195 		       &show_record_btrace_bts_cmdlist,
3196 		       0,
3197 		       &show_record_btrace_cmdlist);
3198 
3199   add_setshow_uinteger_cmd ("buffer-size", no_class,
3200 			    &record_btrace_conf.bts.size,
3201 			    _("Set the record/replay bts buffer size."),
3202 			    _("Show the record/replay bts buffer size."), _("\
3203 When starting recording, request a trace buffer of this size.  \
3204 The actual buffer size may differ from the requested size.  \
3205 Use \"info record\" to see the actual buffer size.\n\n\
3206 Bigger buffers allow longer recording but also take more time to process \
3207 the recorded execution trace.\n\n\
3208 The trace buffer size may not be changed while recording."), NULL,
3209 			    show_record_bts_buffer_size_value,
3210 			    &set_record_btrace_bts_cmdlist,
3211 			    &show_record_btrace_bts_cmdlist);
3212 
3213   add_basic_prefix_cmd ("pt", class_support,
3214 			_("Set record btrace pt options."),
3215 			&set_record_btrace_pt_cmdlist,
3216 			0,
3217 			&set_record_btrace_cmdlist);
3218 
3219   add_show_prefix_cmd ("pt", class_support,
3220 		       _("Show record btrace pt options."),
3221 		       &show_record_btrace_pt_cmdlist,
3222 		       0,
3223 		       &show_record_btrace_cmdlist);
3224 
3225   add_setshow_uinteger_cmd ("buffer-size", no_class,
3226 			    &record_btrace_conf.pt.size,
3227 			    _("Set the record/replay pt buffer size."),
3228 			    _("Show the record/replay pt buffer size."), _("\
3229 Bigger buffers allow longer recording but also take more time to process \
3230 the recorded execution.\n\
3231 The actual buffer size may differ from the requested size.  Use \"info record\" \
3232 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3233 			    &set_record_btrace_pt_cmdlist,
3234 			    &show_record_btrace_pt_cmdlist);
3235 
3236   add_target (record_btrace_target_info, record_btrace_target_open);
3237 
3238   bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3239 			       xcalloc, xfree);
3240 
3241   record_btrace_conf.bts.size = 64 * 1024;
3242   record_btrace_conf.pt.size = 16 * 1024;
3243 }
3244