/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2021 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR  51*4
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#if (defined(__UCLIBC__)		\
     && defined(HAS_NOMMU)		\
     && defined(PT_TEXT_ADDR)		\
     && defined(PT_DATA_ADDR)		\
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
static ptid_t step_over_bkpt;

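/* Default implementations of the low_* breakpoint target ops below.
   They either return a conservative value or trip an assertion, and
   are overridden by architectures that support breakpoints.  */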
bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if it is not an ELF file at all.  *MACHINE is set to
   the ELF machine type, or to EM_NONE for a non-ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  low_delete_thread (lwp->arch_private);

  delete lwp;
}

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}

process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  proc->priv->arch_private = low_new_process ();

  return proc;
}

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  low_arch_setup ();

  current_thread = saved_thread;
}

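/* Handle a PTRACE_EVENT_* extended event reported in WSTAT for
   *ORIG_EVENT_LWP (fork, vfork, clone, vfork-done or exec).  Return
   0 if the event should be reported to GDB, or 1 if it was handled
   internally and should be suppressed.  */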
int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;

	  ptid = ptid_t (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_of (event_thr).lwp (),
			    ptid.pid ());
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, the child will
		 hit them, so uninsert single-step breakpoints from the
		 parent (and child).  Once the vfork child is done, reinsert
		 them back into the parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints is cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once the vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

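/* Return the current PC of LWP, read from its register cache, or 0
   if the target does not support breakpoints.  */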
CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (!low_supports_breakpoints ())
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = low_get_pc (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

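/* Fetch the system call number that LWP stopped at and store it in
   *SYSNO.  */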
void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}

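/* Work out why LWP stopped (software or hardware breakpoint,
   watchpoint, or single-step), record the reason and the stop PC in
   LWP, and move the PC back to the breakpoint address if needed.
   Returns false if the target does not support breakpoints.  */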
bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return true;
}

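/* Add an LWP with id PTID to the LWP and thread lists, and notify
   the architecture-specific code via low_new_thread.  */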
lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = new lwp_info {};

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  add_linux_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

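/* Attach to the LWP specified by PTID with PTRACE_ATTACH.  Returns
   0 on success, or an errno value if the ptrace call fails.  */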
int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, safe_strerror (err));
	    }
	}
      else if (err != 0)
	{
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  proc = add_linux_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

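/* Return non-zero if there is at most one thread left in process
   PID.  */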
static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
	{
	  /* This is the first thread of this process we see.  */
	  seen_one = true;
	  return false;
	}
      else
	{
	  /* This is the second thread of this process we see.  */
	  return true;
	}
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

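/* Kill process PROCESS: stop all of its LWPs, kill and reap each one
   (leader last), then mourn the process.  */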
int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching; otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it. "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

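/* Detach from LWP: deliver the signal it last stopped for (if any),
   flush pending register changes, and detach with PTRACE_DETACH,
   reaping the LWP if it turns out to have become a zombie.  */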
void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 safe_strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}

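/* Detach from process PROCESS and all of its LWPs, detaching from
   the clone LWPs first and the thread group leader last.  */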
int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROCESS from the lwp list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Free all private data.  */
  priv = process->priv;
  low_delete_process (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

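/* Wait until process PID has exited, reaping its final wait
   status.  */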
void
linux_process_target::join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

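/* Return true if THREAD still has an interesting pending status.  A
   pending breakpoint stop is discarded if the PC has since moved or,
   without siginfo, the breakpoint is no longer inserted there.  */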
bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return false;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return false;
	}
    }

  return true;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

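/* Return true if THREAD matches PTID and has a pending status worth
   reporting.  If the pending event turns out to be no longer
   interesting, re-resume the LWP and clear the pending status.  */
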
bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return false;

  if (!lwp_resumed (lp))
    return false;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return false;
    }

  return lp->status_pending_p;
}

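/* Return the LWP whose lwp id matches PTID's lwp field (or PTID's pid
   field, if the lwp field is zero), or NULL if no such LWP is known.  */
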
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

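/* Delete the LWP of any zombie thread-group leader whose exit cannot
   be reaped yet: either only the leader exited (other threads
   survive), or a non-leader thread exec'd.  See the comment in the
   function body for details.  */
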
void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		    "num_lwps=%d, zombie=%d\n",
		    leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending, or only
	   the leader exited (not the whole program).  In the latter
	   case, we can't waitpid the leader's exit status until all
	   other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	   other than the leader exec'd.  On an exec, the Linux
	   kernel destroys all other threads (except the execing
	   one) in the thread group, and resets the execing thread's
	   tid to the tgid.  No exit notification is sent for the
	   execing thread -- from the ptracer's perspective, it
	   appears as though the execing thread just vanishes.
	   Until we reap all other threads except the leader and the
	   execing thread, the leader will be zombie, and the
	   execing thread will be in `D (disc sleep)'.  As soon as
	   all other threads are reaped, the execing thread changes
	   its tid to the tgid, and the previous (zombie) leader
	   vanishes, giving place to the "new" leader.  We could try
	   distinguishing the exit and exec cases, by waiting once
	   more, and seeing if something comes out, but it doesn't
	   sound useful.  The previous leader _does_ go away, and
	   we'll re-add the new one once we see the exec event
	   (which is just the same as what would happen if the
	   previous leader did exit voluntarily before some other
	   thread execs).  */

	if (debug_threads)
	  debug_printf ("CZL: Thread group leader %d zombie "
			"(it exited, or another thread execd).\n",
			leader_pid);

	delete_lwp (leader_lp);
      }
    });
}

/* Callback for `find_thread'.  Returns the first LWP that is not
   stopped.  */

static bool
not_stopped_callback (thread_info *thread, ptid_t filter)
{
  if (!thread->id.matches (filter))
    return false;

  lwp_info *lwp = get_thread_lwp (thread);

  return !lwp->stopped;
}

/* Increment LWP's suspend count.  */

static void
lwp_suspended_inc (struct lwp_info *lwp)
{
  lwp->suspended++;

  if (debug_threads && lwp->suspended > 4)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      debug_printf ("LWP %ld has a suspiciously high suspend count,"
		    " suspended=%d\n", lwpid_of (thread), lwp->suspended);
    }
}

/* Decrement LWP's suspend count.  */

static void
lwp_suspended_decr (struct lwp_info *lwp)
{
  lwp->suspended--;

  if (lwp->suspended < 0)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      internal_error (__FILE__, __LINE__,
		      "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp_suspended_inc (lwp);

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp_suspended_decr (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads
	      || (lwp->collecting_fast_tracepoint
		  != fast_tpoint_collect_result::not_collecting));

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

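/* Return whether LWP is currently collecting a fast tracepoint in its
   jump pad, filling in *STATUS with the collection details if so.  */
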
fast_tpoint_collect_result
linux_process_target::linux_fast_tracepoint_collecting
  (lwp_info *lwp, fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
    return fast_tpoint_collect_result::not_collecting;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

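/* By default, fetching the thread area address is not supported; low
   targets that do support it override this method.  */
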
int
linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  return -1;
}

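/* Return true if LWP must be moved out of the jump pad before the
   event in *WSTAT can be reported to GDB, setting up an
   exit-jump-pad breakpoint or adjusting the PC and siginfo as
   needed.  */
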
bool
linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      fast_tpoint_collect_result r
	= linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != fast_tpoint_collect_result::not_collecting)
	    {
	      if (r == fast_tpoint_collect_result::before_insn
		  && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return true;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint
	    = fast_tpoint_collect_result::not_collecting;

	  if (r != fast_tpoint_collect_result::not_collecting
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      low_set_pc (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return false;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	debug_printf ("   Already queued %d\n",
		      sig.signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      for (const auto &sig : lwp->pending_signals_to_report)
	{
	  if (sig.signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig.signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));

  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &lwp->pending_signals_to_report.back ().info);
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (!lwp->pending_signals_to_report.empty ())
    {
      const pending_signal &p_sig = lwp->pending_signals_to_report.front ();

      *wstat = W_STOPCODE (p_sig.signal);
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals_to_report.pop_front ();

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  for (const auto &sig : lwp->pending_signals_to_report)
	    debug_printf ("   Still queued %d\n",
			  sig.signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}

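/* Return true if CHILD stopped because of a watchpoint, recording the
   stop reason and the watched data address if so.  */
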
bool
linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
{
  struct thread_info *saved_thread = current_thread;
  current_thread = get_lwp_thread (child);

  if (low_stopped_by_watchpoint ())
    {
      child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
      child->stopped_data_address = low_stopped_data_address ();
    }

  current_thread = saved_thread;

  return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}

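/* Default implementations for low targets without hardware watchpoint
   support.  */
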
bool
linux_process_target::low_stopped_by_watchpoint ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_stopped_data_address ()
{
  return 0;
}

/* Return the ptrace options that we want to try to enable.  */

static int
linux_low_ptrace_options (int attached)
{
  client_state &cs = get_client_state ();
  int options = 0;

  if (!attached)
    options |= PTRACE_O_EXITKILL;

  if (cs.report_fork_events)
    options |= PTRACE_O_TRACEFORK;

  if (cs.report_vfork_events)
    options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);

  if (cs.report_exec_events)
    options |= PTRACE_O_TRACEEXEC;

  options |= PTRACE_O_TRACESYSGOOD;

  return options;
}

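/* Do low-level handling of the event described by LWPID and WSTAT:
   update the corresponding LWP's bookkeeping, and either leave the
   event pending on the LWP or filter it out entirely.  */
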
void
linux_process_target::filter_event (int lwpid, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (ptid_t (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists (because
     check_zombie_leaders deleted it).  The non-leader thread
     changes its tid to the tgid.  */

  if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
    {
      ptid_t child_ptid;

      /* A multi-thread exec after we had seen the leader exiting.  */
      if (debug_threads)
	{
	  debug_printf ("LLW: Re-adding thread group leader LWP %d "
			"after exec.\n", lwpid);
	}

      child_ptid = ptid_t (lwpid, lwpid, 0);
      child = add_lwp (child_ptid);
      child->stopped = 1;
      current_thread = child->thread;
    }

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return;
    }
  else if (child == NULL)
    return;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);

      if (finish_step_over (child))
	{
	  /* Unsuspend all other LWPs, and set them back running again.  */
	  unsuspend_all_lwps (child);
	}

      /* If there is at least one more LWP, then the exit signal was
	 not the end of the debugged application and should be
	 ignored, unless GDB wants to hear about thread exits.  */
      if (cs.report_thread_events
	  || last_thread_of_process_p (pid_of (thread)))
	{
	  /* Events are serialized to the GDB core, and we can't
	     report this one right now.  Leave the status pending for
	     the next time we're able to report it.  */
	  mark_lwp_dead (child, wstat);
	  return;
	}
      else
	{
	  delete_lwp (child);
	  return;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->tdesc == NULL)
	{
	  if (proc->attached)
	    {
	      /* This needs to happen after we have attached to the
		 inferior and it is stopped for the first time, but
		 before we access any inferior registers.  */
	      arch_setup_thread (thread);
	    }
	  else
	    {
	      /* The process is started, but GDBserver will do
		 architecture-specific setup after the program stops at
		 the first instruction.  */
	      child->status_pending_p = 1;
	      child->status_pending = wstat;
	      return;
	    }
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid, options);
      child->must_set_ptrace_flags = 0;
    }

  /* Always update syscall_state, even if it will be filtered later.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
    {
      child->syscall_state
	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	   ? TARGET_WAITKIND_SYSCALL_RETURN
	   : TARGET_WAITKIND_SYSCALL_ENTRY);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in handle_extended_wait.  */
      child->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Be careful to not overwrite stop_pc until save_stop_reason is
     called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      if (handle_extended_wait (&child, wstat))
	{
	  /* The event has been handled, so just return without
	     reporting it.  */
	  return;
	}
    }

  if (linux_wstatus_maybe_breakpoint (wstat))
    {
      if (save_stop_reason (child))
	have_stop_pc = 1;
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	  if (debug_threads)
	    debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
			  target_pid_to_str (ptid_of (thread)));
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  if (debug_threads)
	    debug_printf ("LLW: SIGSTOP caught for %s "
			  "while stopping threads.\n",
			  target_pid_to_str (ptid_of (thread)));
	  return;
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */
	  if (debug_threads)
	    debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
			  child->stepping ? "step" : "continue",
			  target_pid_to_str (ptid_of (thread)));

	  resume_one_lwp (child, child->stepping, 0, NULL);
	  return;
	}
    }

  child->status_pending_p = 1;
  child->status_pending = wstat;
  return;
}

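/* Return true if THREAD will use hardware single-step on its next
   resume; for software single-step, assert that the single-step
   breakpoints are in place and return false.  */
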
bool
linux_process_target::maybe_hw_step (thread_info *thread)
{
  if (supports_hardware_single_step ())
    return true;
  else
    {
      /* GDBserver must insert single-step breakpoint for software
	 single step.  */
      gdb_assert (has_single_step_breakpoints (thread));
      return false;
    }
}

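/* Resume LWPs that are currently stopped without any pending status
   to report, but are resumed from the core's perspective.  */
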
void
linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->stopped
      && !lp->suspended
      && !lp->status_pending_p
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    {
      int step = 0;

      if (thread->last_resume_kind == resume_step)
	step = maybe_hw_step (thread);

      if (debug_threads)
	debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
		      target_pid_to_str (ptid_of (thread)),
		      paddress (lp->stop_pc),
		      step);

      resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
    }
}

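/* Wait for an event from child(ren) WAIT_PTID, and return any that
   match FILTER_PTID (leaving others pending).  The PTIDs can be:
   minus_one_ptid, to specify any child; a pid PTID, specifying all
   lwps of a thread group; or a PTID representing a single lwp.  Store
   the stop status through the status pointer WSTATP.  OPTIONS is
   passed to the waitpid call.  Return 0 if no event was found and
   OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
   were found.  Return the PID of the stopped child otherwise.  */
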
int
linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
					       ptid_t filter_ptid,
					       int *wstatp, int options)
{
  struct thread_info *event_thread;
  struct lwp_info *event_child, *requested_child;
  sigset_t block_mask, prev_mask;

 retry:
  /* N.B. event_thread points to the thread_info struct that contains
     event_child.  Keep them in sync.  */
  event_thread = NULL;
  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
    {
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	event_child = get_thread_lwp (event_thread);
      if (debug_threads && event_thread)
	debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
    }
  else if (filter_ptid != null_ptid)
    {
      requested_child = find_lwp_pid (filter_ptid);

      if (stopping_threads == NOT_STOPPING_THREADS
	  && requested_child->status_pending_p
	  && (requested_child->collecting_fast_tracepoint
	      != fast_tpoint_collect_result::not_collecting))
	{
	  enqueue_one_deferred_signal (requested_child,
				       &requested_child->status_pending);
	  requested_child->status_pending_p = 0;
	  requested_child->status_pending = 0;
	  resume_one_lwp (requested_child, 0, 0, NULL);
	}

      if (requested_child->suspended
	  && requested_child->status_pending_p)
	{
	  internal_error (__FILE__, __LINE__,
			  "requesting an event out of a"
			  " suspended child?");
	}

      if (requested_child->status_pending_p)
	{
	  event_child = requested_child;
	  event_thread = get_lwp_thread (event_child);
	}
    }

  if (event_child != NULL)
    {
      if (debug_threads)
	debug_printf ("Got an event from pending child %ld (%04x)\n",
		      lwpid_of (event_thread), event_child->status_pending);
      *wstatp = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_thread = event_thread;
      return lwpid_of (event_thread);
    }

  /* But if we don't find a pending event, we'll have to wait.

     We only enter this loop if no process has a pending wait status.
     Thus any action taken in response to a wait status inside this
     loop is responding as soon as we detect the status, not after any
     pending events.  */

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
     all signals while here.  */
  sigfillset (&block_mask);
  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Always pull all events out of the kernel.  We'll randomly select
     an event LWP out of all that have events, to prevent
     starvation.  */
  while (event_child == NULL)
    {
      pid_t ret = 0;

      /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */
      errno = 0;
      ret = my_waitpid (-1, wstatp, options | WNOHANG);

      if (debug_threads)
	debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
		      ret, errno ? safe_strerror (errno) : "ERRNO-OK");

      if (ret > 0)
	{
	  if (debug_threads)
	    {
	      debug_printf ("LLW: waitpid %ld received %s\n",
			    (long) ret, status_to_str (*wstatp).c_str ());
	    }

	  /* Filter all events.  IOW, leave all events pending.  We'll
	     randomly select an event LWP out of all that have events
	     below.  */
	  filter_event (ret, *wstatp);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      if (stopping_threads == NOT_STOPPING_THREADS)
	for_each_thread ([this] (thread_info *thread)
	  {
	    resume_stopped_resumed_lwps (thread);
	  });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  return status_pending_p_callback (thread, filter_ptid);
	});

      if (event_thread != NULL)
	{
	  event_child = get_thread_lwp (event_thread);
	  *wstatp = event_child->status_pending;
	  event_child->status_pending_p = 0;
	  event_child->status_pending = 0;
	  break;
	}

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      auto not_stopped = [&] (thread_info *thread)
	{
	  return not_stopped_callback (thread, wait_ptid);
	};

      /* If there are no resumed children left in the set of LWPs we
	 want to wait for, bail.  We can't just block in
	 waitpid/sigsuspend, because lwps might have been left stopped
	 in trace-stop state, and we'd be stuck forever waiting for
	 their status to change (which would only happen if we resumed
	 them).  Even if WNOHANG is set, this return code is preferred
	 over 0 (below), as it is more detailed.  */
      if (find_thread (not_stopped) == NULL)
	{
	  if (debug_threads)
	    debug_printf ("LLW: exit (no unwaited-for LWP)\n");
	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return -1;
	}

      /* No interesting event to report to the caller.  */
      if ((options & WNOHANG))
	{
	  if (debug_threads)
	    debug_printf ("WNOHANG set, no event found\n");

	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
	  return 0;
	}

      /* Block until we get an event reported with SIGCHLD.  */
      if (debug_threads)
	debug_printf ("sigsuspend'ing\n");

      sigsuspend (&prev_mask);
      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
      goto retry;
    }

  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);

  current_thread = event_thread;

  return lwpid_of (event_thread);
}

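/* Wait for an event from child(ren) PTID, which can be: minus_one_ptid,
   to specify any child; a pid PTID, specifying all lwps of a thread
   group; or a PTID representing a single lwp.  Store the stop status
   through WSTATP.  OPTIONS is passed to the waitpid call.  Return 0 if
   no event was found and OPTIONS contains WNOHANG, -1 if no
   unwaited-for children were found, and the PID of the stopped child
   otherwise.  */
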
int
linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
{
  return wait_for_event_filtered (ptid, ptid, wstatp, options);
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  struct thread_info *event_thread = NULL;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it's the LWP that
     the core is most interested in.  If we didn't do this, then we'd
     have to handle pending step SIGTRAPs somehow in case the core
     later continues the previously-stepped thread, otherwise we'd
     report the pending SIGTRAP, and the core, not having stepped the
     thread, wouldn't understand what the trap was for, and therefore
     would report it to the user as a random signal.  */
  if (!non_stop)
    {
      event_thread = find_thread ([] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
		  && thread->last_resume_kind == resume_step
		  && lp->status_pending_p);
	});

      if (event_thread != NULL)
	{
	  if (debug_threads)
	    debug_printf ("SEL: Select single-step %s\n",
			  target_pid_to_str (ptid_of (event_thread)));
	}
    }
  if (event_thread == NULL)
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had events.  */

      event_thread = find_thread_in_random ([&] (thread_info *thread)
	{
	  lwp_info *lp = get_thread_lwp (thread);

	  /* Only resumed LWPs that have an event pending.  */
	  return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
		  && lp->status_pending_p);
	});
    }

  if (event_thread != NULL)
    {
      struct lwp_info *event_lp = get_thread_lwp (event_thread);

      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}

/* Decrement the suspend count of all LWPs, except EXCEPT, if
   non-NULL.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  for_each_thread ([&] (thread_info *thread)
    {
      lwp_info *lwp = get_thread_lwp (thread);

      if (lwp != except)
	lwp_suspended_decr (lwp);
    });
}

static bool lwp_running (thread_info *thread);

/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callees).  For
   example:

     - starting a new trace run.  A thread still collecting the
   previous run could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even be midway through memcpy'ing
   to the buffer, which would mean that when resumed, it would clobber
   the trace buffer that had been set up for a new run.

     - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

void
linux_process_target::stabilize_threads ()
{
  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
				{
				  return stuck_in_jump_pad (thread);
				});

  if (thread_stuck != NULL)
    {
      if (debug_threads)
	debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
		      lwpid_of (thread_stuck));
      return;
    }

  thread_info *saved_thread = current_thread;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_thread ([this] (thread_info *thread)
    {
      move_out_of_jump_pad (thread);
    });

  /* Loop until all are stopped out of the jump pads.  */
  while (find_thread (lwp_running) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait event loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_thread);

	  /* Lock it.  */
	  lwp_suspended_inc (lwp);

	  if (ourstatus.value.sig != GDB_SIGNAL_0
	      || current_thread->last_resume_kind == resume_stop)
	    {
	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  unsuspend_all_lwps (NULL);

  stabilizing_threads = 0;

  current_thread = saved_thread;

  if (debug_threads)
    {
      thread_stuck = find_thread ([this] (thread_info *thread)
		       {
			 return stuck_in_jump_pad (thread);
		       });

      if (thread_stuck != NULL)
	debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
		      lwpid_of (thread_stuck));
    }
}

/* Convenience function that is called when the kernel reports an
   event that is not passed out to GDB.  */

static ptid_t
ignore_event (struct target_waitstatus *ourstatus)
{
  /* If we got an event, there may still be others, as a single
     SIGCHLD can indicate more than one child stopped.  This forces
     another target_wait call.  */
  async_file_mark ();

  ourstatus->kind = TARGET_WAITKIND_IGNORE;
  return null_ptid;
}

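/* If EVENT_CHILD is not the last thread of its process, turn the
   process exit into a thread exit (or into an ignored event if the
   client did not ask for thread events) and delete the LWP.  Return
   the ptid of the thread the event is for.  */
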
ptid_t
linux_process_target::filter_exit_event (lwp_info *event_child,
					 target_waitstatus *ourstatus)
{
  client_state &cs = get_client_state ();
  struct thread_info *thread = get_lwp_thread (event_child);
  ptid_t ptid = ptid_of (thread);

  if (!last_thread_of_process_p (pid_of (thread)))
    {
      if (cs.report_thread_events)
	ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
      else
	ourstatus->kind = TARGET_WAITKIND_IGNORE;

      delete_lwp (event_child);
    }
  return ptid;
}

/* Return 1 if GDB is interested in any of EVENT_CHILD's syscalls.  */

static int
gdb_catching_syscalls_p (struct lwp_info *event_child)
{
  struct thread_info *thread = get_lwp_thread (event_child);
  struct process_info *proc = get_thread_process (thread);

  return !proc->syscalls_to_catch.empty ();
}

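/* Return true if GDB asked to catch the particular syscall that
   EVENT_CHILD just stopped for (or asked to catch any syscall).  */
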
bool
linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
{
  int sysno;
  struct thread_info *thread = get_lwp_thread (event_child);
  struct process_info *proc = get_thread_process (thread);

  if (proc->syscalls_to_catch.empty ())
    return false;

  if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
    return true;

  get_syscall_trapinfo (event_child, &sysno);

  for (int iter : proc->syscalls_to_catch)
    if (iter == sysno)
      return true;

  return false;
}

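/* Wait for process, returns status.  */
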
ptid_t
linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
			      target_wait_flags target_options)
{
  client_state &cs = get_client_state ();
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;
  int in_step_range;
  int any_resumed;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
    }

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

  bp_explains_trap = 0;
  trace_event = 0;
  in_step_range = 0;
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  auto status_pending_p_any = [&] (thread_info *thread)
    {
      return status_pending_p_callback (thread, minus_one_ptid);
    };

  auto not_stopped = [&] (thread_info *thread)
    {
      return not_stopped_callback (thread, minus_one_ptid);
    };

  /* Find a resumed LWP, if any.  */
  if (find_thread (status_pending_p_any) != NULL)
    any_resumed = 1;
  else if (find_thread (not_stopped) != NULL)
    any_resumed = 1;
  else
    any_resumed = 0;

  if (step_over_bkpt == null_ptid)
    pid = wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
	debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
		      target_pid_to_str (step_over_bkpt));
      pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0 || (pid == -1 && !any_resumed))
    {
      gdb_assert (target_options & TARGET_WNOHANG);

      if (debug_threads)
	{
	  debug_printf ("wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_IGNORE\n");
	  debug_exit ();
	}

      ourstatus->kind = TARGET_WAITKIND_IGNORE;
      return null_ptid;
    }
  else if (pid == -1)
    {
      if (debug_threads)
	{
	  debug_printf ("wait_1 ret = null_ptid, "
			"TARGET_WAITKIND_NO_RESUMED\n");
	  debug_exit ();
	}

      ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
      return null_ptid;
    }

  event_child = get_thread_lwp (current_thread);

  /* wait_for_event only returns an exit status for the last
     child of a process.  Report it.  */
  if (WIFEXITED (w) || WIFSIGNALED (w))
    {
      if (WIFEXITED (w))
	{
	  ourstatus->kind = TARGET_WAITKIND_EXITED;
	  ourstatus->value.integer = WEXITSTATUS (w);

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, exited with "
			    "retcode %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WEXITSTATUS (w));
	      debug_exit ();
	    }
	}
      else
	{
	  ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	  ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));

	  if (debug_threads)
	    {
	      debug_printf ("wait_1 ret = %s, terminated with "
			    "signal %d\n",
			    target_pid_to_str (ptid_of (current_thread)),
			    WTERMSIG (w));
	      debug_exit ();
	    }
	}

      if (ourstatus->kind == TARGET_WAITKIND_EXITED)
	return filter_exit_event (event_child, ourstatus);

      return ptid_of (current_thread);
    }

  /* If a step-over executes a breakpoint instruction, then in the case
     of a hardware single step it means a gdb/gdbserver breakpoint had
     been planted on top of a permanent breakpoint, while in the case
     of a software single step it may just mean that gdbserver hit the
     reinsert breakpoint.  The PC has been adjusted by save_stop_reason
     to point at the breakpoint address.  So in the case of the
     hardware single step, advance the PC manually past the breakpoint,
     and in the case of software single step, advance only if it's not
     the single_step_breakpoint we are hitting.  This avoids the
     program trapping on the permanent breakpoint forever.  */
  if (step_over_bkpt != null_ptid
      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && (event_child->stepping
	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
    {
      int increment_pc = 0;
      int breakpoint_kind = 0;
      CORE_ADDR stop_pc = event_child->stop_pc;

      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);

      if (debug_threads)
	{
	  debug_printf ("step-over for %s executed software breakpoint\n",
			target_pid_to_str (ptid_of (current_thread)));
	}

      if (increment_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);

	  event_child->stop_pc += increment_pc;
	  low_set_pc (regcache, event_child->stop_pc);

	  if (!low_breakpoint_at (event_child->stop_pc))
	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
	}
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (low_supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
	   || ((WSTOPSIG (w) == SIGILL
		|| WSTOPSIG (w) == SIGSEGV)
	       && low_breakpoint_at (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
	 report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
	 breakpoint, or if we should possibly report the event to GDB.
	 Do this before anything that may remove or insert a
	 breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
	 finished.  If so, tweak the state machine accordingly,
	 reinsert breakpoints and delete any single-step
	 breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
	 trace buffer, and cause a tracing stop, removing
	 breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
	{
	  if (debug_threads)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	}
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
	 progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      if (debug_threads)
	debug_printf ("Got signal %d for LWP %ld.  Check if we need "
		      "to defer or adjust it.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      /* Allow debugging the jump pad itself.  */
      if (current_thread->last_resume_kind != resume_step
	  && maybe_move_out_of_jump_pad (event_child, &w))
	{
	  enqueue_one_deferred_signal (event_child, &w);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
			  WSTOPSIG (w), lwpid_of (current_thread));

	  resume_one_lwp (event_child, 0, 0, NULL);

	  if (debug_threads)
	    debug_exit ();
	  return ignore_event (ourstatus);
	}
    }

  if (event_child->collecting_fast_tracepoint
      != fast_tpoint_collect_result::not_collecting)
    {
      if (debug_threads)
	debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
		      "Check if we're already there.\n",
		      lwpid_of (current_thread),
		      (int) event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
	= linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint
	  != fast_tpoint_collect_result::before_insn)
	{
	  /* No longer need this breakpoint.  */
	  if (event_child->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
			      "stopping all threads momentarily.\n");

	      /* Other running threads could hit this breakpoint.
		 We don't handle moribund locations like GDB does,
		 instead we always pause all threads when removing
		 breakpoints, so that any step-over or
		 decr_pc_after_break adjustment is always taken
		 care of while the breakpoint is still
		 inserted.  */
	      stop_all_lwps (1, event_child);

	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
	      event_child->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, event_child);

	      gdb_assert (event_child->suspended >= 0);
	    }
	}

      if (event_child->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting)
	{
	  if (debug_threads)
	    debug_printf ("fast tracepoint finished "
			  "collecting successfully.\n");

	  /* We may have a deferred signal to report.  */
	  if (dequeue_one_deferred_signal (event_child, &w))
	    {
	      if (debug_threads)
		debug_printf ("dequeued one signal.\n");
	    }
	  else
	    {
	      if (debug_threads)
		debug_printf ("no deferred signals.\n");

	      if (stabilizing_threads)
		{
		  ourstatus->kind = TARGET_WAITKIND_STOPPED;
		  ourstatus->value.sig = GDB_SIGNAL_0;

		  if (debug_threads)
		    {
		      debug_printf ("wait_1 ret = %s, stopped "
				    "while stabilizing threads\n",
				    target_pid_to_str (ptid_of (current_thread)));
		      debug_exit ();
		    }

		  return ptid_of (current_thread);
		}
	    }
	}
    }

  /* Check whether GDB would be interested in this event.  */

  /* Check if GDB is interested in this syscall.  */
  if (WIFSTOPPED (w)
      && WSTOPSIG (w) == SYSCALL_SIGTRAP
      && !gdb_catch_this_syscall (event_child))
    {
      if (debug_threads)
	{
	  debug_printf ("Ignored syscall for LWP %ld.\n",
			lwpid_of (current_thread));
	}

      resume_one_lwp (event_child, event_child->stepping, 0, NULL);

      if (debug_threads)
	debug_exit ();
      return ignore_event (ourstatus);
    }

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping -- they may require special handling to skip the signal
     handler.  Also never ignore signals that could be caused by a
     breakpoint.  */
  if (WIFSTOPPED (w)
      && current_thread->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
	  (current_process ()->priv->thread_db != NULL
	   && (WSTOPSIG (w) == __SIGRTMIN
	       || WSTOPSIG (w) == __SIGRTMIN + 1))
	  ||
#endif
	  (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
	   && !(WSTOPSIG (w) == SIGSTOP
		&& current_thread->last_resume_kind == resume_stop)
	   && !linux_wstatus_maybe_breakpoint (w))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
	debug_printf ("Ignored signal %d for LWP %ld.\n",
		      WSTOPSIG (w), lwpid_of (current_thread));

      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      if (step_over_finished)
	{
	  /* We cancelled this thread's step-over above.  We still
	     need to unsuspend all other LWPs, and set them back
	     running again while the signal handler runs.  */
	  unsuspend_all_lwps (event_child);

	  /* Enqueue the pending signal info so that proceed_all_lwps
	     doesn't lose it.  */
	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);

	  proceed_all_lwps ();
	}
      else
	{
	  resume_one_lwp (event_child, event_child->stepping,
			  WSTOPSIG (w), info_p);
	}

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  /* Note that all addresses are always "out of the step range" when
     there's no range to begin with.  */
  in_step_range = lwp_in_step_range (event_child);

  /* If GDB wanted this thread to single step, and the thread is out
     of the step range, we always want to report the SIGTRAP, and let
     GDB handle it.  Watchpoints should always be reported.  So should
     signals we can't explain.  A SIGTRAP we can't explain could be a
     GDB breakpoint --- we may or may not support Z0 breakpoints.  If
     we do, we're able to handle GDB breakpoints on top of internal
     breakpoints, by handling the internal breakpoint and still
     reporting the event to GDB.  If we don't, we're out of luck, GDB
     won't see the breakpoint hit.  If we see a single-step event but
     the thread should be continuing, don't pass the trap to gdb.
     That indicates that we had previously finished a single-step but
     left the single-step pending -- see
     complete_ongoing_step_over.  */
  report_to_gdb = (!maybe_internal_trap
		   || (current_thread->last_resume_kind == resume_step
		       && !in_step_range)
		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
		   || (!in_step_range
		       && !bp_explains_trap
		       && !trace_event
		       && !step_over_finished
		       && !(current_thread->last_resume_kind == resume_continue
			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
		   || (gdb_breakpoint_here (event_child->stop_pc)
		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
		   || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);

  run_breakpoint_commands (event_child->stop_pc);

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
	{
	  if (bp_explains_trap)
	    debug_printf ("Hit a gdbserver breakpoint.\n");
	  if (step_over_finished)
	    debug_printf ("Step-over finished.\n");
	  if (trace_event)
	    debug_printf ("Tracepoint event.\n");
	  if (lwp_in_step_range (event_child))
	    debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
			  paddress (event_child->stop_pc),
			  paddress (event_child->step_range_start),
			  paddress (event_child->step_range_end));
	}

      /* We're not reporting this breakpoint to GDB, so apply the
	 decr_pc_after_break adjustment to the inferior's regcache
	 ourselves.  */

      if (low_supports_breakpoints ())
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc);
	}

      if (step_over_finished)
	{
	  /* If we have finished stepping over a breakpoint, we've
	     stopped and suspended all LWPs momentarily except the
	     stepping one.  This is where we resume them all again.
	     We're going to keep waiting, so use proceed, which
	     handles stepping over the next breakpoint.  */
	  unsuspend_all_lwps (event_child);
	}
      else
	{
	  /* Remove the single-step breakpoints if any.  Note that
	     there are no single-step breakpoints if we just finished
	     stepping over one.  */
	  if (supports_software_single_step ()
	      && has_single_step_breakpoints (current_thread))
	    {
	      stop_all_lwps (0, event_child);
	      delete_single_step_breakpoints (current_thread);
	      unstop_all_lwps (0, event_child);
	    }
	}

      if (debug_threads)
	debug_printf ("proceeding all threads.\n");
      proceed_all_lwps ();

      if (debug_threads)
	debug_exit ();

      return ignore_event (ourstatus);
    }

  if (debug_threads)
    {
      if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
	{
	  std::string str
	    = target_waitstatus_to_string (&event_child->waitstatus);

	  debug_printf ("LWP %ld: extended event with waitstatus %s\n",
			lwpid_of (get_lwp_thread (event_child)), str.c_str ());
	}
      if (current_thread->last_resume_kind == resume_step)
	{
	  if (event_child->step_range_start == event_child->step_range_end)
	    debug_printf ("GDB wanted to single-step, reporting event.\n");
	  else if (!lwp_in_step_range (event_child))
	    debug_printf ("Out of step range, reporting event.\n");
	}
      if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
	debug_printf ("Stopped by watchpoint.\n");
      else if (gdb_breakpoint_here (event_child->stop_pc))
	debug_printf ("Stopped by GDB breakpoint.\n");
      debug_printf ("Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  /* Remove single-step breakpoints.  */
  if (supports_software_single_step ())
    {
      /* Decide whether to remove the single-step breakpoints.  If we
	 do remove them, stop all lwps first, so that other threads
	 won't hit a breakpoint left behind in stale memory.  */
      int remove_single_step_breakpoints_p = 0;

      if (non_stop)
	{
	  remove_single_step_breakpoints_p
	    = has_single_step_breakpoints (current_thread);
	}
      else
	{
	  /* In all-stop, a stop reply cancels all previous resume
	     requests.  Delete all single-step breakpoints.  */

	  find_thread ([&] (thread_info *thread) {
	    if (has_single_step_breakpoints (thread))
	      {
		remove_single_step_breakpoints_p = 1;
		return true;
	      }

	    return false;
	  });
	}

      if (remove_single_step_breakpoints_p)
	{
	  /* If we remove single-step breakpoints from memory, stop all
	     lwps, so that other threads won't hit a breakpoint left in
	     the stale memory.  */
	  stop_all_lwps (0, event_child);

	  if (non_stop)
	    {
	      gdb_assert (has_single_step_breakpoints (current_thread));
	      delete_single_step_breakpoints (current_thread);
	    }
	  else
	    {
	      for_each_thread ([] (thread_info *thread){
		if (has_single_step_breakpoints (thread))
		  delete_single_step_breakpoints (thread);
	      });
	    }

	  unstop_all_lwps (0, event_child);
	}
    }

  if (!stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      if (!non_stop)
	stop_all_lwps (0, NULL);

      if (step_over_finished)
	{
	  if (!non_stop)
	    {
	      /* If we were doing a step-over, all other threads but
		 the stepping one had been paused in start_step_over,
		 with their suspend counts incremented.  We don't want
		 to do a full unstop/unpause, because we're in
		 all-stop mode (so we want threads stopped), but we
		 still need to unsuspend the other threads, to
		 decrement their `suspended' count back.  */
	      unsuspend_all_lwps (event_child);
	    }
	  else
	    {
	      /* If we just finished a step-over, then all threads had
		 been momentarily paused.  In all-stop, that's fine,
		 we want threads stopped by now anyway.  In non-stop,
		 we need to re-resume threads that GDB wanted to be
		 running.  */
	      unstop_all_lwps (1, event_child);
	    }
	}

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (ptid == minus_one_ptid)
	{
	  event_child->status_pending_p = 1;
	  event_child->status_pending = w;

	  select_event_lwp (&event_child);

	  /* current_thread and event_child must stay in sync.  */
	  current_thread = get_lwp_thread (event_child);

	  event_child->status_pending_p = 0;
	  w = event_child->status_pending;
	}

      /* Stabilize threads (move out of jump pads).  */
      if (!non_stop)
	target_stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
	 momentarily paused.  In all-stop, that's fine, we want
	 threads stopped by now anyway.  In non-stop, we need to
	 re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
	unstop_all_lwps (1, event_child);
    }

  if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* If the reported event is an exit, fork, vfork or exec, let
	 GDB know.  */

      /* Break the unreported fork relationship chain.  */
      if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
	  || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
	{
	  event_child->fork_relative->fork_relative = NULL;
	  event_child->fork_relative = NULL;
	}

      *ourstatus = event_child->waitstatus;
      /* Clear the event lwp's waitstatus since we handled it already.  */
      event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and the client doesn't know we can
     adjust the breakpoint ourselves.  */
  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !cs.swbreak_feature)
    {
      int decr_pc = low_decr_pc_after_break ();

      if (decr_pc != 0)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
	}
    }

  if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
    {
      get_syscall_trapinfo (event_child,
			    &ourstatus->value.syscall_number);
      ourstatus->kind = event_child->syscall_state;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) == SIGSTOP)
    {
      /* The thread was requested to stop by GDB with vCont;t, and it
	 stopped cleanly, so report it as GDB_SIGNAL_0.  The use of
	 SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = GDB_SIGNAL_0;
    }
  else if (current_thread->last_resume_kind == resume_stop
	   && WSTOPSIG (w) != SIGSTOP)
    {
      /* The thread was requested to stop by GDB with vCont;t, but it
	 stopped for some other reason.  Report the stopping signal.  */
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }
  else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
    {
      ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (step_over_bkpt == null_ptid);

  if (debug_threads)
    {
      debug_printf ("wait_1 ret = %s, %d, %d\n",
		    target_pid_to_str (ptid_of (current_thread)),
		    ourstatus->kind, ourstatus->value.sig);
      debug_exit ();
    }

  if (ourstatus->kind == TARGET_WAITKIND_EXITED)
    return filter_exit_event (event_child, ourstatus);

  return ptid_of (current_thread);
}

/* Get rid of any pending event in the pipe.  */
static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    ret = read (linux_event_pipe[0], &buf, 1);
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something in the pipe, so the event loop wakes up.  */
static void
async_file_mark (void)
{
  int ret;

  async_file_flush ();

  do
    ret = write (linux_event_pipe[1], "+", 1);
  while (ret == 0 || (ret == -1 && errno == EINTR));

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}
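
/* Both helpers above assume linux_event_pipe is a non-blocking
   self-pipe.  A minimal sketch of such a setup (the real one lives in
   this target's async-mode handling; the error handling here is
   illustrative only):

     if (pipe (linux_event_pipe) != 0)
       error ("creating event pipe");
     fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
     fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

   The O_NONBLOCK flags are what let async_file_flush drain the pipe
   without blocking, and let async_file_mark treat EAGAIN on a full
   pipe as harmless.  */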

ptid_t
linux_process_target::wait (ptid_t ptid,
			    target_waitstatus *ourstatus,
			    target_wait_flags target_options)
{
  ptid_t event_ptid;

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  do
    {
      event_ptid = wait_1 (ptid, ourstatus, target_options);
    }
  while ((target_options & TARGET_WNOHANG) == 0
	 && event_ptid == null_ptid
	 && ourstatus->kind == TARGET_WAITKIND_IGNORE);

  /* If at least one stop was reported, there may be more.  A single
     SIGCHLD can signal more than one child stop.  */
  if (target_is_async_p ()
      && (target_options & TARGET_WNOHANG) != 0
      && event_ptid != null_ptid)
    async_file_mark ();

  return event_ptid;
}

/* Send a signal to an LWP.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  int ret;

  errno = 0;
  ret = syscall (__NR_tkill, lwpid, signo);
  if (errno == ENOSYS)
    {
      /* If tkill fails, then we are not using nptl threads, a
	 configuration we no longer support.  */
      perror_with_name (("tkill"));
    }
  return ret;
}
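
/* Note that tkill(2), unlike kill(2), directs the signal at a single
   thread (LWP) rather than at the whole thread group.  Kernels also
   provide tgkill(2), which additionally guards against LWP-id reuse by
   checking the containing thread-group id; a sketch of the equivalent
   call (TGID being a hypothetical caller-supplied process id) would be:

     syscall (__NR_tgkill, tgid, lwpid, signo);  */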

/* Stop LWP by sending it a SIGSTOP.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}

/* Send LWP a SIGSTOP, unless one is already pending.  */

static void
send_sigstop (struct lwp_info *lwp)
{
  int pid;

  pid = lwpid_of (get_lwp_thread (lwp));

  /* If we already have a pending stop signal for this process, don't
     send another.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Have pending sigstop for lwp %d\n", pid);

      return;
    }

  if (debug_threads)
    debug_printf ("Sending sigstop to lwp %d\n", pid);

  lwp->stop_expected = 1;
  kill_lwp (pid, SIGSTOP);
}

/* Send a SIGSTOP to the LWP of THREAD, unless that LWP is EXCEPT or
   is already stopped.  */

static void
send_sigstop (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return;

  if (lwp->stopped)
    return;

  send_sigstop (lwp);
}

/* Increment the suspend count of an LWP, and stop it, if not stopped
   yet.  */
static void
suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return;

  lwp_suspended_inc (lwp);

  send_sigstop (thread, except);
}

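/* Mark LWP dead, with WSTAT as its exit status: record the status as
   pending, fill in the waitstatus to be reported to GDB, and make sure
   no further stops are expected from it.  */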
static void
mark_lwp_dead (struct lwp_info *lwp, int wstat)
{
  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Store in waitstatus as well, as there's nothing else to process
     for this event.  */
  if (WIFEXITED (wstat))
    {
      lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
      lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
    }
  else if (WIFSIGNALED (wstat))
    {
      lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
      lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
    }

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}

/* Return true if LWP has exited already, and has a pending exit event
   to report to GDB.  */

static int
lwp_is_marked_dead (struct lwp_info *lwp)
{
  return (lwp->status_pending_p
	  && (WIFEXITED (lwp->status_pending)
	      || WIFSIGNALED (lwp->status_pending)));
}

void
linux_process_target::wait_for_sigstop ()
{
  struct thread_info *saved_thread;
  ptid_t saved_tid;
  int wstat;
  int ret;

  saved_thread = current_thread;
  if (saved_thread != NULL)
    saved_tid = saved_thread->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  if (debug_threads)
    debug_printf ("wait_for_sigstop: pulling events\n");

  /* Passing NULL_PTID as filter indicates we want all events to be
     left pending.  Eventually this returns when there are no
     unwaited-for children left.  */
  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
  gdb_assert (ret == -1);

  if (saved_thread == NULL || mythread_alive (saved_tid))
    current_thread = saved_thread;
  else
    {
      if (debug_threads)
	debug_printf ("Previously current thread died.\n");

      /* We can't change the current inferior behind GDB's back,
	 otherwise, a subsequent command may apply to the wrong
	 process.  */
      current_thread = NULL;
    }
}

bool
linux_process_target::stuck_in_jump_pad (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  return (supports_fast_tracepoints ()
	  && agent_loaded_p ()
	  && (gdb_breakpoint_here (lwp->stop_pc)
	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
	      || thread->last_resume_kind == resume_step)
	  && (linux_fast_tracepoint_collecting (lwp, NULL)
	      != fast_tpoint_collect_result::not_collecting));
}

void
linux_process_target::move_out_of_jump_pad (thread_info *thread)
{
  struct thread_info *saved_thread;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int *wstat;

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld is suspended, suspended=%d\n",
		      lwpid_of (thread), lwp->suspended);
    }
  gdb_assert (lwp->stopped);

  /* For gdb_breakpoint_here.  */
  saved_thread = current_thread;
  current_thread = thread;

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
		      lwpid_of (thread));

      if (wstat)
	{
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    debug_printf ("Signal %d for LWP %ld deferred "
			  "(in jump pad)\n",
			  WSTOPSIG (*wstat), lwpid_of (thread));
	}

      resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp_suspended_inc (lwp);

  current_thread = saved_thread;
}

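/* Return true if THREAD's LWP is alive and running, i.e., it is
   neither marked dead nor ptrace-stopped.  */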
static bool
lwp_running (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (lwp_is_marked_dead (lwp))
    return false;

  return !lwp->stopped;
}

void
linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
{
  /* Should not be called recursively.  */
  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("stop_all_lwps (%s, except=%s)\n",
		    suspend ? "stop-and-suspend" : "stop",
		    except != NULL
		    ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
		    : "none");
    }

  stopping_threads = (suspend
		      ? STOPPING_AND_SUSPENDING_THREADS
		      : STOPPING_THREADS);

  if (suspend)
    for_each_thread ([&] (thread_info *thread)
      {
	suspend_and_send_sigstop (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	send_sigstop (thread, except);
      });

  wait_for_sigstop ();
  stopping_threads = NOT_STOPPING_THREADS;

  if (debug_threads)
    {
      debug_printf ("stop_all_lwps done, setting stopping_threads "
		    "back to !stopping\n");
      debug_exit ();
    }
}

/* Enqueue one signal in the chain of signals which need to be
   delivered to this process on next resume.  */

static void
enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
{
  lwp->pending_signals.emplace_back (signal);
  if (info == nullptr)
    memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
  else
    lwp->pending_signals.back ().info = *info;
}

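/* Install breakpoints for software single stepping: compute every
   address the next instruction may transfer control to (for a
   conditional branch, for instance, low_get_next_pcs would typically
   return both the fall-through address and the branch target), and
   plant a single-step breakpoint at each of them.  */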
void
linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct regcache *regcache = get_thread_regcache (thread, 1);

  scoped_restore save_current_thread = make_scoped_restore (&current_thread);

  current_thread = thread;
  std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);

  for (CORE_ADDR pc : next_pcs)
    set_single_step_breakpoint (pc, current_ptid);
}

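/* Single step via hardware or software single step.  Return 1 if
   hardware single stepping is to be used (resume with
   PTRACE_SINGLESTEP), or 0 if software single stepping was set up (the
   planted breakpoints do the work) or stepping is not supported at
   all.  */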
int
linux_process_target::single_step (lwp_info* lwp)
{
  int step = 0;

  if (supports_hardware_single_step ())
    {
      step = 1;
    }
  else if (supports_software_single_step ())
    {
      install_software_single_step_breakpoints (lwp);
      step = 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("stepping is not implemented on this target\n");
    }

  return step;
}

/* The signal can be delivered to the inferior if we are not trying to
   finish a fast tracepoint collect.  Since a signal can be delivered
   during the step-over, the program may go into the signal handler and
   trap again after returning from it.  We can live with the spurious
   double traps.  */

static int
lwp_signal_can_be_delivered (struct lwp_info *lwp)
{
  return (lwp->collecting_fast_tracepoint
	  == fast_tpoint_collect_result::not_collecting);
}

void
linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
					    int signal, siginfo_t *info)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  int ptrace_request;
  struct process_info *proc = get_thread_process (thread);

  /* Note that target description may not be initialised
     (proc->tdesc == NULL) at this point because the program hasn't
     stopped at the first instruction yet.  It means GDBserver skips
     the extra traps from the wrapper program (see option --wrapper).
     Code in this function that requires register access should be
     guarded by proc->tdesc == NULL or something else.  */

  if (lwp->stopped == 0)
    return;

  gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  fast_tpoint_collect_result fast_tp_collecting
    = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads
	      || (fast_tp_collecting
		  != fast_tpoint_collect_result::not_collecting));

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (thread);
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if it can't be delivered to the
     inferior right now.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || !lwp->pending_signals.empty ()
	  || !lwp_signal_can_be_delivered (lwp)))
    {
      enqueue_pending_signal (lwp, signal, info);

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Not resuming lwp %ld (%s, stop %s);"
		      " has pending status\n",
		      lwpid_of (thread), step ? "step" : "continue",
		      lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_thread = current_thread;
  current_thread = thread;

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("  pending reinsert at 0x%s\n",
		      paddress (lwp->bp_reinsert));

      if (supports_hardware_single_step ())
	{
	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
	    {
	      if (step == 0)
		warning ("BAD - reinserting but not stepping.");
	      if (lwp->suspended)
		warning ("BAD - reinserting and suspended(%d).",
			 lwp->suspended);
	    }
	}

      step = maybe_hw_step (thread);
    }

  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " (exit-jump-pad-bkpt)\n",
		      lwpid_of (thread));
    }
  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
    {
      if (debug_threads)
	debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
		      " single-stepping\n",
		      lwpid_of (thread));

      if (supports_hardware_single_step ())
	step = 1;
      else
	{
	  internal_error (__FILE__, __LINE__,
			  "moving out of jump pad single-stepping"
			  " not implemented on this target");
	}
    }

  /* If we have while-stepping actions in this thread, set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (thread->while_stepping != NULL)
    {
      if (debug_threads)
	debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
		      lwpid_of (thread));

      step = single_step (lwp);
    }

  if (proc->tdesc != NULL && low_supports_breakpoints ())
    {
      struct regcache *regcache = get_thread_regcache (current_thread, 1);

      lwp->stop_pc = low_get_pc (regcache);

      if (debug_threads)
	{
	  debug_printf ("  %s from pc 0x%lx\n", step ? "step" : "continue",
			(long) lwp->stop_pc);
	}
    }

  /* If we have pending signals, consume one if it can be delivered to
     the inferior.  */
  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
    {
      const pending_signal &p_sig = lwp->pending_signals.front ();

      signal = p_sig.signal;
      if (p_sig.info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&p_sig.info);

      lwp->pending_signals.pop_front ();
    }

  if (debug_threads)
    debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
		  lwpid_of (thread), step ? "step" : "continue", signal,
		  lwp->stop_expected ? "expected" : "not expected");

  low_prepare_to_resume (lwp);

  regcache_invalidate_thread (thread);
  errno = 0;
  lwp->stepping = step;
  if (step)
    ptrace_request = PTRACE_SINGLESTEP;
  else if (gdb_catching_syscalls_p (lwp))
    ptrace_request = PTRACE_SYSCALL;
  else
    ptrace_request = PTRACE_CONT;
  ptrace (ptrace_request,
	  lwpid_of (thread),
	  (PTRACE_TYPE_ARG3) 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);

  current_thread = saved_thread;
  if (errno)
    perror_with_name ("resuming thread");

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lwp->stopped = 0;
  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
}

void
linux_process_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* Nop.  */
}

/* Called when we try to resume a stopped LWP and that errors out.  If
   the LWP is no longer in ptrace-stopped state (meaning it's zombie,
   or about to become one), discard the error, clear any pending status
   the LWP may have, and return true (we'll collect the exit status
   soon enough).  Otherwise, return false.  */

static int
check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
{
  struct thread_info *thread = get_lwp_thread (lp);

  /* If we get an error after resuming the LWP successfully, we'd
     confuse !T state for the LWP being gone.  */
  gdb_assert (lp->stopped);

  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
     because even if ptrace failed with ESRCH, the tracee may be "not
     yet fully dead", but already refusing ptrace requests.  In that
     case the tracee has 'R (Running)' state for a little bit
     (observed in Linux 3.18).  See also the note on ESRCH in the
     ptrace(2) man page.  Instead, check whether the LWP has any state
     other than ptrace-stopped.  */

  /* Don't assume anything if /proc/PID/status can't be read.  */
  if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
    {
      lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
      lp->status_pending_p = 0;
      return 1;
    }
  return 0;
}

void
linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
				      siginfo_t *info)
{
  try
    {
      resume_one_lwp_throw (lwp, step, signal, info);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }
}

/* This function is called once per thread via for_each_thread.
   We look up which resume request applies to THREAD and mark it with a
   pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */

static void
linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  for (int ndx = 0; ndx < n; ndx++)
    {
      ptid_t ptid = resume[ndx].thread;
      if (ptid == minus_one_ptid
	  || ptid == thread->id
	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
	     of PID'.  */
	  || (ptid.pid () == pid_of (thread)
	      && (ptid.is_pid ()
		  || ptid.lwp () == -1)))
	{
	  if (resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      if (debug_threads)
		debug_printf ("already %s LWP %ld at GDB's request\n",
			      (thread->last_status.kind
			       == TARGET_WAITKIND_STOPPED)
			      ? "stopped"
			      : "stopping",
			      lwpid_of (thread));

	      continue;
	    }

	  /* Ignore (wildcard) resume requests for already-resumed
	     threads.  */
	  if (resume[ndx].kind != resume_stop
	      && thread->last_resume_kind != resume_stop)
	    {
	      if (debug_threads)
		debug_printf ("already %s LWP %ld at GDB's request\n",
			      (thread->last_resume_kind
			       == resume_step)
			      ? "stepping"
			      : "continuing",
			      lwpid_of (thread));
	      continue;
	    }

	  /* Don't let wildcard resumes resume fork children that GDB
	     does not yet know are new fork children.  */
	  if (lwp->fork_relative != NULL)
	    {
	      struct lwp_info *rel = lwp->fork_relative;

	      if (rel->status_pending_p
		  && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
		      || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
		{
		  if (debug_threads)
		    debug_printf ("not resuming LWP %ld: has queued stop reply\n",
				  lwpid_of (thread));
		  continue;
		}
	    }

	  /* If the thread has a pending event that has already been
	     reported to GDBserver core, but GDB has not pulled the
	     event out of the vStopped queue yet, likewise, ignore the
	     (wildcard) resume request.  */
	  if (in_queued_stop_replies (thread->id))
	    {
	      if (debug_threads)
		debug_printf ("not resuming LWP %ld: has queued stop reply\n",
			      lwpid_of (thread));
	      continue;
	    }

	  lwp->resume = &resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  lwp->step_range_start = lwp->resume->step_range_start;
	  lwp->step_range_end = lwp->resume->step_range_end;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      if (debug_threads)
		debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
			      "leaving status pending.\n",
			      WSTOPSIG (lwp->status_pending),
			      lwpid_of (thread));
	    }

	  return;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;
}

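/* Return true if THREAD is scheduled to be resumed, but still has a
   pending status to report; such threads are left stopped so the
   pending event can be reported instead.  */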
bool
linux_process_target::resume_status_pending (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* LWPs which will not be resumed are not interesting, because
     we might not wait for them next time through linux_wait.  */
  if (lwp->resume == NULL)
    return false;

  return thread_still_has_status_pending (thread);
}

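/* Return true if THREAD is stopped at a breakpoint of ours that needs
   stepping over before it can be resumed.  */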
bool
linux_process_target::thread_needs_step_over (thread_info *thread)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  struct process_info *proc = get_thread_process (thread);

  /* GDBserver is skipping the extra traps from the wrapper program,
     so we don't have to do a step over.  */
  if (proc->tdesc == NULL)
    return false;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
		      lwpid_of (thread));
      return false;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
		      " stopped\n",
		      lwpid_of (thread));
      return false;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
		      lwpid_of (thread));
      return false;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " status.\n",
		      lwpid_of (thread));
      return false;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked the thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
		      "Old stop_pc was 0x%s, PC is now 0x%s\n",
		      lwpid_of (thread),
		      paddress (lwp->stop_pc), paddress (pc));
      return false;
    }

  /* On a software single step target, resume the inferior with a
     signal rather than stepping over.  */
  if (supports_software_single_step ()
      && !lwp->pending_signals.empty ()
      && lwp_signal_can_be_delivered (lwp))
    {
      if (debug_threads)
	debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
		      " signals.\n",
		      lwpid_of (thread));

      return false;
    }

  saved_thread = current_thread;
  current_thread = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  If the condition is being evaluated on the target's
	 side and it evaluates to false, step over this breakpoint as
	 well.  */
      if (gdb_breakpoint_here (pc)
	  && gdb_condition_true_at_breakpoint (pc)
	  && gdb_no_commands_at_breakpoint (pc))
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, but found"
			  " GDB breakpoint at 0x%s; skipping step over\n",
			  lwpid_of (thread), paddress (pc));

	  current_thread = saved_thread;
	  return false;
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("Need step over [LWP %ld]? yes, "
			  "found breakpoint at 0x%s\n",
			  lwpid_of (thread), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_thread stops looking.  */
	  current_thread = saved_thread;

	  return true;
	}
    }

  current_thread = saved_thread;

  if (debug_threads)
    debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
		  " at 0x%s\n",
		  lwpid_of (thread), paddress (pc));

  return false;
}

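/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to move the breakpoint out of
   the way.  If we let the other threads run while we did that, they
   could pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped (by either hardware or software) with the breakpoint
   temporarily uninserted from the inferior.  When the single-step
   finishes, we reinsert the breakpoint, and let all threads that are
   supposed to be running, run again.  */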
void
linux_process_target::start_step_over (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  struct thread_info *saved_thread;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    debug_printf ("Starting step-over on LWP %ld.  Stopping all threads\n",
		  lwpid_of (thread));

  stop_all_lwps (1, lwp);

  if (lwp->suspended != 0)
    {
      internal_error (__FILE__, __LINE__,
		      "LWP %ld suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }

  if (debug_threads)
    debug_printf ("Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_thread = current_thread;
  current_thread = thread;

  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  step = single_step (lwp);

  current_thread = saved_thread;

  resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = thread->id;
}

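/* Finish a step-over started by start_step_over: reinsert the
   breakpoints that were temporarily uninserted at LWP->BP_REINSERT,
   delete any software single-step breakpoints, clear the step-over
   state, and return true.  Return false if LWP was not doing a
   step-over.  */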
bool
linux_process_target::finish_step_over (lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      struct thread_info *saved_thread = current_thread;

      if (debug_threads)
	debug_printf ("Finished step over.\n");

      current_thread = get_lwp_thread (lwp);

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
	 may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any single-step breakpoints.  No longer needed.  We
	 don't have to worry about other threads hitting this trap,
	 and later not being able to explain it, because we were
	 stepping over a breakpoint, and we hold all threads but
	 LWP stopped while doing that.  */
      if (!supports_hardware_single_step ())
	{
	  gdb_assert (has_single_step_breakpoints (current_thread));
	  delete_single_step_breakpoints (current_thread);
	}

      step_over_bkpt = null_ptid;
      current_thread = saved_thread;
      return true;
    }
  else
    return false;
}

void
linux_process_target::complete_ongoing_step_over ()
{
  if (step_over_bkpt != null_ptid)
    {
      struct lwp_info *lwp;
      int wstat;
      int ret;

      if (debug_threads)
	debug_printf ("detach: step over in progress, finish it first\n");

      /* Passing NULL_PTID as filter indicates we want all events to
	 be left pending.  Eventually this returns when there are no
	 unwaited-for children left.  */
      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
				     __WALL);
      gdb_assert (ret == -1);

      lwp = find_lwp_pid (step_over_bkpt);
      if (lwp != NULL)
	{
	  finish_step_over (lwp);

	  /* If we got our step SIGTRAP, don't leave it pending,
	     otherwise we would report it to GDB as a spurious
	     SIGTRAP.  */
	  gdb_assert (lwp->status_pending_p);
	  if (WIFSTOPPED (lwp->status_pending)
	      && WSTOPSIG (lwp->status_pending) == SIGTRAP)
	    {
	      thread_info *thread = get_lwp_thread (lwp);
	      if (thread->last_resume_kind != resume_step)
		{
		  if (debug_threads)
		    debug_printf ("detach: discard step-over SIGTRAP\n");

		  lwp->status_pending_p = 0;
		  lwp->status_pending = 0;
		  resume_one_lwp (lwp, lwp->stepping, 0, NULL);
		}
	      else
		{
		  if (debug_threads)
		    debug_printf ("detach: resume_step, "
				  "not discarding step-over SIGTRAP\n");
		}
	    }
	}
      step_over_bkpt = null_ptid;
      unsuspend_all_lwps (lwp);
    }
}

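/* Carry out, for one THREAD, the resume request recorded earlier by
   linux_set_resume_request.  A stop request is satisfied by queueing a
   SIGSTOP if needed; otherwise, any requested signal is enqueued, and
   the thread is either set running or deliberately left stopped (when
   LEAVE_ALL_STOPPED is set, or when it is suspended or has a pending
   status to report).  */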
void
linux_process_target::resume_one_thread (thread_info *thread,
					 bool leave_all_stopped)
{
  struct lwp_info *lwp = get_thread_lwp (thread);
  int leave_pending;

  if (lwp->resume == NULL)
    return;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    debug_printf ("stopping LWP %ld\n", lwpid_of (thread));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    debug_printf ("already stopped LWP %ld\n",
			  lwpid_of (thread));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report.empty ())
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume it - we can just report the pending status.
     Likewise if it is suspended, because e.g., another thread is
     stepping past a breakpoint.  Make sure to queue any signals that
     would otherwise be sent.  In all-stop mode, we make this decision
     based on whether *any* thread has a pending status.  If there's a
     thread that needs the step-over-breakpoint dance, then don't
     resume any other thread but that particular one.  */
  leave_pending = (lwp->suspended
		   || lwp->status_pending_p
		   || leave_all_stopped);

  /* If we have a new signal, enqueue the signal.  */
  if (lwp->resume->sig != 0)
    {
      siginfo_t info, *info_p;

      /* If this is the same signal we were previously stopped by,
	 make sure to queue its siginfo.  */
      if (WIFSTOPPED (lwp->last_status)
	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
	info_p = &info;
      else
	info_p = NULL;

      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
    }

  if (!leave_pending)
    {
      if (debug_threads)
	debug_printf ("resuming LWP %ld\n", lwpid_of (thread));

      proceed_one_lwp (thread, NULL);
    }
  else
    {
      if (debug_threads)
	debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
}

void
linux_process_target::resume (thread_resume *resume_info, size_t n)
{
  struct thread_info *need_step_over = NULL;

  if (debug_threads)
    {
      debug_enter ();
      debug_printf ("linux_resume:\n");
    }

  for_each_thread ([&] (thread_info *thread)
    {
      linux_set_resume_request (thread, resume_info, n);
    });

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  bool any_pending = false;
  if (!non_stop)
    any_pending = find_thread ([this] (thread_info *thread)
		    {
		      return resume_status_pending (thread);
		    }) != nullptr;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && low_supports_breakpoints ())
    need_step_over = find_thread ([this] (thread_info *thread)
		       {
			 return thread_needs_step_over (thread);
		       });

  bool leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	debug_printf ("Not resuming all, need step over\n");
      else if (any_pending)
	debug_printf ("Not resuming, all-stop and found "
		      "an LWP with pending status\n");
      else
	debug_printf ("Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  for_each_thread ([&] (thread_info *thread)
    {
      resume_one_thread (thread, leave_all_stopped);
    });

  if (need_step_over)
    start_step_over (get_thread_lwp (need_step_over));

  if (debug_threads)
    {
      debug_printf ("linux_resume done\n");
      debug_exit ();
    }

  /* We may have events that were pending that can/should be sent to
     the client now.  Trigger a linux_wait call.  */
  if (target_is_async_p ())
    async_file_mark ();
}

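/* Set the LWP of THREAD running again, unless it is EXCEPT, is already
   running, is suspended, or has a pending status to report.  For
   threads the client wants stopped, requeue a SIGSTOP in case the one
   queued at vCont;t time was already consumed by wait_for_sigstop.  */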
4905 void
proceed_one_lwp(thread_info * thread,lwp_info * except)4906 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4907 {
4908   struct lwp_info *lwp = get_thread_lwp (thread);
4909   int step;
4910 
4911   if (lwp == except)
4912     return;
4913 
4914   if (debug_threads)
4915     debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4916 
4917   if (!lwp->stopped)
4918     {
4919       if (debug_threads)
4920 	debug_printf ("   LWP %ld already running\n", lwpid_of (thread));
4921       return;
4922     }
4923 
4924   if (thread->last_resume_kind == resume_stop
4925       && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4926     {
4927       if (debug_threads)
4928 	debug_printf ("   client wants LWP to remain %ld stopped\n",
4929 		      lwpid_of (thread));
4930       return;
4931     }
4932 
4933   if (lwp->status_pending_p)
4934     {
4935       if (debug_threads)
4936 	debug_printf ("   LWP %ld has pending status, leaving stopped\n",
4937 		      lwpid_of (thread));
4938       return;
4939     }
4940 
4941   gdb_assert (lwp->suspended >= 0);
4942 
4943   if (lwp->suspended)
4944     {
4945       if (debug_threads)
4946 	debug_printf ("   LWP %ld is suspended\n", lwpid_of (thread));
4947       return;
4948     }
4949 
4950   if (thread->last_resume_kind == resume_stop
4951       && lwp->pending_signals_to_report.empty ()
4952       && (lwp->collecting_fast_tracepoint
4953 	  == fast_tpoint_collect_result::not_collecting))
4954     {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here).  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling, for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	debug_printf ("Client wants LWP %ld to stop. "
		      "Making sure it has a SIGSTOP pending\n",
		      lwpid_of (thread));

      send_sigstop (lwp);
    }

  if (thread->last_resume_kind == resume_step)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, client wants it stepping\n",
		      lwpid_of (thread));

      /* If resume_step is requested by GDB, install single-step
	 breakpoints when the thread is about to be actually resumed if
	 the single-step breakpoints weren't removed.  */
      if (supports_software_single_step ()
	  && !has_single_step_breakpoints (thread))
	install_software_single_step_breakpoints (lwp);

      step = maybe_hw_step (thread);
    }
  else if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	debug_printf ("   stepping LWP %ld, reinsert set\n",
		      lwpid_of (thread));

      step = maybe_hw_step (thread);
    }
  else
    step = 0;

  resume_one_lwp (lwp, step, 0, NULL);
}

void
linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
						     lwp_info *except)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  if (lwp == except)
    return;

  lwp_suspended_decr (lwp);

  proceed_one_lwp (thread, except);
}

void
linux_process_target::proceed_all_lwps ()
{
  struct thread_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (low_supports_breakpoints ())
    {
      need_step_over = find_thread ([this] (thread_info *thread)
			 {
			   return thread_needs_step_over (thread);
			 });

      if (need_step_over != NULL)
	{
	  if (debug_threads)
	    debug_printf ("proceed_all_lwps: found "
			  "thread %ld needing a step-over\n",
			  lwpid_of (need_step_over));

	  start_step_over (get_thread_lwp (need_step_over));
	  return;
	}
    }

  if (debug_threads)
    debug_printf ("Proceeding, no step-over needed\n");

  for_each_thread ([this] (thread_info *thread)
    {
      proceed_one_lwp (thread, NULL);
    });
}

void
linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
{
  if (debug_threads)
    {
      debug_enter ();
      if (except)
	debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
		      lwpid_of (get_lwp_thread (except)));
      else
	debug_printf ("unstopping all lwps\n");
    }

  if (unsuspend)
    for_each_thread ([&] (thread_info *thread)
      {
	unsuspend_and_proceed_one_lwp (thread, except);
      });
  else
    for_each_thread ([&] (thread_info *thread)
      {
	proceed_one_lwp (thread, except);
      });

  if (debug_threads)
    {
      debug_printf ("unstop_all_lwps done\n");
      debug_exit ();
    }
}


#ifdef HAVE_LINUX_REGSETS

#define use_linux_regsets 1

/* Returns true if REGSET has been disabled.  */

static int
regset_disabled (struct regsets_info *info, struct regset_info *regset)
{
  return (info->disabled_regsets != NULL
	  && info->disabled_regsets[regset - info->regsets]);
}

/* Disable REGSET.  */

static void
disable_regset (struct regsets_info *info, struct regset_info *regset)
{
  int dr_offset;

  dr_offset = regset - info->regsets;
  if (info->disabled_regsets == NULL)
    info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
  info->disabled_regsets[dr_offset] = 1;
}

static int
regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset))
	continue;

      buf = xmalloc (regset->size);

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ENODATA)
	    {
	      /* ENODATA may be returned if the regset is currently
		 not "active".  This can happen in normal operation,
		 so suppress the warning in this case.  */
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to read its registers.  */
	    }
	  else
	    {
	      char s[256];
	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
		       pid);
	      perror (s);
	    }
	}
      else
	{
	  if (regset->type == GENERAL_REGS)
	    saw_general_regs = 1;
	  regset->store_function (regcache, buf);
	}
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
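
/* Illustrative sketch (not used by gdbserver): the iovec convention
   above is the same one a standalone tracer would use with
   PTRACE_GETREGSET.  The pid parameter and NT_PRSTATUS are
   assumptions for the example; on success the kernel updates
   iov.iov_len to the number of bytes it actually copied out.

       #include <elf.h>
       #include <sys/ptrace.h>
       #include <sys/types.h>
       #include <sys/uio.h>

       static long
       example_fetch_gregset (pid_t pid, void *buf, size_t size)
       {
	 struct iovec iov;

	 iov.iov_base = buf;
	 iov.iov_len = size;
	 return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
       }

   A caller would pass a buffer at least as large as the target's
   elf_gregset_t and check the return value for -1/errno, just as the
   loop above does.  */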

static int
regsets_store_inferior_registers (struct regsets_info *regsets_info,
				  struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  pid = lwpid_of (current_thread);
  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || regset_disabled (regsets_info, regset)
	  || regset->fill_function == NULL)
	continue;

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
	 in case there are any items in the kernel's regset that are
	 not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
	{
	  iov.iov_base = buf;
	  iov.iov_len = regset->size;
	  data = (void *) &iov;
	}
      else
	data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid,
		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
	{
	  /* Then overlay our cached registers on that.  */
	  regset->fill_function (regcache, buf);

	  /* Only now do we write the register set.  */
#ifndef __sparc__
	  res = ptrace (regset->set_request, pid,
			(PTRACE_TYPE_ARG3) (long) nt_type, data);
#else
	  res = ptrace (regset->set_request, pid, data, nt_type);
#endif
	}

      if (res < 0)
	{
	  if (errno == EIO
	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
	    {
	      /* If we get EIO on a regset, or an EINVAL and the regset is
		 optional, do not try it again for this process mode.  */
	      disable_regset (regsets_info, regset);
	    }
	  else if (errno == ESRCH)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in resume_one_lwp.  */
	      free (buf);
	      return 0;
	    }
	  else
	    {
	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
	    }
	}
      else if (regset->type == GENERAL_REGS)
	saw_general_regs = 1;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}
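
/* Illustrative sketch (not used by gdbserver): the store path above is
   a read-modify-write cycle, compressed here to a single regset.
   NT_PRSTATUS and the buffer size are assumptions for the example, and
   the merge step is simplified to one memcpy; the real code overlays
   individual registers via the regset's fill_function so that
   kernel-only fields survive the store.

       static long
       example_store_gregset (pid_t pid, const void *regs, size_t size)
       {
	 unsigned char buf[size];
	 struct iovec iov = { buf, size };

	 if (ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov) < 0)
	   return -1;
	 memcpy (buf, regs, size);
	 iov.iov_len = size;
	 return ptrace (PTRACE_SETREGSET, pid, (void *) NT_PRSTATUS, &iov);
       }
*/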

#else /* !HAVE_LINUX_REGSETS */

#define use_linux_regsets 0
#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
#define regsets_store_inferior_registers(regsets_info, regcache) 1

#endif

/* Return 1 if register REGNO is supported by one of the regset ptrace
   calls or 0 if it has to be transferred individually.  */

static int
linux_register_in_regsets (const struct regs_info *regs_info, int regno)
{
  unsigned char mask = 1 << (regno % 8);
  size_t index = regno / 8;

  return (use_linux_regsets
	  && (regs_info->regset_bitmap == NULL
	      || (regs_info->regset_bitmap[index] & mask) != 0));
}

#ifdef HAVE_LINUX_USRREGS

static int
register_addr (const struct usrregs_info *usrregs, int regnum)
{
  int addr;

  if (regnum < 0 || regnum >= usrregs->num_regs)
    error ("Invalid register number %d.", regnum);

  addr = usrregs->regmap[regnum];

  return addr;
}


void
linux_process_target::fetch_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_fetch_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	{
	  /* Mark register REGNO unavailable.  */
	  supply_register (regcache, regno, NULL);
	  return;
	}
    }

  low_supply_ptrace_register (regcache, regno, buf);
}
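
/* A worked example of the size computation above: with an 8-byte
   PTRACE_XFER_TYPE, a 10-byte register rounds up to a 16-byte
   transfer buffer, since

       (10 + 8 - 1) & -8  ==  17 & ~7  ==  16

   so the PTRACE_PEEKUSER loop always transfers whole words.  */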

void
linux_process_target::store_register (const usrregs_info *usrregs,
				      regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= usrregs->num_regs)
    return;
  if (low_cannot_store_register (regno))
    return;

  regaddr = register_addr (usrregs, regno);
  if (regaddr == -1)
    return;

  size = ((register_size (regcache->tdesc, regno)
	   + sizeof (PTRACE_XFER_TYPE) - 1)
	  & -sizeof (PTRACE_XFER_TYPE));
  buf = (char *) alloca (size);
  memset (buf, 0, size);

  low_collect_ptrace_register (regcache, regno, buf);

  pid = lwpid_of (current_thread);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKEUSER, pid,
	    /* Coerce to a uintptr_t first to avoid potential gcc warning
	       about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
      if (errno != 0)
	{
	  /* At this point, ESRCH should mean the process is
	     already gone, in which case we simply ignore attempts
	     to change its registers.  See also the related
	     comment in resume_one_lwp.  */
	  if (errno == ESRCH)
	    return;

	  if (!low_cannot_store_register (regno))
	    error ("writing register %d: %s", regno, safe_strerror (errno));
	}
      regaddr += sizeof (PTRACE_XFER_TYPE);
    }
}
#endif /* HAVE_LINUX_USRREGS */

void
linux_process_target::low_collect_ptrace_register (regcache *regcache,
						   int regno, char *buf)
{
  collect_register (regcache, regno, buf);
}

void
linux_process_target::low_supply_ptrace_register (regcache *regcache,
						  int regno, const char *buf)
{
  supply_register (regcache, regno, buf);
}

void
linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  fetch_register (usr, regcache, regno);
    }
  else
    fetch_register (usr, regcache, regno);
#endif
}

void
linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
						    regcache *regcache,
						    int regno, int all)
{
#ifdef HAVE_LINUX_USRREGS
  struct usrregs_info *usr = regs_info->usrregs;

  if (regno == -1)
    {
      for (regno = 0; regno < usr->num_regs; regno++)
	if (all || !linux_register_in_regsets (regs_info, regno))
	  store_register (usr, regcache, regno);
    }
  else
    store_register (usr, regcache, regno);
#endif
}

void
linux_process_target::fetch_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      if (regs_info->usrregs != NULL)
	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
	  low_fetch_register (regcache, regno);

      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
      if (regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
    }
  else
    {
      if (low_fetch_register (regcache, regno))
	return;

      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
    }
}

void
linux_process_target::store_registers (regcache *regcache, int regno)
{
  int use_regsets;
  int all = 0;
  const regs_info *regs_info = get_regs_info ();

  if (regno == -1)
    {
      all = regsets_store_inferior_registers (regs_info->regsets_info,
					      regcache);
      if (regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, all);
    }
  else
    {
      use_regsets = linux_register_in_regsets (regs_info, regno);
      if (use_regsets)
	all = regsets_store_inferior_registers (regs_info->regsets_info,
						regcache);
      if ((!use_regsets || all) && regs_info->usrregs != NULL)
	usr_store_inferior_registers (regs_info, regcache, regno, 1);
    }
}

bool
linux_process_target::low_fetch_register (regcache *regcache, int regno)
{
  return false;
}

/* A wrapper for the read_memory target op.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  return the_target->read_memory (memaddr, myaddr, len);
}

/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  */

int
linux_process_target::read_memory (CORE_ADDR memaddr,
				   unsigned char *myaddr, int len)
{
  int pid = lwpid_of (current_thread);
  PTRACE_XFER_TYPE *buffer;
  CORE_ADDR addr;
  int count;
  char filename[64];
  int i;
  int ret;
  int fd;

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      int bytes;

      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      bytes = pread64 (fd, myaddr, len, memaddr);
#else
      bytes = -1;
      if (lseek (fd, memaddr, SEEK_SET) != -1)
	bytes = read (fd, myaddr, len);
#endif

      close (fd);
      if (bytes == len)
	return 0;

      /* Some data was read; we'll try to get the rest with ptrace.  */
      if (bytes > 0)
	{
	  memaddr += bytes;
	  myaddr += bytes;
	  len -= bytes;
	}
    }

 no_proc:
  /* Round starting address down to longword boundary.  */
  addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
	   / sizeof (PTRACE_XFER_TYPE));
  /* Allocate buffer of that many longwords.  */
  buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  /* Read all the longwords.  */
  errno = 0;
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_TYPE_ARG3) (uintptr_t) addr,
			  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	break;
    }
  ret = errno;

  /* Copy appropriate bytes out of the buffer.  */
  if (i > 0)
    {
      i *= sizeof (PTRACE_XFER_TYPE);
      i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
      memcpy (myaddr,
	      (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	      i < len ? i : len);
    }

  return ret;
}
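
/* Illustrative sketch (not used by gdbserver): the fast path above,
   reduced to a standalone helper.  The pid parameter and the plain
   pread call are assumptions for the example; the code above prefers
   pread64 when available and falls back to ptrace on short reads.

       #include <fcntl.h>
       #include <stdio.h>
       #include <unistd.h>

       static ssize_t
       example_proc_mem_read (int pid, unsigned long addr,
			      void *buf, size_t len)
       {
	 char filename[64];
	 ssize_t bytes;
	 int fd;

	 snprintf (filename, sizeof filename, "/proc/%d/mem", pid);
	 fd = open (filename, O_RDONLY);
	 if (fd == -1)
	   return -1;
	 bytes = pread (fd, buf, len, (off_t) addr);
	 close (fd);
	 return bytes;
       }

   The target typically must be ptrace-attached and stopped for the
   kernel to permit the read.  */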

/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  Always succeeds if LEN is zero.  */

int
linux_process_target::write_memory (CORE_ADDR memaddr,
				    const unsigned char *myaddr, int len)
{
  int i;
  /* Round starting address down to longword boundary.  */
  CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
    / sizeof (PTRACE_XFER_TYPE);

  /* Allocate buffer of that many longwords.  */
  PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);

  int pid = lwpid_of (current_thread);

  if (len == 0)
    {
      /* Zero length write always succeeds.  */
      return 0;
    }

  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      char str[4 * 2 + 1];
      char *p = str;
      int dump = len < 4 ? len : 4;

      for (i = 0; i < dump; i++)
	{
	  sprintf (p, "%02x", myaddr[i]);
	  p += 2;
	}
      *p = '\0';

      debug_printf ("Writing %s to 0x%08lx in process %d\n",
		    str, (long) memaddr, pid);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
     about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
		      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
		      (PTRACE_TYPE_ARG4) 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      errno = 0;
      buffer[count - 1]
	= ptrace (PTRACE_PEEKTEXT, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
						  * sizeof (PTRACE_XFER_TYPE)),
		  (PTRACE_TYPE_ARG4) 0);
      if (errno)
	return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  myaddr, len);

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
	      /* Coerce to a uintptr_t first to avoid potential gcc warning
		 about coercing an 8 byte integer to a 4 byte pointer.  */
	      (PTRACE_TYPE_ARG3) (uintptr_t) addr,
	      (PTRACE_TYPE_ARG4) buffer[i]);
      if (errno)
	return errno;
    }

  return 0;
}
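
/* A worked example of the partial-word handling above: writing 3
   bytes at an address one byte past an 8-byte word boundary touches a
   single PTRACE_XFER_TYPE word.  That word is first peeked, bytes 1
   through 3 are overwritten from MYADDR, and the whole word is poked
   back, so bytes 0 and 4 through 7 keep their previous values.  */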

void
linux_process_target::look_up_symbols ()
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->priv->thread_db != NULL)
    return;

  thread_db_init ();
#endif
}

void
linux_process_target::request_interrupt ()
{
  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  */
  ::kill (-signal_pid, SIGINT);
}

bool
linux_process_target::supports_read_auxv ()
{
  return true;
}

/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
   to debugger memory starting at MYADDR.  */

int
linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
				 unsigned int len)
{
  char filename[PATH_MAX];
  int fd, n;
  int pid = lwpid_of (current_thread);

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  if (offset != (CORE_ADDR) 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    n = -1;
  else
    n = read (fd, myaddr, len);

  close (fd);

  return n;
}

int
linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return insert_memory_breakpoint (bp);
  else
    return low_insert_point (type, addr, size, bp);
}

int
linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}

int
linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
				    int size, raw_breakpoint *bp)
{
  if (type == raw_bkpt_type_sw)
    return remove_memory_breakpoint (bp);
  else
    return low_remove_point (type, addr, size, bp);
}

int
linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
					int size, raw_breakpoint *bp)
{
  /* Unsupported (see target.h).  */
  return 1;
}

/* Implement the stopped_by_sw_breakpoint target_ops
   method.  */

bool
linux_process_target::stopped_by_sw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
}

/* Implement the supports_stopped_by_sw_breakpoint target_ops
   method.  */

bool
linux_process_target::supports_stopped_by_sw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}

/* Implement the stopped_by_hw_breakpoint target_ops
   method.  */

bool
linux_process_target::stopped_by_hw_breakpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
}

/* Implement the supports_stopped_by_hw_breakpoint target_ops
   method.  */

bool
linux_process_target::supports_stopped_by_hw_breakpoint ()
{
  return USE_SIGTRAP_SIGINFO;
}

/* Implement the supports_hardware_single_step target_ops method.  */

bool
linux_process_target::supports_hardware_single_step ()
{
  return true;
}

bool
linux_process_target::stopped_by_watchpoint ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}

CORE_ADDR
linux_process_target::stopped_data_address ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  return lwp->stopped_data_address;
}

/* This is only used for targets that define PT_TEXT_ADDR,
   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined,
   presumably the target has other ways of acquiring this information,
   such as loadmaps.  */

bool
linux_process_target::supports_read_offsets ()
{
#ifdef SUPPORTS_READ_OFFSETS
  return true;
#else
  return false;
#endif
}

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  */

int
linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#ifdef SUPPORTS_READ_OFFSETS
  unsigned long text, text_end, data;
  int pid = lwpid_of (current_thread);

  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
		 (PTRACE_TYPE_ARG4) 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
		     (PTRACE_TYPE_ARG4) 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
		 (PTRACE_TYPE_ARG4) 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
  return 0;
#else
  gdb_assert_not_reached ("target op read_offsets not supported");
#endif
}
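
/* A worked example with illustrative numbers: if the kernel reports
   PT_TEXT_ADDR = 0x8000, PT_TEXT_END_ADDR = 0x9000 and PT_DATA_ADDR =
   0x20000, then gdb is sent *text_p = 0x8000 and *data_p = 0x20000 -
   0x1000 = 0x1f000.  Compile-time data addresses start at the text
   size (0x1000 here), so adding such an address to *data_p lands on
   the real runtime location, e.g. 0x1f000 + 0x1000 = 0x20000.  */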

bool
linux_process_target::supports_get_tls_address ()
{
#ifdef USE_THREAD_DB
  return true;
#else
  return false;
#endif
}

int
linux_process_target::get_tls_address (thread_info *thread,
				       CORE_ADDR offset,
				       CORE_ADDR load_module,
				       CORE_ADDR *address)
{
#ifdef USE_THREAD_DB
  return thread_db_get_tls_address (thread, offset, load_module, address);
#else
  return -1;
#endif
}

bool
linux_process_target::supports_qxfer_osdata ()
{
  return true;
}

int
linux_process_target::qxfer_osdata (const char *annex,
				    unsigned char *readbuf,
				    unsigned const char *writebuf,
				    CORE_ADDR offset, int len)
{
  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}

void
linux_process_target::siginfo_fixup (siginfo_t *siginfo,
				     gdb_byte *inf_siginfo, int direction)
{
  bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);

  /* If there was no callback, or the callback didn't do anything,
     then just do a straight memcpy.  */
  if (!done)
    {
      if (direction == 1)
	memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
      else
	memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
    }
}

bool
linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
					 int direction)
{
  return false;
}

bool
linux_process_target::supports_qxfer_siginfo ()
{
  return true;
}

int
linux_process_target::qxfer_siginfo (const char *annex,
				     unsigned char *readbuf,
				     unsigned const char *writebuf,
				     CORE_ADDR offset, int len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  if (current_thread == NULL)
    return -1;

  pid = lwpid_of (current_thread);

  if (debug_threads)
    debug_printf ("%s siginfo for lwp %d.\n",
		  readbuf != NULL ? "Reading" : "Writing",
		  pid);

  if (offset >= sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
	return -1;
    }

  return len;
}

/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
   it lets us notice when children change state; and it acts as the
   handler for the sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    {
      do
	{
	  /* Use the async signal safe debug function.  */
	  if (debug_write ("sigchld_handler\n",
			   sizeof ("sigchld_handler\n") - 1) < 0)
	    break; /* just ignore */
	} while (0);
    }

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}

bool
linux_process_target::supports_non_stop ()
{
  return true;
}

bool
linux_process_target::async (bool enable)
{
  bool previous = target_is_async_p ();

  if (debug_threads)
    debug_printf ("linux_async (%d), previous=%d\n",
		  enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      gdb_sigmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    {
	      linux_event_pipe[0] = -1;
	      linux_event_pipe[1] = -1;
	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);

	      warning ("creating event pipe failed.");
	      return previous;
	    }

	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL,
			    "linux-low");

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}

int
linux_process_target::start_non_stop (bool nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  target_async (nonstop);

  if (target_is_async_p () != (nonstop != false))
    return -1;

  return 0;
}

bool
linux_process_target::supports_multi_process ()
{
  return true;
}

/* Check if fork events are supported.  */

bool
linux_process_target::supports_fork_events ()
{
  return linux_supports_tracefork ();
}

/* Check if vfork events are supported.  */

bool
linux_process_target::supports_vfork_events ()
{
  return linux_supports_tracefork ();
}

/* Check if exec events are supported.  */

bool
linux_process_target::supports_exec_events ()
{
  return linux_supports_traceexec ();
}

/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
   ptrace flags for all inferiors.  This is in case the new GDB connection
   doesn't support the same set of events that the previous one did.  */

void
linux_process_target::handle_new_gdb_connection ()
{
  /* Request that all the lwps reset their ptrace options.  */
  for_each_thread ([] (thread_info *thread)
    {
      struct lwp_info *lwp = get_thread_lwp (thread);

      if (!lwp->stopped)
	{
	  /* Stop the lwp so we can modify its ptrace options.  */
	  lwp->must_set_ptrace_flags = 1;
	  linux_stop_lwp (lwp);
	}
      else
	{
	  /* Already stopped; go ahead and set the ptrace options.  */
	  struct process_info *proc = find_process_pid (pid_of (thread));
	  int options = linux_low_ptrace_options (proc->attached);

	  linux_enable_event_reporting (lwpid_of (thread), options);
	  lwp->must_set_ptrace_flags = 0;
	}
    });
}

int
linux_process_target::handle_monitor_command (char *mon)
{
#ifdef USE_THREAD_DB
  return thread_db_handle_monitor_command (mon);
#else
  return 0;
#endif
}

int
linux_process_target::core_of_thread (ptid_t ptid)
{
  return linux_common_core_of_thread (ptid);
}

bool
linux_process_target::supports_disable_randomization ()
{
  return true;
}

bool
linux_process_target::supports_agent ()
{
  return true;
}

bool
linux_process_target::supports_range_stepping ()
{
  if (supports_software_single_step ())
    return true;

  return low_supports_range_stepping ();
}

bool
linux_process_target::low_supports_range_stepping ()
{
  return false;
}

bool
linux_process_target::supports_pid_to_exec_file ()
{
  return true;
}

const char *
linux_process_target::pid_to_exec_file (int pid)
{
  return linux_proc_pid_to_exec_file (pid);
}

bool
linux_process_target::supports_multifs ()
{
  return true;
}

int
linux_process_target::multifs_open (int pid, const char *filename,
				    int flags, mode_t mode)
{
  return linux_mntns_open_cloexec (pid, filename, flags, mode);
}

int
linux_process_target::multifs_unlink (int pid, const char *filename)
{
  return linux_mntns_unlink (pid, filename);
}

ssize_t
linux_process_target::multifs_readlink (int pid, const char *filename,
					char *buf, size_t bufsiz)
{
  return linux_mntns_readlink (pid, filename, buf, bufsiz);
}

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};

# if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PT_GETDSBT
#  define LINUX_LOADMAP_EXEC	PTRACE_GETDSBT_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETDSBT_INTERP
# else
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
#  define LINUX_LOADMAP		PTRACE_GETFDPIC
#  define LINUX_LOADMAP_EXEC	PTRACE_GETFDPIC_EXEC
#  define LINUX_LOADMAP_INTERP	PTRACE_GETFDPIC_INTERP
# endif

bool
linux_process_target::supports_read_loadmap ()
{
  return true;
}

int
linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
				    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (current_thread);
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr = (int) LINUX_LOADMAP_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) LINUX_LOADMAP_INTERP;
  else
    return -1;

  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */

bool
linux_process_target::supports_catch_syscall ()
{
  return (low_supports_catch_syscall ()
	  && linux_supports_tracesysgood ());
}

bool
linux_process_target::low_supports_catch_syscall ()
{
  return false;
}

CORE_ADDR
linux_process_target::read_pc (regcache *regcache)
{
  if (!low_supports_breakpoints ())
    return 0;

  return low_get_pc (regcache);
}

void
linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (low_supports_breakpoints ());

  low_set_pc (regcache, pc);
}

bool
linux_process_target::supports_thread_stopped ()
{
  return true;
}

bool
linux_process_target::thread_stopped (thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}

/* This exposes stop-all-threads functionality to other modules.  */

void
linux_process_target::pause_all (bool freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

void
linux_process_target::unpause_all (bool unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}

int
linux_process_target::prepare_to_access_memory ()
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    target_pause_all (true);
  return 0;
}

void
linux_process_target::done_accessing_memory ()
{
  /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
     running LWP.  */
  if (non_stop)
    target_unpause_all (true);
}

/* Extract &phdr and num_phdr in the inferior.  Return 0 on success.  */

static int
get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
			       CORE_ADDR *phdr_memaddr, int *num_phdr)
{
  char filename[PATH_MAX];
  int fd;
  const int auxv_size = is_elf64
    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */

  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 1;

  *phdr_memaddr = 0;
  *num_phdr = 0;
  while (read (fd, buf, auxv_size) == auxv_size
	 && (*phdr_memaddr == 0 || *num_phdr == 0))
    {
      if (is_elf64)
	{
	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
      else
	{
	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;

	  switch (aux->a_type)
	    {
	    case AT_PHDR:
	      *phdr_memaddr = aux->a_un.a_val;
	      break;
	    case AT_PHNUM:
	      *num_phdr = aux->a_un.a_val;
	      break;
	    }
	}
    }

  close (fd);

  if (*phdr_memaddr == 0 || *num_phdr == 0)
    {
      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
	       "phdr_memaddr = %ld, phdr_num = %d",
	       (long) *phdr_memaddr, *num_phdr);
      return 2;
    }

  return 0;
}

/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */

static CORE_ADDR
get_dynamic (const int pid, const int is_elf64)
{
  CORE_ADDR phdr_memaddr, relocation;
  int num_phdr, i;
  unsigned char *phdr_buf;
  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);

  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
    return 0;

  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);

  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
    return 0;

  /* Compute relocation: it is expected to be 0 for "regular" executables,
     non-zero for PIE ones.  */
  relocation = -1;
  for (i = 0; relocation == -1 && i < num_phdr; i++)
    if (is_elf64)
      {
	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }
    else
      {
	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	if (p->p_type == PT_PHDR)
	  relocation = phdr_memaddr - p->p_vaddr;
      }

  if (relocation == -1)
    {
      /* PT_PHDR is optional, but necessary for PIE in general.
	 Fortunately, all real-world executables, including PIE
	 executables, always have PT_PHDR present.  PT_PHDR is not
	 present in some shared libraries or in fpc (Free Pascal 2.4)
	 binaries, but neither of those needs or provides DT_DEBUG
	 anyway (fpc binaries are statically linked).

	 Therefore if DT_DEBUG exists, PT_PHDR always exists too.

	 GDB could also find RELOCATION from AT_ENTRY - e_entry.  */

      return 0;
    }

  for (i = 0; i < num_phdr; i++)
    {
      if (is_elf64)
	{
	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
      else
	{
	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);

	  if (p->p_type == PT_DYNAMIC)
	    return p->p_vaddr + relocation;
	}
    }

  return 0;
}
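
/* A worked example with illustrative addresses: for a PIE executable
   whose PT_PHDR records p_vaddr = 0x40 but whose program headers are
   observed at 0x555555554040 (per AT_PHDR), the relocation is
   0x555555554040 - 0x40 = 0x555555554000, and a PT_DYNAMIC p_vaddr of
   0x2d80 then yields &_DYNAMIC = 0x555555556d80.  */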

/* Return &_r_debug in the inferior, or -1 if not present.  Return value
   can be 0 if the inferior does not yet have the library list initialized.
   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */

static CORE_ADDR
get_r_debug (const int pid, const int is_elf64)
{
  CORE_ADDR dynamic_memaddr;
  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
  CORE_ADDR map = -1;

  dynamic_memaddr = get_dynamic (pid, is_elf64);
  if (dynamic_memaddr == 0)
    return map;

  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
    {
      if (is_elf64)
	{
	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf64_Xword map;
	      unsigned char buf[sizeof (Elf64_Xword)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}
      else
	{
	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
	  union
	    {
	      Elf32_Word map;
	      unsigned char buf[sizeof (Elf32_Word)];
	    }
	  rld_map;
#endif
#ifdef DT_MIPS_RLD_MAP
	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
	    {
	      if (linux_read_memory (dyn->d_un.d_val,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP */
#ifdef DT_MIPS_RLD_MAP_REL
	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
	    {
	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
				     rld_map.buf, sizeof (rld_map.buf)) == 0)
		return rld_map.map;
	      else
		break;
	    }
#endif	/* DT_MIPS_RLD_MAP_REL */

	  if (dyn->d_tag == DT_DEBUG && map == -1)
	    map = dyn->d_un.d_val;

	  if (dyn->d_tag == DT_NULL)
	    break;
	}

      dynamic_memaddr += dyn_size;
    }

  return map;
}

/* Read one pointer from MEMADDR in the inferior.  */

static int
read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
{
  int ret;

  /* Go through a union so this works on either big or little endian
     hosts, when the inferior's pointer size is smaller than the size
     of CORE_ADDR.  It is assumed the inferior's endianness is the
     same as the superior's.  */
  union
  {
    CORE_ADDR core_addr;
    unsigned int ui;
    unsigned char uc;
  } addr;

  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
  if (ret == 0)
    {
      if (ptr_size == sizeof (CORE_ADDR))
	*ptr = addr.core_addr;
      else if (ptr_size == sizeof (unsigned int))
	*ptr = addr.ui;
      else
	gdb_assert_not_reached ("unhandled pointer size");
    }
  return ret;
}
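
/* Illustrative sketch (not part of gdbserver): why the union above
   matters.  Reading 4 inferior bytes directly into the low-addressed
   bytes of an 8-byte CORE_ADDR would leave the value in the wrong
   half on a big-endian host.  Copying into the union and reading the
   value back through a member of the inferior's pointer size is
   correct on either endianness (assuming, as above, that host and
   inferior byte orders match):

       static unsigned long
       example_widen_u32 (const unsigned char *bytes)
       {
	 union
	 {
	   unsigned long ul;
	   unsigned int ui;
	   unsigned char uc;
	 } u;

	 memcpy (&u.uc, bytes, sizeof (unsigned int));
	 return u.ui;
       }
*/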
6721 
6722 bool
supports_qxfer_libraries_svr4()6723 linux_process_target::supports_qxfer_libraries_svr4 ()
6724 {
6725   return true;
6726 }
6727 
6728 struct link_map_offsets
6729   {
6730     /* Offset and size of r_debug.r_version.  */
6731     int r_version_offset;
6732 
6733     /* Offset and size of r_debug.r_map.  */
6734     int r_map_offset;
6735 
6736     /* Offset to l_addr field in struct link_map.  */
6737     int l_addr_offset;
6738 
6739     /* Offset to l_name field in struct link_map.  */
6740     int l_name_offset;
6741 
6742     /* Offset to l_ld field in struct link_map.  */
6743     int l_ld_offset;
6744 
6745     /* Offset to l_next field in struct link_map.  */
6746     int l_next_offset;
6747 
6748     /* Offset to l_prev field in struct link_map.  */
6749     int l_prev_offset;
6750   };

/* Construct qXfer:libraries-svr4:read reply.  */

int
linux_process_target::qxfer_libraries_svr4 (const char *annex,
					    unsigned char *readbuf,
					    unsigned const char *writebuf,
					    CORE_ADDR offset, int len)
{
  struct process_info_private *const priv = current_process ()->priv;
  char filename[PATH_MAX];
  int pid, is_elf64;

  static const struct link_map_offsets lmo_32bit_offsets =
    {
      0,     /* r_version offset.  */
      4,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      4,     /* l_name offset in link_map.  */
      8,     /* l_ld offset in link_map.  */
      12,    /* l_next offset in link_map.  */
      16     /* l_prev offset in link_map.  */
    };

  static const struct link_map_offsets lmo_64bit_offsets =
    {
      0,     /* r_version offset.  */
      8,     /* r_debug.r_map offset.  */
      0,     /* l_addr offset in link_map.  */
      8,     /* l_name offset in link_map.  */
      16,    /* l_ld offset in link_map.  */
      24,    /* l_next offset in link_map.  */
      32     /* l_prev offset in link_map.  */
    };
  const struct link_map_offsets *lmo;
  unsigned int machine;
  int ptr_size;
  CORE_ADDR lm_addr = 0, lm_prev = 0;
  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
  int header_done = 0;

  if (writebuf != NULL)
    return -2;
  if (readbuf == NULL)
    return -1;

  pid = lwpid_of (current_thread);
  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
  is_elf64 = elf_64_file_p (filename, &machine);
  lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
  ptr_size = is_elf64 ? 8 : 4;

  while (annex[0] != '\0')
    {
      const char *sep;
      CORE_ADDR *addrp;
      int name_len;

      sep = strchr (annex, '=');
      if (sep == NULL)
	break;

      name_len = sep - annex;
      if (name_len == 5 && startswith (annex, "start"))
	addrp = &lm_addr;
      else if (name_len == 4 && startswith (annex, "prev"))
	addrp = &lm_prev;
      else
	{
	  annex = strchr (sep, ';');
	  if (annex == NULL)
	    break;
	  annex++;
	  continue;
	}

      annex = decode_address_to_semicolon (addrp, sep + 1);
    }
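
  /* At this point a non-empty annex of the form
     "start=<hexaddr>;prev=<hexaddr>;" (semicolon-terminated hex
     values) has filled in lm_addr and/or lm_prev; the exact field
     names shown are the ones matched above, any other field is
     skipped.  */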

  if (lm_addr == 0)
    {
      int r_version = 0;

      if (priv->r_debug == 0)
	priv->r_debug = get_r_debug (pid, is_elf64);

      /* We failed to find DT_DEBUG.  This situation will not change
	 for this inferior, so do not retry it.  Report it to GDB as
	 E01; see GDB's solib-svr4.c for the reasons.  */
      if (priv->r_debug == (CORE_ADDR) -1)
	return -1;

      if (priv->r_debug != 0)
	{
	  if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
				 (unsigned char *) &r_version,
				 sizeof (r_version)) != 0
	      || r_version != 1)
	    {
	      warning ("unexpected r_debug version %d", r_version);
	    }
	  else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
				 &lm_addr, ptr_size) != 0)
	    {
	      warning ("unable to read r_map from 0x%lx",
		       (long) priv->r_debug + lmo->r_map_offset);
	    }
	}
    }

  std::string document = "<library-list-svr4 version=\"1.0\"";

  while (lm_addr
	 && read_one_ptr (lm_addr + lmo->l_name_offset,
			  &l_name, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
			  &l_addr, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
			  &l_ld, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
			  &l_prev, ptr_size) == 0
	 && read_one_ptr (lm_addr + lmo->l_next_offset,
			  &l_next, ptr_size) == 0)
    {
      unsigned char libname[PATH_MAX];

      if (lm_prev != l_prev)
	{
	  warning ("Corrupted shared library list: 0x%lx != 0x%lx",
		   (long) lm_prev, (long) l_prev);
	  break;
	}

      /* Ignore the first entry even if it has a valid name, as the
	 first entry corresponds to the main executable.  The first
	 entry should not be skipped if the dynamic loader was loaded
	 late by a static executable (see the solib-svr4.c parameter
	 ignore_first).  But in that case the main executable does not
	 have PT_DYNAMIC present and this function has already exited
	 above due to a failed get_r_debug.  */
      if (lm_prev == 0)
	string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
      else
	{
	  /* Not checking for error because reading may stop before
	     we've got PATH_MAX worth of characters.  */
	  libname[0] = '\0';
	  linux_read_memory (l_name, libname, sizeof (libname) - 1);
	  libname[sizeof (libname) - 1] = '\0';
	  if (libname[0] != '\0')
	    {
	      if (!header_done)
		{
		  /* Terminate `<library-list-svr4'.  */
		  document += '>';
		  header_done = 1;
		}

	      string_appendf (document, "<library name=\"");
	      xml_escape_text_append (&document, (char *) libname);
	      string_appendf (document, "\" lm=\"0x%lx\" "
			      "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
			      (unsigned long) lm_addr, (unsigned long) l_addr,
			      (unsigned long) l_ld);
	    }
	}

      lm_prev = lm_addr;
      lm_addr = l_next;
    }

  if (!header_done)
    {
      /* Empty list; terminate `<library-list-svr4'.  */
      document += "/>";
    }
  else
    document += "</library-list-svr4>";

  int document_len = document.length ();
  if (offset < document_len)
    document_len -= offset;
  else
    document_len = 0;
  if (len > document_len)
    len = document_len;

  memcpy (readbuf, document.data () + offset, len);

  return len;
}
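
/* A reply built by the function above might look like this, wrapped
   here for readability (the actual document contains no extra
   whitespace, and all values are illustrative):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
		l_addr="0x7ffff7a00000" l_ld="0x7ffff7dcdba0"/>
     </library-list-svr4>

   GDB fetches it in chunks via the OFFSET and LEN arguments.  */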

#ifdef HAVE_LINUX_BTRACE

btrace_target_info *
linux_process_target::enable_btrace (ptid_t ptid,
				     const btrace_config *conf)
{
  return linux_enable_btrace (ptid, conf);
}

/* See to_disable_btrace target method.  */

int
linux_process_target::disable_btrace (btrace_target_info *tinfo)
{
  enum btrace_error err;

  err = linux_disable_btrace (tinfo);
  return (err == BTRACE_ERR_NONE ? 0 : -1);
}

/* Encode an Intel Processor Trace configuration.  */

static void
linux_low_encode_pt_config (struct buffer *buffer,
			    const struct btrace_data_pt_config *config)
{
  buffer_grow_str (buffer, "<pt-config>\n");

  switch (config->cpu.vendor)
    {
    case CV_INTEL:
      buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
			 "model=\"%u\" stepping=\"%u\"/>\n",
			 config->cpu.family, config->cpu.model,
			 config->cpu.stepping);
      break;

    default:
      break;
    }

  buffer_grow_str (buffer, "</pt-config>\n");
}
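
/* For an Intel CPU the function above produces output of the form
   (family/model/stepping values illustrative):

     <pt-config>
     <cpu vendor="GenuineIntel" family="6" model="142" stepping="10"/>
     </pt-config>

   For other vendors only the empty <pt-config> element is emitted.  */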

/* Encode a raw buffer.  */

static void
linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
		      unsigned int size)
{
  if (size == 0)
    return;

  /* We use hex encoding - see gdbsupport/rsp-low.h.  */
  buffer_grow_str (buffer, "<raw>\n");

  while (size-- > 0)
    {
      char elem[2];

      elem[0] = tohex ((*data >> 4) & 0xf);
      elem[1] = tohex (*data++ & 0xf);

      buffer_grow (buffer, elem, 2);
    }

  buffer_grow_str (buffer, "</raw>\n");
}
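
/* For instance, linux_low_encode_raw turns the two bytes
   { 0xde, 0xad } into the four hex characters "dead" between the
   <raw> tags; each byte expands to its high nibble followed by its
   low nibble.  */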

/* See to_read_btrace target method.  */

int
linux_process_target::read_btrace (btrace_target_info *tinfo,
				   buffer *buffer,
				   enum btrace_read_type type)
{
  struct btrace_data btrace;
  enum btrace_error err;

  err = linux_read_btrace (&btrace, tinfo, type);
  if (err != BTRACE_ERR_NONE)
    {
      if (err == BTRACE_ERR_OVERFLOW)
	buffer_grow_str0 (buffer, "E.Overflow.");
      else
	buffer_grow_str0 (buffer, "E.Generic Error.");

      return -1;
    }

  switch (btrace.format)
    {
    case BTRACE_FORMAT_NONE:
      buffer_grow_str0 (buffer, "E.No Trace.");
      return -1;

    case BTRACE_FORMAT_BTS:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");

      for (const btrace_block &block : *btrace.variant.bts.blocks)
	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
			   paddress (block.begin), paddress (block.end));

      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    case BTRACE_FORMAT_PT:
      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
      buffer_grow_str (buffer, "<pt>\n");

      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);

      linux_low_encode_raw (buffer, btrace.variant.pt.data,
			    btrace.variant.pt.size);

      buffer_grow_str (buffer, "</pt>\n");
      buffer_grow_str0 (buffer, "</btrace>\n");
      break;

    default:
      buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
      return -1;
    }

  return 0;
}
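
/* In the BTS case a successful reply assembled above has the form
   (addresses illustrative):

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x401130" end="0x401156"/>
     <block begin="0x401000" end="0x401023"/>
     </btrace>
*/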

/* See to_btrace_conf target method.  */

int
linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
					buffer *buffer)
{
  const struct btrace_config *conf;

  buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
  buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");

  conf = linux_btrace_conf (tinfo);
  if (conf != NULL)
    {
      switch (conf->format)
	{
	case BTRACE_FORMAT_NONE:
	  break;

	case BTRACE_FORMAT_BTS:
	  buffer_xml_printf (buffer, "<bts");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
	  buffer_xml_printf (buffer, " />\n");
	  break;

	case BTRACE_FORMAT_PT:
	  buffer_xml_printf (buffer, "<pt");
	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
	  buffer_xml_printf (buffer, "/>\n");
	  break;
	}
    }

  buffer_grow_str0 (buffer, "</btrace-conf>\n");
  return 0;
}
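
/* A typical reply for a BTS configuration looks like (the size value
   is illustrative):

     <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
     <btrace-conf version="1.0">
     <bts size="0x10000" />
     </btrace-conf>
*/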
#endif /* HAVE_LINUX_BTRACE */

/* See nat/linux-nat.h.  */

ptid_t
current_lwp_ptid (void)
{
  return ptid_of (current_thread);
}

const char *
linux_process_target::thread_name (ptid_t thread)
{
  return linux_proc_tid_get_name (thread);
}

#if USE_THREAD_DB
bool
linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
				     int *handle_len)
{
  return thread_db_thread_handle (ptid, handle, handle_len);
}
#endif

/* Default implementation of linux_target_ops method "set_pc" for
   a 32-bit pc register which is literally named "pc".  */

void
linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
{
  uint32_t newpc = pc;

  supply_register_by_name (regcache, "pc", &newpc);
}

/* Default implementation of linux_target_ops method "get_pc" for
   a 32-bit pc register which is literally named "pc".  */

CORE_ADDR
linux_get_pc_32bit (struct regcache *regcache)
{
  uint32_t pc;

  collect_register_by_name (regcache, "pc", &pc);
  if (debug_threads)
    debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
  return pc;
}

/* Default implementation of linux_target_ops method "set_pc" for
   a 64-bit pc register which is literally named "pc".  */

void
linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
{
  uint64_t newpc = pc;

  supply_register_by_name (regcache, "pc", &newpc);
}

/* Default implementation of linux_target_ops method "get_pc" for
   a 64-bit pc register which is literally named "pc".  */

CORE_ADDR
linux_get_pc_64bit (struct regcache *regcache)
{
  uint64_t pc;

  collect_register_by_name (regcache, "pc", &pc);
  if (debug_threads)
    debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
  return pc;
}

/* See linux-low.h.  */

int
linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
{
  gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
  int offset = 0;

  gdb_assert (wordsize == 4 || wordsize == 8);

  while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
    {
      if (wordsize == 4)
	{
	  uint32_t *data_p = (uint32_t *) data;
	  if (data_p[0] == match)
	    {
	      *valp = data_p[1];
	      return 1;
	    }
	}
      else
	{
	  uint64_t *data_p = (uint64_t *) data;
	  if (data_p[0] == match)
	    {
	      *valp = data_p[1];
	      return 1;
	    }
	}

      offset += 2 * wordsize;
    }

  return 0;
}
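
/* The auxiliary vector is a flat array of (type, value) word pairs
   terminated by an AT_NULL entry, which is why the loop above reads
   2 * wordsize bytes at a time.  A caller looking up, say, the
   program header table address on a 64-bit target would do:

     CORE_ADDR phdr;
     if (linux_get_auxv (8, AT_PHDR, &phdr))
       ... use phdr ...

   (AT_PHDR comes from <elf.h>; the 8 is the target word size.)  */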

/* See linux-low.h.  */

CORE_ADDR
linux_get_hwcap (int wordsize)
{
  CORE_ADDR hwcap = 0;
  linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
  return hwcap;
}

/* See linux-low.h.  */

CORE_ADDR
linux_get_hwcap2 (int wordsize)
{
  CORE_ADDR hwcap2 = 0;
  linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
  return hwcap2;
}

#ifdef HAVE_LINUX_REGSETS
void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
#endif
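
/* The loop in initialize_regsets_info counts entries up to a sentinel
   with a negative size, so each architecture's regsets array is
   expected to end with one.  A sketch (the fill/store helpers here
   are hypothetical):

     static struct regset_info my_regsets[] = {
       { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
	 GENERAL_REGS, my_fill_gregset, my_store_gregset },
       NULL_REGSET
     };

   where NULL_REGSET (from linux-low.h) supplies the -1 size.  */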

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (the_linux_target);

  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}