/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include <sys/param.h>   /* for MAXPATHLEN */
#include <sys/procfs.h>  /* for elf_gregset etc. */
#include "elf-bfd.h"     /* for elfcore_write_* */
#include "gregset.h"     /* for gregset */
#include "gdbcore.h"     /* for get_exec_file */
#include <ctype.h>       /* for isdigit */
#include "gdbthread.h"   /* for struct thread_info etc. */
#include "gdb_stat.h"    /* for struct stat */
#include <fcntl.h>       /* for O_RDONLY */

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the
   normal values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS	0x4200
#define PTRACE_GETEVENTMSG	0x4201

/* Options set using PTRACE_SETOPTIONS.  */
#define PTRACE_O_TRACESYSGOOD	0x00000001
#define PTRACE_O_TRACEFORK	0x00000002
#define PTRACE_O_TRACEVFORK	0x00000004
#define PTRACE_O_TRACECLONE	0x00000008
#define PTRACE_O_TRACEEXEC	0x00000010
#define PTRACE_O_TRACEVFORKDONE	0x00000020
#define PTRACE_O_TRACEEXIT	0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3
#define PTRACE_EVENT_EXEC	4
#define PTRACE_EVENT_VFORKDONE	5
#define PTRACE_EVENT_EXIT	6

#endif /* PTRACE_EVENT_FORK */

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   here.  */
#ifndef __WALL
#define __WALL 0x40000000 /* Wait for any child.  */
#endif

static int debug_linux_nat;

static int linux_parent_pid;

struct simple_pid_list
{
  int pid;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACEFORK can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

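/* For reference only (the layout is determined by the kernel's ptrace
   implementation, not by this file): when one of the PTRACE_O_* options
   above is in effect, the corresponding event is reported to the tracer
   as a stop with SIGTRAP, and the PTRACE_EVENT_* code is carried in the
   upper bits of the waitpid status.  Roughly:

     waitpid (pid, &status, 0);
     if (WIFSTOPPED (status)
         && WSTOPSIG (status) == SIGTRAP
         && (status >> 16) == PTRACE_EVENT_FORK)
       ...   // a fork event; PTRACE_GETEVENTMSG yields the new child's PID

   The code below relies on exactly this "status >> 16" convention.  */
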
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
  new_pid->pid = pid;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

void
linux_record_stopped_pid (int pid)
{
  add_to_pid_list (&stopped_pids, pid);
}


/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  int ret;

  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  exit (0);
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that
   we can't use TRACEFORK; if we get the fork notification, and we can
   extract the new child's PID, then we assume that we can.  */

static void
linux_test_for_tracefork (void)
{
  int child_pid, ret, status;
  long second_pid;

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name ("linux_test_for_tracefork: fork");

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name ("linux_test_for_tracefork: waitpid");
  else if (ret != child_pid)
    error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
  if (! WIFSTOPPED (status))
    error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);

  linux_supports_tracefork_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ptrace (PTRACE_KILL, child_pid, 0, 0);
      waitpid (child_pid, &status, 0);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
                PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ptrace (PTRACE_CONT, child_pid, 0, 0);
  ret = waitpid (child_pid, &status, 0);
  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
        {
          int second_status;

          linux_supports_tracefork_flag = 1;
          waitpid (second_pid, &second_status, 0);
          ptrace (PTRACE_DETACH, second_pid, 0, 0);
        }
    }

  if (WIFSTOPPED (status))
    {
      ptrace (PTRACE_DETACH, child_pid, 0, 0);
      waitpid (child_pid, &status, 0);
    }
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (void)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork ();
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (void)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork ();
  return linux_supports_tracevforkdone_flag;
}


void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_pid (ptid);
  int options;

  if (! linux_supports_tracefork ())
    return;

  options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
    | PTRACE_O_TRACECLONE;
  if (linux_supports_tracevforkdone ())
    options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, options);
}

void
child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
}

void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
}

#ifndef LINUX_CHILD_POST_STARTUP_INFERIOR
void
child_post_startup_inferior (ptid_t ptid)
{
  linux_child_post_startup_inferior (ptid);
}
#endif

int
child_follow_fork (int follow_child)
{
  ptid_t last_ptid;
  struct target_waitstatus last_status;
  int has_vforked;
  int parent_pid, child_pid;

  get_last_target_status (&last_ptid, &last_status);
  has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_pid (last_ptid);
  child_pid = last_status.value.related_pid;

  if (! follow_child)
    {
      /* We're already attached to the parent, by default.  */

      /* Before detaching from the child, remove all breakpoints from
         it.  (This won't actually modify the breakpoint list, but will
         physically remove the breakpoints from the child.)  */
      /* If we vforked this will remove the breakpoints from the parent
         also, but they'll be reinserted below.  */
      detach_breakpoints (child_pid);

      fprintf_filtered (gdb_stdout,
                        "Detaching after fork from child process %d.\n",
                        child_pid);

      ptrace (PTRACE_DETACH, child_pid, 0, 0);

      if (has_vforked)
        {
          if (linux_supports_tracevforkdone ())
            {
              int status;

              ptrace (PTRACE_CONT, parent_pid, 0, 0);
              waitpid (parent_pid, &status, __WALL);
              if ((status >> 16) != PTRACE_EVENT_VFORKDONE)
                warning ("Unexpected waitpid result %06x when waiting for "
                         "vfork-done", status);
            }
          else
            {
              /* We can't insert breakpoints until the child has
                 finished with the shared memory region.  We need to
                 wait until that happens.  Ideal would be to just
                 call:
                 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
                 - waitpid (parent_pid, &status, __WALL);
                 However, most architectures can't handle a syscall
                 being traced on the way out if it wasn't traced on
                 the way in.

                 We might also think to loop, continuing the child
                 until it exits or gets a SIGTRAP.  One problem is
                 that the child might call ptrace with PTRACE_TRACEME.

                 There's no simple and reliable way to figure out when
                 the vforked child will be done with its copy of the
                 shared memory.  We could step it out of the syscall,
                 two instructions, let it go, and then single-step the
                 parent once.  When we have hardware single-step, this
                 would work; with software single-step it could still
                 be made to work but we'd have to be able to insert
                 single-step breakpoints in the child, and we'd have
                 to insert -just- the single-step breakpoint in the
                 parent.  Very awkward.

                 In the end, the best we can do is to make sure it
                 runs for a little while.  Hopefully it will be out of
                 range of any breakpoints we reinsert.  Usually this
                 is only the single-step breakpoint at vfork's return
                 point.  */

              usleep (10000);
            }

          /* Since we vforked, breakpoints were removed in the parent
             too.  Put them back.  */
          reattach_breakpoints (parent_pid);
        }
    }
  else
    {
      char child_pid_spelling[40];

      /* Needed to keep the breakpoint lists in sync.  */
      if (! has_vforked)
        detach_breakpoints (child_pid);

      /* Before detaching from the parent, remove all breakpoints from it.  */
      remove_breakpoints ();

      fprintf_filtered (gdb_stdout,
                        "Attaching after fork to child process %d.\n",
                        child_pid);

      /* If we're vforking, we may want to hold on to the parent until
         the child exits or execs.  At exec time we can remove the old
         breakpoints from the parent and detach it; at exit time we
         could do the same (or even, sneakily, resume debugging it - the
         child's exec has failed, or something similar).

         This doesn't clean up "properly", because we can't call
         target_detach, but that's OK; if the current target is "child",
         then it doesn't need any further cleanups, and lin_lwp will
         generally not encounter vfork (vfork is defined to fork
         in libpthread.so).

         The holding part is very easy if we have VFORKDONE events;
         but keeping track of both processes is beyond GDB at the
         moment.  So we don't expose the parent to the rest of GDB.
         Instead we quietly hold onto it until such time as we can
         safely resume it.  */

      if (has_vforked)
        linux_parent_pid = parent_pid;
      else
        target_detach (NULL, 0);

      inferior_ptid = pid_to_ptid (child_pid);
      push_target (&deprecated_child_ops);

      /* Reset breakpoints in the child as appropriate.  */
      follow_inferior_reset_breakpoints ();
    }

  return 0;
}

ptid_t
linux_handle_extended_wait (int pid, int status,
                            struct target_waitstatus *ourstatus)
{
  int event = status >> 16;

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */
          do
            {
              ret = waitpid (new_pid, &status,
                             (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
            }
          while (ret == -1 && errno == EINTR);
          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            internal_error (__FILE__, __LINE__,
                            "wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status) || WSTOPSIG (status) != SIGSTOP)
            internal_error (__FILE__, __LINE__,
                            "wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK)
        ourstatus->kind = TARGET_WAITKIND_FORKED;
      else if (event == PTRACE_EVENT_VFORK)
        ourstatus->kind = TARGET_WAITKIND_VFORKED;
      else
        ourstatus->kind = TARGET_WAITKIND_SPURIOUS;

      ourstatus->value.related_pid = new_pid;
      return inferior_ptid;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      ourstatus->kind = TARGET_WAITKIND_EXECD;
      ourstatus->value.execd_pathname
        = xstrdup (child_pid_to_exec_file (pid));

      if (linux_parent_pid)
        {
          detach_breakpoints (linux_parent_pid);
          ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);

          linux_parent_pid = 0;
        }

      return inferior_ptid;
    }

  internal_error (__FILE__, __LINE__,
                  "unknown ptrace event %d", event);
}


int
child_insert_fork_catchpoint (int pid)
{
  if (! linux_supports_tracefork ())
    error ("Your system does not support fork catchpoints.");

  return 0;
}

int
child_insert_vfork_catchpoint (int pid)
{
  if (!linux_supports_tracefork ())
    error ("Your system does not support vfork catchpoints.");

  return 0;
}

int
child_insert_exec_catchpoint (int pid)
{
  if (!linux_supports_tracefork ())
    error ("Your system does not support exec catchpoints.");

  return 0;
}

void
kill_inferior (void)
{
  int status;
  int pid = PIDGET (inferior_ptid);
  struct target_waitstatus last;
  ptid_t last_ptid;
  int ret;

  if (pid == 0)
    return;

  /* If we're stopped while forking and we haven't followed yet, kill the
     other task.  We need to do this first because the parent will be
     sleeping if this is a vfork.  */

  get_last_target_status (&last_ptid, &last);

  if (last.kind == TARGET_WAITKIND_FORKED
      || last.kind == TARGET_WAITKIND_VFORKED)
    {
      ptrace (PT_KILL, last.value.related_pid, 0, 0);
      wait (&status);
    }

  /* Kill the current process.  */
  ptrace (PT_KILL, pid, 0, 0);
  ret = wait (&status);

  /* We might get a SIGCHLD instead of an exit status.  This is
     aggravated by the first kill above - a child has just died.  */

  while (ret == pid && WIFSTOPPED (status))
    {
      ptrace (PT_KILL, pid, 0, 0);
      ret = wait (&status);
    }

  target_mourn_inferior ();
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly well possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     when debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */

/* List of known LWPs.  */
static struct lwp_info *lwp_list;

/* Number of LWPs in the list.  */
static int num_lwps;

/* Non-zero if we're running in "threaded" mode.  */
static int threaded;


#define GET_LWP(ptid)		ptid_get_lwp (ptid)
#define GET_PID(ptid)		ptid_get_pid (ptid)
#define is_lwp(ptid)		(GET_LWP (ptid) != 0)
#define BUILD_LWP(lwp, pid)	ptid_build (pid, lwp, 0)

/* If the last reported event was a SIGTRAP, this variable is set to
   the process id of the LWP/thread that got it.  */
ptid_t trap_ptid;


/* This module's target-specific operations.  */
static struct target_ops linux_nat_ops;

/* Since we cannot wait (in linux_nat_wait) for the initial process and
   any cloned processes with a single call to waitpid, we have to use
   the WNOHANG flag and call waitpid in a loop.  To optimize
   things a bit we use `sigsuspend' to wake us up when a process has
   something to report (it will send us a SIGCHLD if it has).  To make
   this work we have to juggle with the signal mask.  We save the
   original signal mask such that we can restore it before creating a
   new process in order to avoid blocking certain signals in the
   inferior.  We then block SIGCHLD during the waitpid/sigsuspend
   loop.  */

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make that sigsuspend work.  */
static sigset_t blocked_mask;


/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_nat_thread_alive (ptid_t ptid);

/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    snprintf (buf, sizeof (buf), "%s (stopped)",
              strsignal (WSTOPSIG (status)));
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
              strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

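/* Illustrative note (the numbers below are made up; only the shape of
   the convention matters): with the GET_LWP/BUILD_LWP macros above, an
   LWP is identified by a ptid whose lwp field is the kernel task id and
   whose pid field is the overall process id.  For a process 1234 whose
   second thread has kernel id 1237, one would see roughly:

     ptid_t main_thread  = BUILD_LWP (1234, 1234);   // lwp == pid
     ptid_t other_thread = BUILD_LWP (1237, 1234);
     is_lwp (other_thread);    // non-zero
     GET_LWP (other_thread);   // 1237, what we hand to ptrace/waitpid
     GET_PID (other_thread);   // 1234

   The LWP list below stores one lwp_info per such ptid.  */
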
/* Initialize the list of LWPs.  Note that this module, contrary to
   what GDB's generic threads layer does for its thread list,
   re-initializes the LWP lists whenever we mourn or detach (which
   doesn't involve mourning) the inferior.  */

static void
init_lwp_list (void)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;
      xfree (lp);
    }

  lwp_list = NULL;
  num_lwps = 0;
  threaded = 0;
}

/* Add the LWP specified by PID to the list.  If this causes the
   number of LWPs to become larger than one, go into "threaded" mode.
   Return a pointer to the structure describing the new LWP.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;

  lp->next = lwp_list;
  lwp_list = lp;
  if (++num_lwps > 1)
    threaded = 1;

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  /* We don't go back to "non-threaded" mode if the number of threads
     becomes less than two.  */
  num_lwps--;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  xfree (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;
      if ((*callback) (lp, data))
        return lp;
    }

  return NULL;
}

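/* A minimal usage sketch for iterate_over_lwps (count_stopped is a
   hypothetical callback used only for this illustration; the real
   callbacks appear further down in this file):

     static int
     count_stopped (struct lwp_info *lp, void *data)
     {
       if (lp->stopped)
         (*(int *) data)++;
       return 0;     // keep iterating; return 1 to stop and return LP
     }

     int n = 0;
     iterate_over_lwps (count_stopped, &n);

   Returning non-zero from the callback makes iterate_over_lwps return
   that LWP immediately, which is how the select_*_callback routines
   below are used.  */
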
/* Attach to the LWP specified by PID.  If VERBOSE is non-zero, print
   a message telling the user that a new LWP has been added to the
   process.  */

void
lin_lwp_attach_lwp (ptid_t ptid, int verbose)
{
  struct lwp_info *lp, *found_lp;

  gdb_assert (is_lwp (ptid));

  /* Make sure SIGCHLD is blocked.  We don't want SIGCHLD events
     to interrupt either the ptrace() or waitpid() calls below.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    {
      sigaddset (&blocked_mask, SIGCHLD);
      sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
    }

  if (verbose)
    printf_filtered ("[New %s]\n", target_pid_to_str (ptid));

  found_lp = lp = find_lwp_pid (ptid);
  if (lp == NULL)
    lp = add_lwp (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (GET_LWP (ptid) != GET_PID (ptid) && found_lp == NULL)
    {
      pid_t pid;
      int status;

      if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
        error ("Can't attach %s: %s", target_pid_to_str (ptid),
               safe_strerror (errno));

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
                            target_pid_to_str (ptid));

      pid = waitpid (GET_LWP (ptid), &status, 0);
      if (pid == -1 && errno == ECHILD)
        {
          /* Try again with __WCLONE to check cloned processes.  */
          pid = waitpid (GET_LWP (ptid), &status, __WCLONE);
          lp->cloned = 1;
        }

      gdb_assert (pid == GET_LWP (ptid)
                  && WIFSTOPPED (status) && WSTOPSIG (status));

      child_post_attach (pid);

      lp->stopped = 1;

      if (debug_linux_nat)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "LLAL: waitpid %s received %s\n",
                              target_pid_to_str (ptid),
                              status_to_str (status));
        }
    }
  else
    {
      /* We assume that the LWP representing the original process is
         already stopped.  Mark it as stopped in the data structure
         that the linux ptrace layer uses to keep track of threads.
         Note that this won't have already been done since the main
         thread will have, we assume, been stopped by an attach from a
         different layer.  */
      lp->stopped = 1;
    }
}

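/* Note on the waitpid/__WCLONE dance above (informational only, based
   on the behaviour of 2.4/2.6-era kernels as this file understands it):
   a freshly attached LWP that was created with clone() is not reported
   by a plain waitpid() from the tracer; waitpid() fails with ECHILD and
   the status only shows up when the __WCLONE flag is given.  That is
   why both this function and linux_nat_attach below retry with __WCLONE
   and record lp->cloned, so that later waits can pass the right flag.  */
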
static void
linux_nat_attach (char *args, int from_tty)
{
  struct lwp_info *lp;
  pid_t pid;
  int status;

  /* FIXME: We should probably accept a list of process id's, and
     attach all of them.  */
  deprecated_child_ops.to_attach (args, from_tty);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_lwp (BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid)));

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  pid = waitpid (GET_PID (inferior_ptid), &status, 0);
  if (pid == -1 && errno == ECHILD)
    {
      warning ("%s is a cloned process", target_pid_to_str (inferior_ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      pid = waitpid (GET_PID (inferior_ptid), &status, __WCLONE);
      lp->cloned = 1;
    }

  gdb_assert (pid == GET_PID (inferior_ptid)
              && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);

  lp->stopped = 1;

  /* Fake the SIGSTOP that core GDB expects.  */
  lp->status = W_STOPCODE (SIGSTOP);
  lp->resumed = 1;
  if (debug_linux_nat)
    {
      fprintf_unfiltered (gdb_stdlog,
                          "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid);
    }
}

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
                        strsignal (WSTOPSIG (lp->status)),
                        target_pid_to_str (lp->ptid));

  while (lp->signalled && lp->stopped)
    {
      errno = 0;
      if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0,
                  WSTOPSIG (lp->status)) < 0)
        error ("Can't continue %s: %s", target_pid_to_str (lp->ptid),
               safe_strerror (errno));

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n",
                            target_pid_to_str (lp->ptid),
                            status_to_str (lp->status));

      lp->stopped = 0;
      lp->signalled = 0;
      lp->status = 0;
      /* FIXME drow/2003-08-26: There was a call to stop_wait_callback
         here.  But since lp->signalled was cleared above,
         stop_wait_callback didn't do anything; the process was left
         running.  Shouldn't we be waiting for it to stop?
         I've removed the call, since stop_wait_callback now does do
         something when called with lp->signalled == 0.  */

      gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
    {
      errno = 0;
      if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
                  WSTOPSIG (lp->status)) < 0)
        error ("Can't detach %s: %s", target_pid_to_str (lp->ptid),
               safe_strerror (errno));

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "PTRACE_DETACH (%s, %s, 0) (OK)\n",
                            target_pid_to_str (lp->ptid),
                            strsignal (WSTOPSIG (lp->status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}

static void
linux_nat_detach (char *args, int from_tty)
{
  iterate_over_lwps (detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps == 1);

  trap_ptid = null_ptid;

  /* Destroy LWP info; it's no longer valid.  */
  init_lwp_list ();

  /* Restore the original signal mask.  */
  sigprocmask (SIG_SETMASK, &normal_mask, NULL);
  sigemptyset (&blocked_mask);

  inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid));
  deprecated_child_ops.to_detach (args, from_tty);
}

/* Resume LP.  */

static int
resume_callback (struct lwp_info *lp, void *data)
{
  if (lp->stopped && lp->status == 0)
    {
      struct thread_info *tp;

      child_resume (pid_to_ptid (GET_LWP (lp->ptid)), 0, TARGET_SIGNAL_0);
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
                            target_pid_to_str (lp->ptid));
      lp->stopped = 0;
      lp->step = 0;
    }

  return 0;
}

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  return 0;
}

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  return 0;
}

static void
linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
{
  struct lwp_info *lp;
  int resume_all;

  /* A specific PTID means `step only this process id'.  */
  resume_all = (PIDGET (ptid) == -1);

  if (resume_all)
    iterate_over_lwps (resume_set_callback, NULL);
  else
    iterate_over_lwps (resume_clear_callback, NULL);

  /* If PID is -1, it's the current inferior that should be
     handled specially.  */
  if (PIDGET (ptid) == -1)
    ptid = inferior_ptid;

  lp = find_lwp_pid (ptid);
  if (lp)
    {
      ptid = pid_to_ptid (GET_LWP (lp->ptid));

      /* Remember if we're stepping.  */
      lp->step = step;

      /* Mark this LWP as resumed.  */
      lp->resumed = 1;

      /* If we have a pending wait status for this thread, there is no
         point in resuming the process.  */
      if (lp->status)
        {
          /* FIXME: What should we do if we are supposed to continue
             this thread with a signal?  */
          gdb_assert (signo == TARGET_SIGNAL_0);
          return;
        }

      /* Mark LWP as not stopped to prevent it from being continued by
         resume_callback.  */
      lp->stopped = 0;
    }

  if (resume_all)
    iterate_over_lwps (resume_callback, NULL);

  child_resume (ptid, step, signo);
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "LLR: %s %s, %s (resume event thread)\n",
                        step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                        target_pid_to_str (ptid),
                        signo ? strsignal (signo) : "0");
}

/* Issue kill to specified lwp.  */

static int tkill_failed;

static int
kill_lwp (int lwpid, int signo)
{
  errno = 0;

  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef HAVE_TKILL_SYSCALL
  if (!tkill_failed)
    {
      int ret = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
        return ret;
      errno = 0;
      tkill_failed = 1;
    }
#endif

  return kill (lwpid, signo);
}

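/* Background for kill_lwp above (a summary, not a specification): with
   NPTL, all threads of a process share one POSIX process id, so
   kill (pid, sig) may be delivered to an arbitrary thread of the group.
   tkill(2) takes a kernel thread id instead and therefore reaches
   exactly the LWP we mean to signal, e.g.

     syscall (__NR_tkill, GET_LWP (lp->ptid), SIGSTOP);

   On kernels without tkill the syscall fails with ENOSYS, and the
   fallback to kill() is good enough for the older LinuxThreads model,
   where every thread has its own process id.  */
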
/* Handle a GNU/Linux extended wait response.  Most of the work we
   just pass off to linux_handle_extended_wait, but if it reports a
   clone event we need to add the new LWP to our list (and not report
   the trap to higher layers).  This function returns non-zero if
   the event should be ignored and we should wait again.  */

static int
linux_nat_handle_extended (struct lwp_info *lp, int status)
{
  linux_handle_extended_wait (GET_LWP (lp->ptid), status,
                              &lp->waitstatus);

  /* TARGET_WAITKIND_SPURIOUS is used to indicate clone events.  */
  if (lp->waitstatus.kind == TARGET_WAITKIND_SPURIOUS)
    {
      struct lwp_info *new_lp;
      new_lp = add_lwp (BUILD_LWP (lp->waitstatus.value.related_pid,
                                   GET_PID (inferior_ptid)));
      new_lp->cloned = 1;
      new_lp->stopped = 1;

      lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLHE: Got clone event from LWP %ld, resuming\n",
                            GET_LWP (lp->ptid));
      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);

      return 1;
    }

  return 0;
}

/* Wait for LP to stop.  Returns the wait status, or 0 if the LWP has
   exited.  */

static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status;
  int thread_dead = 0;

  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  pid = waitpid (GET_LWP (lp->ptid), &status, 0);
  if (pid == -1 && errno == ECHILD)
    {
      pid = waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
      if (pid == -1 && errno == ECHILD)
        {
          /* The thread has previously exited.  We need to delete it
             now because, for some vendor 2.4 kernels with NPTL
             support backported, there won't be an exit event unless
             it is the main thread.  2.6 kernels will report an exit
             event for each thread that exits, as expected.  */
          thread_dead = 1;
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
                                target_pid_to_str (lp->ptid));
        }
    }

  if (!thread_dead)
    {
      gdb_assert (pid == GET_LWP (lp->ptid));

      if (debug_linux_nat)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "WL: waitpid %s received %s\n",
                              target_pid_to_str (lp->ptid),
                              status_to_str (status));
        }
    }

  /* Check if the thread has exited.  */
  if (WIFEXITED (status) || WIFSIGNALED (status))
    {
      thread_dead = 1;
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
                            target_pid_to_str (lp->ptid));
    }

  if (thread_dead)
    {
      if (in_thread_list (lp->ptid))
        {
          /* Core GDB cannot deal with us deleting the current thread.  */
          if (!ptid_equal (lp->ptid, inferior_ptid))
            delete_thread (lp->ptid);
          printf_unfiltered ("[%s exited]\n",
                             target_pid_to_str (lp->ptid));
        }

      delete_lwp (lp->ptid);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "WL: Handling extended status 0x%06x\n",
                            status);
      if (linux_nat_handle_extended (lp, status))
        return wait_lwp (lp);
    }

  return status;
}

/* Send a SIGSTOP to LP.  */

static int
stop_callback (struct lwp_info *lp, void *data)
{
  if (!lp->stopped && !lp->signalled)
    {
      int ret;

      if (debug_linux_nat)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "SC: kill %s **<SIGSTOP>**\n",
                              target_pid_to_str (lp->ptid));
        }
      errno = 0;
      ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
      if (debug_linux_nat)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "SC: lwp kill %d %s\n",
                              ret,
                              errno ? safe_strerror (errno) : "ERRNO-OK");
        }

      lp->signalled = 1;
      gdb_assert (lp->status == 0);
    }

  return 0;
}

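/* How the stop handshake fits together (descriptive comment only):
   stop_callback above sends SIGSTOP to an LWP and sets lp->signalled;
   stop_wait_callback below then waits until that SIGSTOP is reported,
   stashing any other interesting event (for example a breakpoint
   SIGTRAP) in lp->status so that linux_nat_wait can report it later.
   A typical "stop the world" sequence therefore looks roughly like:

     iterate_over_lwps (stop_callback, NULL);
     iterate_over_lwps (stop_wait_callback, &flush_mask);

   which is exactly what linux_nat_wait does further down.  */
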
/* Wait until LP is stopped.  If DATA is non-null it is interpreted as
   a pointer to a set of signals to be flushed immediately.  */

static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
  sigset_t *flush_mask = data;

  if (!lp->stopped)
    {
      int status;

      status = wait_lwp (lp);
      if (status == 0)
        return 0;

      /* Ignore any signals in FLUSH_MASK.  */
      if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
        {
          if (!lp->signalled)
            {
              lp->stopped = 1;
              return 0;
            }

          errno = 0;
          ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "PTRACE_CONT %s, 0, 0 (%s)\n",
                                target_pid_to_str (lp->ptid),
                                errno ? safe_strerror (errno) : "OK");

          return stop_wait_callback (lp, flush_mask);
        }

      if (WSTOPSIG (status) != SIGSTOP)
        {
          if (WSTOPSIG (status) == SIGTRAP)
            {
              /* If a LWP other than the LWP that we're reporting an
                 event for has hit a GDB breakpoint (as opposed to
                 some random trap signal), then just arrange for it to
                 hit it again later.  We don't keep the SIGTRAP status
                 and don't forward the SIGTRAP signal to the LWP.  We
                 will handle the current event, eventually we will
                 resume all LWPs, and this one will get its breakpoint
                 trap again.

                 If we do not do this, then we run the risk that the
                 user will delete or disable the breakpoint, but the
                 thread will have already tripped on it.  */

              /* Now resume this LWP and get the SIGSTOP event.  */
              errno = 0;
              ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
              if (debug_linux_nat)
                {
                  fprintf_unfiltered (gdb_stdlog,
                                      "PTRACE_CONT %s, 0, 0 (%s)\n",
                                      target_pid_to_str (lp->ptid),
                                      errno ? safe_strerror (errno) : "OK");

                  fprintf_unfiltered (gdb_stdlog,
                                      "SWC: Candidate SIGTRAP event in %s\n",
                                      target_pid_to_str (lp->ptid));
                }
              /* Hold the SIGTRAP for handling by linux_nat_wait.  */
              stop_wait_callback (lp, data);
              /* If there's another event, throw it back into the queue.  */
              if (lp->status)
                {
                  if (debug_linux_nat)
                    {
                      fprintf_unfiltered (gdb_stdlog,
                                          "SWC: kill %s, %s\n",
                                          target_pid_to_str (lp->ptid),
                                          status_to_str ((int) status));
                    }
                  kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
                }
              /* Save the sigtrap event.  */
              lp->status = status;
              return 0;
            }
          else
            {
              /* The thread was stopped with a signal other than
                 SIGSTOP, and didn't accidentally trip a breakpoint.  */

              if (debug_linux_nat)
                {
                  fprintf_unfiltered (gdb_stdlog,
                                      "SWC: Pending event %s in %s\n",
                                      status_to_str ((int) status),
                                      target_pid_to_str (lp->ptid));
                }
              /* Now resume this LWP and get the SIGSTOP event.  */
              errno = 0;
              ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
                                    target_pid_to_str (lp->ptid),
                                    errno ? safe_strerror (errno) : "OK");

              /* Hold this event/waitstatus while we check to see if
                 there are any more (we still want to get that SIGSTOP).  */
              stop_wait_callback (lp, data);
              /* If the lp->status field is still empty, use it to hold
                 this event.  If not, then this event must be returned
                 to the event queue of the LWP.  */
              if (lp->status == 0)
                lp->status = status;
              else
                {
                  if (debug_linux_nat)
                    {
                      fprintf_unfiltered (gdb_stdlog,
                                          "SWC: kill %s, %s\n",
                                          target_pid_to_str (lp->ptid),
                                          status_to_str ((int) status));
                    }
                  kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
                }
              return 0;
            }
        }
      else
        {
          /* We caught the SIGSTOP that we intended to catch, so
             there's no SIGSTOP pending.  */
          lp->stopped = 1;
          lp->signalled = 0;
        }
    }

  return 0;
}

/* Check whether PID has any pending signals in FLUSH_MASK.  If so set
   the appropriate bits in PENDING, and return 1 - otherwise return 0.  */

static int
linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
{
  sigset_t blocked, ignored;
  int i;

  linux_proc_pending_signals (pid, pending, &blocked, &ignored);

  if (!flush_mask)
    return 0;

  for (i = 1; i < NSIG; i++)
    if (sigismember (pending, i))
      if (!sigismember (flush_mask, i)
          || sigismember (&blocked, i)
          || sigismember (&ignored, i))
        sigdelset (pending, i);

  if (sigisemptyset (pending))
    return 0;

  return 1;
}

/* DATA is interpreted as a mask of signals to flush.  If LP has
   signals pending, and they are all in the flush mask, then arrange
   to flush them.  LP should be stopped, as should all other threads
   it might share a signal queue with.  */

static int
flush_callback (struct lwp_info *lp, void *data)
{
  sigset_t *flush_mask = data;
  sigset_t pending, intersection, blocked, ignored;
  int pid, status;

  /* Normally, when an LWP exits, it is removed from the LWP list.  The
     last LWP isn't removed till later, however.  So if there is only
     one LWP on the list, make sure it's alive.  */
  if (lwp_list == lp && lp->next == NULL)
    if (!linux_nat_thread_alive (lp->ptid))
      return 0;

  /* Just because the LWP is stopped doesn't mean that new signals
     can't arrive from outside, so this function must be careful of
     race conditions.  However, because all threads are stopped, we
     can assume that the pending mask will not shrink unless we resume
     the LWP, and that it will then get another signal.  We can't
     control which one, however.  */

  if (lp->status)
    {
      if (debug_linux_nat)
        printf_unfiltered ("FC: LP has pending status %06x\n", lp->status);
      if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
        lp->status = 0;
    }

  while (linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
    {
      int ret;

      errno = 0;
      ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stderr,
                            "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);

      lp->stopped = 0;
      stop_wait_callback (lp, flush_mask);
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stderr,
                            "FC: Wait finished; saved status is %d\n",
                            lp->status);
    }

  return 0;
}

/* Return non-zero if LP has a wait status pending.  */

static int
status_callback (struct lwp_info *lp, void *data)
{
  /* Only report a pending wait status if we pretend that this has
     indeed been resumed.  */
  return (lp->status != 0 && lp->resumed);
}

/* Return non-zero if LP isn't stopped.  */

static int
running_callback (struct lwp_info *lp, void *data)
{
  return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
}

/* Count the LWP's that have had events.  */

static int
count_events_callback (struct lwp_info *lp, void *data)
{
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only LWPs that have a SIGTRAP event pending.  */
  if (lp->status != 0
      && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
{
  if (lp->step && lp->status != 0)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event.  */

static int
select_event_lwp_callback (struct lwp_info *lp, void *data)
{
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only LWPs that have a SIGTRAP event pending.  */
  if (lp->status != 0
      && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoints_callback (struct lwp_info *lp, void *data)
{
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (lp->status != 0
      && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
      && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
                                     DECR_PC_AFTER_BREAK))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "CBC: Push back breakpoint for %s\n",
                            target_pid_to_str (lp->ptid));

      /* Back up the PC if necessary.  */
      if (DECR_PC_AFTER_BREAK)
        write_pc_pid (read_pc_pid (lp->ptid) - DECR_PC_AFTER_BREAK, lp->ptid);

      /* Throw away the SIGTRAP.  */
      lp->status = 0;
    }

  return 0;
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Record the wait status for the original LWP.  */
  (*orig_lp)->status = *status;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "SEL: Select single-step %s\n",
                            target_pid_to_str (event_lp->ptid));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      iterate_over_lwps (count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
        ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_linux_nat && num_events > 1)
        fprintf_unfiltered (gdb_stdlog,
                            "SEL: Found %d SIGTRAP events, selecting #%d\n",
                            num_events, random_selector);

      event_lp = iterate_over_lwps (select_event_lwp_callback,
                                    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP.  */
  (*orig_lp)->status = 0;
}

/* Return non-zero if LP has been resumed.  */

static int
resumed_callback (struct lwp_info *lp, void *data)
{
  return lp->resumed;
}

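/* Worked example for the random selection in select_event_lwp (the
   numbers are invented for illustration): if three LWPs have a SIGTRAP
   pending, count_events_callback leaves num_events == 3, and

     random_selector = (int) ((3 * (double) rand ()) / (RAND_MAX + 1.0));

   is uniformly one of 0, 1 or 2.  select_event_lwp_callback then counts
   SIGTRAP-pending LWPs down from that index and returns the matching
   one, so over time each stopped LWP gets an equal chance of being
   reported, which is what prevents starvation.  */
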
#ifdef CHILD_WAIT

/* We need to override child_wait to support attaching to cloned
   processes, since a normal wait (as done by the default version)
   ignores those processes.  */

/* Wait for child PTID to do something.  Return id of the child,
   minus_one_ptid in case of error; store status into *OURSTATUS.  */

ptid_t
child_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
{
  int save_errno;
  int status;
  pid_t pid;

  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  do
    {
      set_sigint_trap ();	/* Causes SIGINT to be passed on to the
				   attached process.  */
      set_sigio_trap ();

      pid = waitpid (GET_PID (ptid), &status, 0);
      if (pid == -1 && errno == ECHILD)
        /* Try again with __WCLONE to check cloned processes.  */
        pid = waitpid (GET_PID (ptid), &status, __WCLONE);

      if (debug_linux_nat)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "CW: waitpid %ld received %s\n",
                              (long) pid, status_to_str (status));
        }

      save_errno = errno;

      /* Make sure we don't report an event for the exit of the
         original program, if we've detached from it.  */
      if (pid != -1 && !WIFSTOPPED (status) && pid != GET_PID (inferior_ptid))
        {
          pid = -1;
          save_errno = EINTR;
        }

      /* Check for stop events reported by a process we didn't already
         know about - in this case, anything other than inferior_ptid.

         If we're expecting to receive stopped processes after fork,
         vfork, and clone events, then we'll just add the new one to
         our list and go back to waiting for the event to be reported
         - the stopped process might be returned from waitpid before
         or after the event is.  If we want to handle debugging of
         CLONE_PTRACE processes we need to do more here, i.e. switch
         to multi-threaded mode.  */
      if (pid != -1 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP
          && pid != GET_PID (inferior_ptid))
        {
          linux_record_stopped_pid (pid);
          pid = -1;
          save_errno = EINTR;
        }

      /* Handle GNU/Linux's extended waitstatus for trace events.  */
      if (pid != -1 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
          && status >> 16 != 0)
        {
          linux_handle_extended_wait (pid, status, ourstatus);

          /* If we see a clone event, detach the child, and don't
             report the event.  It would be nice to offer some way to
             switch into a non-thread-db based threaded mode at this
             point.  */
          if (ourstatus->kind == TARGET_WAITKIND_SPURIOUS)
            {
              ptrace (PTRACE_DETACH, ourstatus->value.related_pid, 0, 0);
              ourstatus->kind = TARGET_WAITKIND_IGNORE;
              ptrace (PTRACE_CONT, pid, 0, 0);
              pid = -1;
              save_errno = EINTR;
            }
        }

      clear_sigio_trap ();
      clear_sigint_trap ();
    }
  while (pid == -1 && save_errno == EINTR);

  if (pid == -1)
    {
      warning ("Child process unexpectedly missing: %s",
               safe_strerror (errno));

      /* Claim it exited with unknown signal.  */
      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
      ourstatus->value.sig = TARGET_SIGNAL_UNKNOWN;
      return minus_one_ptid;
    }

  if (ourstatus->kind == TARGET_WAITKIND_IGNORE)
    store_waitstatus (ourstatus, status);

  return pid_to_ptid (pid);
}

#endif

/* Stop an active thread, verify it still exists, then resume it.  */

static int
stop_and_resume_callback (struct lwp_info *lp, void *data)
{
  struct lwp_info *ptr;

  if (!lp->stopped && !lp->signalled)
    {
      stop_callback (lp, NULL);
      stop_wait_callback (lp, NULL);
      /* Resume if the lwp still exists.  */
      for (ptr = lwp_list; ptr; ptr = ptr->next)
        if (lp == ptr)
          {
            resume_callback (lp, NULL);
            resume_set_callback (lp, NULL);
          }
    }
  return 0;
}

static ptid_t
linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
{
  struct lwp_info *lp = NULL;
  int options = 0;
  int status = 0;
  pid_t pid = PIDGET (ptid);
  sigset_t flush_mask;

  sigemptyset (&flush_mask);

  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    {
      sigaddset (&blocked_mask, SIGCHLD);
      sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
    }

retry:

  /* Make sure there is at least one LWP that has been resumed, at
     least if there are any LWPs at all.  */
  gdb_assert (num_lwps == 0 || iterate_over_lwps (resumed_callback, NULL));

  /* First check if there is a LWP with a wait status pending.  */
  if (pid == -1)
    {
      /* Any LWP that's been resumed will do.  */
      lp = iterate_over_lwps (status_callback, NULL);
      if (lp)
        {
          status = lp->status;
          lp->status = 0;

          if (debug_linux_nat && status)
            fprintf_unfiltered (gdb_stdlog,
                                "LLW: Using pending wait status %s for %s.\n",
                                status_to_str (status),
                                target_pid_to_str (lp->ptid));
        }

      /* But if we don't find one, we'll have to wait, and check both
         cloned and uncloned processes.  We start with the cloned
         processes.  */
      options = __WCLONE | WNOHANG;
    }
  else if (is_lwp (ptid))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: Waiting for specific LWP %s.\n",
                            target_pid_to_str (ptid));

      /* We have a specific LWP to check.  */
      lp = find_lwp_pid (ptid);
      gdb_assert (lp);
      status = lp->status;
      lp->status = 0;

      if (debug_linux_nat && status)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: Using pending wait status %s for %s.\n",
                            status_to_str (status),
                            target_pid_to_str (lp->ptid));

      /* If we have to wait, take into account whether PID is a cloned
         process or not.  And we have to convert it to something that
         the layer beneath us can understand.  */
      options = lp->cloned ? __WCLONE : 0;
      pid = GET_LWP (ptid);
    }

  if (status && lp->signalled)
    {
      /* A pending SIGSTOP may interfere with the normal stream of
         events.  In a typical case where interference is a problem,
         we have a SIGSTOP signal pending for LWP A while
         single-stepping it, encounter an event in LWP B, and take the
         pending SIGSTOP while trying to stop LWP A.  After processing
         the event in LWP B, LWP A is continued, and we'll never see
         the SIGTRAP associated with the last time we were
         single-stepping LWP A.  */

      /* Resume the thread.  It should halt immediately returning the
         pending SIGSTOP.  */
      registers_changed ();
      child_resume (pid_to_ptid (GET_LWP (lp->ptid)), lp->step,
                    TARGET_SIGNAL_0);
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
                            lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                            target_pid_to_str (lp->ptid));
      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* This should catch the pending SIGSTOP.  */
      stop_wait_callback (lp, NULL);
    }

  set_sigint_trap ();		/* Causes SIGINT to be passed on to the
				   attached process.  */
  set_sigio_trap ();

  while (status == 0)
    {
      pid_t lwpid;

      lwpid = waitpid (pid, &status, options);
      if (lwpid > 0)
        {
          gdb_assert (pid == -1 || lwpid == pid);

          if (debug_linux_nat)
            {
              fprintf_unfiltered (gdb_stdlog,
                                  "LLW: waitpid %ld received %s\n",
                                  (long) lwpid, status_to_str (status));
            }

          lp = find_lwp_pid (pid_to_ptid (lwpid));

          /* Check for stop events reported by a process we didn't
             already know about - anything not already in our LWP
             list.

             If we're expecting to receive stopped processes after
             fork, vfork, and clone events, then we'll just add the
             new one to our list and go back to waiting for the event
             to be reported - the stopped process might be returned
             from waitpid before or after the event is.  */
          if (WIFSTOPPED (status) && !lp)
            {
              linux_record_stopped_pid (lwpid);
              status = 0;
              continue;
            }

          /* Make sure we don't report an event for the exit of an LWP not in
             our list, i.e. not part of the current process.  This can happen
             if we detach from a program we originally forked and then it
             exits.  */
          if (!WIFSTOPPED (status) && !lp)
            {
              status = 0;
              continue;
            }

          /* NOTE drow/2003-06-17: This code seems to be meant for debugging
             CLONE_PTRACE processes which do not use the thread library -
             otherwise we wouldn't find the new LWP this way.  That doesn't
             currently work, and the following code is currently unreachable
             due to the two blocks above.  If it's fixed some day, this code
             should be broken out into a function so that we can also pick up
             LWPs from the new interface.  */
          if (!lp)
            {
              lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
              if (options & __WCLONE)
                lp->cloned = 1;

              if (threaded)
                {
                  gdb_assert (WIFSTOPPED (status)
                              && WSTOPSIG (status) == SIGSTOP);
                  lp->signalled = 1;

                  if (!in_thread_list (inferior_ptid))
                    {
                      inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
                                                 GET_PID (inferior_ptid));
                      add_thread (inferior_ptid);
                    }

                  add_thread (lp->ptid);
                  printf_unfiltered ("[New %s]\n",
                                     target_pid_to_str (lp->ptid));
                }
            }

          /* Handle GNU/Linux's extended waitstatus for trace events.  */
          if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
              && status >> 16 != 0)
            {
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LLW: Handling extended status 0x%06x\n",
                                    status);
              if (linux_nat_handle_extended (lp, status))
                {
                  status = 0;
                  continue;
                }
            }

          /* Check if the thread has exited.  */
          if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
            {
              if (in_thread_list (lp->ptid))
                {
                  /* Core GDB cannot deal with us deleting the current
                     thread.  */
                  if (!ptid_equal (lp->ptid, inferior_ptid))
                    delete_thread (lp->ptid);
                  printf_unfiltered ("[%s exited]\n",
                                     target_pid_to_str (lp->ptid));
                }

              /* If this is the main thread, we must stop all threads and
                 verify if they are still alive.  This is because in the nptl
                 thread model, there is no signal issued for exiting LWPs
                 other than the main thread.  We only get the main thread
                 exit signal once all child threads have already exited.
                 If we stop all the threads and use the stop_wait_callback
                 to check if they have exited we can determine whether this
                 signal should be ignored or whether it means the end of the
                 debugged application, regardless of which threading model
                 is being used.  */
              if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
                {
                  lp->stopped = 1;
                  iterate_over_lwps (stop_and_resume_callback, NULL);
                }

              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LLW: %s exited.\n",
                                    target_pid_to_str (lp->ptid));

              delete_lwp (lp->ptid);

              /* If there is at least one more LWP, then the exit signal
                 was not the end of the debugged application and should be
                 ignored.  */
              if (num_lwps > 0)
                {
                  /* Make sure there is at least one thread running.  */
                  gdb_assert (iterate_over_lwps (running_callback, NULL));

                  /* Discard the event.  */
                  status = 0;
                  continue;
                }
            }


          /* Make sure we don't report a SIGSTOP that we sent
             ourselves in an attempt to stop an LWP.  */
          if (lp->signalled
              && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
            {
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LLW: Delayed SIGSTOP caught for %s.\n",
                                    target_pid_to_str (lp->ptid));

              /* This is a delayed SIGSTOP.  */
              lp->signalled = 0;

              registers_changed ();
              child_resume (pid_to_ptid (GET_LWP (lp->ptid)), lp->step,
                            TARGET_SIGNAL_0);
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
                                    lp->step ?
                                    "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                                    target_pid_to_str (lp->ptid));

              lp->stopped = 0;
              gdb_assert (lp->resumed);

              /* Discard the event.  */
              status = 0;
              continue;
            }

          break;
        }

      if (pid == -1)
        {
          /* Alternate between checking cloned and uncloned processes.  */
          options ^= __WCLONE;

          /* And suspend every time we have checked both.  */
          if (options & __WCLONE)
            sigsuspend (&suspend_mask);
        }

      /* We shouldn't end up here unless we want to try again.  */
      gdb_assert (status == 0);
    }

  clear_sigio_trap ();
  clear_sigint_trap ();

  gdb_assert (lp);

  /* Don't report signals that GDB isn't interested in, such as
     signals that are neither printed nor stopped upon.  Stopping all
     threads can be a bit time-consuming, so if we want decent
     performance with heavily multi-threaded programs, especially when
     they're using a high frequency timer, we'd better avoid it if we
     can.  */

  if (WIFSTOPPED (status))
    {
      int signo = target_signal_from_host (WSTOPSIG (status));

      if (signal_stop_state (signo) == 0
          && signal_print_state (signo) == 0
          && signal_pass_state (signo) == 1)
        {
          /* FIXME: kettenis/2001-06-06: Should we resume all threads
             here?  It is not clear we should.  GDB may not expect
             other threads to run.  On the other hand, not resuming
             newly attached threads may cause an unwanted delay in
             getting them running.  */
          registers_changed ();
          child_resume (pid_to_ptid (GET_LWP (lp->ptid)), lp->step, signo);
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "LLW: %s %s, %s (preempt 'handle')\n",
                                lp->step ?
                                "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                                target_pid_to_str (lp->ptid),
                                signo ? strsignal (signo) : "0");
          lp->stopped = 0;
          status = 0;
          goto retry;
        }

      if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
        {
          /* If ^C/BREAK is typed at the tty/console, SIGINT gets
             forwarded to the entire process group, that is, all LWP's
             will receive it.  Since we only want to report it once,
             we try to flush it from all LWPs except this one.  */
          sigaddset (&flush_mask, SIGINT);
        }
    }

  /* This LWP is stopped now.  */
  lp->stopped = 1;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
                        status_to_str (status), target_pid_to_str (lp->ptid));

  /* Now stop all other LWP's ...  */
  iterate_over_lwps (stop_callback, NULL);

  /* ... and wait until all of them have reported back that they're no
     longer running.  */
  iterate_over_lwps (stop_wait_callback, &flush_mask);
  iterate_over_lwps (flush_callback, &flush_mask);
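
  /* At this point FLUSH_MASK holds SIGINT if a ^C was noticed above,
     so flush_callback can discard the duplicate SIGINTs that were
     delivered to the other LWPs in the process group and that we do
     not want to report a second time.  */
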
  /* If we're not waiting for a specific LWP, choose an event LWP from
     among those that have had events.  Giving equal priority to all
     LWPs that have had events helps prevent starvation.  */
  if (pid == -1)
    select_event_lwp (&lp, &status);

  /* Now that we've selected our final event LWP, cancel any
     breakpoints in other LWPs that have hit a GDB breakpoint.  See
     the comment in cancel_breakpoints_callback to find out why.  */
  iterate_over_lwps (cancel_breakpoints_callback, lp);

  /* If we're not running in "threaded" mode, we'll report the bare
     process id.  */

  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    {
      trap_ptid = (threaded ? lp->ptid : pid_to_ptid (GET_LWP (lp->ptid)));
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: trap_ptid is %s.\n",
                            target_pid_to_str (trap_ptid));
    }
  else
    trap_ptid = null_ptid;

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      *ourstatus = lp->waitstatus;
      lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    store_waitstatus (ourstatus, status);

  return (threaded ? lp->ptid : pid_to_ptid (GET_LWP (lp->ptid)));
}
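
/* A rough sketch of the flow above, for the common two-LWP case: if
   LWP A reports a SIGTRAP for a breakpoint while LWP B is still
   running, waitpid returns A's event, stop_callback asks B to stop
   (with a SIGSTOP, which is why the delayed-SIGSTOP bookkeeping above
   exists), stop_wait_callback collects B's stop without reporting it,
   cancel_breakpoints_callback backs out any breakpoint B may have hit
   in the meantime, and A's SIGTRAP is what finally reaches the core
   of GDB through *ourstatus.  The callbacks themselves are defined
   earlier in this file.  */
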
static int
kill_callback (struct lwp_info *lp, void *data)
{
  errno = 0;
  ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
                        target_pid_to_str (lp->ptid),
                        errno ? safe_strerror (errno) : "OK");

  return 0;
}

static int
kill_wait_callback (struct lwp_info *lp, void *data)
{
  pid_t pid;

  /* We must make sure that there are no pending events (delayed
     SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
     program doesn't interfere with any following debugging session.  */

  /* For cloned processes we must check both with __WCLONE and
     without, since the exit status of a cloned process isn't reported
     with __WCLONE.  */
  if (lp->cloned)
    {
      do
        {
          pid = waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
          if (pid != (pid_t) -1 && debug_linux_nat)
            {
              fprintf_unfiltered (gdb_stdlog,
                                  "KWC: wait %s received unknown.\n",
                                  target_pid_to_str (lp->ptid));
            }
        }
      while (pid == GET_LWP (lp->ptid));

      gdb_assert (pid == -1 && errno == ECHILD);
    }

  do
    {
      pid = waitpid (GET_LWP (lp->ptid), NULL, 0);
      if (pid != (pid_t) -1 && debug_linux_nat)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "KWC: wait %s received unknown.\n",
                              target_pid_to_str (lp->ptid));
        }
    }
  while (pid == GET_LWP (lp->ptid));

  gdb_assert (pid == -1 && errno == ECHILD);
  return 0;
}

static void
linux_nat_kill (void)
{
  /* Kill all LWP's ...  */
  iterate_over_lwps (kill_callback, NULL);

  /* ... and wait until we've flushed all events.  */
  iterate_over_lwps (kill_wait_callback, NULL);

  target_mourn_inferior ();
}

static void
linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
                           int from_tty)
{
  deprecated_child_ops.to_create_inferior (exec_file, allargs, env, from_tty);
}

static void
linux_nat_mourn_inferior (void)
{
  trap_ptid = null_ptid;

  /* Destroy LWP info; it's no longer valid.  */
  init_lwp_list ();

  /* Restore the original signal mask.  */
  sigprocmask (SIG_SETMASK, &normal_mask, NULL);
  sigemptyset (&blocked_mask);

  deprecated_child_ops.to_mourn_inferior ();
}

static int
linux_nat_xfer_memory (CORE_ADDR memaddr, char *myaddr, int len, int write,
                       struct mem_attrib *attrib, struct target_ops *target)
{
  struct cleanup *old_chain = save_inferior_ptid ();
  int xfer;

  if (is_lwp (inferior_ptid))
    inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));

  /* Try the /proc/PID/mem interface first and fall back on the
     ptrace-based child target if it could not handle the transfer.  */
  xfer = linux_proc_xfer_memory (memaddr, myaddr, len, write, attrib, target);
  if (xfer == 0)
    xfer = child_xfer_memory (memaddr, myaddr, len, write, attrib, target);

  do_cleanups (old_chain);
  return xfer;
}

static int
linux_nat_thread_alive (ptid_t ptid)
{
  gdb_assert (is_lwp (ptid));

  errno = 0;
  ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
                        target_pid_to_str (ptid),
                        errno ? safe_strerror (errno) : "OK");
  if (errno)
    return 0;

  return 1;
}
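
/* The PTRACE_PEEKUSER call above is used purely as a liveness probe:
   its return value is ignored, and the LWP is considered gone as soon
   as the call fails (typically with ESRCH once the LWP has exited or
   been reaped).  */
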
static char *
linux_nat_pid_to_str (ptid_t ptid)
{
  static char buf[64];

  if (is_lwp (ptid))
    {
      snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
      return buf;
    }

  return normal_pid_to_str (ptid);
}

static void
init_linux_nat_ops (void)
{
#if 0
  linux_nat_ops.to_open = linux_nat_open;
#endif
  linux_nat_ops.to_shortname = "lwp-layer";
  linux_nat_ops.to_longname = "lwp-layer";
  linux_nat_ops.to_doc = "Low level threads support (LWP layer)";
  linux_nat_ops.to_attach = linux_nat_attach;
  linux_nat_ops.to_detach = linux_nat_detach;
  linux_nat_ops.to_resume = linux_nat_resume;
  linux_nat_ops.to_wait = linux_nat_wait;
  /* fetch_inferior_registers and store_inferior_registers will
     honor the LWP id, so we can use them directly.  */
  linux_nat_ops.to_fetch_registers = fetch_inferior_registers;
  linux_nat_ops.to_store_registers = store_inferior_registers;
  linux_nat_ops.deprecated_xfer_memory = linux_nat_xfer_memory;
  linux_nat_ops.to_kill = linux_nat_kill;
  linux_nat_ops.to_create_inferior = linux_nat_create_inferior;
  linux_nat_ops.to_mourn_inferior = linux_nat_mourn_inferior;
  linux_nat_ops.to_thread_alive = linux_nat_thread_alive;
  linux_nat_ops.to_pid_to_str = linux_nat_pid_to_str;
  linux_nat_ops.to_post_startup_inferior = child_post_startup_inferior;
  linux_nat_ops.to_post_attach = child_post_attach;
  linux_nat_ops.to_insert_fork_catchpoint = child_insert_fork_catchpoint;
  linux_nat_ops.to_insert_vfork_catchpoint = child_insert_vfork_catchpoint;
  linux_nat_ops.to_insert_exec_catchpoint = child_insert_exec_catchpoint;

  linux_nat_ops.to_stratum = thread_stratum;
  linux_nat_ops.to_has_thread_control = tc_schedlock;
  linux_nat_ops.to_magic = OPS_MAGIC;
}

static void
sigchld_handler (int signo)
{
  /* Do nothing.  The only reason for this handler is that it allows
     us to use sigsuspend in linux_nat_wait above to wait for the
     arrival of a SIGCHLD.  */
}

/* Accepts an integer PID; returns a string representing a file that
   can be opened to get the symbols for the child process.  */

char *
child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  make_cleanup (xfree, name1);
  make_cleanup (xfree, name2);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
    return name2;
  else
    return name1;
}

/* Service function for corefiles and info proc.  */

static int
read_mapping (FILE *mapfile,
              long long *addr,
              long long *endaddr,
              char *permissions,
              long long *offset,
              char *device, long long *inode, char *filename)
{
  int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
                    addr, endaddr, permissions, offset, device, inode);

  if (ret > 0 && ret != EOF && *inode != 0)
    {
      /* Eat everything up to EOL for the filename.  This will prevent
         weird filenames (such as one with embedded whitespace) from
         confusing this code.  It also makes this code more robust in
         respect to annotations the kernel may add after the filename.

         Note the filename is used for informational purposes
         only.  */
      ret += fscanf (mapfile, "%[^\n]\n", filename);
    }
  else
    {
      filename[0] = '\0';	/* no filename */
      fscanf (mapfile, "\n");
    }
  return (ret != 0 && ret != EOF);
}
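
/* For reference, a typical /proc/PID/maps line parsed by read_mapping
   above looks roughly like:

     08048000-0804c000 r-xp 00000000 03:01 12345      /bin/cat

   i.e. address range, permissions, file offset, device, inode and an
   optional file name.  Anonymous mappings have a zero inode and no
   name, which is why a filename is only read when the inode is
   non-zero.  */
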
/* Fills the "to_find_memory_regions" target vector.  Lists the memory
   regions in the inferior for a corefile.  */

static int
linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
                                            unsigned long,
                                            int, int, int, void *),
                               void *obfd)
{
  long long pid = PIDGET (inferior_ptid);
  char mapsfilename[MAXPATHLEN];
  FILE *mapsfile;
  long long addr, endaddr, size, offset, inode;
  char permissions[8], device[8], filename[MAXPATHLEN];
  int read, write, exec;
  int ret;

  /* Compose the filename for the /proc memory map, and open it.  */
  sprintf (mapsfilename, "/proc/%lld/maps", pid);
  if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
    error ("Could not open %s\n", mapsfilename);

  if (info_verbose)
    fprintf_filtered (gdb_stdout,
                      "Reading memory regions from %s\n", mapsfilename);

  /* Now iterate until end-of-file.  */
  while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
                       &offset, &device[0], &inode, &filename[0]))
    {
      size = endaddr - addr;

      /* Get the segment's permissions.  */
      read = (strchr (permissions, 'r') != 0);
      write = (strchr (permissions, 'w') != 0);
      exec = (strchr (permissions, 'x') != 0);

      if (info_verbose)
        {
          fprintf_filtered (gdb_stdout,
                            "Save segment, %lld bytes at 0x%s (%c%c%c)",
                            size, paddr_nz (addr),
                            read ? 'r' : ' ',
                            write ? 'w' : ' ', exec ? 'x' : ' ');
          if (filename && filename[0])
            fprintf_filtered (gdb_stdout, " for %s", filename);
          fprintf_filtered (gdb_stdout, "\n");
        }

      /* Invoke the callback function to create the corefile
         segment.  */
      func (addr, size, read, write, exec, obfd);
    }
  fclose (mapsfile);
  return 0;
}

/* Records the thread's register state for the corefile note
   section.  */

static char *
linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
                               char *note_data, int *note_size)
{
  gdb_gregset_t gregs;
  gdb_fpregset_t fpregs;
#ifdef FILL_FPXREGSET
  gdb_fpxregset_t fpxregs;
#endif
  unsigned long lwp = ptid_get_lwp (ptid);

  fill_gregset (&gregs, -1);
  note_data = (char *) elfcore_write_prstatus (obfd,
                                               note_data,
                                               note_size,
                                               lwp,
                                               stop_signal, &gregs);

  fill_fpregset (&fpregs, -1);
  note_data = (char *) elfcore_write_prfpreg (obfd,
                                              note_data,
                                              note_size,
                                              &fpregs, sizeof (fpregs));
#ifdef FILL_FPXREGSET
  fill_fpxregset (&fpxregs, -1);
  note_data = (char *) elfcore_write_prxfpreg (obfd,
                                               note_data,
                                               note_size,
                                               &fpxregs, sizeof (fpxregs));
#endif
  return note_data;
}

struct linux_nat_corefile_thread_data
{
  bfd *obfd;
  char *note_data;
  int *note_size;
  int num_notes;
};

/* Called once per LWP via iterate_over_lwps.  Records the thread's
   register state for the corefile note section.  */

static int
linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
{
  struct linux_nat_corefile_thread_data *args = data;
  ptid_t saved_ptid = inferior_ptid;

  inferior_ptid = ti->ptid;
  registers_changed ();
  target_fetch_registers (-1);	/* FIXME should not be necessary;
				   fill_gregset should do it automatically.  */
  args->note_data = linux_nat_do_thread_registers (args->obfd,
                                                   ti->ptid,
                                                   args->note_data,
                                                   args->note_size);
  args->num_notes++;
  inferior_ptid = saved_ptid;
  registers_changed ();
  target_fetch_registers (-1);	/* FIXME should not be necessary;
				   fill_gregset should do it automatically.  */
  return 0;
}
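
/* Taken together, the callback above and linux_nat_make_corefile_notes
   below produce a note section laid out roughly as: one NT_PRPSINFO
   note for the process, then an NT_PRSTATUS and NT_FPREGSET note per
   LWP (plus NT_PRXFPREG where FILL_FPXREGSET is defined), and finally
   an NT_AUXV note carrying the auxiliary vector.  */
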
/* Records the register state for the corefile note section.  */

static char *
linux_nat_do_registers (bfd *obfd, ptid_t ptid,
                        char *note_data, int *note_size)
{
  registers_changed ();
  target_fetch_registers (-1);	/* FIXME should not be necessary;
				   fill_gregset should do it automatically.  */
  return linux_nat_do_thread_registers (obfd,
                                        ptid_build (ptid_get_pid (inferior_ptid),
                                                    ptid_get_pid (inferior_ptid),
                                                    0),
                                        note_data, note_size);
}

/* Fills the "to_make_corefile_note" target vector.  Builds the note
   section for a corefile, and returns it in a malloc buffer.  */

static char *
linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
{
  struct linux_nat_corefile_thread_data thread_args;
  struct cleanup *old_chain;
  char fname[16] = { '\0' };
  char psargs[80] = { '\0' };
  char *note_data = NULL;
  ptid_t current_ptid = inferior_ptid;
  char *auxv;
  int auxv_len;

  if (get_exec_file (0))
    {
      strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
      strncpy (psargs, get_exec_file (0), sizeof (psargs));
      if (get_inferior_args ())
        {
          strncat (psargs, " ", sizeof (psargs) - strlen (psargs));
          strncat (psargs, get_inferior_args (),
                   sizeof (psargs) - strlen (psargs));
        }
      note_data = (char *) elfcore_write_prpsinfo (obfd,
                                                   note_data,
                                                   note_size, fname, psargs);
    }

  /* Dump information for threads.  */
  thread_args.obfd = obfd;
  thread_args.note_data = note_data;
  thread_args.note_size = note_size;
  thread_args.num_notes = 0;
  iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
  if (thread_args.num_notes == 0)
    {
      /* iterate_over_lwps didn't come up with any LWPs; just
         use inferior_ptid.  */
      note_data = linux_nat_do_registers (obfd, inferior_ptid,
                                          note_data, note_size);
    }
  else
    {
      note_data = thread_args.note_data;
    }

  auxv_len = target_auxv_read (&current_target, &auxv);
  if (auxv_len > 0)
    {
      note_data = elfcore_write_note (obfd, note_data, note_size,
                                      "CORE", NT_AUXV, auxv, auxv_len);
      xfree (auxv);
    }

  make_cleanup (xfree, note_data);
  return note_data;
}
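
/* Example usage of the command implemented below: plain "info proc"
   reports on the current inferior, while "info proc 1234 mappings stat"
   also dumps /proc/1234/maps and the fields of /proc/1234/stat.  The
   "mappings", "exe" and "all" keywords may be abbreviated; "cmd", "cwd"
   and "exe" are accepted but their output is printed by default
   anyway.  */
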
/* Implement the "info proc" command.  */

static void
linux_nat_info_proc_cmd (char *args, int from_tty)
{
  long long pid = PIDGET (inferior_ptid);
  FILE *procfile;
  char **argv = NULL;
  char buffer[MAXPATHLEN];
  char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
  int cmdline_f = 1;
  int cwd_f = 1;
  int exe_f = 1;
  int mappings_f = 0;
  int environ_f = 0;
  int status_f = 0;
  int stat_f = 0;
  int all = 0;
  struct stat dummy;

  if (args)
    {
      /* Break up 'args' into an argv array.  */
      if ((argv = buildargv (args)) == NULL)
        nomem (0);
      else
        make_cleanup_freeargv (argv);
    }
  while (argv != NULL && *argv != NULL)
    {
      if (isdigit (argv[0][0]))
        {
          pid = strtoul (argv[0], NULL, 10);
        }
      else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
        {
          mappings_f = 1;
        }
      else if (strcmp (argv[0], "status") == 0)
        {
          status_f = 1;
        }
      else if (strcmp (argv[0], "stat") == 0)
        {
          stat_f = 1;
        }
      else if (strcmp (argv[0], "cmd") == 0)
        {
          cmdline_f = 1;
        }
      else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
        {
          exe_f = 1;
        }
      else if (strcmp (argv[0], "cwd") == 0)
        {
          cwd_f = 1;
        }
      else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
        {
          all = 1;
        }
      else
        {
          /* [...] (future options here) */
        }
      argv++;
    }
  if (pid == 0)
    error ("No current process: you must name one.");

  sprintf (fname1, "/proc/%lld", pid);
  if (stat (fname1, &dummy) != 0)
    error ("No /proc directory: '%s'", fname1);

  printf_filtered ("process %lld\n", pid);
  if (cmdline_f || all)
    {
      sprintf (fname1, "/proc/%lld/cmdline", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
        {
          fgets (buffer, sizeof (buffer), procfile);
          printf_filtered ("cmdline = '%s'\n", buffer);
          fclose (procfile);
        }
      else
        warning ("unable to open /proc file '%s'", fname1);
    }
  if (cwd_f || all)
    {
      sprintf (fname1, "/proc/%lld/cwd", pid);
      memset (fname2, 0, sizeof (fname2));
      if (readlink (fname1, fname2, sizeof (fname2)) > 0)
        printf_filtered ("cwd = '%s'\n", fname2);
      else
        warning ("unable to read link '%s'", fname1);
    }
  if (exe_f || all)
    {
      sprintf (fname1, "/proc/%lld/exe", pid);
      memset (fname2, 0, sizeof (fname2));
      if (readlink (fname1, fname2, sizeof (fname2)) > 0)
        printf_filtered ("exe = '%s'\n", fname2);
      else
        warning ("unable to read link '%s'", fname1);
    }
  if (mappings_f || all)
    {
      sprintf (fname1, "/proc/%lld/maps", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
        {
          long long addr, endaddr, size, offset, inode;
          char permissions[8], device[8], filename[MAXPATHLEN];

          printf_filtered ("Mapped address spaces:\n\n");
          if (TARGET_ADDR_BIT == 32)
            {
              printf_filtered ("\t%10s %10s %10s %10s %7s\n",
                               "Start Addr",
                               " End Addr",
                               " Size", " Offset", "objfile");
            }
          else
            {
              printf_filtered ("  %18s %18s %10s %10s %7s\n",
                               "Start Addr",
                               " End Addr",
                               " Size", " Offset", "objfile");
            }

          while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
                               &offset, &device[0], &inode, &filename[0]))
            {
              size = endaddr - addr;

              /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
                 calls here (and possibly above) should be abstracted
                 out into their own functions?  Andrew suggests using
                 a generic local_address_string instead to print out
                 the addresses; that makes sense to me, too.  */

              if (TARGET_ADDR_BIT == 32)
                {
                  printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
                                   (unsigned long) addr,	/* FIXME: pr_addr */
                                   (unsigned long) endaddr,
                                   (int) size,
                                   (unsigned int) offset,
                                   filename[0] ? filename : "");
                }
              else
                {
                  printf_filtered ("  %#18lx %#18lx %#10x %#10x %7s\n",
                                   (unsigned long) addr,	/* FIXME: pr_addr */
                                   (unsigned long) endaddr,
                                   (int) size,
                                   (unsigned int) offset,
                                   filename[0] ? filename : "");
                }
            }

          fclose (procfile);
        }
      else
        warning ("unable to open /proc file '%s'", fname1);
    }
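
  /* The loop above prints one line per mapping, e.g. (32-bit case,
     with wider address columns on 64-bit targets):

	 0x8048000  0x804c000     0x4000        0x0 /bin/cat  */
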
  if (status_f || all)
    {
      sprintf (fname1, "/proc/%lld/status", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
        {
          while (fgets (buffer, sizeof (buffer), procfile) != NULL)
            puts_filtered (buffer);
          fclose (procfile);
        }
      else
        warning ("unable to open /proc file '%s'", fname1);
    }
  if (stat_f || all)
    {
      sprintf (fname1, "/proc/%lld/stat", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
        {
          int itmp;
          char ctmp;

          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("Process: %d\n", itmp);
          if (fscanf (procfile, "%s ", &buffer[0]) > 0)
            printf_filtered ("Exec file: %s\n", buffer);
          if (fscanf (procfile, "%c ", &ctmp) > 0)
            printf_filtered ("State: %c\n", ctmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("Parent process: %d\n", itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("Process group: %d\n", itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("Session id: %d\n", itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("TTY: %d\n", itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("TTY owner process group: %d\n", itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)
            printf_filtered ("Flags: 0x%x\n", itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)
            printf_filtered ("Minor faults (no memory page): %u\n",
                             (unsigned int) itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)
            printf_filtered ("Minor faults, children: %u\n",
                             (unsigned int) itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)
            printf_filtered ("Major faults (memory page faults): %u\n",
                             (unsigned int) itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)
            printf_filtered ("Major faults, children: %u\n",
                             (unsigned int) itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("utime: %d\n", itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("stime: %d\n", itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("utime, children: %d\n", itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("stime, children: %d\n", itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("jiffies remaining in current time slice: %d\n",
                             itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("'nice' value: %d\n", itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)
            printf_filtered ("jiffies until next timeout: %u\n",
                             (unsigned int) itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)
            printf_filtered ("jiffies until next SIGALRM: %u\n",
                             (unsigned int) itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("start time (jiffies since system boot): %d\n",
                             itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)
            printf_filtered ("Virtual memory size: %u\n",
                             (unsigned int) itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)
            printf_filtered ("Resident set size: %u\n", (unsigned int) itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)
            printf_filtered ("rlim: %u\n", (unsigned int) itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)
            printf_filtered ("Start of text: 0x%x\n", itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)
            printf_filtered ("End of text: 0x%x\n", itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)
            printf_filtered ("Start of stack: 0x%x\n", itmp);
#if 0				/* Don't know how architecture-dependent the rest is...
				   Anyway the signal bitmap info is available from "status".  */
          if (fscanf (procfile, "%u ", &itmp) > 0)	/* FIXME arch?  */
            printf_filtered ("Kernel stack pointer: 0x%x\n", itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)	/* FIXME arch?  */
            printf_filtered ("Kernel instr pointer: 0x%x\n", itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("Pending signals bitmap: 0x%x\n", itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("Blocked signals bitmap: 0x%x\n", itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("Ignored signals bitmap: 0x%x\n", itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered ("Caught signals bitmap: 0x%x\n", itmp);
          if (fscanf (procfile, "%u ", &itmp) > 0)	/* FIXME arch?  */
            printf_filtered ("wchan (system call): 0x%x\n", itmp);
#endif
          fclose (procfile);
        }
      else
        warning ("unable to open /proc file '%s'", fname1);
    }
}
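
/* The "stat" output above roughly follows the /proc/PID/stat field
   order described in proc(5); the labels reflect older 2.4-era
   kernels.  The various time fields are printed as raw jiffies (clock
   ticks); dividing by sysconf (_SC_CLK_TCK) converts them to
   seconds.  */
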
int
linux_proc_xfer_memory (CORE_ADDR addr, char *myaddr, int len, int write,
                        struct mem_attrib *attrib, struct target_ops *target)
{
  int fd, ret;
  char filename[64];

  if (write)
    return 0;

  /* Don't bother for small transfers of just a word or two.  */
  if (len < 3 * sizeof (long))
    return 0;

  /* We could keep this file open and cache it - possibly one per
     thread.  That requires some juggling, but is even faster.  */
  sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return 0;

  /* If pread64 is available, use it.  It's faster if the kernel
     supports it (only one syscall), and it's 64-bit safe even on
     32-bit platforms (for instance, SPARC debugging a SPARC64
     application).  */
#ifdef HAVE_PREAD64
  if (pread64 (fd, myaddr, len, addr) != len)
#else
  if (lseek (fd, addr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
#endif
    ret = 0;
  else
    ret = len;

  close (fd);
  return ret;
}

/* Parse LINE as a signal set and add its set bits to SIGS.  */

static void
add_line_to_sigset (const char *line, sigset_t *sigs)
{
  int len = strlen (line) - 1;
  const char *p;
  int signum;

  if (line[len] != '\n')
    error ("Could not parse signal set: %s", line);

  p = line;
  signum = len * 4;
  while (len-- > 0)
    {
      int digit;

      if (*p >= '0' && *p <= '9')
        digit = *p - '0';
      else if (*p >= 'a' && *p <= 'f')
        digit = *p - 'a' + 10;
      else
        error ("Could not parse signal set: %s", line);

      signum -= 4;

      if (digit & 1)
        sigaddset (sigs, signum + 1);
      if (digit & 2)
        sigaddset (sigs, signum + 2);
      if (digit & 4)
        sigaddset (sigs, signum + 3);
      if (digit & 8)
        sigaddset (sigs, signum + 4);

      p++;
    }
}
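
/* For reference, the lines fed to add_line_to_sigset look roughly
   like:

     SigPnd:	0000000000000000
     SigBlk:	0000000000010000
     SigIgn:	0000000001001206

   Each hex digit covers four signals, with the rightmost digit
   describing signals 1-4, which is why the loop above walks the
   string from the most significant digit down.  */
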
/* Find process PID's pending signals from /proc/pid/status and set
   SIGS to match.  */

void
linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked,
                            sigset_t *ignored)
{
  FILE *procfile;
  char buffer[MAXPATHLEN], fname[MAXPATHLEN];
  int signum;

  sigemptyset (pending);
  sigemptyset (blocked);
  sigemptyset (ignored);
  sprintf (fname, "/proc/%d/status", pid);
  procfile = fopen (fname, "r");
  if (procfile == NULL)
    error ("Could not open %s", fname);

  while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
    {
      /* Normal queued signals are on the SigPnd line in the status
         file.  However, 2.6 kernels also have a "shared" pending
         queue for delivering signals to a thread group, so check for
         a ShdPnd line also.

         Unfortunately some Red Hat kernels include the shared pending
         queue but not the ShdPnd status field.  */

      if (strncmp (buffer, "SigPnd:\t", 8) == 0)
        add_line_to_sigset (buffer + 8, pending);
      else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
        add_line_to_sigset (buffer + 8, pending);
      else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
        add_line_to_sigset (buffer + 8, blocked);
      else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
        add_line_to_sigset (buffer + 8, ignored);
    }

  fclose (procfile);
}

void
_initialize_linux_nat (void)
{
  struct sigaction action;
  extern void thread_db_init (struct target_ops *);

  deprecated_child_ops.to_find_memory_regions = linux_nat_find_memory_regions;
  deprecated_child_ops.to_make_corefile_notes = linux_nat_make_corefile_notes;

  add_info ("proc", linux_nat_info_proc_cmd,
            "Show /proc process information about any running process.\n\
Specify any process id, or use the program being debugged by default.\n\
Specify any of the following keywords for detailed info:\n\
mappings -- list of mapped memory regions.\n\
stat -- list process information from /proc/PID/stat.\n\
status -- list process information from /proc/PID/status.\n\
all -- list all available /proc info.");

  init_linux_nat_ops ();
  add_target (&linux_nat_ops);
  thread_db_init (&linux_nat_ops);

  /* Save the original signal mask.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = 0;
  sigaction (SIGCHLD, &action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);

  deprecated_add_show_from_set
    (add_set_cmd ("lin-lwp", no_class, var_zinteger,
                  (char *) &debug_linux_nat,
                  "Set debugging of GNU/Linux lwp module.\n\
Enables printf debugging output.\n", &setdebuglist), &showdebuglist);
}
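
/* The debug messages scattered through this file (the "LLW:", "KC:",
   "KWC:" and "LLTA:" output above) are enabled at run time with
   "set debug lin-lwp 1", registered above on the setdebuglist command
   list.  */
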
/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
   the GNU/Linux Threads library and therefore doesn't really belong
   here.  */

/* Read variable NAME in the target and return its value if found.
   Otherwise return zero.  It is assumed that the type of the variable
   is `int'.  */

static int
get_signo (const char *name)
{
  struct minimal_symbol *ms;
  int signo;

  ms = lookup_minimal_symbol (name, NULL, NULL);
  if (ms == NULL)
    return 0;

  if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (char *) &signo,
                          sizeof (signo)) != 0)
    return 0;

  return signo;
}

/* Return the set of signals used by the threads library in *SET.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;

  sigemptyset (set);

  restart = get_signo ("__pthread_sig_restart");
  if (restart == 0)
    return;

  cancel = get_signo ("__pthread_sig_cancel");
  if (cancel == 0)
    return;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = 0;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}
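
/* For what it's worth, glibc's LinuxThreads typically uses SIGUSR1 and
   SIGUSR2 for __pthread_sig_restart and __pthread_sig_cancel on
   kernels without real-time signals, and the first two real-time
   signals otherwise; nothing above depends on the exact values, since
   they are always read out of the inferior.  */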