1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2 
3    Copyright (C) 1990-1996, 1998-2001, 2003-2012 Free Software
4    Foundation, Inc.
5 
6    This file is part of GDB.
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the License, or
11    (at your option) any later version.
12 
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
20 
21 #include "defs.h"
22 
23 #include "elf/external.h"
24 #include "elf/common.h"
25 #include "elf/mips.h"
26 
27 #include "symtab.h"
28 #include "bfd.h"
29 #include "symfile.h"
30 #include "objfiles.h"
31 #include "gdbcore.h"
32 #include "target.h"
33 #include "inferior.h"
34 #include "regcache.h"
35 #include "gdbthread.h"
36 #include "observer.h"
37 
38 #include "gdb_assert.h"
39 
40 #include "solist.h"
41 #include "solib.h"
42 #include "solib-svr4.h"
43 
44 #include "bfd-target.h"
45 #include "elf-bfd.h"
46 #include "exec.h"
47 #include "auxv.h"
48 #include "exceptions.h"
49 
50 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
51 static int svr4_have_link_map_offsets (void);
52 static void svr4_relocate_main_executable (void);
53 
54 /* Link map info to include in an allocated so_list entry.  */
55 
56 struct lm_info
57   {
58     /* Amount by which addresses in the binary should be relocated to
59        match the inferior.  The direct inferior value is L_ADDR_INFERIOR.
60        When prelinking is involved and the prelink base address changes,
61        we may need a different offset - the recomputed offset is in L_ADDR.
62        It is commonly the same value.  It is cached as we want to warn about
63        the difference and compute it only once.  L_ADDR is valid
64        iff L_ADDR_P.  */
65     CORE_ADDR l_addr, l_addr_inferior;
66     unsigned int l_addr_p : 1;
67 
68     /* The target location of lm.  */
69     CORE_ADDR lm_addr;
70 
71     /* Values read in from inferior's fields of the same name.  */
72     CORE_ADDR l_ld, l_next, l_prev, l_name;
73   };
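
/* For reference, the fields above mirror the SVR4/glibc `struct link_map'
   declared in <link.h>, which on typical systems looks roughly like the
   sketch below.  This is only an illustration; the layout GDB actually
   uses is whatever svr4_fetch_link_map_offsets describes.

       struct link_map
         {
           ElfW(Addr) l_addr;                  load displacement of the object
           char *l_name;                       absolute pathname of the object
           ElfW(Dyn) *l_ld;                    the object's dynamic section
           struct link_map *l_next, *l_prev;   chain of loaded objects
         };  */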
74 
75 /* On SVR4 systems, a list of symbols in the dynamic linker where
76    GDB can try to place a breakpoint to monitor shared library
77    events.
78 
79    If none of these symbols are found, or other errors occur, then
80    SVR4 systems will fall back to using a symbol as the "startup
81    mapping complete" breakpoint address.  */
82 
83 static const char * const solib_break_names[] =
84 {
85   "r_debug_state",
86   "_r_debug_state",
87   "_dl_debug_state",
88   "rtld_db_dlactivity",
89   "__dl_rtld_db_dlactivity",
90   "_rtld_debug_state",
91 
92   NULL
93 };
94 
95 static const char * const bkpt_names[] =
96 {
97   "_start",
98   "__start",
99   "main",
100   NULL
101 };
102 
103 static const char * const main_name_list[] =
104 {
105   "main_$main",
106   NULL
107 };
108 
109 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
110    the same shared library.  */
111 
112 static int
113 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
114 {
115   if (strcmp (gdb_so_name, inferior_so_name) == 0)
116     return 1;
117 
118   /* On Solaris, when starting the inferior we think that the dynamic
119      linker is /usr/lib/ld.so.1, but later on the table of loaded shared
120      libraries contains /lib/ld.so.1.  Sometimes one file is a link to the
121      other, and sometimes they have identical content but are not linked
122      to each other.  We don't restrict this check to Solaris, but the
123      chances of running into this situation elsewhere are very low.  */
124   if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
125       && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
126     return 1;
127 
128   /* Similarly, we observed the same issue with sparc64, but with
129      different locations.  */
130   if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
131       && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
132     return 1;
133 
134   return 0;
135 }
136 
137 static int
138 svr4_same (struct so_list *gdb, struct so_list *inferior)
139 {
140   return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
141 }
142 
143 static struct lm_info *
144 lm_info_read (CORE_ADDR lm_addr)
145 {
146   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
147   gdb_byte *lm;
148   struct lm_info *lm_info;
149   struct cleanup *back_to;
150 
151   lm = xmalloc (lmo->link_map_size);
152   back_to = make_cleanup (xfree, lm);
153 
154   if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
155     {
156       warning (_("Error reading shared library list entry at %s"),
157 	       paddress (target_gdbarch, lm_addr));
158       lm_info = NULL;
159     }
160   else
161     {
162       struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
163 
164       lm_info = xzalloc (sizeof (*lm_info));
165       lm_info->lm_addr = lm_addr;
166 
167       lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
168 							ptr_type);
169       lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
170       lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
171 					       ptr_type);
172       lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
173 					       ptr_type);
174       lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
175 					       ptr_type);
176     }
177 
178   do_cleanups (back_to);
179 
180   return lm_info;
181 }
182 
183 static int
184 has_lm_dynamic_from_link_map (void)
185 {
186   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
187 
188   return lmo->l_ld_offset >= 0;
189 }
190 
191 static CORE_ADDR
192 lm_addr_check (struct so_list *so, bfd *abfd)
193 {
194   if (!so->lm_info->l_addr_p)
195     {
196       struct bfd_section *dyninfo_sect;
197       CORE_ADDR l_addr, l_dynaddr, dynaddr;
198 
199       l_addr = so->lm_info->l_addr_inferior;
200 
201       if (! abfd || ! has_lm_dynamic_from_link_map ())
202 	goto set_addr;
203 
204       l_dynaddr = so->lm_info->l_ld;
205 
206       dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
207       if (dyninfo_sect == NULL)
208 	goto set_addr;
209 
210       dynaddr = bfd_section_vma (abfd, dyninfo_sect);
211 
212       if (dynaddr + l_addr != l_dynaddr)
213 	{
214 	  CORE_ADDR align = 0x1000;
215 	  CORE_ADDR minpagesize = align;
216 
217 	  if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
218 	    {
219 	      Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
220 	      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
221 	      int i;
222 
223 	      align = 1;
224 
225 	      for (i = 0; i < ehdr->e_phnum; i++)
226 		if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
227 		  align = phdr[i].p_align;
228 
229 	      minpagesize = get_elf_backend_data (abfd)->minpagesize;
230 	    }
231 
232 	  /* Turn it into a mask.  */
233 	  align--;
234 
235 	  /* If the changes match the alignment requirements, we
236 	     assume we're using a core file that was generated by the
237 	     same binary, just prelinked with a different base offset.
238 	     If it doesn't match, we may have a different binary, the
239 	     same binary with the dynamic table loaded at an unrelated
240 	     location, or anything, really.  To avoid regressions,
241 	     don't adjust the base offset in the latter case, although
242 	     odds are that, if things really changed, debugging won't
243 	     quite work.
244 
245 	     One might rather expect the condition
246 	       ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
247 	     but the check below is relaxed for PPC.  The PPC kernel supports
248 	     either 4k or 64k page sizes.  To be prepared for 64k pages,
249 	     PPC ELF files are built using an alignment requirement of 64k.
250 	     However, when running on a kernel supporting 4k pages, the memory
251 	     mapping of the library may not actually happen on a 64k boundary!
252 
253 	     (In the usual case where (l_addr & align) == 0, this check is
254 	     equivalent to the possibly expected check above.)
255 
256 	     Even on PPC it must be zero-aligned at least for MINPAGESIZE.  */
257 
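	  /* A made-up illustration of the computation below: if the file's
	     .dynamic section has vma 0x1f0d28 (DYNADDR) and the inferior
	     reports l_ld == 0x7ffff7dd5d28 (L_DYNADDR), the recomputed
	     displacement is 0x7ffff7dd5d28 - 0x1f0d28 == 0x7ffff7be5000,
	     which is page aligned and so passes the check below.  */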
258 	  l_addr = l_dynaddr - dynaddr;
259 
260 	  if ((l_addr & (minpagesize - 1)) == 0
261 	      && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
262 	    {
263 	      if (info_verbose)
264 		printf_unfiltered (_("Using PIC (Position Independent Code) "
265 				     "prelink displacement %s for \"%s\".\n"),
266 				   paddress (target_gdbarch, l_addr),
267 				   so->so_name);
268 	    }
269 	  else
270 	    {
271 	      /* There is no way to verify that the library file matches.  While
272 		 prelinking an unprelinked file (or unprelinking a prelinked
273 		 file), prelink can shift the DYNAMIC segment by an arbitrary
274 		 offset without any page size alignment.  There is also no way
275 		 to read the ELF header and/or Program Headers of the in-memory
276 		 image for even a limited check that they match; at best one
277 		 could verify the DYNAMIC segment itself.  Still, the address
278 		 found here is the best GDB could do.  */
279 
280 	      warning (_(".dynamic section for \"%s\" "
281 			 "is not at the expected address "
282 			 "(wrong library or version mismatch?)"), so->so_name);
283 	    }
284 	}
285 
286     set_addr:
287       so->lm_info->l_addr = l_addr;
288       so->lm_info->l_addr_p = 1;
289     }
290 
291   return so->lm_info->l_addr;
292 }
293 
294 /* Per pspace SVR4 specific data.  */
295 
296 struct svr4_info
297 {
298   CORE_ADDR debug_base;	/* Base of dynamic linker structures.  */
299 
300   /* Validity flag for debug_loader_offset.  */
301   int debug_loader_offset_p;
302 
303   /* Load address for the dynamic linker, inferred.  */
304   CORE_ADDR debug_loader_offset;
305 
306   /* Name of the dynamic linker, valid if debug_loader_offset_p.  */
307   char *debug_loader_name;
308 
309   /* Load map address for the main executable.  */
310   CORE_ADDR main_lm_addr;
311 
312   CORE_ADDR interp_text_sect_low;
313   CORE_ADDR interp_text_sect_high;
314   CORE_ADDR interp_plt_sect_low;
315   CORE_ADDR interp_plt_sect_high;
316 };
317 
318 /* Per-program-space data key.  */
319 static const struct program_space_data *solib_svr4_pspace_data;
320 
321 static void
322 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
323 {
324   struct svr4_info *info;
325 
326   info = program_space_data (pspace, solib_svr4_pspace_data);
327   xfree (info);
328 }
329 
330 /* Get the current svr4 data.  If none is found yet, add it now.  This
331    function always returns a valid object.  */
332 
333 static struct svr4_info *
334 get_svr4_info (void)
335 {
336   struct svr4_info *info;
337 
338   info = program_space_data (current_program_space, solib_svr4_pspace_data);
339   if (info != NULL)
340     return info;
341 
342   info = XZALLOC (struct svr4_info);
343   set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
344   return info;
345 }
346 
347 /* Local function prototypes */
348 
349 static int match_main (const char *);
350 
351 /* Read program header TYPE from inferior memory.  The header is found
352    by scanning the OS auxiliary vector.
353 
354    If TYPE == -1, return the program headers instead of the contents of
355    one program header.
356 
357    Return a pointer to allocated memory holding the program header contents,
358    or NULL on failure.  If successful, and unless P_SECT_SIZE is NULL, the
359    size of those contents is returned in P_SECT_SIZE.  Likewise, the target
360    architecture size (32-bit or 64-bit) is returned in P_ARCH_SIZE.  */
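
/* A note on the auxiliary vector entries used below (constants from
   <elf.h>): AT_PHDR is the run-time address of the program headers,
   AT_PHENT the size of one header, and AT_PHNUM their count.  For PIE
   executables the load bias can be recovered as AT_PHDR minus the
   p_vaddr recorded in the file's PT_PHDR entry; that difference is the
   relocation applied to SECT_ADDR further down.  */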
361 
362 static gdb_byte *
363 read_program_header (int type, int *p_sect_size, int *p_arch_size)
364 {
365   enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
366   CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
367   int arch_size, sect_size;
368   CORE_ADDR sect_addr;
369   gdb_byte *buf;
370   int pt_phdr_p = 0;
371 
372   /* Get required auxv elements from target.  */
373   if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
374     return 0;
375   if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
376     return 0;
377   if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
378     return 0;
379   if (!at_phdr || !at_phnum)
380     return 0;
381 
382   /* Determine ELF architecture type.  */
383   if (at_phent == sizeof (Elf32_External_Phdr))
384     arch_size = 32;
385   else if (at_phent == sizeof (Elf64_External_Phdr))
386     arch_size = 64;
387   else
388     return 0;
389 
390   /* Find the requested segment.  */
391   if (type == -1)
392     {
393       sect_addr = at_phdr;
394       sect_size = at_phent * at_phnum;
395     }
396   else if (arch_size == 32)
397     {
398       Elf32_External_Phdr phdr;
399       int i;
400 
401       /* Search for requested PHDR.  */
402       for (i = 0; i < at_phnum; i++)
403 	{
404 	  int p_type;
405 
406 	  if (target_read_memory (at_phdr + i * sizeof (phdr),
407 				  (gdb_byte *)&phdr, sizeof (phdr)))
408 	    return 0;
409 
410 	  p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
411 					     4, byte_order);
412 
413 	  if (p_type == PT_PHDR)
414 	    {
415 	      pt_phdr_p = 1;
416 	      pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
417 						  4, byte_order);
418 	    }
419 
420 	  if (p_type == type)
421 	    break;
422 	}
423 
424       if (i == at_phnum)
425 	return 0;
426 
427       /* Retrieve address and size.  */
428       sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
429 					    4, byte_order);
430       sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
431 					    4, byte_order);
432     }
433   else
434     {
435       Elf64_External_Phdr phdr;
436       int i;
437 
438       /* Search for requested PHDR.  */
439       for (i = 0; i < at_phnum; i++)
440 	{
441 	  int p_type;
442 
443 	  if (target_read_memory (at_phdr + i * sizeof (phdr),
444 				  (gdb_byte *)&phdr, sizeof (phdr)))
445 	    return 0;
446 
447 	  p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
448 					     4, byte_order);
449 
450 	  if (p_type == PT_PHDR)
451 	    {
452 	      pt_phdr_p = 1;
453 	      pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
454 						  8, byte_order);
455 	    }
456 
457 	  if (p_type == type)
458 	    break;
459 	}
460 
461       if (i == at_phnum)
462 	return 0;
463 
464       /* Retrieve address and size.  */
465       sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
466 					    8, byte_order);
467       sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
468 					    8, byte_order);
469     }
470 
471   /* PT_PHDR is optional, but we really need it
472      for PIE to make this work in general.  */
473 
474   if (pt_phdr_p)
475     {
476       /* AT_PHDR is the real address in memory, while PT_PHDR records what
477 	 the file says it should be; the relocation offset is the difference.  */
478       sect_addr = sect_addr + (at_phdr - pt_phdr);
479     }
480 
481   /* Read in requested program header.  */
482   buf = xmalloc (sect_size);
483   if (target_read_memory (sect_addr, buf, sect_size))
484     {
485       xfree (buf);
486       return NULL;
487     }
488 
489   if (p_arch_size)
490     *p_arch_size = arch_size;
491   if (p_sect_size)
492     *p_sect_size = sect_size;
493 
494   return buf;
495 }
496 
497 
498 /* Return program interpreter string.  */
499 static gdb_byte *
500 find_program_interpreter (void)
501 {
502   gdb_byte *buf = NULL;
503 
504   /* If we have an exec_bfd, use its section table.  */
505   if (exec_bfd
506       && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
507    {
508      struct bfd_section *interp_sect;
509 
510      interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
511      if (interp_sect != NULL)
512       {
513 	int sect_size = bfd_section_size (exec_bfd, interp_sect);
514 
515 	buf = xmalloc (sect_size);
516 	bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
517       }
518    }
519 
520   /* If we didn't find it, use the target auxiliary vector.  */
521   if (!buf)
522     buf = read_program_header (PT_INTERP, NULL, NULL);
523 
524   return buf;
525 }
526 
527 
528 /* Scan for DYNTAG in .dynamic section of ABFD.  If DYNTAG is found, 1 is
529    returned and the corresponding PTR is set.  */
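
/* For reference, each record scanned below is an ElfNN_Dyn entry, which
   in its native (byte-swapped) form is roughly

       typedef struct
         {
           ElfNN_Sxword d_tag;          entry type, e.g. DT_DEBUG, DT_NULL
           union
             {
               ElfNN_Xword d_val;
               ElfNN_Addr d_ptr;
             } d_un;
         } ElfNN_Dyn;

   This is only a sketch; the code below reads the external
   Elf32_External_Dyn/Elf64_External_Dyn forms.  d_un follows d_tag one
   word later, which is why the runtime value is re-read from
   DYN_ADDR + entry-offset + ARCH_SIZE / 8.  */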
530 
531 static int
532 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
533 {
534   int arch_size, step, sect_size;
535   long dyn_tag;
536   CORE_ADDR dyn_ptr, dyn_addr;
537   gdb_byte *bufend, *bufstart, *buf;
538   Elf32_External_Dyn *x_dynp_32;
539   Elf64_External_Dyn *x_dynp_64;
540   struct bfd_section *sect;
541   struct target_section *target_section;
542 
543   if (abfd == NULL)
544     return 0;
545 
546   if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
547     return 0;
548 
549   arch_size = bfd_get_arch_size (abfd);
550   if (arch_size == -1)
551     return 0;
552 
553   /* Find the start address of the .dynamic section.  */
554   sect = bfd_get_section_by_name (abfd, ".dynamic");
555   if (sect == NULL)
556     return 0;
557 
558   for (target_section = current_target_sections->sections;
559        target_section < current_target_sections->sections_end;
560        target_section++)
561     if (sect == target_section->the_bfd_section)
562       break;
563   if (target_section < current_target_sections->sections_end)
564     dyn_addr = target_section->addr;
565   else
566     {
567       /* ABFD may come from OBJFILE acting only as a symbol file without being
568 	 loaded into the target (see add_symbol_file_command).  In that case,
569 	 fall back to the file VMA address, without the possibility of having
570 	 the section relocated to its actual in-memory address.  */
571 
572       dyn_addr = bfd_section_vma (abfd, sect);
573     }
574 
575   /* Read in .dynamic from the BFD.  We will get the actual value
576      from memory later.  */
577   sect_size = bfd_section_size (abfd, sect);
578   buf = bufstart = alloca (sect_size);
579   if (!bfd_get_section_contents (abfd, sect,
580 				 buf, 0, sect_size))
581     return 0;
582 
583   /* Iterate over BUF and scan for DYNTAG.  If found, set PTR and return.  */
584   step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
585 			   : sizeof (Elf64_External_Dyn);
586   for (bufend = buf + sect_size;
587        buf < bufend;
588        buf += step)
589   {
590     if (arch_size == 32)
591       {
592 	x_dynp_32 = (Elf32_External_Dyn *) buf;
593 	dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
594 	dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
595       }
596     else
597       {
598 	x_dynp_64 = (Elf64_External_Dyn *) buf;
599 	dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
600 	dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
601       }
602      if (dyn_tag == DT_NULL)
603        return 0;
604      if (dyn_tag == dyntag)
605        {
606 	 /* If requested, try to read the runtime value of this .dynamic
607 	    entry.  */
608 	 if (ptr)
609 	   {
610 	     struct type *ptr_type;
611 	     gdb_byte ptr_buf[8];
612 	     CORE_ADDR ptr_addr;
613 
614 	     ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
615 	     ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
616 	     if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
617 	       dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
618 	     *ptr = dyn_ptr;
619 	   }
620 	 return 1;
621        }
622   }
623 
624   return 0;
625 }
626 
627 /* Scan for DYNTAG in .dynamic section of the target's main executable,
628    found by consulting the OS auxiliary vector.  If DYNTAG is found, 1 is
629    returned and the corresponding PTR is set.  */
630 
631 static int
632 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
633 {
634   enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
635   int sect_size, arch_size, step;
636   long dyn_tag;
637   CORE_ADDR dyn_ptr;
638   gdb_byte *bufend, *bufstart, *buf;
639 
640   /* Read in .dynamic section.  */
641   buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
642   if (!buf)
643     return 0;
644 
645   /* Iterate over BUF and scan for DYNTAG.  If found, set PTR and return.  */
646   step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
647 			   : sizeof (Elf64_External_Dyn);
648   for (bufend = buf + sect_size;
649        buf < bufend;
650        buf += step)
651   {
652     if (arch_size == 32)
653       {
654 	Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
655 
656 	dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
657 					    4, byte_order);
658 	dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
659 					    4, byte_order);
660       }
661     else
662       {
663 	Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
664 
665 	dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
666 					    8, byte_order);
667 	dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
668 					    8, byte_order);
669       }
670     if (dyn_tag == DT_NULL)
671       break;
672 
673     if (dyn_tag == dyntag)
674       {
675 	if (ptr)
676 	  *ptr = dyn_ptr;
677 
678 	xfree (bufstart);
679 	return 1;
680       }
681   }
682 
683   xfree (bufstart);
684   return 0;
685 }
686 
687 /* Locate the base address of dynamic linker structs for SVR4 elf
688    targets.
689 
690    For SVR4 elf targets the address of the dynamic linker's runtime
691    structure is contained within the dynamic info section in the
692    executable file.  The dynamic section is also mapped into the
693    inferior address space.  Because the runtime loader fills in the
694    real address before starting the inferior, we have to read in the
695    dynamic info section from the inferior address space.
696    If there are any errors while trying to find the address, we
697    silently return 0, otherwise the found address is returned.  */
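
/* As an illustration (not something the code relies on), `readelf -d'
   on a dynamically linked executable typically shows an entry such as

       0x0000000000000015 (DEBUG)              0x0

   whose value is zero in the file; at run time the dynamic linker
   overwrites it with the address of its r_debug structure, and that is
   the value scan_dyntag / scan_dyntag_auxv read back from the inferior
   for DT_DEBUG.  */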
698 
699 static CORE_ADDR
700 elf_locate_base (void)
701 {
702   struct minimal_symbol *msymbol;
703   CORE_ADDR dyn_ptr;
704 
705   /* Look for DT_MIPS_RLD_MAP first.  MIPS executables use this
706      instead of DT_DEBUG, although they sometimes contain an unused
707      DT_DEBUG.  */
708   if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
709       || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
710     {
711       struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
712       gdb_byte *pbuf;
713       int pbuf_size = TYPE_LENGTH (ptr_type);
714 
715       pbuf = alloca (pbuf_size);
716       /* DT_MIPS_RLD_MAP contains a pointer to the address
717 	 of the dynamic link structure.  */
718       if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
719 	return 0;
720       return extract_typed_address (pbuf, ptr_type);
721     }
722 
723   /* Find DT_DEBUG.  */
724   if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
725       || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
726     return dyn_ptr;
727 
728   /* This may be a static executable.  Look for the symbol
729      conventionally named _r_debug, as a last resort.  */
730   msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
731   if (msymbol != NULL)
732     return SYMBOL_VALUE_ADDRESS (msymbol);
733 
734   /* DT_DEBUG entry not found.  */
735   return 0;
736 }
737 
738 /* Locate the base address of dynamic linker structs.
739 
740    For both the SunOS and SVR4 shared library implementations, if the
741    inferior executable has been linked dynamically, there is a single
742    address somewhere in the inferior's data space which is the key to
743    locating all of the dynamic linker's runtime structures.  This
744    address is the value of the debug base symbol.  The job of this
745    function is to find and return that address, or to return 0 if there
746    is no such address (the executable is statically linked for example).
747 
748    For SunOS, the job is almost trivial, since the dynamic linker and
749    all of its structures are statically linked to the executable at
750    link time.  Thus the symbol for the address we are looking for has
751    already been added to the minimal symbol table for the executable's
752    objfile at the time the symbol file's symbols were read, and all we
753    have to do is look it up there.  Note that we explicitly do NOT want
754    to find the copies in the shared library.
755 
756    The SVR4 version is a bit more complicated because the address
757    is contained somewhere in the dynamic info section.  We have to go
758    to a lot more work to discover the address of the debug base symbol.
759    Because of this complexity, we cache the value we find and return that
760    value on subsequent invocations.  Note there is no copy in the
761    executable symbol tables.  */
762 
763 static CORE_ADDR
764 locate_base (struct svr4_info *info)
765 {
766   /* Check to see if we have a currently valid address, and if so, avoid
767      doing all this work again and just return the cached address.  If
768      we have no cached address, try to locate it in the dynamic info
769      section for ELF executables.  There's no point in doing any of this
770      though if we don't have some link map offsets to work with.  */
771 
772   if (info->debug_base == 0 && svr4_have_link_map_offsets ())
773     info->debug_base = elf_locate_base ();
774   return info->debug_base;
775 }
776 
777 /* Find the first element in the inferior's dynamic link map, and
778    return its address in the inferior.  Return zero if the address
779    could not be determined.
780 
781    FIXME: Perhaps we should validate the info somehow, perhaps by
782    checking r_version for a known version number, or r_state for
783    RT_CONSISTENT.  */
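
/* For reference, the structure read through these offsets is the SVR4
   `struct r_debug', which on typical GNU/Linux systems looks roughly
   like

       struct r_debug
         {
           int r_version;               interface version
           struct link_map *r_map;      head of the loaded-object chain
           ElfW(Addr) r_brk;            address of the debugger hook
           enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
           ElfW(Addr) r_ldbase;         base address of the dynamic linker
         };

   Solaris versions additionally expose an r_ldsomap member (guarded by
   the r_version >= 2 check below) giving the dynamic linker's own link
   map.  The authoritative layout is whatever link_map_offsets says.  */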
784 
785 static CORE_ADDR
786 solib_svr4_r_map (struct svr4_info *info)
787 {
788   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
789   struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
790   CORE_ADDR addr = 0;
791   volatile struct gdb_exception ex;
792 
793   TRY_CATCH (ex, RETURN_MASK_ERROR)
794     {
795       addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
796                                         ptr_type);
797     }
798   exception_print (gdb_stderr, ex);
799   return addr;
800 }
801 
802 /* Find r_brk from the inferior's debug base.  */
803 
804 static CORE_ADDR
805 solib_svr4_r_brk (struct svr4_info *info)
806 {
807   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
808   struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
809 
810   return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
811 				    ptr_type);
812 }
813 
814 /* Find the link map for the dynamic linker (if it is not in the
815    normal list of loaded shared objects).  */
816 
817 static CORE_ADDR
818 solib_svr4_r_ldsomap (struct svr4_info *info)
819 {
820   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
821   struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
822   enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
823   ULONGEST version;
824 
825   /* Check version, and return zero if `struct r_debug' doesn't have
826      the r_ldsomap member.  */
827   version
828     = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
829 				    lmo->r_version_size, byte_order);
830   if (version < 2 || lmo->r_ldsomap_offset == -1)
831     return 0;
832 
833   return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
834 				    ptr_type);
835 }
836 
837 /* On Solaris systems with some versions of the dynamic linker,
838    ld.so's l_name pointer points to the SONAME in the string table
839    rather than into writable memory.  So that GDB can find shared
840    libraries when loading a core file generated by gcore, ensure that
841    memory areas containing the l_name string are saved in the core
842    file.  */
843 
844 static int
845 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
846 {
847   struct svr4_info *info;
848   CORE_ADDR ldsomap;
849   struct so_list *new;
850   struct cleanup *old_chain;
851   struct link_map_offsets *lmo;
852   CORE_ADDR name_lm;
853 
854   info = get_svr4_info ();
855 
856   info->debug_base = 0;
857   locate_base (info);
858   if (!info->debug_base)
859     return 0;
860 
861   ldsomap = solib_svr4_r_ldsomap (info);
862   if (!ldsomap)
863     return 0;
864 
865   lmo = svr4_fetch_link_map_offsets ();
866   new = XZALLOC (struct so_list);
867   old_chain = make_cleanup (xfree, new);
868   new->lm_info = lm_info_read (ldsomap);
869   make_cleanup (xfree, new->lm_info);
870   name_lm = new->lm_info ? new->lm_info->l_name : 0;
871   do_cleanups (old_chain);
872 
873   return (name_lm >= vaddr && name_lm < vaddr + size);
874 }
875 
876 /* Implement the "open_symbol_file_object" target_so_ops method.
877 
878    If no open symbol file, attempt to locate and open the main symbol
879    file.  On SVR4 systems, this is the first link map entry.  If its
880    name is here, we can open it.  Useful when attaching to a process
881    without first loading its symbol file.  */
882 
883 static int
884 open_symbol_file_object (void *from_ttyp)
885 {
886   CORE_ADDR lm, l_name;
887   char *filename;
888   int errcode;
889   int from_tty = *(int *)from_ttyp;
890   struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
891   struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
892   int l_name_size = TYPE_LENGTH (ptr_type);
893   gdb_byte *l_name_buf = xmalloc (l_name_size);
894   struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
895   struct svr4_info *info = get_svr4_info ();
896 
897   if (symfile_objfile)
898     if (!query (_("Attempt to reload symbols from process? ")))
899       {
900 	do_cleanups (cleanups);
901 	return 0;
902       }
903 
904   /* Always locate the debug struct, in case it has moved.  */
905   info->debug_base = 0;
906   if (locate_base (info) == 0)
907     {
908       do_cleanups (cleanups);
909       return 0;	/* failed somehow...  */
910     }
911 
912   /* First link map member should be the executable.  */
913   lm = solib_svr4_r_map (info);
914   if (lm == 0)
915     {
916       do_cleanups (cleanups);
917       return 0;	/* failed somehow...  */
918     }
919 
920   /* Read address of name from target memory to GDB.  */
921   read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
922 
923   /* Convert the address to host format.  */
924   l_name = extract_typed_address (l_name_buf, ptr_type);
925 
926   if (l_name == 0)
927     {
928       do_cleanups (cleanups);
929       return 0;		/* No filename.  */
930     }
931 
932   /* Now fetch the filename from target memory.  */
933   target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
934   make_cleanup (xfree, filename);
935 
936   if (errcode)
937     {
938       warning (_("failed to read exec filename from attached file: %s"),
939 	       safe_strerror (errcode));
940       do_cleanups (cleanups);
941       return 0;
942     }
943 
944   /* Have a pathname: read the symbol file.  */
945   symbol_file_add_main (filename, from_tty);
946 
947   do_cleanups (cleanups);
948   return 1;
949 }
950 
951 /* Data exchange structure for the XML parser as returned by
952    svr4_current_sos_via_xfer_libraries.  */
953 
954 struct svr4_library_list
955 {
956   struct so_list *head, **tailp;
957 
958   /* Inferior address of struct link_map used for the main executable.  It is
959      NULL if not known.  */
960   CORE_ADDR main_lm;
961 };
962 
963 /* Implementation for target_so_ops.free_so.  */
964 
965 static void
966 svr4_free_so (struct so_list *so)
967 {
968   xfree (so->lm_info);
969 }
970 
971 /* Free so_list built so far (called via cleanup).  */
972 
973 static void
974 svr4_free_library_list (void *p_list)
975 {
976   struct so_list *list = *(struct so_list **) p_list;
977 
978   while (list != NULL)
979     {
980       struct so_list *next = list->next;
981 
982       svr4_free_so (list);
983       list = next;
984     }
985 }
986 
987 #ifdef HAVE_LIBEXPAT
988 
989 #include "xml-support.h"
990 
991 /* Handle the start of a <library> element.  Note: new elements are added
992    at the tail of the list, keeping the list in order.  */
993 
994 static void
995 library_list_start_library (struct gdb_xml_parser *parser,
996 			    const struct gdb_xml_element *element,
997 			    void *user_data, VEC(gdb_xml_value_s) *attributes)
998 {
999   struct svr4_library_list *list = user_data;
1000   const char *name = xml_find_attribute (attributes, "name")->value;
1001   ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value;
1002   ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value;
1003   ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value;
1004   struct so_list *new_elem;
1005 
1006   new_elem = XZALLOC (struct so_list);
1007   new_elem->lm_info = XZALLOC (struct lm_info);
1008   new_elem->lm_info->lm_addr = *lmp;
1009   new_elem->lm_info->l_addr_inferior = *l_addrp;
1010   new_elem->lm_info->l_ld = *l_ldp;
1011 
1012   strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1013   new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1014   strcpy (new_elem->so_original_name, new_elem->so_name);
1015 
1016   *list->tailp = new_elem;
1017   list->tailp = &new_elem->next;
1018 }
1019 
1020 /* Handle the start of a <library-list-svr4> element.  */
1021 
1022 static void
1023 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1024 			      const struct gdb_xml_element *element,
1025 			      void *user_data, VEC(gdb_xml_value_s) *attributes)
1026 {
1027   struct svr4_library_list *list = user_data;
1028   const char *version = xml_find_attribute (attributes, "version")->value;
1029   struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1030 
1031   if (strcmp (version, "1.0") != 0)
1032     gdb_xml_error (parser,
1033 		   _("SVR4 Library list has unsupported version \"%s\""),
1034 		   version);
1035 
1036   if (main_lm)
1037     list->main_lm = *(ULONGEST *) main_lm->value;
1038 }
1039 
1040 /* The allowed elements and attributes for an XML library list.
1041    The root element is a <library-list>.  */
1042 
1043 static const struct gdb_xml_attribute svr4_library_attributes[] =
1044 {
1045   { "name", GDB_XML_AF_NONE, NULL, NULL },
1046   { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1047   { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1048   { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1049   { NULL, GDB_XML_AF_NONE, NULL, NULL }
1050 };
1051 
1052 static const struct gdb_xml_element svr4_library_list_children[] =
1053 {
1054   {
1055     "library", svr4_library_attributes, NULL,
1056     GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1057     library_list_start_library, NULL
1058   },
1059   { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1060 };
1061 
1062 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1063 {
1064   { "version", GDB_XML_AF_NONE, NULL, NULL },
1065   { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1066   { NULL, GDB_XML_AF_NONE, NULL, NULL }
1067 };
1068 
1069 static const struct gdb_xml_element svr4_library_list_elements[] =
1070 {
1071   { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1072     GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1073   { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1074 };
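
/* For illustration, a document matching the grammar above might look
   like

       <library-list-svr4 version="1.0" main-lm="0x400168">
         <library name="/lib/libc.so.6" lm="0x7ffff7ffe1c8"
                  l_addr="0x7ffff7bc3000" l_ld="0x7ffff7f52de0"/>
       </library-list-svr4>

   The element and attribute names come from the tables above; the
   values here are made up.  */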
1075 
1076 /* Parse the qXfer:libraries-svr4:read packet body DOCUMENT into *LIST.
1077 
1078    Return 0 if the document could not be parsed; *LIST is not modified in
1079    that case.  Return 1 if *LIST contains the library list; it may be
1080    empty, and the caller is responsible for freeing all its entries.  */
1081 
1082 static int
1083 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1084 {
1085   struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1086 					  &list->head);
1087 
1088   memset (list, 0, sizeof (*list));
1089   list->tailp = &list->head;
1090   if (gdb_xml_parse_quick (_("target library list"), "library-list.dtd",
1091 			   svr4_library_list_elements, document, list) == 0)
1092     {
1093       /* Parsed successfully, keep the result.  */
1094       discard_cleanups (back_to);
1095       return 1;
1096     }
1097 
1098   do_cleanups (back_to);
1099   return 0;
1100 }
1101 
1102 /* Attempt to get the so_list from the target via the
1103    qXfer:libraries-svr4:read packet.  Return 0 if the packet is not
1104    supported; *LIST is not modified in that case.  Return 1 if *LIST
1105    contains the library list; it may be empty, and the caller is
1106    responsible for freeing all its entries.  */
1107 
1108 static int
1109 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1110 {
1111   char *svr4_library_document;
1112   int result;
1113   struct cleanup *back_to;
1114 
1115   /* Fetch the list of shared libraries.  */
1116   svr4_library_document = target_read_stralloc (&current_target,
1117 						TARGET_OBJECT_LIBRARIES_SVR4,
1118 						NULL);
1119   if (svr4_library_document == NULL)
1120     return 0;
1121 
1122   back_to = make_cleanup (xfree, svr4_library_document);
1123   result = svr4_parse_libraries (svr4_library_document, list);
1124   do_cleanups (back_to);
1125 
1126   return result;
1127 }
1128 
1129 #else
1130 
1131 static int
1132 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1133 {
1134   return 0;
1135 }
1136 
1137 #endif
1138 
1139 /* If no shared library information is available from the dynamic
1140    linker, build a fallback list from other sources.  */
1141 
1142 static struct so_list *
1143 svr4_default_sos (void)
1144 {
1145   struct svr4_info *info = get_svr4_info ();
1146   struct so_list *new;
1147 
1148   if (!info->debug_loader_offset_p)
1149     return NULL;
1150 
1151   new = XZALLOC (struct so_list);
1152 
1153   new->lm_info = xzalloc (sizeof (struct lm_info));
1154 
1155   /* Nothing will ever check the other fields if we set l_addr_p.  */
1156   new->lm_info->l_addr = info->debug_loader_offset;
1157   new->lm_info->l_addr_p = 1;
1158 
1159   strncpy (new->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1160   new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1161   strcpy (new->so_original_name, new->so_name);
1162 
1163   return new;
1164 }
1165 
1166 /* Read the whole inferior libraries chain starting at address LM.  Add the
1167    entries to the tail referenced by LINK_PTR_PTR.  Ignore the first entry if
1168    IGNORE_FIRST and set global MAIN_LM_ADDR according to it.  */
1169 
1170 static void
1171 svr4_read_so_list (CORE_ADDR lm, struct so_list ***link_ptr_ptr,
1172 		   int ignore_first)
1173 {
1174   CORE_ADDR prev_lm = 0, next_lm;
1175 
1176   for (; lm != 0; prev_lm = lm, lm = next_lm)
1177     {
1178       struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1179       struct so_list *new;
1180       struct cleanup *old_chain;
1181       int errcode;
1182       char *buffer;
1183 
1184       new = XZALLOC (struct so_list);
1185       old_chain = make_cleanup_free_so (new);
1186 
1187       new->lm_info = lm_info_read (lm);
1188       if (new->lm_info == NULL)
1189 	{
1190 	  do_cleanups (old_chain);
1191 	  break;
1192 	}
1193 
1194       next_lm = new->lm_info->l_next;
1195 
1196       if (new->lm_info->l_prev != prev_lm)
1197 	{
1198 	  warning (_("Corrupted shared library list: %s != %s"),
1199 		   paddress (target_gdbarch, prev_lm),
1200 		   paddress (target_gdbarch, new->lm_info->l_prev));
1201 	  do_cleanups (old_chain);
1202 	  break;
1203 	}
1204 
1205       /* For SVR4 versions, the first entry in the link map is for the
1206          inferior executable, so we must ignore it.  For some versions of
1207          SVR4, it has no name.  For others (Solaris 2.3 for example), it
1208          does have a name, so we can no longer use a missing name to
1209          decide when to ignore it.  */
1210       if (ignore_first && new->lm_info->l_prev == 0)
1211 	{
1212 	  struct svr4_info *info = get_svr4_info ();
1213 
1214 	  info->main_lm_addr = new->lm_info->lm_addr;
1215 	  do_cleanups (old_chain);
1216 	  continue;
1217 	}
1218 
1219       /* Extract this shared object's name.  */
1220       target_read_string (new->lm_info->l_name, &buffer,
1221 			  SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1222       if (errcode != 0)
1223 	{
1224 	  warning (_("Can't read pathname for load map: %s."),
1225 		   safe_strerror (errcode));
1226 	  do_cleanups (old_chain);
1227 	  continue;
1228 	}
1229 
1230       strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1231       new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1232       strcpy (new->so_original_name, new->so_name);
1233       xfree (buffer);
1234 
1235       /* If this entry has no name, or its name matches the name
1236 	 for the main executable, don't include it in the list.  */
1237       if (! new->so_name[0] || match_main (new->so_name))
1238 	{
1239 	  do_cleanups (old_chain);
1240 	  continue;
1241 	}
1242 
1243       discard_cleanups (old_chain);
1244       new->next = 0;
1245       **link_ptr_ptr = new;
1246       *link_ptr_ptr = &new->next;
1247     }
1248 }
1249 
1250 /* Implement the "current_sos" target_so_ops method.  */
1251 
1252 static struct so_list *
1253 svr4_current_sos (void)
1254 {
1255   CORE_ADDR lm;
1256   struct so_list *head = NULL;
1257   struct so_list **link_ptr = &head;
1258   struct svr4_info *info;
1259   struct cleanup *back_to;
1260   int ignore_first;
1261   struct svr4_library_list library_list;
1262 
1263   if (svr4_current_sos_via_xfer_libraries (&library_list))
1264     {
1265       if (library_list.main_lm)
1266 	{
1267 	  info = get_svr4_info ();
1268 	  info->main_lm_addr = library_list.main_lm;
1269 	}
1270 
1271       return library_list.head ? library_list.head : svr4_default_sos ();
1272     }
1273 
1274   info = get_svr4_info ();
1275 
1276   /* Always locate the debug struct, in case it has moved.  */
1277   info->debug_base = 0;
1278   locate_base (info);
1279 
1280   /* If we can't find the dynamic linker's base structure, this
1281      must not be a dynamically linked executable.  Hmm.  */
1282   if (! info->debug_base)
1283     return svr4_default_sos ();
1284 
1285   /* Assume that everything is a library if the dynamic loader was loaded
1286      late by a static executable.  */
1287   if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1288     ignore_first = 0;
1289   else
1290     ignore_first = 1;
1291 
1292   back_to = make_cleanup (svr4_free_library_list, &head);
1293 
1294   /* Walk the inferior's link map list, and build our list of
1295      `struct so_list' nodes.  */
1296   lm = solib_svr4_r_map (info);
1297   if (lm)
1298     svr4_read_so_list (lm, &link_ptr, ignore_first);
1299 
1300   /* On Solaris, the dynamic linker is not in the normal list of
1301      shared objects, so make sure we pick it up too.  Having
1302      symbol information for the dynamic linker is quite crucial
1303      for skipping dynamic linker resolver code.  */
1304   lm = solib_svr4_r_ldsomap (info);
1305   if (lm)
1306     svr4_read_so_list (lm, &link_ptr, 0);
1307 
1308   discard_cleanups (back_to);
1309 
1310   if (head == NULL)
1311     return svr4_default_sos ();
1312 
1313   return head;
1314 }
1315 
1316 /* Get the address of the link_map for a given OBJFILE.  */
1317 
1318 CORE_ADDR
1319 svr4_fetch_objfile_link_map (struct objfile *objfile)
1320 {
1321   struct so_list *so;
1322   struct svr4_info *info = get_svr4_info ();
1323 
1324   /* Cause svr4_current_sos() to be run if it hasn't been already.  */
1325   if (info->main_lm_addr == 0)
1326     solib_add (NULL, 0, &current_target, auto_solib_add);
1327 
1328   /* svr4_current_sos() will set main_lm_addr for the main executable.  */
1329   if (objfile == symfile_objfile)
1330     return info->main_lm_addr;
1331 
1332   /* The other link map addresses may be found by examining the list
1333      of shared libraries.  */
1334   for (so = master_so_list (); so; so = so->next)
1335     if (so->objfile == objfile)
1336       return so->lm_info->lm_addr;
1337 
1338   /* Not found!  */
1339   return 0;
1340 }
1341 
1342 /* On some systems, the only way to recognize the link map entry for
1343    the main executable file is by looking at its name.  Return
1344    non-zero iff SONAME matches one of the known main executable names.  */
1345 
1346 static int
1347 match_main (const char *soname)
1348 {
1349   const char * const *mainp;
1350 
1351   for (mainp = main_name_list; *mainp != NULL; mainp++)
1352     {
1353       if (strcmp (soname, *mainp) == 0)
1354 	return (1);
1355     }
1356 
1357   return (0);
1358 }
1359 
1360 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1361    SVR4 run time loader.  */
1362 
1363 int
1364 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1365 {
1366   struct svr4_info *info = get_svr4_info ();
1367 
1368   return ((pc >= info->interp_text_sect_low
1369 	   && pc < info->interp_text_sect_high)
1370 	  || (pc >= info->interp_plt_sect_low
1371 	      && pc < info->interp_plt_sect_high)
1372 	  || in_plt_section (pc, NULL)
1373 	  || in_gnu_ifunc_stub (pc));
1374 }
1375 
1376 /* Given an executable's ABFD and target, compute the entry-point
1377    address.  */
1378 
1379 static CORE_ADDR
1380 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1381 {
1382   /* KevinB wrote ... for most targets, the address returned by
1383      bfd_get_start_address() is the entry point for the start
1384      function.  But, for some targets, bfd_get_start_address() returns
1385      the address of a function descriptor from which the entry point
1386      address may be extracted.  This address is extracted by
1387      gdbarch_convert_from_func_ptr_addr().  The method
1388      gdbarch_convert_from_func_ptr_addr() is merely the identity
1389      function for targets which don't use function descriptors.  */
1390   return gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1391 					     bfd_get_start_address (abfd),
1392 					     targ);
1393 }
1394 
1395 /* Helper function for gdb_bfd_lookup_symbol.  */
1396 
1397 static int
1398 cmp_name_and_sec_flags (asymbol *sym, void *data)
1399 {
1400   return (strcmp (sym->name, (const char *) data) == 0
1401 	  && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
1402 }
1403 /* Arrange for dynamic linker to hit breakpoint.
1404 
1405    Both the SunOS and the SVR4 dynamic linkers have, as part of their
1406    debugger interface, support for arranging for the inferior to hit
1407    a breakpoint after mapping in the shared libraries.  This function
1408    enables that breakpoint.
1409 
1410    For SunOS, there is a special flag location (in_debugger) which we
1411    set to 1.  When the dynamic linker sees this flag set, it will set
1412    a breakpoint at a location known only to itself, after saving the
1413    original contents of that place and the breakpoint address itself,
1414    in it's own internal structures.  When we resume the inferior, it
1415    in its own internal structures.  When we resume the inferior, it
1416    We handle this (in a different place) by restoring the contents of
1417    the breakpointed location (which is only known after it stops),
1418    chasing around to locate the shared libraries that have been
1419    loaded, then resuming.
1420 
1421    For SVR4, the debugger interface structure contains a member (r_brk)
1422    built, to the offset of a function (_r_debug_state) which is
1423    guaranteed to be called once before mapping in a library, and again when
1424    teed to be called once before mapping in a library, and again when
1425    the mapping is complete.  At the time we are examining this member,
1426    it contains only the unrelocated offset of the function, so we have
1427    to do our own relocation.  Later, when the dynamic linker actually
1428    runs, it relocates r_brk to be the actual address of _r_debug_state().
1429 
1430    The debugger interface structure also contains an enumeration which
1431    is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1432    depending upon whether or not the library is being mapped or unmapped,
1433    and then set to RT_CONSISTENT after the library is mapped/unmapped.  */
1434 
1435 static int
1436 enable_break (struct svr4_info *info, int from_tty)
1437 {
1438   struct minimal_symbol *msymbol;
1439   const char * const *bkpt_namep;
1440   asection *interp_sect;
1441   gdb_byte *interp_name;
1442   CORE_ADDR sym_addr;
1443 
1444   info->interp_text_sect_low = info->interp_text_sect_high = 0;
1445   info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
1446 
1447   /* If we already have a shared library list in the target, and
1448      r_debug contains r_brk, set the breakpoint there - this should
1449      mean r_brk has already been relocated.  Assume the dynamic linker
1450      is the object containing r_brk.  */
1451 
1452   solib_add (NULL, from_tty, &current_target, auto_solib_add);
1453   sym_addr = 0;
1454   if (info->debug_base && solib_svr4_r_map (info) != 0)
1455     sym_addr = solib_svr4_r_brk (info);
1456 
1457   if (sym_addr != 0)
1458     {
1459       struct obj_section *os;
1460 
1461       sym_addr = gdbarch_addr_bits_remove
1462 	(target_gdbarch, gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1463 							     sym_addr,
1464 							     &current_target));
1465 
1466       /* On at least some versions of Solaris there's a dynamic relocation
1467 	 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
1468 	 we get control before the dynamic linker has self-relocated.
1469 	 Check if SYM_ADDR is in a known section; if it is, assume we can
1470 	 trust its value.  This is just a heuristic though; it could go away
1471 	 or be replaced if it's getting in the way.
1472 
1473 	 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
1474 	 however it's spelled in your particular system) is ARM or Thumb.
1475 	 That knowledge is encoded in the address, if it's Thumb the low bit
1476 	 is 1.  However, we've stripped that info above and it's not clear
1477 	 what all the consequences are of passing a non-addr_bits_remove'd
1478 	 address to create_solib_event_breakpoint.  The call to
1479 	 find_pc_section verifies we know about the address and have some
1480 	 hope of computing the right kind of breakpoint to use (via
1481 	 symbol info).  It does mean that GDB needs to be pointed at a
1482 	 non-stripped version of the dynamic linker in order to obtain
1483 	 information it already knows about.  Sigh.  */
1484 
1485       os = find_pc_section (sym_addr);
1486       if (os != NULL)
1487 	{
1488 	  /* Record the relocated start and end address of the dynamic linker
1489 	     text and plt section for svr4_in_dynsym_resolve_code.  */
1490 	  bfd *tmp_bfd;
1491 	  CORE_ADDR load_addr;
1492 
1493 	  tmp_bfd = os->objfile->obfd;
1494 	  load_addr = ANOFFSET (os->objfile->section_offsets,
1495 				os->objfile->sect_index_text);
1496 
1497 	  interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1498 	  if (interp_sect)
1499 	    {
1500 	      info->interp_text_sect_low =
1501 		bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1502 	      info->interp_text_sect_high =
1503 		info->interp_text_sect_low
1504 		+ bfd_section_size (tmp_bfd, interp_sect);
1505 	    }
1506 	  interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1507 	  if (interp_sect)
1508 	    {
1509 	      info->interp_plt_sect_low =
1510 		bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1511 	      info->interp_plt_sect_high =
1512 		info->interp_plt_sect_low
1513 		+ bfd_section_size (tmp_bfd, interp_sect);
1514 	    }
1515 
1516 	  create_solib_event_breakpoint (target_gdbarch, sym_addr);
1517 	  return 1;
1518 	}
1519     }
1520 
1521   /* Find the program interpreter; if not found, warn the user and drop
1522      into the old breakpoint at symbol code.  */
1523   interp_name = find_program_interpreter ();
1524   if (interp_name)
1525     {
1526       CORE_ADDR load_addr = 0;
1527       int load_addr_found = 0;
1528       int loader_found_in_list = 0;
1529       struct so_list *so;
1530       bfd *tmp_bfd = NULL;
1531       struct target_ops *tmp_bfd_target;
1532       volatile struct gdb_exception ex;
1533 
1534       sym_addr = 0;
1535 
1536       /* Now we need to figure out where the dynamic linker was
1537          loaded so that we can load its symbols and place a breakpoint
1538          in the dynamic linker itself.
1539 
1540          This address is stored on the stack.  However, I've been unable
1541          to find any magic formula to find it for Solaris (appears to
1542          be trivial on GNU/Linux).  Therefore, we have to try an alternate
1543          mechanism to find the dynamic linker's base address.  */
1544 
1545       TRY_CATCH (ex, RETURN_MASK_ALL)
1546         {
1547 	  tmp_bfd = solib_bfd_open (interp_name);
1548 	}
1549       if (tmp_bfd == NULL)
1550 	goto bkpt_at_symbol;
1551 
1552       /* Now convert the TMP_BFD into a target.  That way both target and
1553          BFD operations can be used.  Note that closing the
1554          target will also close the underlying bfd.  */
1555       tmp_bfd_target = target_bfd_reopen (tmp_bfd);
1556 
1557       /* On a running target, we can get the dynamic linker's base
1558          address from the shared library table.  */
1559       so = master_so_list ();
1560       while (so)
1561 	{
1562 	  if (svr4_same_1 (interp_name, so->so_original_name))
1563 	    {
1564 	      load_addr_found = 1;
1565 	      loader_found_in_list = 1;
1566 	      load_addr = lm_addr_check (so, tmp_bfd);
1567 	      break;
1568 	    }
1569 	  so = so->next;
1570 	}
1571 
1572       /* If we were not able to find the base address of the loader
1573          from our so_list, then try using the AT_BASE auxiliary entry.  */
1574       if (!load_addr_found)
1575         if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
1576 	  {
1577 	    int addr_bit = gdbarch_addr_bit (target_gdbarch);
1578 
1579 	    /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
1580 	       that `+ load_addr' wraps within CORE_ADDR width instead of creating
1581 	       invalid addresses like 0x101234567 for 32bit inferiors on 64bit
1582 	       GDB.  */
1583 
1584 	    if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
1585 	      {
1586 		CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
1587 		CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
1588 							      tmp_bfd_target);
1589 
1590 		gdb_assert (load_addr < space_size);
1591 
1592 		/* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
1593 		   64bit ld.so with 32bit executable; it should not happen.  */
1594 
1595 		if (tmp_entry_point < space_size
1596 		    && tmp_entry_point + load_addr >= space_size)
1597 		  load_addr -= space_size;
1598 	      }
1599 
1600 	    load_addr_found = 1;
1601 	  }
1602 
1603       /* Otherwise we find the dynamic linker's base address by examining
1604 	 the current pc (which should point at the entry point for the
1605 	 dynamic linker) and subtracting the offset of the entry point.
1606 
1607          This is more fragile than the previous approaches, but is a good
1608          fallback method because it has actually been working well in
1609          most cases.  */
1610       if (!load_addr_found)
1611 	{
1612 	  struct regcache *regcache
1613 	    = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
1614 
1615 	  load_addr = (regcache_read_pc (regcache)
1616 		       - exec_entry_point (tmp_bfd, tmp_bfd_target));
1617 	}
1618 
1619       if (!loader_found_in_list)
1620 	{
1621 	  info->debug_loader_name = xstrdup (interp_name);
1622 	  info->debug_loader_offset_p = 1;
1623 	  info->debug_loader_offset = load_addr;
1624 	  solib_add (NULL, from_tty, &current_target, auto_solib_add);
1625 	}
1626 
1627       /* Record the relocated start and end addresses of the dynamic
1628          linker's text and plt sections for svr4_in_dynsym_resolve_code.  */
1629       interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1630       if (interp_sect)
1631 	{
1632 	  info->interp_text_sect_low =
1633 	    bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1634 	  info->interp_text_sect_high =
1635 	    info->interp_text_sect_low
1636 	    + bfd_section_size (tmp_bfd, interp_sect);
1637 	}
1638       interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1639       if (interp_sect)
1640 	{
1641 	  info->interp_plt_sect_low =
1642 	    bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1643 	  info->interp_plt_sect_high =
1644 	    info->interp_plt_sect_low
1645 	    + bfd_section_size (tmp_bfd, interp_sect);
1646 	}
1647 
1648       /* Now try to set a breakpoint in the dynamic linker.  */
1649       for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1650 	{
1651 	  sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
1652 					    (void *) *bkpt_namep);
1653 	  if (sym_addr != 0)
1654 	    break;
1655 	}
1656 
1657       if (sym_addr != 0)
1658 	/* Convert 'sym_addr' from a function pointer to an address.
1659 	   Because we pass tmp_bfd_target instead of the current
1660 	   target, this will always produce an unrelocated value.  */
1661 	sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1662 						       sym_addr,
1663 						       tmp_bfd_target);
1664 
1665       /* We're done with both the temporary bfd and target.  Remember,
1666          closing the target closes the underlying bfd.  */
1667       target_close (tmp_bfd_target, 0);
1668 
1669       if (sym_addr != 0)
1670 	{
1671 	  create_solib_event_breakpoint (target_gdbarch, load_addr + sym_addr);
1672 	  xfree (interp_name);
1673 	  return 1;
1674 	}
1675 
1676       /* For whatever reason we couldn't set a breakpoint in the dynamic
1677          linker.  Warn and drop into the old code.  */
1678     bkpt_at_symbol:
1679       xfree (interp_name);
1680       warning (_("Unable to find dynamic linker breakpoint function.\n"
1681                "GDB will be unable to debug shared library initializers\n"
1682                "and track explicitly loaded dynamic code."));
1683     }
1684 
1685   /* Scan through the lists of symbols, trying to look up the symbol and
1686      set a breakpoint there.  Terminate the loop if/when we succeed.  */
1687 
1688   for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1689     {
1690       msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1691       if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1692 	{
1693 	  sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1694 	  sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1695 							 sym_addr,
1696 							 &current_target);
1697 	  create_solib_event_breakpoint (target_gdbarch, sym_addr);
1698 	  return 1;
1699 	}
1700     }
1701 
1702   if (!current_inferior ()->attach_flag)
1703     {
1704       for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
1705 	{
1706 	  msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1707 	  if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1708 	    {
1709 	      sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1710 	      sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1711 							     sym_addr,
1712 							     &current_target);
1713 	      create_solib_event_breakpoint (target_gdbarch, sym_addr);
1714 	      return 1;
1715 	    }
1716 	}
1717     }
1718   return 0;
1719 }
1720 
1721 /* Implement the "special_symbol_handling" target_so_ops method.  */
1722 
1723 static void
1724 svr4_special_symbol_handling (void)
1725 {
1726   /* Nothing to do.  */
1727 }
1728 
1729 /* Read the ELF program headers from ABFD.  Return the contents and
1730    set *PHDRS_SIZE to the size of the program headers.  */
1731 
1732 static gdb_byte *
1733 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1734 {
1735   Elf_Internal_Ehdr *ehdr;
1736   gdb_byte *buf;
1737 
1738   ehdr = elf_elfheader (abfd);
1739 
1740   *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
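  /* For a typical 64-bit ELF file with e_phnum == 9 and e_phentsize == 56
     (the size of an Elf64_External_Phdr) this would be 504 bytes.
     (Illustrative values only.)  */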
1741   if (*phdrs_size == 0)
1742     return NULL;
1743 
1744   buf = xmalloc (*phdrs_size);
1745   if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1746       || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1747     {
1748       xfree (buf);
1749       return NULL;
1750     }
1751 
1752   return buf;
1753 }
1754 
1755 /* Return 1 and fill *DISPLACEMENTP with the detected PIE offset of the
1756    inferior's exec_bfd.  Otherwise return 0.
1757 
1758    We relocate all of the sections by the same amount.  This
1759    behavior is mandated by recent editions of the System V ABI.
1760    According to the System V Application Binary Interface,
1761    Edition 4.1, page 5-5:
1762 
1763      ...  Though the system chooses virtual addresses for
1764      individual processes, it maintains the segments' relative
1765      positions.  Because position-independent code uses relative
1766      addressing between segments, the difference between
1767      virtual addresses in memory must match the difference
1768      between virtual addresses in the file.  The difference
1769      between the virtual address of any segment in memory and
1770      the corresponding virtual address in the file is thus a
1771      single constant value for any one executable or shared
1772      object in a given process.  This difference is the base
1773      address.  One use of the base address is to relocate the
1774      memory image of the program during dynamic linking.
1775 
1776    The same language also appears in Edition 4.0 of the System V
1777    ABI and is left unspecified in some of the earlier editions.
1778 
1779    Decide if the objfile needs to be relocated.  As indicated above, we will
1780    only be here when execution is stopped.  But during attachment the PC can
1781    be at an arbitrary address, so regcache_read_pc can be misleading (unlike
1782    the auxv AT_ENTRY value).  Moreover, for an executable with an interpreter
1783    section, regcache_read_pc points at the interpreter, not the main executable.
1784 
1785    So, to summarize, relocation is necessary when the start address obtained
1786    from the executable differs from the address in the auxv AT_ENTRY entry.
1787 
1788    [ The astute reader will note that we also test to make sure that
1789      the executable in question has the DYNAMIC flag set.  It is my
1790      opinion that this test is unnecessary (undesirable even).  It
1791      was added to avoid inadvertent relocation of an executable
1792      whose e_type member in the ELF header is not ET_DYN.  There may
1793      be a time in the future when it is desirable to do relocations
1794      on other types of files as well in which case this condition
1795      should either be removed or modified to accommodate the new file
1796      type.  - Kevin, Nov 2000. ]  */
1797 
1798 static int
1799 svr4_exec_displacement (CORE_ADDR *displacementp)
1800 {
1801   /* ENTRY_POINT is a possible function descriptor - before
1802      a call to gdbarch_convert_from_func_ptr_addr.  */
1803   CORE_ADDR entry_point, displacement;
1804 
1805   if (exec_bfd == NULL)
1806     return 0;
1807 
1808   /* If the executable lacks the DYNAMIC flag checked below, then for ELF it
1809      is ET_EXEC and not ET_DYN.  Both shared libraries executed directly and
1810      PIE (Position Independent Executable) executables are ET_DYN.  */
1811 
1812   if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
1813     return 0;
1814 
1815   if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
1816     return 0;
1817 
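  /* For example, a PIE whose BFD start address is 0x730 and whose auxv
     AT_ENTRY is reported as 0x7ffff7dd9730 yields a displacement of
     0x7ffff7dd9000.  (Illustrative values only.)  */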
1818   displacement = entry_point - bfd_get_start_address (exec_bfd);
1819 
1820   /* Verify the DISPLACEMENT candidate complies with the required page
1821      alignment.  It is cheaper than the program headers comparison below.  */
1822 
1823   if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1824     {
1825       const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
1826 
1827       /* p_align of PT_LOAD segments does not specify any alignment but
1828 	 only congruence of addresses:
1829 	   p_offset % p_align == p_vaddr % p_align
1830 	 The kernel is free to load the executable with lower alignment.  */
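      /* For example, with a 4 KiB minimum page size a candidate displacement
	 of 0x1234 would be rejected by the check below, since whole PT_LOAD
	 segments can only be shifted by multiples of the page size.
	 (Illustrative value only.)  */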
1831 
1832       if ((displacement & (elf->minpagesize - 1)) != 0)
1833 	return 0;
1834     }
1835 
1836   /* Verify that the auxiliary vector describes the same file as exec_bfd, by
1837      comparing their program headers.  If the program headers in the auxiliary
1838      vector do not match the program headers in the executable, then we are
1839      looking at a different file than the one used by the kernel - for
1840      instance, "gdb program" connected to "gdbserver :PORT ld.so program".  */
1841 
1842   if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1843     {
1844       /* Be optimistic and clear OK only if GDB was able to verify the headers
1845 	 really do not match.  */
1846       int phdrs_size, phdrs2_size, ok = 1;
1847       gdb_byte *buf, *buf2;
1848       int arch_size;
1849 
1850       buf = read_program_header (-1, &phdrs_size, &arch_size);
1851       buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
1852       if (buf != NULL && buf2 != NULL)
1853 	{
1854 	  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
1855 
1856 	  /* We are dealing with three different addresses.  EXEC_BFD
1857 	     represents the current addresses in the on-disk file.  The target
1858 	     memory content may differ from EXEC_BFD because the file may have
1859 	     been prelinked to a different address after the executable was
1860 	     loaded.  Moreover, the placement address in target memory can
1861 	     differ from what the program headers in target memory say - that
1862 	     is the point of PIE.
1863 
1864 	     The detected DISPLACEMENT covers both the offset of the PIE
1865 	     placement and any new prelink performed after the program
1866 	     started.  Here we relocate BUF and BUF2 just by the EXEC_BFD
1867 	     vs. target memory content offset, for verification purposes.  */
1868 
1869 	  if (phdrs_size != phdrs2_size
1870 	      || bfd_get_arch_size (exec_bfd) != arch_size)
1871 	    ok = 0;
1872 	  else if (arch_size == 32
1873 		   && phdrs_size >= sizeof (Elf32_External_Phdr)
1874 	           && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
1875 	    {
1876 	      Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1877 	      Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1878 	      CORE_ADDR displacement = 0;
1879 	      int i;
1880 
1881 	      /* DISPLACEMENT could be found more easily by the difference of
1882 		 ehdr2->e_entry.  But we haven't read the ehdr yet, and we
1883 		 already have enough information to compute that displacement
1884 		 with what we've read.  */
1885 
1886 	      for (i = 0; i < ehdr2->e_phnum; i++)
1887 		if (phdr2[i].p_type == PT_LOAD)
1888 		  {
1889 		    Elf32_External_Phdr *phdrp;
1890 		    gdb_byte *buf_vaddr_p, *buf_paddr_p;
1891 		    CORE_ADDR vaddr, paddr;
1892 		    CORE_ADDR displacement_vaddr = 0;
1893 		    CORE_ADDR displacement_paddr = 0;
1894 
1895 		    phdrp = &((Elf32_External_Phdr *) buf)[i];
1896 		    buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1897 		    buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1898 
1899 		    vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1900 						      byte_order);
1901 		    displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1902 
1903 		    paddr = extract_unsigned_integer (buf_paddr_p, 4,
1904 						      byte_order);
1905 		    displacement_paddr = paddr - phdr2[i].p_paddr;
1906 
1907 		    if (displacement_vaddr == displacement_paddr)
1908 		      displacement = displacement_vaddr;
1909 
1910 		    break;
1911 		  }
1912 
1913 	      /* Now compare BUF and BUF2 with optional DISPLACEMENT.  */
1914 
1915 	      for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
1916 		{
1917 		  Elf32_External_Phdr *phdrp;
1918 		  Elf32_External_Phdr *phdr2p;
1919 		  gdb_byte *buf_vaddr_p, *buf_paddr_p;
1920 		  CORE_ADDR vaddr, paddr;
1921 		  asection *plt2_asect;
1922 
1923 		  phdrp = &((Elf32_External_Phdr *) buf)[i];
1924 		  buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1925 		  buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1926 		  phdr2p = &((Elf32_External_Phdr *) buf2)[i];
1927 
1928 		  /* PT_GNU_STACK is an exception in that it is never relocated
1929 		     by prelink, as its addresses are always zero.  */
1930 
1931 		  if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1932 		    continue;
1933 
1934 		  /* Check also other adjustment combinations - PR 11786.  */
1935 
1936 		  vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1937 						    byte_order);
1938 		  vaddr -= displacement;
1939 		  store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
1940 
1941 		  paddr = extract_unsigned_integer (buf_paddr_p, 4,
1942 						    byte_order);
1943 		  paddr -= displacement;
1944 		  store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
1945 
1946 		  if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1947 		    continue;
1948 
1949 		  /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS.  */
1950 		  plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
1951 		  if (plt2_asect)
1952 		    {
1953 		      int content2;
1954 		      gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
1955 		      CORE_ADDR filesz;
1956 
1957 		      content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
1958 				  & SEC_HAS_CONTENTS) != 0;
1959 
1960 		      filesz = extract_unsigned_integer (buf_filesz_p, 4,
1961 							 byte_order);
1962 
1963 		      /* PLT2_ASECT is from on-disk file (exec_bfd) while
1964 			 FILESZ is from the in-memory image.  */
1965 		      if (content2)
1966 			filesz += bfd_get_section_size (plt2_asect);
1967 		      else
1968 			filesz -= bfd_get_section_size (plt2_asect);
1969 
1970 		      store_unsigned_integer (buf_filesz_p, 4, byte_order,
1971 					      filesz);
1972 
1973 		      if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1974 			continue;
1975 		    }
1976 
1977 		  ok = 0;
1978 		  break;
1979 		}
1980 	    }
1981 	  else if (arch_size == 64
1982 		   && phdrs_size >= sizeof (Elf64_External_Phdr)
1983 	           && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
1984 	    {
1985 	      Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1986 	      Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1987 	      CORE_ADDR displacement = 0;
1988 	      int i;
1989 
1990 	      /* DISPLACEMENT could be found more easily by the difference of
1991 		 ehdr2->e_entry.  But we haven't read the ehdr yet, and we
1992 		 already have enough information to compute that displacement
1993 		 with what we've read.  */
1994 
1995 	      for (i = 0; i < ehdr2->e_phnum; i++)
1996 		if (phdr2[i].p_type == PT_LOAD)
1997 		  {
1998 		    Elf64_External_Phdr *phdrp;
1999 		    gdb_byte *buf_vaddr_p, *buf_paddr_p;
2000 		    CORE_ADDR vaddr, paddr;
2001 		    CORE_ADDR displacement_vaddr = 0;
2002 		    CORE_ADDR displacement_paddr = 0;
2003 
2004 		    phdrp = &((Elf64_External_Phdr *) buf)[i];
2005 		    buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2006 		    buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2007 
2008 		    vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2009 						      byte_order);
2010 		    displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2011 
2012 		    paddr = extract_unsigned_integer (buf_paddr_p, 8,
2013 						      byte_order);
2014 		    displacement_paddr = paddr - phdr2[i].p_paddr;
2015 
2016 		    if (displacement_vaddr == displacement_paddr)
2017 		      displacement = displacement_vaddr;
2018 
2019 		    break;
2020 		  }
2021 
2022 	      /* Now compare BUF and BUF2 with optional DISPLACEMENT.  */
2023 
2024 	      for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2025 		{
2026 		  Elf64_External_Phdr *phdrp;
2027 		  Elf64_External_Phdr *phdr2p;
2028 		  gdb_byte *buf_vaddr_p, *buf_paddr_p;
2029 		  CORE_ADDR vaddr, paddr;
2030 		  asection *plt2_asect;
2031 
2032 		  phdrp = &((Elf64_External_Phdr *) buf)[i];
2033 		  buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2034 		  buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2035 		  phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2036 
2037 		  /* PT_GNU_STACK is an exception in that it is never relocated
2038 		     by prelink, as its addresses are always zero.  */
2039 
2040 		  if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2041 		    continue;
2042 
2043 		  /* Check also other adjustment combinations - PR 11786.  */
2044 
2045 		  vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2046 						    byte_order);
2047 		  vaddr -= displacement;
2048 		  store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2049 
2050 		  paddr = extract_unsigned_integer (buf_paddr_p, 8,
2051 						    byte_order);
2052 		  paddr -= displacement;
2053 		  store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2054 
2055 		  if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2056 		    continue;
2057 
2058 		  /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS.  */
2059 		  plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2060 		  if (plt2_asect)
2061 		    {
2062 		      int content2;
2063 		      gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2064 		      CORE_ADDR filesz;
2065 
2066 		      content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2067 				  & SEC_HAS_CONTENTS) != 0;
2068 
2069 		      filesz = extract_unsigned_integer (buf_filesz_p, 8,
2070 							 byte_order);
2071 
2072 		      /* PLT2_ASECT is from on-disk file (exec_bfd) while
2073 			 FILESZ is from the in-memory image.  */
2074 		      if (content2)
2075 			filesz += bfd_get_section_size (plt2_asect);
2076 		      else
2077 			filesz -= bfd_get_section_size (plt2_asect);
2078 
2079 		      store_unsigned_integer (buf_filesz_p, 8, byte_order,
2080 					      filesz);
2081 
2082 		      if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2083 			continue;
2084 		    }
2085 
2086 		  ok = 0;
2087 		  break;
2088 		}
2089 	    }
2090 	  else
2091 	    ok = 0;
2092 	}
2093 
2094       xfree (buf);
2095       xfree (buf2);
2096 
2097       if (!ok)
2098 	return 0;
2099     }
2100 
2101   if (info_verbose)
2102     {
2103       /* This may be printed repeatedly, as there is no easy way to check
2104 	 whether the executable symbols/file have already been relocated by
2105 	 the displacement.  */
2106 
2107       printf_unfiltered (_("Using PIE (Position Independent Executable) "
2108 			   "displacement %s for \"%s\".\n"),
2109 			 paddress (target_gdbarch, displacement),
2110 			 bfd_get_filename (exec_bfd));
2111     }
2112 
2113   *displacementp = displacement;
2114   return 1;
2115 }
2116 
2117 /* Relocate the main executable.  This function should be called upon
2118    stopping the inferior process at the entry point to the program.
2119    The entry point from BFD is compared to the AUXV AT_ENTRY value and, if
2120    they differ, the main executable is relocated by the proper amount.  */
2121 
2122 static void
2123 svr4_relocate_main_executable (void)
2124 {
2125   CORE_ADDR displacement;
2126 
2127   /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2128      probably contains the offsets computed using the PIE displacement
2129      from the previous run, which of course are irrelevant for this run.
2130      So we need to determine the new PIE displacement and recompute the
2131      section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2132      already contains pre-computed offsets.
2133 
2134      If we cannot compute the PIE displacement, either:
2135 
2136        - The executable is not PIE.
2137 
2138        - SYMFILE_OBJFILE does not match the executable started in the target.
2139 	 This can happen for main executable symbols loaded at the host while
2140 	 `ld.so --ld-args main-executable' is loaded in the target.
2141 
2142      Then we leave the section offsets untouched and use them as is for
2143      this run.  Either:
2144 
2145        - These section offsets were properly reset earlier, and thus
2146 	 already contain the correct values.  This can happen for instance
2147 	 when reconnecting via the remote protocol to a target that supports
2148 	 the `qOffsets' packet.
2149 
2150        - The section offsets were not reset earlier, and the best we can
2151 	 hope is that the old offsets are still applicable to the new run.  */
2152 
2153   if (! svr4_exec_displacement (&displacement))
2154     return;
2155 
2156   /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2157      addresses.  */
2158 
2159   if (symfile_objfile)
2160     {
2161       struct section_offsets *new_offsets;
2162       int i;
2163 
2164       new_offsets = alloca (symfile_objfile->num_sections
2165 			    * sizeof (*new_offsets));
2166 
2167       for (i = 0; i < symfile_objfile->num_sections; i++)
2168 	new_offsets->offsets[i] = displacement;
2169 
2170       objfile_relocate (symfile_objfile, new_offsets);
2171     }
2172   else if (exec_bfd)
2173     {
2174       asection *asect;
2175 
2176       for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2177 	exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2178 				  (bfd_section_vma (exec_bfd, asect)
2179 				   + displacement));
2180     }
2181 }
2182 
2183 /* Implement the "create_inferior_hook" target_so_ops method.
2184 
2185    For SVR4 executables, the first instruction executed in the inferior is
2186    either the first instruction in the dynamic linker (for dynamically
2187    linked executables) or the instruction at "start" for statically linked
2188    executables.  For dynamically linked executables, the system
2189    first exec's /lib/libc.so.N, which contains the dynamic linker,
2190    and starts it running.  The dynamic linker maps in any needed
2191    shared libraries, maps in the actual user executable, and then
2192    jumps to "start" in the user executable.
2193 
2194    We can arrange to cooperate with the dynamic linker to discover the
2195    names of shared libraries that are dynamically linked, and the base
2196    addresses to which they are linked.
2197 
2198    This function is responsible for discovering those names and
2199    addresses, and saving sufficient information about them to allow
2200    their symbols to be read at a later time.
2201 
2202    FIXME
2203 
2204    Between enable_break() and disable_break(), this code does not
2205    properly handle hitting breakpoints which the user might have
2206    set in the startup code or in the dynamic linker itself.  Proper
2207    handling will probably have to wait until the implementation is
2208    changed to use the "breakpoint handler function" method.
2209 
2210    Also, what if child has exit()ed?  Must exit loop somehow.  */
2211 
2212 static void
2213 svr4_solib_create_inferior_hook (int from_tty)
2214 {
2215 #if defined(_SCO_DS)
2216   struct inferior *inf;
2217   struct thread_info *tp;
2218 #endif /* defined(_SCO_DS) */
2219   struct svr4_info *info;
2220 
2221   info = get_svr4_info ();
2222 
2223   /* Relocate the main executable if necessary.  */
2224   svr4_relocate_main_executable ();
2225 
2226   /* No point setting a breakpoint in the dynamic linker if we can't
2227      hit it (e.g., a core file, or a trace file).  */
2228   if (!target_has_execution)
2229     return;
2230 
2231   if (!svr4_have_link_map_offsets ())
2232     return;
2233 
2234   if (!enable_break (info, from_tty))
2235     return;
2236 
2237 #if defined(_SCO_DS)
2238   /* SCO needs the loop below; other systems should be using the
2239      special shared library breakpoints and the shared library breakpoint
2240      service routine.
2241 
2242      Now run the target.  It will eventually hit the breakpoint, at
2243      which point all of the libraries will have been mapped in and we
2244      can go groveling around in the dynamic linker structures to find
2245      out what we need to know about them.  */
2246 
2247   inf = current_inferior ();
2248   tp = inferior_thread ();
2249 
2250   clear_proceed_status ();
2251   inf->control.stop_soon = STOP_QUIETLY;
2252   tp->suspend.stop_signal = TARGET_SIGNAL_0;
2253   do
2254     {
2255       target_resume (pid_to_ptid (-1), 0, tp->suspend.stop_signal);
2256       wait_for_inferior ();
2257     }
2258   while (tp->suspend.stop_signal != TARGET_SIGNAL_TRAP);
2259   inf->control.stop_soon = NO_STOP_QUIETLY;
2260 #endif /* defined(_SCO_DS) */
2261 }
2262 
2263 static void
2264 svr4_clear_solib (void)
2265 {
2266   struct svr4_info *info;
2267 
2268   info = get_svr4_info ();
2269   info->debug_base = 0;
2270   info->debug_loader_offset_p = 0;
2271   info->debug_loader_offset = 0;
2272   xfree (info->debug_loader_name);
2273   info->debug_loader_name = NULL;
2274 }
2275 
2276 /* Clear any bits of ADDR that wouldn't fit in a target-format
2277    data pointer.  "Data pointer" here refers to whatever sort of
2278    address the dynamic linker uses to manage its sections.  At the
2279    moment, we don't support shared libraries on any processors where
2280    code and data pointers are different sizes.
2281 
2282    This isn't really the right solution.  What we really need here is
2283    a way to do arithmetic on CORE_ADDR values that respects the
2284    natural pointer/address correspondence.  (For example, on the MIPS,
2285    converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2286    sign-extend the value.  There, simply truncating the bits above
2287    gdbarch_ptr_bit, as we do below, is no good.)  This should probably
2288    be a new gdbarch method or something.  */
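/* For example, with gdbarch_ptr_bit of 32 this reduces a sign-extended value
   such as 0xffffffff80001000 to 0x80001000; when pointers are as wide as
   CORE_ADDR the address is returned unchanged.  (Illustrative value only.)  */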
2289 static CORE_ADDR
2290 svr4_truncate_ptr (CORE_ADDR addr)
2291 {
2292   if (gdbarch_ptr_bit (target_gdbarch) == sizeof (CORE_ADDR) * 8)
2293     /* We don't need to truncate anything, and the bit twiddling below
2294        will fail due to overflow problems.  */
2295     return addr;
2296   else
2297     return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch)) - 1);
2298 }
2299 
2300 
2301 static void
2302 svr4_relocate_section_addresses (struct so_list *so,
2303                                  struct target_section *sec)
2304 {
2305   sec->addr    = svr4_truncate_ptr (sec->addr    + lm_addr_check (so,
2306 								  sec->bfd));
2307   sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so,
2308 								  sec->bfd));
2309 }
2310 
2311 
2312 /* Architecture-specific operations.  */
2313 
2314 /* Per-architecture data key.  */
2315 static struct gdbarch_data *solib_svr4_data;
2316 
2317 struct solib_svr4_ops
2318 {
2319   /* Return a description of the layout of `struct link_map'.  */
2320   struct link_map_offsets *(*fetch_link_map_offsets)(void);
2321 };
2322 
2323 /* Return a default for the architecture-specific operations.  */
2324 
2325 static void *
2326 solib_svr4_init (struct obstack *obstack)
2327 {
2328   struct solib_svr4_ops *ops;
2329 
2330   ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2331   ops->fetch_link_map_offsets = NULL;
2332   return ops;
2333 }
2334 
2335 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2336    GDBARCH to FLMO.  Also, install SVR4 solib_ops into GDBARCH.  */
2337 
2338 void
2339 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2340                                        struct link_map_offsets *(*flmo) (void))
2341 {
2342   struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2343 
2344   ops->fetch_link_map_offsets = flmo;
2345 
2346   set_solib_ops (gdbarch, &svr4_so_ops);
2347 }
2348 
2349 /* Fetch a link_map_offsets structure using the architecture-specific
2350    `struct link_map_offsets' fetcher.  */
2351 
2352 static struct link_map_offsets *
2353 svr4_fetch_link_map_offsets (void)
2354 {
2355   struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2356 
2357   gdb_assert (ops->fetch_link_map_offsets);
2358   return ops->fetch_link_map_offsets ();
2359 }
2360 
2361 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise.  */
2362 
2363 static int
2364 svr4_have_link_map_offsets (void)
2365 {
2366   struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2367 
2368   return (ops->fetch_link_map_offsets != NULL);
2369 }
2370 
2371 
2372 /* Most OSes that have SVR4-style ELF dynamic libraries define a
2373    `struct r_debug' and a `struct link_map' that are binary compatible
2374    with the original SVR4 implementation.  */
2375 
2376 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2377    for an ILP32 SVR4 system.  */
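/* The offsets below assume a `struct r_debug' that starts with a 4-byte
   r_version followed by 4-byte r_map and r_brk pointers, and a
   `struct link_map' laid out roughly as follows (an illustrative sketch
   only; trailing members and extensions such as r_ldsomap vary by system):

     struct link_map
       {
	 Elf32_Addr l_addr;
	 char *l_name;
	 Elf32_Dyn *l_ld;
	 struct link_map *l_next, *l_prev;
       };  */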
2378 
2379 struct link_map_offsets *
2380 svr4_ilp32_fetch_link_map_offsets (void)
2381 {
2382   static struct link_map_offsets lmo;
2383   static struct link_map_offsets *lmp = NULL;
2384 
2385   if (lmp == NULL)
2386     {
2387       lmp = &lmo;
2388 
2389       lmo.r_version_offset = 0;
2390       lmo.r_version_size = 4;
2391       lmo.r_map_offset = 4;
2392       lmo.r_brk_offset = 8;
2393       lmo.r_ldsomap_offset = 20;
2394 
2395       /* Everything we need is in the first 20 bytes.  */
2396       lmo.link_map_size = 20;
2397       lmo.l_addr_offset = 0;
2398       lmo.l_name_offset = 4;
2399       lmo.l_ld_offset = 8;
2400       lmo.l_next_offset = 12;
2401       lmo.l_prev_offset = 16;
2402     }
2403 
2404   return lmp;
2405 }
2406 
2407 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2408    for an LP64 SVR4 system.  */
2409 
2410 struct link_map_offsets *
2411 svr4_lp64_fetch_link_map_offsets (void)
2412 {
2413   static struct link_map_offsets lmo;
2414   static struct link_map_offsets *lmp = NULL;
2415 
2416   if (lmp == NULL)
2417     {
2418       lmp = &lmo;
2419 
2420       lmo.r_version_offset = 0;
2421       lmo.r_version_size = 4;
2422       lmo.r_map_offset = 8;
2423       lmo.r_brk_offset = 16;
2424       lmo.r_ldsomap_offset = 40;
2425 
2426       /* Everything we need is in the first 40 bytes.  */
2427       lmo.link_map_size = 40;
2428       lmo.l_addr_offset = 0;
2429       lmo.l_name_offset = 8;
2430       lmo.l_ld_offset = 16;
2431       lmo.l_next_offset = 24;
2432       lmo.l_prev_offset = 32;
2433     }
2434 
2435   return lmp;
2436 }
2437 
2438 
2439 struct target_so_ops svr4_so_ops;
2440 
2441 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic.  Those DSOs
2442    have a different rule for symbol lookup: the lookup begins here in the
2443    DSO, not in the main executable.  */
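/* For example, a shared object built with something like
   `gcc -shared -Wl,-Bsymbolic' typically carries a DT_SYMBOLIC entry in its
   dynamic section, which is what the scan_dyntag check below looks for.  */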
2444 
2445 static struct symbol *
2446 elf_lookup_lib_symbol (const struct objfile *objfile,
2447 		       const char *name,
2448 		       const domain_enum domain)
2449 {
2450   bfd *abfd;
2451 
2452   if (objfile == symfile_objfile)
2453     abfd = exec_bfd;
2454   else
2455     {
2456       /* OBJFILE should have been passed as the non-debug one.  */
2457       gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
2458 
2459       abfd = objfile->obfd;
2460     }
2461 
2462   if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
2463     return NULL;
2464 
2465   return lookup_global_symbol_from_objfile (objfile, name, domain);
2466 }
2467 
2468 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
2469 
2470 void
2471 _initialize_svr4_solib (void)
2472 {
2473   solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
2474   solib_svr4_pspace_data
2475     = register_program_space_data_with_cleanup (svr4_pspace_data_cleanup);
2476 
2477   svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
2478   svr4_so_ops.free_so = svr4_free_so;
2479   svr4_so_ops.clear_solib = svr4_clear_solib;
2480   svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
2481   svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
2482   svr4_so_ops.current_sos = svr4_current_sos;
2483   svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
2484   svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
2485   svr4_so_ops.bfd_open = solib_bfd_open;
2486   svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
2487   svr4_so_ops.same = svr4_same;
2488   svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
2489 }
2490