1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "regcache.h"
34 #include "gdbthread.h"
35 #include "observer.h"
36
37 #include "gdb_assert.h"
38
39 #include "solist.h"
40 #include "solib.h"
41 #include "solib-svr4.h"
42
43 #include "bfd-target.h"
44 #include "elf-bfd.h"
45 #include "exec.h"
46 #include "auxv.h"
47 #include "exceptions.h"
48 #include "gdb_bfd.h"
49
50 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
51 static int svr4_have_link_map_offsets (void);
52 static void svr4_relocate_main_executable (void);
53
54 /* Link map info to include in an allocated so_list entry. */
55
56 struct lm_info
57 {
58 /* Amount by which addresses in the binary should be relocated to
59 match the inferior. The direct inferior value is L_ADDR_INFERIOR.
60 When prelinking is involved and the prelink base address changes,
61 we may need a different offset - the recomputed offset is in L_ADDR.
62 It is commonly the same value. It is cached as we want to warn about
63 the difference and compute it only once. L_ADDR is valid
64 iff L_ADDR_P. */
65 CORE_ADDR l_addr, l_addr_inferior;
66 unsigned int l_addr_p : 1;
67
68 /* The target location of lm. */
69 CORE_ADDR lm_addr;
70
71 /* Values read in from inferior's fields of the same name. */
72 CORE_ADDR l_ld, l_next, l_prev, l_name;
73 };
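/* Illustrative note on the fields above (the numbers are invented): an
   address A taken from the on-disk binary corresponds to inferior address
   A + l_addr once the object is loaded.  For a library linked at VMA 0 and
   mapped at 0x7f0000000000, l_addr_inferior would be 0x7f0000000000;
   l_addr normally equals it unless lm_addr_check recomputes a different
   displacement because of prelinking.  */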
74
75 /* On SVR4 systems, a list of symbols in the dynamic linker where
76 GDB can try to place a breakpoint to monitor shared library
77 events.
78
79 If none of these symbols are found, or other errors occur, then
80 SVR4 systems will fall back to using a symbol as the "startup
81 mapping complete" breakpoint address. */
82
83 static const char * const solib_break_names[] =
84 {
85 "r_debug_state",
86 "_r_debug_state",
87 "_dl_debug_state",
88 "rtld_db_dlactivity",
89 "__dl_rtld_db_dlactivity",
90 "_rtld_debug_state",
91
92 NULL
93 };
94
95 static const char * const bkpt_names[] =
96 {
97 "_start",
98 "__start",
99 "main",
100 NULL
101 };
102
103 static const char * const main_name_list[] =
104 {
105 "main_$main",
106 NULL
107 };
108
109 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
110 the same shared library. */
111
112 static int
113 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
114 {
115 if (strcmp (gdb_so_name, inferior_so_name) == 0)
116 return 1;
117
118 /* On Solaris, when starting the inferior we think that the dynamic linker
119 is /usr/lib/ld.so.1, but later on the table of loaded shared libraries
120 contains /lib/ld.so.1. Sometimes one file is a link to the other, and
121 sometimes they simply have identical content without being linked to
122 each other. We don't restrict this check to Solaris, but the chances
123 of running into this situation elsewhere are very low. */
124 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
125 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
126 return 1;
127
128 /* Similarly, we observed the same issue with sparc64, but with
129 different locations. */
130 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
131 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
132 return 1;
133
134 return 0;
135 }
136
137 static int
138 svr4_same (struct so_list *gdb, struct so_list *inferior)
139 {
140 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
141 }
142
143 static struct lm_info *
144 lm_info_read (CORE_ADDR lm_addr)
145 {
146 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
147 gdb_byte *lm;
148 struct lm_info *lm_info;
149 struct cleanup *back_to;
150
151 lm = xmalloc (lmo->link_map_size);
152 back_to = make_cleanup (xfree, lm);
153
154 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
155 {
156 warning (_("Error reading shared library list entry at %s"),
157 paddress (target_gdbarch (), lm_addr));
158 lm_info = NULL;
159 }
160 else
161 {
162 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
163
164 lm_info = xzalloc (sizeof (*lm_info));
165 lm_info->lm_addr = lm_addr;
166
167 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
168 ptr_type);
169 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
170 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
171 ptr_type);
172 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
173 ptr_type);
174 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
175 ptr_type);
176 }
177
178 do_cleanups (back_to);
179
180 return lm_info;
181 }
182
183 static int
184 has_lm_dynamic_from_link_map (void)
185 {
186 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
187
188 return lmo->l_ld_offset >= 0;
189 }
190
191 static CORE_ADDR
192 lm_addr_check (struct so_list *so, bfd *abfd)
193 {
194 if (!so->lm_info->l_addr_p)
195 {
196 struct bfd_section *dyninfo_sect;
197 CORE_ADDR l_addr, l_dynaddr, dynaddr;
198
199 l_addr = so->lm_info->l_addr_inferior;
200
201 if (! abfd || ! has_lm_dynamic_from_link_map ())
202 goto set_addr;
203
204 l_dynaddr = so->lm_info->l_ld;
205
206 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
207 if (dyninfo_sect == NULL)
208 goto set_addr;
209
210 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
211
212 if (dynaddr + l_addr != l_dynaddr)
213 {
214 CORE_ADDR align = 0x1000;
215 CORE_ADDR minpagesize = align;
216
217 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
218 {
219 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
220 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
221 int i;
222
223 align = 1;
224
225 for (i = 0; i < ehdr->e_phnum; i++)
226 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
227 align = phdr[i].p_align;
228
229 minpagesize = get_elf_backend_data (abfd)->minpagesize;
230 }
231
232 /* Turn it into a mask. */
233 align--;
234
235 /* If the changes match the alignment requirements, we
236 assume we're using a core file that was generated by the
237 same binary, just prelinked with a different base offset.
238 If it doesn't match, we may have a different binary, the
239 same binary with the dynamic table loaded at an unrelated
240 location, or anything, really. To avoid regressions,
241 don't adjust the base offset in the latter case, although
242 odds are that, if things really changed, debugging won't
243 quite work.
244
245 One might instead expect the condition
246 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
247 but the one below is relaxed for PPC. The PPC kernel supports
248 either 4k or 64k page sizes. To be prepared for 64k pages,
249 PPC ELF files are built using an alignment requirement of 64k.
250 However, when running on a kernel supporting 4k pages, the memory
251 mapping of the library may not actually happen on a 64k boundary!
252
253 (In the usual case where (l_addr & align) == 0, this check is
254 equivalent to the possibly expected check above.)
255
256 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
257
258 l_addr = l_dynaddr - dynaddr;
259
260 if ((l_addr & (minpagesize - 1)) == 0
261 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
262 {
263 if (info_verbose)
264 printf_unfiltered (_("Using PIC (Position Independent Code) "
265 "prelink displacement %s for \"%s\".\n"),
266 paddress (target_gdbarch (), l_addr),
267 so->so_name);
268 }
269 else
270 {
271 /* There is no way to verify the library file matches. prelink
272 can during prelinking of an unprelinked file (or unprelinking
273 of a prelinked file) shift the DYNAMIC segment by arbitrary
274 offset without any page size alignment. There is no way to
275 find out the ELF header and/or Program Headers for a limited
276 verification if it they match. One could do a verification
277 of the DYNAMIC segment. Still the found address is the best
278 one GDB could find. */
279
280 warning (_(".dynamic section for \"%s\" "
281 "is not at the expected address "
282 "(wrong library or version mismatch?)"), so->so_name);
283 }
284 }
285
286 set_addr:
287 so->lm_info->l_addr = l_addr;
288 so->lm_info->l_addr_p = 1;
289 }
290
291 return so->lm_info->l_addr;
292 }
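/* Worked example for the displacement check in lm_addr_check (the
   addresses are invented): if the on-disk copy of a library has its
   .dynamic section at VMA 0x10000 (DYNADDR) and the inferior's link map
   reports l_ld == 0x7f0000010000 (L_DYNADDR), then l_addr becomes
   0x7f0000000000.  With a minimum page size of 0x1000 its low bits are
   zero, so the value is reported as a prelink displacement.  Had l_ld
   been 0x7f0000010123 instead, l_addr would be 0x7f0000000123, the
   page-alignment test would fail and the "wrong library or version
   mismatch?" warning would be printed, although the computed l_addr is
   still used.  */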
293
294 /* Per pspace SVR4 specific data. */
295
296 struct svr4_info
297 {
298 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
299
300 /* Validity flag for debug_loader_offset. */
301 int debug_loader_offset_p;
302
303 /* Load address for the dynamic linker, inferred. */
304 CORE_ADDR debug_loader_offset;
305
306 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
307 char *debug_loader_name;
308
309 /* Load map address for the main executable. */
310 CORE_ADDR main_lm_addr;
311
312 CORE_ADDR interp_text_sect_low;
313 CORE_ADDR interp_text_sect_high;
314 CORE_ADDR interp_plt_sect_low;
315 CORE_ADDR interp_plt_sect_high;
316 };
317
318 /* Per-program-space data key. */
319 static const struct program_space_data *solib_svr4_pspace_data;
320
321 static void
322 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
323 {
324 struct svr4_info *info;
325
326 info = program_space_data (pspace, solib_svr4_pspace_data);
327 xfree (info);
328 }
329
330 /* Get the current svr4 data. If none is found yet, add it now. This
331 function always returns a valid object. */
332
333 static struct svr4_info *
334 get_svr4_info (void)
335 {
336 struct svr4_info *info;
337
338 info = program_space_data (current_program_space, solib_svr4_pspace_data);
339 if (info != NULL)
340 return info;
341
342 info = XZALLOC (struct svr4_info);
343 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
344 return info;
345 }
346
347 /* Local function prototypes */
348
349 static int match_main (const char *);
350
351 /* Read program header TYPE from inferior memory. The header is found
352 by scanning the OS auxiliary vector.
353
354 If TYPE == -1, return the program headers instead of the contents of
355 one program header.
356
357 Return a pointer to allocated memory holding the program header contents,
358 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
359 size of those contents is returned in *P_SECT_SIZE. Likewise, the target
360 architecture size (32-bit or 64-bit) is returned in *P_ARCH_SIZE. */
361
362 static gdb_byte *
363 read_program_header (int type, int *p_sect_size, int *p_arch_size)
364 {
365 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
366 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
367 int arch_size, sect_size;
368 CORE_ADDR sect_addr;
369 gdb_byte *buf;
370 int pt_phdr_p = 0;
371
372 /* Get required auxv elements from target. */
373 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
374 return 0;
375 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
376 return 0;
377 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
378 return 0;
379 if (!at_phdr || !at_phnum)
380 return 0;
381
382 /* Determine ELF architecture type. */
383 if (at_phent == sizeof (Elf32_External_Phdr))
384 arch_size = 32;
385 else if (at_phent == sizeof (Elf64_External_Phdr))
386 arch_size = 64;
387 else
388 return 0;
389
390 /* Find the requested segment. */
391 if (type == -1)
392 {
393 sect_addr = at_phdr;
394 sect_size = at_phent * at_phnum;
395 }
396 else if (arch_size == 32)
397 {
398 Elf32_External_Phdr phdr;
399 int i;
400
401 /* Search for requested PHDR. */
402 for (i = 0; i < at_phnum; i++)
403 {
404 int p_type;
405
406 if (target_read_memory (at_phdr + i * sizeof (phdr),
407 (gdb_byte *)&phdr, sizeof (phdr)))
408 return 0;
409
410 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
411 4, byte_order);
412
413 if (p_type == PT_PHDR)
414 {
415 pt_phdr_p = 1;
416 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
417 4, byte_order);
418 }
419
420 if (p_type == type)
421 break;
422 }
423
424 if (i == at_phnum)
425 return 0;
426
427 /* Retrieve address and size. */
428 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
429 4, byte_order);
430 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
431 4, byte_order);
432 }
433 else
434 {
435 Elf64_External_Phdr phdr;
436 int i;
437
438 /* Search for requested PHDR. */
439 for (i = 0; i < at_phnum; i++)
440 {
441 int p_type;
442
443 if (target_read_memory (at_phdr + i * sizeof (phdr),
444 (gdb_byte *)&phdr, sizeof (phdr)))
445 return 0;
446
447 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
448 4, byte_order);
449
450 if (p_type == PT_PHDR)
451 {
452 pt_phdr_p = 1;
453 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
454 8, byte_order);
455 }
456
457 if (p_type == type)
458 break;
459 }
460
461 if (i == at_phnum)
462 return 0;
463
464 /* Retrieve address and size. */
465 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
466 8, byte_order);
467 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
468 8, byte_order);
469 }
470
471 /* PT_PHDR is optional, but we really need it
472 for PIE to make this work in general. */
473
474 if (pt_phdr_p)
475 {
476 /* AT_PHDR is the real address in memory; PT_PHDR is what the program header
477 says it is. The relocation offset is the difference between the two. */
478 sect_addr = sect_addr + (at_phdr - pt_phdr);
479 }
480
481 /* Read in requested program header. */
482 buf = xmalloc (sect_size);
483 if (target_read_memory (sect_addr, buf, sect_size))
484 {
485 xfree (buf);
486 return NULL;
487 }
488
489 if (p_arch_size)
490 *p_arch_size = arch_size;
491 if (p_sect_size)
492 *p_sect_size = sect_size;
493
494 return buf;
495 }
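/* Illustrative example of the PT_PHDR adjustment above (the numbers are
   made up): a PIE linked with its program headers at p_vaddr 0x40
   (PT_PHDR) but loaded at 0x555555554000 reports
   AT_PHDR == 0x555555554040 in the auxiliary vector.  The relocation
   offset is therefore AT_PHDR - PT_PHDR == 0x555555554000, and it is
   added to the p_vaddr of the requested program header before the
   header's contents are read from inferior memory.  */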
496
497
498 /* Return program interpreter string. */
499 static gdb_byte *
500 find_program_interpreter (void)
501 {
502 gdb_byte *buf = NULL;
503
504 /* If we have an exec_bfd, use its section table. */
505 if (exec_bfd
506 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
507 {
508 struct bfd_section *interp_sect;
509
510 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
511 if (interp_sect != NULL)
512 {
513 int sect_size = bfd_section_size (exec_bfd, interp_sect);
514
515 buf = xmalloc (sect_size);
516 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
517 }
518 }
519
520 /* If we didn't find it, use the target auxiliary vector. */
521 if (!buf)
522 buf = read_program_header (PT_INTERP, NULL, NULL);
523
524 return buf;
525 }
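/* For reference, the .interp section (or the PT_INTERP segment read via
   the auxiliary vector) contains the NUL-terminated path of the program
   interpreter, e.g. "/lib64/ld-linux-x86-64.so.2" on x86-64 GNU/Linux or
   "/usr/lib/ld.so.1" on Solaris; the exact string is system-dependent.  */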
526
527
528 /* Scan for DYNTAG in the .dynamic section of ABFD. If DYNTAG is found, 1 is
529 returned and the corresponding PTR is set. */
530
531 static int
532 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
533 {
534 int arch_size, step, sect_size;
535 long dyn_tag;
536 CORE_ADDR dyn_ptr, dyn_addr;
537 gdb_byte *bufend, *bufstart, *buf;
538 Elf32_External_Dyn *x_dynp_32;
539 Elf64_External_Dyn *x_dynp_64;
540 struct bfd_section *sect;
541 struct target_section *target_section;
542
543 if (abfd == NULL)
544 return 0;
545
546 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
547 return 0;
548
549 arch_size = bfd_get_arch_size (abfd);
550 if (arch_size == -1)
551 return 0;
552
553 /* Find the start address of the .dynamic section. */
554 sect = bfd_get_section_by_name (abfd, ".dynamic");
555 if (sect == NULL)
556 return 0;
557
558 for (target_section = current_target_sections->sections;
559 target_section < current_target_sections->sections_end;
560 target_section++)
561 if (sect == target_section->the_bfd_section)
562 break;
563 if (target_section < current_target_sections->sections_end)
564 dyn_addr = target_section->addr;
565 else
566 {
567 /* ABFD may come from OBJFILE acting only as a symbol file without being
568 loaded into the target (see add_symbol_file_command). In such a case,
569 fall back to the file VMA address, without the possibility of
570 having the section relocated to its actual in-memory address. */
571
572 dyn_addr = bfd_section_vma (abfd, sect);
573 }
574
575 /* Read in .dynamic from the BFD. We will get the actual value
576 from memory later. */
577 sect_size = bfd_section_size (abfd, sect);
578 buf = bufstart = alloca (sect_size);
579 if (!bfd_get_section_contents (abfd, sect,
580 buf, 0, sect_size))
581 return 0;
582
583 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
584 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
585 : sizeof (Elf64_External_Dyn);
586 for (bufend = buf + sect_size;
587 buf < bufend;
588 buf += step)
589 {
590 if (arch_size == 32)
591 {
592 x_dynp_32 = (Elf32_External_Dyn *) buf;
593 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
594 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
595 }
596 else
597 {
598 x_dynp_64 = (Elf64_External_Dyn *) buf;
599 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
600 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
601 }
602 if (dyn_tag == DT_NULL)
603 return 0;
604 if (dyn_tag == dyntag)
605 {
606 /* If requested, try to read the runtime value of this .dynamic
607 entry. */
608 if (ptr)
609 {
610 struct type *ptr_type;
611 gdb_byte ptr_buf[8];
612 CORE_ADDR ptr_addr;
613
614 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
615 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
616 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
617 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
618 *ptr = dyn_ptr;
619 }
620 return 1;
621 }
622 }
623
624 return 0;
625 }
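/* Background note for the runtime read above: each .dynamic entry is a
   tag/value pair (d_tag followed by d_un.d_val/d_un.d_ptr).  In the
   on-disk image the value of the DT_DEBUG entry is normally zero; the
   dynamic linker stores the address of its `struct r_debug' there at run
   time.  That is why, once the entry is located in the BFD, its value is
   re-read from inferior memory at DYN_ADDR + (entry offset) +
   ARCH_SIZE / 8 (skipping the d_tag word) instead of trusting the file
   contents.  */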
626
627 /* Scan for DYNTAG in .dynamic section of the target's main executable,
628 found by consulting the OS auxiliary vector. If DYNTAG is found, 1 is
629 returned and the corresponding PTR is set. */
630
631 static int
632 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
633 {
634 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
635 int sect_size, arch_size, step;
636 long dyn_tag;
637 CORE_ADDR dyn_ptr;
638 gdb_byte *bufend, *bufstart, *buf;
639
640 /* Read in .dynamic section. */
641 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
642 if (!buf)
643 return 0;
644
645 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
646 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
647 : sizeof (Elf64_External_Dyn);
648 for (bufend = buf + sect_size;
649 buf < bufend;
650 buf += step)
651 {
652 if (arch_size == 32)
653 {
654 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
655
656 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
657 4, byte_order);
658 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
659 4, byte_order);
660 }
661 else
662 {
663 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
664
665 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
666 8, byte_order);
667 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
668 8, byte_order);
669 }
670 if (dyn_tag == DT_NULL)
671 break;
672
673 if (dyn_tag == dyntag)
674 {
675 if (ptr)
676 *ptr = dyn_ptr;
677
678 xfree (bufstart);
679 return 1;
680 }
681 }
682
683 xfree (bufstart);
684 return 0;
685 }
686
687 /* Locate the base address of dynamic linker structs for SVR4 elf
688 targets.
689
690 For SVR4 elf targets the address of the dynamic linker's runtime
691 structure is contained within the dynamic info section in the
692 executable file. The dynamic section is also mapped into the
693 inferior address space. Because the runtime loader fills in the
694 real address before starting the inferior, we have to read in the
695 dynamic info section from the inferior address space.
696 If there are any errors while trying to find the address, we
697 silently return 0, otherwise the found address is returned. */
698
699 static CORE_ADDR
700 elf_locate_base (void)
701 {
702 struct minimal_symbol *msymbol;
703 CORE_ADDR dyn_ptr;
704
705 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
706 instead of DT_DEBUG, although they sometimes contain an unused
707 DT_DEBUG. */
708 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
709 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
710 {
711 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
712 gdb_byte *pbuf;
713 int pbuf_size = TYPE_LENGTH (ptr_type);
714
715 pbuf = alloca (pbuf_size);
716 /* DT_MIPS_RLD_MAP contains a pointer to the address
717 of the dynamic link structure. */
718 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
719 return 0;
720 return extract_typed_address (pbuf, ptr_type);
721 }
722
723 /* Find DT_DEBUG. */
724 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
725 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
726 return dyn_ptr;
727
728 /* This may be a static executable. Look for the symbol
729 conventionally named _r_debug, as a last resort. */
730 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
731 if (msymbol != NULL)
732 return SYMBOL_VALUE_ADDRESS (msymbol);
733
734 /* DT_DEBUG entry not found. */
735 return 0;
736 }
737
738 /* Locate the base address of dynamic linker structs.
739
740 For both the SunOS and SVR4 shared library implementations, if the
741 inferior executable has been linked dynamically, there is a single
742 address somewhere in the inferior's data space which is the key to
743 locating all of the dynamic linker's runtime structures. This
744 address is the value of the debug base symbol. The job of this
745 function is to find and return that address, or to return 0 if there
746 is no such address (the executable is statically linked for example).
747
748 For SunOS, the job is almost trivial, since the dynamic linker and
749 all of its structures are statically linked to the executable at
750 link time. Thus the symbol for the address we are looking for has
751 already been added to the minimal symbol table for the executable's
752 objfile at the time the symbol file's symbols were read, and all we
753 have to do is look it up there. Note that we explicitly do NOT want
754 to find the copies in the shared library.
755
756 The SVR4 version is a bit more complicated because the address
757 is contained somewhere in the dynamic info section. We have to go
758 to a lot more work to discover the address of the debug base symbol.
759 Because of this complexity, we cache the value we find and return that
760 value on subsequent invocations. Note there is no copy in the
761 executable symbol tables. */
762
763 static CORE_ADDR
764 locate_base (struct svr4_info *info)
765 {
766 /* Check to see if we have a currently valid address, and if so, avoid
767 doing all this work again and just return the cached address. If
768 we have no cached address, try to locate it in the dynamic info
769 section for ELF executables. There's no point in doing any of this
770 though if we don't have some link map offsets to work with. */
771
772 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
773 info->debug_base = elf_locate_base ();
774 return info->debug_base;
775 }
776
777 /* Find the first element in the inferior's dynamic link map, and
778 return its address in the inferior. Return zero if the address
779 could not be determined.
780
781 FIXME: Perhaps we should validate the info somehow, perhaps by
782 checking r_version for a known version number, or r_state for
783 RT_CONSISTENT. */
784
785 static CORE_ADDR
786 solib_svr4_r_map (struct svr4_info *info)
787 {
788 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
789 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
790 CORE_ADDR addr = 0;
791 volatile struct gdb_exception ex;
792
793 TRY_CATCH (ex, RETURN_MASK_ERROR)
794 {
795 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
796 ptr_type);
797 }
798 exception_print (gdb_stderr, ex);
799 return addr;
800 }
801
802 /* Find r_brk from the inferior's debug base. */
803
804 static CORE_ADDR
805 solib_svr4_r_brk (struct svr4_info *info)
806 {
807 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
808 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
809
810 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
811 ptr_type);
812 }
813
814 /* Find the link map for the dynamic linker (if it is not in the
815 normal list of loaded shared objects). */
816
817 static CORE_ADDR
818 solib_svr4_r_ldsomap (struct svr4_info *info)
819 {
820 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
821 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
822 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
823 ULONGEST version;
824
825 /* Check version, and return zero if `struct r_debug' doesn't have
826 the r_ldsomap member. */
827 version
828 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
829 lmo->r_version_size, byte_order);
830 if (version < 2 || lmo->r_ldsomap_offset == -1)
831 return 0;
832
833 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
834 ptr_type);
835 }
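/* Sketch of the rendezvous data read by the three accessors above; the
   actual offsets come from svr4_fetch_link_map_offsets, so treat the
   layout as approximate:

     struct r_debug
     {
       int r_version;               -> r_version_offset / r_version_size
       struct link_map *r_map;      -> r_map_offset
       ElfW(Addr) r_brk;            -> r_brk_offset
       ...
       struct link_map *r_ldsomap;  -> r_ldsomap_offset; an extension
                                       (e.g. on Solaris) read only when
                                       r_version >= 2 and the offset is
                                       not -1
     };

   DEBUG_BASE is the inferior address of this structure, and r_map heads
   the doubly linked list of `struct link_map' entries (l_addr, l_name,
   l_ld, l_next, l_prev) that lm_info_read decodes.  */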
836
837 /* On Solaris systems with some versions of the dynamic linker,
838 ld.so's l_name pointer points to the SONAME in the string table
839 rather than into writable memory. So that GDB can find shared
840 libraries when loading a core file generated by gcore, ensure that
841 memory areas containing the l_name string are saved in the core
842 file. */
843
844 static int
845 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
846 {
847 struct svr4_info *info;
848 CORE_ADDR ldsomap;
849 struct so_list *new;
850 struct cleanup *old_chain;
851 CORE_ADDR name_lm;
852
853 info = get_svr4_info ();
854
855 info->debug_base = 0;
856 locate_base (info);
857 if (!info->debug_base)
858 return 0;
859
860 ldsomap = solib_svr4_r_ldsomap (info);
861 if (!ldsomap)
862 return 0;
863
864 new = XZALLOC (struct so_list);
865 old_chain = make_cleanup (xfree, new);
866 new->lm_info = lm_info_read (ldsomap);
867 make_cleanup (xfree, new->lm_info);
868 name_lm = new->lm_info ? new->lm_info->l_name : 0;
869 do_cleanups (old_chain);
870
871 return (name_lm >= vaddr && name_lm < vaddr + size);
872 }
873
874 /* Implement the "open_symbol_file_object" target_so_ops method.
875
876 If no open symbol file, attempt to locate and open the main symbol
877 file. On SVR4 systems, this is the first link map entry. If its
878 name is here, we can open it. Useful when attaching to a process
879 without first loading its symbol file. */
880
881 static int
882 open_symbol_file_object (void *from_ttyp)
883 {
884 CORE_ADDR lm, l_name;
885 char *filename;
886 int errcode;
887 int from_tty = *(int *)from_ttyp;
888 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
889 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
890 int l_name_size = TYPE_LENGTH (ptr_type);
891 gdb_byte *l_name_buf = xmalloc (l_name_size);
892 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
893 struct svr4_info *info = get_svr4_info ();
894
895 if (symfile_objfile)
896 if (!query (_("Attempt to reload symbols from process? ")))
897 {
898 do_cleanups (cleanups);
899 return 0;
900 }
901
902 /* Always locate the debug struct, in case it has moved. */
903 info->debug_base = 0;
904 if (locate_base (info) == 0)
905 {
906 do_cleanups (cleanups);
907 return 0; /* failed somehow... */
908 }
909
910 /* First link map member should be the executable. */
911 lm = solib_svr4_r_map (info);
912 if (lm == 0)
913 {
914 do_cleanups (cleanups);
915 return 0; /* failed somehow... */
916 }
917
918 /* Read address of name from target memory to GDB. */
919 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
920
921 /* Convert the address to host format. */
922 l_name = extract_typed_address (l_name_buf, ptr_type);
923
924 if (l_name == 0)
925 {
926 do_cleanups (cleanups);
927 return 0; /* No filename. */
928 }
929
930 /* Now fetch the filename from target memory. */
931 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
932 make_cleanup (xfree, filename);
933
934 if (errcode)
935 {
936 warning (_("failed to read exec filename from attached file: %s"),
937 safe_strerror (errcode));
938 do_cleanups (cleanups);
939 return 0;
940 }
941
942 /* Have a pathname: read the symbol file. */
943 symbol_file_add_main (filename, from_tty);
944
945 do_cleanups (cleanups);
946 return 1;
947 }
948
949 /* Data exchange structure for the XML parser as returned by
950 svr4_current_sos_via_xfer_libraries. */
951
952 struct svr4_library_list
953 {
954 struct so_list *head, **tailp;
955
956 /* Inferior address of struct link_map used for the main executable. It is
957 NULL if not known. */
958 CORE_ADDR main_lm;
959 };
960
961 /* Implementation for target_so_ops.free_so. */
962
963 static void
964 svr4_free_so (struct so_list *so)
965 {
966 xfree (so->lm_info);
967 }
968
969 /* Free so_list built so far (called via cleanup). */
970
971 static void
972 svr4_free_library_list (void *p_list)
973 {
974 struct so_list *list = *(struct so_list **) p_list;
975
976 while (list != NULL)
977 {
978 struct so_list *next = list->next;
979
980 free_so (list);
981 list = next;
982 }
983 }
984
985 #ifdef HAVE_LIBEXPAT
986
987 #include "xml-support.h"
988
989 /* Handle the start of a <library> element. Note: new elements are added
990 at the tail of the list, keeping the list in order. */
991
992 static void
993 library_list_start_library (struct gdb_xml_parser *parser,
994 const struct gdb_xml_element *element,
995 void *user_data, VEC(gdb_xml_value_s) *attributes)
996 {
997 struct svr4_library_list *list = user_data;
998 const char *name = xml_find_attribute (attributes, "name")->value;
999 ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value;
1000 ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value;
1001 ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value;
1002 struct so_list *new_elem;
1003
1004 new_elem = XZALLOC (struct so_list);
1005 new_elem->lm_info = XZALLOC (struct lm_info);
1006 new_elem->lm_info->lm_addr = *lmp;
1007 new_elem->lm_info->l_addr_inferior = *l_addrp;
1008 new_elem->lm_info->l_ld = *l_ldp;
1009
1010 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1011 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1012 strcpy (new_elem->so_original_name, new_elem->so_name);
1013
1014 *list->tailp = new_elem;
1015 list->tailp = &new_elem->next;
1016 }
1017
1018 /* Handle the start of a <library-list-svr4> element. */
1019
1020 static void
1021 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1022 const struct gdb_xml_element *element,
1023 void *user_data, VEC(gdb_xml_value_s) *attributes)
1024 {
1025 struct svr4_library_list *list = user_data;
1026 const char *version = xml_find_attribute (attributes, "version")->value;
1027 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1028
1029 if (strcmp (version, "1.0") != 0)
1030 gdb_xml_error (parser,
1031 _("SVR4 Library list has unsupported version \"%s\""),
1032 version);
1033
1034 if (main_lm)
1035 list->main_lm = *(ULONGEST *) main_lm->value;
1036 }
1037
1038 /* The allowed elements and attributes for an XML library list.
1039 The root element is a <library-list>. */
1040
1041 static const struct gdb_xml_attribute svr4_library_attributes[] =
1042 {
1043 { "name", GDB_XML_AF_NONE, NULL, NULL },
1044 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1045 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1046 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1047 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1048 };
1049
1050 static const struct gdb_xml_element svr4_library_list_children[] =
1051 {
1052 {
1053 "library", svr4_library_attributes, NULL,
1054 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1055 library_list_start_library, NULL
1056 },
1057 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1058 };
1059
1060 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1061 {
1062 { "version", GDB_XML_AF_NONE, NULL, NULL },
1063 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1064 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1065 };
1066
1067 static const struct gdb_xml_element svr4_library_list_elements[] =
1068 {
1069 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1070 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1071 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1072 };
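/* An example of a document accepted by the parser defined above (the
   addresses are placeholders):

     <library-list-svr4 version="1.0" main-lm="0xdeadbee0">
       <library name="/lib/libfoo.so.1" lm="0xdeadbeef"
                l_addr="0x1000" l_ld="0x2000"/>
     </library-list-svr4>

   Each <library> element becomes one `struct so_list' with the lm,
   l_addr and l_ld values copied into its lm_info, and the optional
   main-lm attribute records the link map address of the main
   executable.  */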
1073
1074 /* Parse DOCUMENT, the reply of a qXfer:libraries:read packet, into *LIST.
1075
1076 Return 0 if the document could not be parsed; *LIST is not modified in
1077 that case. Return 1 if *LIST contains the library list; it may be
1078 empty, and the caller is responsible for freeing all its entries. */
1079
1080 static int
1081 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1082 {
1083 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1084 &list->head);
1085
1086 memset (list, 0, sizeof (*list));
1087 list->tailp = &list->head;
1088 if (gdb_xml_parse_quick (_("target library list"), "library-list.dtd",
1089 svr4_library_list_elements, document, list) == 0)
1090 {
1091 /* Parsed successfully, keep the result. */
1092 discard_cleanups (back_to);
1093 return 1;
1094 }
1095
1096 do_cleanups (back_to);
1097 return 0;
1098 }
1099
1100 /* Attempt to get so_list from target via qXfer:libraries:read packet.
1101
1102 Return 0 if the packet is not supported; *LIST is not modified in that
1103 case. Return 1 if *LIST contains the library list; it may be
1104 empty, and the caller is responsible for freeing all its entries. */
1105
1106 static int
1107 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1108 {
1109 char *svr4_library_document;
1110 int result;
1111 struct cleanup *back_to;
1112
1113 /* Fetch the list of shared libraries. */
1114 svr4_library_document = target_read_stralloc (&current_target,
1115 TARGET_OBJECT_LIBRARIES_SVR4,
1116 NULL);
1117 if (svr4_library_document == NULL)
1118 return 0;
1119
1120 back_to = make_cleanup (xfree, svr4_library_document);
1121 result = svr4_parse_libraries (svr4_library_document, list);
1122 do_cleanups (back_to);
1123
1124 return result;
1125 }
1126
1127 #else
1128
1129 static int
1130 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1131 {
1132 return 0;
1133 }
1134
1135 #endif
1136
1137 /* If no shared library information is available from the dynamic
1138 linker, build a fallback list from other sources. */
1139
1140 static struct so_list *
1141 svr4_default_sos (void)
1142 {
1143 struct svr4_info *info = get_svr4_info ();
1144 struct so_list *new;
1145
1146 if (!info->debug_loader_offset_p)
1147 return NULL;
1148
1149 new = XZALLOC (struct so_list);
1150
1151 new->lm_info = xzalloc (sizeof (struct lm_info));
1152
1153 /* Nothing will ever check the other fields if we set l_addr_p. */
1154 new->lm_info->l_addr = info->debug_loader_offset;
1155 new->lm_info->l_addr_p = 1;
1156
1157 strncpy (new->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1158 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1159 strcpy (new->so_original_name, new->so_name);
1160
1161 return new;
1162 }
1163
1164 /* Read the whole inferior libraries chain starting at address LM. Add the
1165 entries to the tail referenced by LINK_PTR_PTR. Ignore the first entry if
1166 IGNORE_FIRST and set global MAIN_LM_ADDR according to it. */
1167
1168 static void
1169 svr4_read_so_list (CORE_ADDR lm, struct so_list ***link_ptr_ptr,
1170 int ignore_first)
1171 {
1172 CORE_ADDR prev_lm = 0, next_lm;
1173
1174 for (; lm != 0; prev_lm = lm, lm = next_lm)
1175 {
1176 struct so_list *new;
1177 struct cleanup *old_chain;
1178 int errcode;
1179 char *buffer;
1180
1181 new = XZALLOC (struct so_list);
1182 old_chain = make_cleanup_free_so (new);
1183
1184 new->lm_info = lm_info_read (lm);
1185 if (new->lm_info == NULL)
1186 {
1187 do_cleanups (old_chain);
1188 break;
1189 }
1190
1191 next_lm = new->lm_info->l_next;
1192
1193 if (new->lm_info->l_prev != prev_lm)
1194 {
1195 warning (_("Corrupted shared library list: %s != %s"),
1196 paddress (target_gdbarch (), prev_lm),
1197 paddress (target_gdbarch (), new->lm_info->l_prev));
1198 do_cleanups (old_chain);
1199 break;
1200 }
1201
1202 /* For SVR4 versions, the first entry in the link map is for the
1203 inferior executable, so we must ignore it. For some versions of
1204 SVR4, it has no name. For others (Solaris 2.3 for example), it
1205 does have a name, so we can no longer use a missing name to
1206 decide when to ignore it. */
1207 if (ignore_first && new->lm_info->l_prev == 0)
1208 {
1209 struct svr4_info *info = get_svr4_info ();
1210
1211 info->main_lm_addr = new->lm_info->lm_addr;
1212 do_cleanups (old_chain);
1213 continue;
1214 }
1215
1216 /* Extract this shared object's name. */
1217 target_read_string (new->lm_info->l_name, &buffer,
1218 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1219 if (errcode != 0)
1220 {
1221 warning (_("Can't read pathname for load map: %s."),
1222 safe_strerror (errcode));
1223 do_cleanups (old_chain);
1224 continue;
1225 }
1226
1227 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1228 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1229 strcpy (new->so_original_name, new->so_name);
1230 xfree (buffer);
1231
1232 /* If this entry has no name, or its name matches the name
1233 for the main executable, don't include it in the list. */
1234 if (! new->so_name[0] || match_main (new->so_name))
1235 {
1236 do_cleanups (old_chain);
1237 continue;
1238 }
1239
1240 discard_cleanups (old_chain);
1241 new->next = 0;
1242 **link_ptr_ptr = new;
1243 *link_ptr_ptr = &new->next;
1244 }
1245 }
1246
1247 /* Implement the "current_sos" target_so_ops method. */
1248
1249 static struct so_list *
1250 svr4_current_sos (void)
1251 {
1252 CORE_ADDR lm;
1253 struct so_list *head = NULL;
1254 struct so_list **link_ptr = &head;
1255 struct svr4_info *info;
1256 struct cleanup *back_to;
1257 int ignore_first;
1258 struct svr4_library_list library_list;
1259
1260 /* Fall back to manual examination of the target if the packet is not
1261 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1262 tests a case where gdbserver cannot find the shared libraries list while
1263 GDB itself is able to find it via SYMFILE_OBJFILE.
1264
1265 Unfortunately statically linked inferiors will also fall back through this
1266 suboptimal code path. */
1267
1268 if (svr4_current_sos_via_xfer_libraries (&library_list))
1269 {
1270 if (library_list.main_lm)
1271 {
1272 info = get_svr4_info ();
1273 info->main_lm_addr = library_list.main_lm;
1274 }
1275
1276 return library_list.head ? library_list.head : svr4_default_sos ();
1277 }
1278
1279 info = get_svr4_info ();
1280
1281 /* Always locate the debug struct, in case it has moved. */
1282 info->debug_base = 0;
1283 locate_base (info);
1284
1285 /* If we can't find the dynamic linker's base structure, this
1286 must not be a dynamically linked executable. Hmm. */
1287 if (! info->debug_base)
1288 return svr4_default_sos ();
1289
1290 /* Assume that everything is a library if the dynamic loader was loaded
1291 late by a static executable. */
1292 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1293 ignore_first = 0;
1294 else
1295 ignore_first = 1;
1296
1297 back_to = make_cleanup (svr4_free_library_list, &head);
1298
1299 /* Walk the inferior's link map list, and build our list of
1300 `struct so_list' nodes. */
1301 lm = solib_svr4_r_map (info);
1302 if (lm)
1303 svr4_read_so_list (lm, &link_ptr, ignore_first);
1304
1305 /* On Solaris, the dynamic linker is not in the normal list of
1306 shared objects, so make sure we pick it up too. Having
1307 symbol information for the dynamic linker is quite crucial
1308 for skipping dynamic linker resolver code. */
1309 lm = solib_svr4_r_ldsomap (info);
1310 if (lm)
1311 svr4_read_so_list (lm, &link_ptr, 0);
1312
1313 discard_cleanups (back_to);
1314
1315 if (head == NULL)
1316 return svr4_default_sos ();
1317
1318 return head;
1319 }
1320
1321 /* Get the address of the link_map for a given OBJFILE. */
1322
1323 CORE_ADDR
1324 svr4_fetch_objfile_link_map (struct objfile *objfile)
1325 {
1326 struct so_list *so;
1327 struct svr4_info *info = get_svr4_info ();
1328
1329 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1330 if (info->main_lm_addr == 0)
1331 solib_add (NULL, 0, &current_target, auto_solib_add);
1332
1333 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1334 if (objfile == symfile_objfile)
1335 return info->main_lm_addr;
1336
1337 /* The other link map addresses may be found by examining the list
1338 of shared libraries. */
1339 for (so = master_so_list (); so; so = so->next)
1340 if (so->objfile == objfile)
1341 return so->lm_info->lm_addr;
1342
1343 /* Not found! */
1344 return 0;
1345 }
1346
1347 /* On some systems, the only way to recognize the link map entry for
1348 the main executable file is by looking at its name. Return
1349 non-zero iff SONAME matches one of the known main executable names. */
1350
1351 static int
1352 match_main (const char *soname)
1353 {
1354 const char * const *mainp;
1355
1356 for (mainp = main_name_list; *mainp != NULL; mainp++)
1357 {
1358 if (strcmp (soname, *mainp) == 0)
1359 return (1);
1360 }
1361
1362 return (0);
1363 }
1364
1365 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1366 SVR4 run time loader. */
1367
1368 int
1369 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1370 {
1371 struct svr4_info *info = get_svr4_info ();
1372
1373 return ((pc >= info->interp_text_sect_low
1374 && pc < info->interp_text_sect_high)
1375 || (pc >= info->interp_plt_sect_low
1376 && pc < info->interp_plt_sect_high)
1377 || in_plt_section (pc, NULL)
1378 || in_gnu_ifunc_stub (pc));
1379 }
1380
1381 /* Given an executable's ABFD and target, compute the entry-point
1382 address. */
1383
1384 static CORE_ADDR
1385 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1386 {
1387 CORE_ADDR addr;
1388
1389 /* KevinB wrote ... for most targets, the address returned by
1390 bfd_get_start_address() is the entry point for the start
1391 function. But, for some targets, bfd_get_start_address() returns
1392 the address of a function descriptor from which the entry point
1393 address may be extracted. This address is extracted by
1394 gdbarch_convert_from_func_ptr_addr(). The method
1395 gdbarch_convert_from_func_ptr_addr() is the merely the identify
1396 function for targets which don't use function descriptors. */
1397 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1398 bfd_get_start_address (abfd),
1399 targ);
1400 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1401 }
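/* Example of the function-descriptor case mentioned above: on 64-bit
   PowerPC with the original (descriptor-based) ELF ABI,
   bfd_get_start_address returns the address of an .opd entry holding the
   real code address, and gdbarch_convert_from_func_ptr_addr reads that
   descriptor to obtain the actual entry point.  On most other targets the
   conversion is simply the identity function.  */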
1402
1403 /* Helper function for gdb_bfd_lookup_symbol. */
1404
1405 static int
1406 cmp_name_and_sec_flags (asymbol *sym, void *data)
1407 {
1408 return (strcmp (sym->name, (const char *) data) == 0
1409 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
1410 }
1411 /* Arrange for dynamic linker to hit breakpoint.
1412
1413 Both the SunOS and the SVR4 dynamic linkers have, as part of their
1414 debugger interface, support for arranging for the inferior to hit
1415 a breakpoint after mapping in the shared libraries. This function
1416 enables that breakpoint.
1417
1418 For SunOS, there is a special flag location (in_debugger) which we
1419 set to 1. When the dynamic linker sees this flag set, it will set
1420 a breakpoint at a location known only to itself, after saving the
1421 original contents of that place and the breakpoint address itself,
1422 in its own internal structures. When we resume the inferior, it
1423 will eventually take a SIGTRAP when it runs into the breakpoint.
1424 We handle this (in a different place) by restoring the contents of
1425 the breakpointed location (which is only known after it stops),
1426 chasing around to locate the shared libraries that have been
1427 loaded, then resuming.
1428
1429 For SVR4, the debugger interface structure contains a member (r_brk)
1430 which is statically initialized at the time the shared library is
1431 built, to the offset of a function (_r_debug_state) which is guaran-
1432 teed to be called once before mapping in a library, and again when
1433 the mapping is complete. At the time we are examining this member,
1434 it contains only the unrelocated offset of the function, so we have
1435 to do our own relocation. Later, when the dynamic linker actually
1436 runs, it relocates r_brk to be the actual address of _r_debug_state().
1437
1438 The debugger interface structure also contains an enumeration which
1439 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1440 depending upon whether or not the library is being mapped or unmapped,
1441 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
1442
1443 static int
1444 enable_break (struct svr4_info *info, int from_tty)
1445 {
1446 struct minimal_symbol *msymbol;
1447 const char * const *bkpt_namep;
1448 asection *interp_sect;
1449 gdb_byte *interp_name;
1450 CORE_ADDR sym_addr;
1451
1452 info->interp_text_sect_low = info->interp_text_sect_high = 0;
1453 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
1454
1455 /* If we already have a shared library list in the target, and
1456 r_debug contains r_brk, set the breakpoint there - this should
1457 mean r_brk has already been relocated. Assume the dynamic linker
1458 is the object containing r_brk. */
1459
1460 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1461 sym_addr = 0;
1462 if (info->debug_base && solib_svr4_r_map (info) != 0)
1463 sym_addr = solib_svr4_r_brk (info);
1464
1465 if (sym_addr != 0)
1466 {
1467 struct obj_section *os;
1468
1469 sym_addr = gdbarch_addr_bits_remove
1470 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1471 sym_addr,
1472 &current_target));
1473
1474 /* On at least some versions of Solaris there's a dynamic relocation
1475 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
1476 we get control before the dynamic linker has self-relocated.
1477 Check if SYM_ADDR is in a known section, if it is assume we can
1478 trust its value. This is just a heuristic though, it could go away
1479 or be replaced if it's getting in the way.
1480
1481 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
1482 however it's spelled in your particular system) is ARM or Thumb.
1483 That knowledge is encoded in the address, if it's Thumb the low bit
1484 is 1. However, we've stripped that info above and it's not clear
1485 what all the consequences are of passing a non-addr_bits_remove'd
1486 address to create_solib_event_breakpoint. The call to
1487 find_pc_section verifies we know about the address and have some
1488 hope of computing the right kind of breakpoint to use (via
1489 symbol info). It does mean that GDB needs to be pointed at a
1490 non-stripped version of the dynamic linker in order to obtain
1491 information it already knows about. Sigh. */
1492
1493 os = find_pc_section (sym_addr);
1494 if (os != NULL)
1495 {
1496 /* Record the relocated start and end address of the dynamic linker
1497 text and plt section for svr4_in_dynsym_resolve_code. */
1498 bfd *tmp_bfd;
1499 CORE_ADDR load_addr;
1500
1501 tmp_bfd = os->objfile->obfd;
1502 load_addr = ANOFFSET (os->objfile->section_offsets,
1503 SECT_OFF_TEXT (os->objfile));
1504
1505 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1506 if (interp_sect)
1507 {
1508 info->interp_text_sect_low =
1509 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1510 info->interp_text_sect_high =
1511 info->interp_text_sect_low
1512 + bfd_section_size (tmp_bfd, interp_sect);
1513 }
1514 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1515 if (interp_sect)
1516 {
1517 info->interp_plt_sect_low =
1518 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1519 info->interp_plt_sect_high =
1520 info->interp_plt_sect_low
1521 + bfd_section_size (tmp_bfd, interp_sect);
1522 }
1523
1524 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1525 return 1;
1526 }
1527 }
1528
1529 /* Find the program interpreter; if not found, warn the user and drop
1530 into the old breakpoint at symbol code. */
1531 interp_name = find_program_interpreter ();
1532 if (interp_name)
1533 {
1534 CORE_ADDR load_addr = 0;
1535 int load_addr_found = 0;
1536 int loader_found_in_list = 0;
1537 struct so_list *so;
1538 bfd *tmp_bfd = NULL;
1539 struct target_ops *tmp_bfd_target;
1540 volatile struct gdb_exception ex;
1541
1542 sym_addr = 0;
1543
1544 /* Now we need to figure out where the dynamic linker was
1545 loaded so that we can load its symbols and place a breakpoint
1546 in the dynamic linker itself.
1547
1548 This address is stored on the stack. However, I've been unable
1549 to find any magic formula to find it for Solaris (appears to
1550 be trivial on GNU/Linux). Therefore, we have to try an alternate
1551 mechanism to find the dynamic linker's base address. */
1552
1553 TRY_CATCH (ex, RETURN_MASK_ALL)
1554 {
1555 tmp_bfd = solib_bfd_open (interp_name);
1556 }
1557 if (tmp_bfd == NULL)
1558 goto bkpt_at_symbol;
1559
1560 /* Now convert the TMP_BFD into a target. That way target, as
1561 well as BFD operations can be used. */
1562 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
1563 /* target_bfd_reopen acquired its own reference, so we can
1564 release ours now. */
1565 gdb_bfd_unref (tmp_bfd);
1566
1567 /* On a running target, we can get the dynamic linker's base
1568 address from the shared library table. */
1569 so = master_so_list ();
1570 while (so)
1571 {
1572 if (svr4_same_1 (interp_name, so->so_original_name))
1573 {
1574 load_addr_found = 1;
1575 loader_found_in_list = 1;
1576 load_addr = lm_addr_check (so, tmp_bfd);
1577 break;
1578 }
1579 so = so->next;
1580 }
1581
1582 /* If we were not able to find the base address of the loader
1583 from our so_list, then try using the AT_BASE auxiliary entry. */
1584 if (!load_addr_found)
1585 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
1586 {
1587 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
1588
1589 /* Ensure LOAD_ADDR has the proper sign in its possible upper bits so
1590 that `+ load_addr' wraps around within the CORE_ADDR width instead of
1591 creating invalid addresses like 0x101234567 for 32bit inferiors on 64bit
1592 GDB. */
1593
1594 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
1595 {
1596 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
1597 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
1598 tmp_bfd_target);
1599
1600 gdb_assert (load_addr < space_size);
1601
1602 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for a prelinked
1603 64bit ld.so with a 32bit executable; it should not happen. */
1604
1605 if (tmp_entry_point < space_size
1606 && tmp_entry_point + load_addr >= space_size)
1607 load_addr -= space_size;
1608 }
1609
1610 load_addr_found = 1;
1611 }
1612
1613 /* Otherwise we find the dynamic linker's base address by examining
1614 the current pc (which should point at the entry point for the
1615 dynamic linker) and subtracting the offset of the entry point.
1616
1617 This is more fragile than the previous approaches, but is a good
1618 fallback method because it has actually been working well in
1619 most cases. */
1620 if (!load_addr_found)
1621 {
1622 struct regcache *regcache
1623 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
1624
1625 load_addr = (regcache_read_pc (regcache)
1626 - exec_entry_point (tmp_bfd, tmp_bfd_target));
1627 }
1628
1629 if (!loader_found_in_list)
1630 {
1631 info->debug_loader_name = xstrdup (interp_name);
1632 info->debug_loader_offset_p = 1;
1633 info->debug_loader_offset = load_addr;
1634 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1635 }
1636
1637 /* Record the relocated start and end address of the dynamic linker
1638 text and plt section for svr4_in_dynsym_resolve_code. */
1639 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1640 if (interp_sect)
1641 {
1642 info->interp_text_sect_low =
1643 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1644 info->interp_text_sect_high =
1645 info->interp_text_sect_low
1646 + bfd_section_size (tmp_bfd, interp_sect);
1647 }
1648 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1649 if (interp_sect)
1650 {
1651 info->interp_plt_sect_low =
1652 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1653 info->interp_plt_sect_high =
1654 info->interp_plt_sect_low
1655 + bfd_section_size (tmp_bfd, interp_sect);
1656 }
1657
1658 /* Now try to set a breakpoint in the dynamic linker. */
1659 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1660 {
1661 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
1662 (void *) *bkpt_namep);
1663 if (sym_addr != 0)
1664 break;
1665 }
1666
1667 if (sym_addr != 0)
1668 /* Convert 'sym_addr' from a function pointer to an address.
1669 Because we pass tmp_bfd_target instead of the current
1670 target, this will always produce an unrelocated value. */
1671 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1672 sym_addr,
1673 tmp_bfd_target);
1674
1675 /* We're done with both the temporary bfd and target. Closing
1676 the target closes the underlying bfd, because it holds the
1677 only remaining reference. */
1678 target_close (tmp_bfd_target, 0);
1679
1680 if (sym_addr != 0)
1681 {
1682 create_solib_event_breakpoint (target_gdbarch (), load_addr + sym_addr);
1683 xfree (interp_name);
1684 return 1;
1685 }
1686
1687 /* For whatever reason we couldn't set a breakpoint in the dynamic
1688 linker. Warn and drop into the old code. */
1689 bkpt_at_symbol:
1690 xfree (interp_name);
1691 warning (_("Unable to find dynamic linker breakpoint function.\n"
1692 "GDB will be unable to debug shared library initializers\n"
1693 "and track explicitly loaded dynamic code."));
1694 }
1695
1696 /* Scan through the lists of symbols, trying to look up the symbol and
1697 set a breakpoint there. Terminate the loop if/when we succeed. */
1698
1699 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1700 {
1701 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1702 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1703 {
1704 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1705 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1706 sym_addr,
1707 &current_target);
1708 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1709 return 1;
1710 }
1711 }
1712
1713 if (interp_name != NULL && !current_inferior ()->attach_flag)
1714 {
1715 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
1716 {
1717 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1718 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1719 {
1720 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1721 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1722 sym_addr,
1723 &current_target);
1724 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1725 return 1;
1726 }
1727 }
1728 }
1729 return 0;
1730 }
1731
1732 /* Implement the "special_symbol_handling" target_so_ops method. */
1733
1734 static void
1735 svr4_special_symbol_handling (void)
1736 {
1737 /* Nothing to do. */
1738 }
1739
1740 /* Read the ELF program headers from ABFD. Return the contents and
1741 set *PHDRS_SIZE to the size of the program headers. */
1742
1743 static gdb_byte *
1744 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1745 {
1746 Elf_Internal_Ehdr *ehdr;
1747 gdb_byte *buf;
1748
1749 ehdr = elf_elfheader (abfd);
1750
1751 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
1752 if (*phdrs_size == 0)
1753 return NULL;
1754
1755 buf = xmalloc (*phdrs_size);
1756 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1757 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1758 {
1759 xfree (buf);
1760 return NULL;
1761 }
1762
1763 return buf;
1764 }
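
/* A minimal illustrative sketch (hypothetical helper, not used elsewhere in
   this file): the simplest use of the reader above is to fetch the on-disk
   program headers of EXEC_BFD and the in-memory ones reported by the
   target, and check that they are byte-for-byte identical.  This is the
   trivial form of the comparison that svr4_exec_displacement performs
   below, without its displacement and prelink adjustments.  */

static int
program_headers_match_exactly (void)
{
  int disk_size, mem_size, arch_size, match = 0;
  gdb_byte *disk_phdrs, *mem_phdrs;

  disk_phdrs = read_program_headers_from_bfd (exec_bfd, &disk_size);
  mem_phdrs = read_program_header (-1, &mem_size, &arch_size);

  if (disk_phdrs != NULL && mem_phdrs != NULL
      && disk_size == mem_size
      && memcmp (disk_phdrs, mem_phdrs, disk_size) == 0)
    match = 1;

  /* Both buffers are xmalloc'd (or NULL); xfree tolerates NULL.  */
  xfree (disk_phdrs);
  xfree (mem_phdrs);
  return match;
}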
1765
1766 /* Return 1 and fill *DISPLACEMENTP with the detected PIE offset of the
1767 inferior's exec_bfd. Otherwise return 0.
1768
1769 We relocate all of the sections by the same amount. This
1770 behavior is mandated by recent editions of the System V ABI.
1771 According to the System V Application Binary Interface,
1772 Edition 4.1, page 5-5:
1773
1774 ... Though the system chooses virtual addresses for
1775 individual processes, it maintains the segments' relative
1776 positions. Because position-independent code uses relative
1777 addressing between segments, the difference between
1778 virtual addresses in memory must match the difference
1779 between virtual addresses in the file. The difference
1780 between the virtual address of any segment in memory and
1781 the corresponding virtual address in the file is thus a
1782 single constant value for any one executable or shared
1783 object in a given process. This difference is the base
1784 address. One use of the base address is to relocate the
1785 memory image of the program during dynamic linking.
1786
1787 The same language also appears in Edition 4.0 of the System V
1788 ABI and is left unspecified in some of the earlier editions.
1789
1790 Decide whether the objfile needs to be relocated. As indicated above, we will
1791 only be here when execution is stopped. But during attachment the PC can be at
1792 an arbitrary address, so regcache_read_pc can be misleading (unlike the auxv
1793 AT_ENTRY value). Moreover, for an executable with an interpreter section,
1794 regcache_read_pc would point into the interpreter, not the main executable.
1795
1796 So, to summarize, relocation is necessary when the start address obtained from
1797 the executable differs from the address in the auxv AT_ENTRY entry.
1798
1799 [ The astute reader will note that we also test to make sure that
1800 the executable in question has the DYNAMIC flag set. It is my
1801 opinion that this test is unnecessary (undesirable even). It
1802 was added to avoid inadvertent relocation of an executable
1803 whose e_type member in the ELF header is not ET_DYN. There may
1804 be a time in the future when it is desirable to do relocations
1805 on other types of files as well in which case this condition
1806 should either be removed or modified to accommodate the new file
1807 type. - Kevin, Nov 2000. ] */
1808
1809 static int
1810 svr4_exec_displacement (CORE_ADDR *displacementp)
1811 {
1812 /* ENTRY_POINT is a possible function descriptor - before
1813 a call to gdbarch_convert_from_func_ptr_addr. */
1814 CORE_ADDR entry_point, displacement;
1815
1816 if (exec_bfd == NULL)
1817 return 0;
1818
1819 /* If the DYNAMIC flag is clear then, for ELF, the file is ET_EXEC and
1820 not ET_DYN. Both shared libraries being executed directly and PIE
1821 (Position Independent Executable) executables are ET_DYN. */
1822
1823 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
1824 return 0;
1825
1826 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
1827 return 0;
1828
1829 displacement = entry_point - bfd_get_start_address (exec_bfd);
1830
1831 /* Verify the DISPLACEMENT candidate complies with the required page
1832 alignment. It is cheaper than the program headers comparison below. */
1833
1834 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1835 {
1836 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
1837
1838 /* p_align of PT_LOAD segments does not specify any alignment but
1839 only congruency of addresses:
1840 p_offset % p_align == p_vaddr % p_align
1841 The kernel is free to load the executable with a lower alignment. */
1842
1843 if ((displacement & (elf->minpagesize - 1)) != 0)
1844 return 0;
1845 }
1846
1847 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
1848 comparing their program headers. If the program headers in the auxiliary
1849 vector do not match the program headers in the executable, then we are
1850 looking at a different file than the one used by the kernel - for
1851 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
1852
1853 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1854 {
1855 /* Be optimistic and clear OK only if GDB was able to verify the headers
1856 really do not match. */
1857 int phdrs_size, phdrs2_size, ok = 1;
1858 gdb_byte *buf, *buf2;
1859 int arch_size;
1860
1861 buf = read_program_header (-1, &phdrs_size, &arch_size);
1862 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
1863 if (buf != NULL && buf2 != NULL)
1864 {
1865 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
1866
1867 /* We are dealing with three different addresses. EXEC_BFD
1868 represents the addresses in the on-disk file. The target memory
1869 content may differ from EXEC_BFD, as the file may have been
1870 prelinked to a different address after the executable was loaded.
1871 Moreover, the address of placement in target memory can differ
1872 from what the program headers in target memory say - this is the
1873 goal of PIE.
1874
1875 The detected DISPLACEMENT covers both the offset of PIE placement
1876 and any new prelink performed after the program started. Here,
1877 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
1878 content offset, for verification purposes. */
1879
1880 if (phdrs_size != phdrs2_size
1881 || bfd_get_arch_size (exec_bfd) != arch_size)
1882 ok = 0;
1883 else if (arch_size == 32
1884 && phdrs_size >= sizeof (Elf32_External_Phdr)
1885 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
1886 {
1887 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1888 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1889 CORE_ADDR displacement = 0;
1890 int i;
1891
1892 /* DISPLACEMENT could be found more easily as the difference between
1893 the in-memory e_entry and ehdr2->e_entry. But we have not read the
1894 in-memory ehdr, and we already have enough information to compute
1895 that displacement from what we've read. */
1896
1897 for (i = 0; i < ehdr2->e_phnum; i++)
1898 if (phdr2[i].p_type == PT_LOAD)
1899 {
1900 Elf32_External_Phdr *phdrp;
1901 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1902 CORE_ADDR vaddr, paddr;
1903 CORE_ADDR displacement_vaddr = 0;
1904 CORE_ADDR displacement_paddr = 0;
1905
1906 phdrp = &((Elf32_External_Phdr *) buf)[i];
1907 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1908 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1909
1910 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1911 byte_order);
1912 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1913
1914 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1915 byte_order);
1916 displacement_paddr = paddr - phdr2[i].p_paddr;
1917
1918 if (displacement_vaddr == displacement_paddr)
1919 displacement = displacement_vaddr;
1920
1921 break;
1922 }
1923
1924 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
1925
1926 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
1927 {
1928 Elf32_External_Phdr *phdrp;
1929 Elf32_External_Phdr *phdr2p;
1930 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1931 CORE_ADDR vaddr, paddr;
1932 asection *plt2_asect;
1933
1934 phdrp = &((Elf32_External_Phdr *) buf)[i];
1935 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1936 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1937 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
1938
1939 /* PT_GNU_STACK is an exception in that it is never relocated by
1940 prelink, as its addresses are always zero. */
1941
1942 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1943 continue;
1944
1945 /* Check also other adjustment combinations - PR 11786. */
1946
1947 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1948 byte_order);
1949 vaddr -= displacement;
1950 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
1951
1952 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1953 byte_order);
1954 paddr -= displacement;
1955 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
1956
1957 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1958 continue;
1959
1960 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
1961 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
1962 if (plt2_asect)
1963 {
1964 int content2;
1965 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
1966 CORE_ADDR filesz;
1967
1968 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
1969 & SEC_HAS_CONTENTS) != 0;
1970
1971 filesz = extract_unsigned_integer (buf_filesz_p, 4,
1972 byte_order);
1973
1974 /* PLT2_ASECT is from the on-disk file (exec_bfd), while
1975 FILESZ is from the in-memory image. */
1976 if (content2)
1977 filesz += bfd_get_section_size (plt2_asect);
1978 else
1979 filesz -= bfd_get_section_size (plt2_asect);
1980
1981 store_unsigned_integer (buf_filesz_p, 4, byte_order,
1982 filesz);
1983
1984 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1985 continue;
1986 }
1987
1988 ok = 0;
1989 break;
1990 }
1991 }
1992 else if (arch_size == 64
1993 && phdrs_size >= sizeof (Elf64_External_Phdr)
1994 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
1995 {
1996 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1997 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1998 CORE_ADDR displacement = 0;
1999 int i;
2000
2001 /* DISPLACEMENT could be found more easily as the difference between
2002 the in-memory e_entry and ehdr2->e_entry. But we have not read the
2003 in-memory ehdr, and we already have enough information to compute
2004 that displacement from what we've read. */
2005
2006 for (i = 0; i < ehdr2->e_phnum; i++)
2007 if (phdr2[i].p_type == PT_LOAD)
2008 {
2009 Elf64_External_Phdr *phdrp;
2010 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2011 CORE_ADDR vaddr, paddr;
2012 CORE_ADDR displacement_vaddr = 0;
2013 CORE_ADDR displacement_paddr = 0;
2014
2015 phdrp = &((Elf64_External_Phdr *) buf)[i];
2016 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2017 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2018
2019 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2020 byte_order);
2021 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2022
2023 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2024 byte_order);
2025 displacement_paddr = paddr - phdr2[i].p_paddr;
2026
2027 if (displacement_vaddr == displacement_paddr)
2028 displacement = displacement_vaddr;
2029
2030 break;
2031 }
2032
2033 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2034
2035 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2036 {
2037 Elf64_External_Phdr *phdrp;
2038 Elf64_External_Phdr *phdr2p;
2039 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2040 CORE_ADDR vaddr, paddr;
2041 asection *plt2_asect;
2042
2043 phdrp = &((Elf64_External_Phdr *) buf)[i];
2044 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2045 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2046 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2047
2048 /* PT_GNU_STACK is an exception in that it is never relocated by
2049 prelink, as its addresses are always zero. */
2050
2051 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2052 continue;
2053
2054 /* Check also other adjustment combinations - PR 11786. */
2055
2056 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2057 byte_order);
2058 vaddr -= displacement;
2059 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2060
2061 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2062 byte_order);
2063 paddr -= displacement;
2064 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2065
2066 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2067 continue;
2068
2069 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2070 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2071 if (plt2_asect)
2072 {
2073 int content2;
2074 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2075 CORE_ADDR filesz;
2076
2077 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2078 & SEC_HAS_CONTENTS) != 0;
2079
2080 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2081 byte_order);
2082
2083 /* PLT2_ASECT is from the on-disk file (exec_bfd), while
2084 FILESZ is from the in-memory image. */
2085 if (content2)
2086 filesz += bfd_get_section_size (plt2_asect);
2087 else
2088 filesz -= bfd_get_section_size (plt2_asect);
2089
2090 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2091 filesz);
2092
2093 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2094 continue;
2095 }
2096
2097 ok = 0;
2098 break;
2099 }
2100 }
2101 else
2102 ok = 0;
2103 }
2104
2105 xfree (buf);
2106 xfree (buf2);
2107
2108 if (!ok)
2109 return 0;
2110 }
2111
2112 if (info_verbose)
2113 {
2114 /* This may be printed repeatedly, as there is no easy way to check
2115 whether the executable symbols/file have already been relocated by
2116 the displacement. */
2117
2118 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2119 "displacement %s for \"%s\".\n"),
2120 paddress (target_gdbarch (), displacement),
2121 bfd_get_filename (exec_bfd));
2122 }
2123
2124 *displacementp = displacement;
2125 return 1;
2126 }
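
/* A minimal sketch of the core computation above (the hypothetical helper
   name and the numbers in the comment are illustrative only): the PIE
   displacement is simply the difference between the entry point the kernel
   reports in the auxiliary vector and the entry point recorded in the
   on-disk executable, here without the page-alignment and program-header
   cross-checks that svr4_exec_displacement adds.  */

static int
naive_pie_displacement (CORE_ADDR *displacementp)
{
  CORE_ADDR entry_point;

  if (exec_bfd == NULL
      || (bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0
      || target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
    return 0;

  /* Example: a PIE linked at 0 with e_entry 0x730 that the kernel mapped
     at 0x555555554000 has AT_ENTRY == 0x555555554730, so the displacement
     is 0x555555554000.  */
  *displacementp = entry_point - bfd_get_start_address (exec_bfd);
  return 1;
}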
2127
2128 /* Relocate the main executable. This function should be called upon
2129 stopping the inferior process at the entry point to the program.
2130 The entry point from BFD is compared to the AT_ENTRY value from AUXV and,
2131 if they differ, the main executable is relocated by the proper amount. */
2132
2133 static void
2134 svr4_relocate_main_executable (void)
2135 {
2136 CORE_ADDR displacement;
2137
2138 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2139 probably contains the offsets computed using the PIE displacement
2140 from the previous run, which of course are irrelevant for this run.
2141 So we need to determine the new PIE displacement and recompute the
2142 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2143 already contains pre-computed offsets.
2144
2145 If we cannot compute the PIE displacement, either:
2146
2147 - The executable is not PIE.
2148
2149 - SYMFILE_OBJFILE does not match the executable started in the target.
2150 This can happen for main executable symbols loaded at the host while
2151 `ld.so --ld-args main-executable' is loaded in the target.
2152
2153 Then we leave the section offsets untouched and use them as is for
2154 this run. Either:
2155
2156 - These section offsets were properly reset earlier, and thus
2157 already contain the correct values. This can happen for instance
2158 when reconnecting via the remote protocol to a target that supports
2159 the `qOffsets' packet.
2160
2161 - The section offsets were not reset earlier, and the best we can
2162 hope is that the old offsets are still applicable to the new run. */
2163
2164 if (! svr4_exec_displacement (&displacement))
2165 return;
2166
2167 /* Even a DISPLACEMENT of 0 is a valid new difference between in-memory
2168 and in-file addresses. */
2169
2170 if (symfile_objfile)
2171 {
2172 struct section_offsets *new_offsets;
2173 int i;
2174
2175 new_offsets = alloca (symfile_objfile->num_sections
2176 * sizeof (*new_offsets));
2177
2178 for (i = 0; i < symfile_objfile->num_sections; i++)
2179 new_offsets->offsets[i] = displacement;
2180
2181 objfile_relocate (symfile_objfile, new_offsets);
2182 }
2183 else if (exec_bfd)
2184 {
2185 asection *asect;
2186
2187 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2188 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2189 (bfd_section_vma (exec_bfd, asect)
2190 + displacement));
2191 }
2192 }
2193
2194 /* Implement the "solib_create_inferior_hook" target_so_ops method.
2195
2196 For SVR4 executables, the first instruction executed is either the first
2197 instruction in the dynamic linker (for dynamically linked executables) or
2198 the instruction at "start" for statically linked executables. For
2199 dynamically linked executables, the system first starts the dynamic
2200 linker (historically /lib/libc.so.N; on ELF systems, the interpreter
2201 named by the PT_INTERP program header) and runs it. The dynamic linker
2202 maps in any needed shared libraries, maps in the actual user executable,
2203 and then jumps to "start" in the user executable.
2204
2205 We can arrange to cooperate with the dynamic linker to discover the
2206 names of shared libraries that are dynamically linked, and the base
2207 addresses to which they are linked.
2208
2209 This function is responsible for discovering those names and
2210 addresses, and saving sufficient information about them to allow
2211 their symbols to be read at a later time. */
2212
2213 static void
2214 svr4_solib_create_inferior_hook (int from_tty)
2215 {
2216 struct svr4_info *info;
2217
2218 info = get_svr4_info ();
2219
2220 /* Relocate the main executable if necessary. */
2221 svr4_relocate_main_executable ();
2222
2223 /* No point setting a breakpoint in the dynamic linker if we can't
2224 hit it (e.g., a core file, or a trace file). */
2225 if (!target_has_execution)
2226 return;
2227
2228 if (!svr4_have_link_map_offsets ())
2229 return;
2230
2231 if (!enable_break (info, from_tty))
2232 return;
2233 }
2234
2235 static void
2236 svr4_clear_solib (void)
2237 {
2238 struct svr4_info *info;
2239
2240 info = get_svr4_info ();
2241 info->debug_base = 0;
2242 info->debug_loader_offset_p = 0;
2243 info->debug_loader_offset = 0;
2244 xfree (info->debug_loader_name);
2245 info->debug_loader_name = NULL;
2246 }
2247
2248 /* Clear any bits of ADDR that wouldn't fit in a target-format
2249 data pointer. "Data pointer" here refers to whatever sort of
2250 address the dynamic linker uses to manage its sections. At the
2251 moment, we don't support shared libraries on any processors where
2252 code and data pointers are different sizes.
2253
2254 This isn't really the right solution. What we really need here is
2255 a way to do arithmetic on CORE_ADDR values that respects the
2256 natural pointer/address correspondence. (For example, on the MIPS,
2257 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2258 sign-extend the value. There, simply truncating the bits above
2259 gdbarch_ptr_bit, as we do below, is no good.) This should probably
2260 be a new gdbarch method or something. */
2261 static CORE_ADDR
2262 svr4_truncate_ptr (CORE_ADDR addr)
2263 {
2264 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
2265 /* We don't need to truncate anything, and the bit twiddling below
2266 will fail due to overflow problems. */
2267 return addr;
2268 else
2269 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
2270 }
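
/* Worked example (illustrative values): with gdbarch_ptr_bit == 32 but a
   64-bit CORE_ADDR, an address of 0xffffffff80001234 read from the target
   is truncated as follows:

     mask = ((CORE_ADDR) 1 << 32) - 1          == 0x00000000ffffffff
     0xffffffff80001234 & mask                 == 0x0000000080001234

   When gdbarch_ptr_bit equals the width of CORE_ADDR, the address is
   returned unchanged, since the shift above would overflow.  */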
2271
2272
2273 static void
2274 svr4_relocate_section_addresses (struct so_list *so,
2275 struct target_section *sec)
2276 {
2277 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so,
2278 sec->bfd));
2279 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so,
2280 sec->bfd));
2281 }
2282 \f
2283
2284 /* Architecture-specific operations. */
2285
2286 /* Per-architecture data key. */
2287 static struct gdbarch_data *solib_svr4_data;
2288
2289 struct solib_svr4_ops
2290 {
2291 /* Return a description of the layout of `struct link_map'. */
2292 struct link_map_offsets *(*fetch_link_map_offsets)(void);
2293 };
2294
2295 /* Return a default for the architecture-specific operations. */
2296
2297 static void *
2298 solib_svr4_init (struct obstack *obstack)
2299 {
2300 struct solib_svr4_ops *ops;
2301
2302 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2303 ops->fetch_link_map_offsets = NULL;
2304 return ops;
2305 }
2306
2307 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2308 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
2309
2310 void
2311 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2312 struct link_map_offsets *(*flmo) (void))
2313 {
2314 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2315
2316 ops->fetch_link_map_offsets = flmo;
2317
2318 set_solib_ops (gdbarch, &svr4_so_ops);
2319 }
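
/* Typical use, as a sketch only (the function and file names here are
   hypothetical examples of an architecture's *-tdep.c init routine):

     static void
     example_svr4_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
     {
       ...
       set_solib_svr4_fetch_link_map_offsets
         (gdbarch, svr4_ilp32_fetch_link_map_offsets);
     }

   Besides recording the link_map_offsets fetcher, this installs
   svr4_so_ops as GDBARCH's shared library operations.  */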
2320
2321 /* Fetch a link_map_offsets structure using the architecture-specific
2322 `struct link_map_offsets' fetcher. */
2323
2324 static struct link_map_offsets *
2325 svr4_fetch_link_map_offsets (void)
2326 {
2327 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2328
2329 gdb_assert (ops->fetch_link_map_offsets);
2330 return ops->fetch_link_map_offsets ();
2331 }
2332
2333 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2334
2335 static int
2336 svr4_have_link_map_offsets (void)
2337 {
2338 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2339
2340 return (ops->fetch_link_map_offsets != NULL);
2341 }
2342 \f
2343
2344 /* Most OSes that have SVR4-style ELF dynamic libraries define a
2345 `struct r_debug' and a `struct link_map' that are binary compatible
2346 with the original SVR4 implementation. */
2347
2348 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2349 for an ILP32 SVR4 system. */
2350
2351 struct link_map_offsets *
2352 svr4_ilp32_fetch_link_map_offsets (void)
2353 {
2354 static struct link_map_offsets lmo;
2355 static struct link_map_offsets *lmp = NULL;
2356
2357 if (lmp == NULL)
2358 {
2359 lmp = &lmo;
2360
2361 lmo.r_version_offset = 0;
2362 lmo.r_version_size = 4;
2363 lmo.r_map_offset = 4;
2364 lmo.r_brk_offset = 8;
2365 lmo.r_ldsomap_offset = 20;
2366
2367 /* Everything we need is in the first 20 bytes. */
2368 lmo.link_map_size = 20;
2369 lmo.l_addr_offset = 0;
2370 lmo.l_name_offset = 4;
2371 lmo.l_ld_offset = 8;
2372 lmo.l_next_offset = 12;
2373 lmo.l_prev_offset = 16;
2374 }
2375
2376 return lmp;
2377 }
2378
2379 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2380 for an LP64 SVR4 system. */
2381
2382 struct link_map_offsets *
2383 svr4_lp64_fetch_link_map_offsets (void)
2384 {
2385 static struct link_map_offsets lmo;
2386 static struct link_map_offsets *lmp = NULL;
2387
2388 if (lmp == NULL)
2389 {
2390 lmp = &lmo;
2391
2392 lmo.r_version_offset = 0;
2393 lmo.r_version_size = 4;
2394 lmo.r_map_offset = 8;
2395 lmo.r_brk_offset = 16;
2396 lmo.r_ldsomap_offset = 40;
2397
2398 /* Everything we need is in the first 40 bytes. */
2399 lmo.link_map_size = 40;
2400 lmo.l_addr_offset = 0;
2401 lmo.l_name_offset = 8;
2402 lmo.l_ld_offset = 16;
2403 lmo.l_next_offset = 24;
2404 lmo.l_prev_offset = 32;
2405 }
2406
2407 return lmp;
2408 }
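
/* For reference, a sketch of the structures the offsets above describe
   (ILP32 / LP64 offsets, mirroring the values set in the two fetchers):

     struct r_debug
     {
       int r_version;                  offset 0, size 4
       struct link_map *r_map;         offset 4 / 8
       ElfW(Addr) r_brk;               offset 8 / 16
       ...                             r_ldsomap, where present, at 20 / 40
     };

     struct link_map
     {
       ElfW(Addr) l_addr;              offset 0
       char *l_name;                   offset 4 / 8
       ElfW(Dyn) *l_ld;                offset 8 / 16
       struct link_map *l_next;        offset 12 / 24
       struct link_map *l_prev;        offset 16 / 32
     };  */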
2409 \f
2410
2411 struct target_so_ops svr4_so_ops;
2412
2413 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic. Those
2414 DSOs have a different rule for symbol lookup: the lookup begins in
2415 the DSO itself, not in the main executable. */
2416
2417 static struct symbol *
2418 elf_lookup_lib_symbol (const struct objfile *objfile,
2419 const char *name,
2420 const domain_enum domain)
2421 {
2422 bfd *abfd;
2423
2424 if (objfile == symfile_objfile)
2425 abfd = exec_bfd;
2426 else
2427 {
2428 /* OBJFILE should have been passed as the non-debug one. */
2429 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
2430
2431 abfd = objfile->obfd;
2432 }
2433
2434 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
2435 return NULL;
2436
2437 return lookup_global_symbol_from_objfile (objfile, name, domain);
2438 }
2439
2440 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
2441
2442 void
2443 _initialize_svr4_solib (void)
2444 {
2445 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
2446 solib_svr4_pspace_data
2447 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
2448
2449 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
2450 svr4_so_ops.free_so = svr4_free_so;
2451 svr4_so_ops.clear_solib = svr4_clear_solib;
2452 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
2453 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
2454 svr4_so_ops.current_sos = svr4_current_sos;
2455 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
2456 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
2457 svr4_so_ops.bfd_open = solib_bfd_open;
2458 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
2459 svr4_so_ops.same = svr4_same;
2460 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
2461 }