1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
93 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
94 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
95 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
99 static enum elf_spu_reloc_type
100 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
106 case BFD_RELOC_SPU_IMM10W
:
108 case BFD_RELOC_SPU_IMM16W
:
110 case BFD_RELOC_SPU_LO16
:
111 return R_SPU_ADDR16_LO
;
112 case BFD_RELOC_SPU_HI16
:
113 return R_SPU_ADDR16_HI
;
114 case BFD_RELOC_SPU_IMM18
:
116 case BFD_RELOC_SPU_PCREL16
:
118 case BFD_RELOC_SPU_IMM7
:
120 case BFD_RELOC_SPU_IMM8
:
122 case BFD_RELOC_SPU_PCREL9a
:
124 case BFD_RELOC_SPU_PCREL9b
:
126 case BFD_RELOC_SPU_IMM10
:
127 return R_SPU_ADDR10I
;
128 case BFD_RELOC_SPU_IMM16
:
129 return R_SPU_ADDR16I
;
132 case BFD_RELOC_32_PCREL
:
134 case BFD_RELOC_SPU_PPU32
:
136 case BFD_RELOC_SPU_PPU64
:
142 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
144 Elf_Internal_Rela
*dst
)
146 enum elf_spu_reloc_type r_type
;
148 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
149 BFD_ASSERT (r_type
< R_SPU_max
);
150 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
153 static reloc_howto_type
*
154 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
155 bfd_reloc_code_real_type code
)
157 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
159 if (r_type
== R_SPU_NONE
)
162 return elf_howto_table
+ r_type
;
165 static reloc_howto_type
*
166 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
171 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
172 if (elf_howto_table
[i
].name
!= NULL
173 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
174 return &elf_howto_table
[i
];
179 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
181 static bfd_reloc_status_type
182 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
183 void *data
, asection
*input_section
,
184 bfd
*output_bfd
, char **error_message
)
186 bfd_size_type octets
;
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
193 if (output_bfd
!= NULL
)
194 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
195 input_section
, output_bfd
, error_message
);
197 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
198 return bfd_reloc_outofrange
;
199 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
201 /* Get symbol value. */
203 if (!bfd_is_com_section (symbol
->section
))
205 if (symbol
->section
->output_section
)
206 val
+= symbol
->section
->output_section
->vma
;
208 val
+= reloc_entry
->addend
;
210 /* Make it pc-relative. */
211 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
214 if (val
+ 256 >= 512)
215 return bfd_reloc_overflow
;
217 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
222 insn
&= ~reloc_entry
->howto
->dst_mask
;
223 insn
|= val
& reloc_entry
->howto
->dst_mask
;
224 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
229 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
231 if (!sec
->used_by_bfd
)
233 struct _spu_elf_section_data
*sdata
;
235 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
238 sec
->used_by_bfd
= sdata
;
241 return _bfd_elf_new_section_hook (abfd
, sec
);
244 /* Set up overlay info for executables. */
247 spu_elf_object_p (bfd
*abfd
)
249 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
251 unsigned int i
, num_ovl
, num_buf
;
252 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
253 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
254 Elf_Internal_Phdr
*last_phdr
= NULL
;
256 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
257 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
262 if (last_phdr
== NULL
263 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
266 for (j
= 1; j
< elf_numsections (abfd
); j
++)
268 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
270 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr
, phdr
))
272 asection
*sec
= shdr
->bfd_section
;
273 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
274 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
282 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
286 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
288 if (sym
->name
!= NULL
289 && sym
->section
!= bfd_abs_section_ptr
290 && strncmp (sym
->name
, "_EAR_", 5) == 0)
291 sym
->flags
|= BSF_KEEP
;
294 /* SPU ELF linker hash table. */
296 struct spu_link_hash_table
298 struct elf_link_hash_table elf
;
300 /* Shortcuts to overlay sections. */
305 /* Count of stubs in each overlay section. */
306 unsigned int *stub_count
;
308 /* The stub section for each overlay section. */
311 struct elf_link_hash_entry
*ovly_load
;
312 struct elf_link_hash_entry
*ovly_return
;
313 unsigned long ovly_load_r_symndx
;
315 /* Number of overlay buffers. */
316 unsigned int num_buf
;
318 /* Total number of overlays. */
319 unsigned int num_overlays
;
321 /* How much memory we have. */
322 unsigned int local_store
;
323 /* Local store --auto-overlay should reserve for non-overlay
324 functions and data. */
325 unsigned int overlay_fixed
;
326 /* Local store --auto-overlay should reserve for stack and heap. */
327 unsigned int reserved
;
328 /* Count of overlay stubs needed in non-overlay area. */
329 unsigned int non_ovly_stub
;
331 /* Stash various callbacks for --auto-overlay. */
332 void (*spu_elf_load_ovl_mgr
) (void);
333 FILE *(*spu_elf_open_overlay_script
) (void);
334 void (*spu_elf_relink
) (void);
336 /* Bit 0 set if --auto-overlay.
337 Bit 1 set if --auto-relink.
338 Bit 2 set if --overlay-rodata. */
339 unsigned int auto_overlay
: 3;
340 #define AUTO_OVERLAY 1
341 #define AUTO_RELINK 2
342 #define OVERLAY_RODATA 4
344 /* Set if we should emit symbols for stubs. */
345 unsigned int emit_stub_syms
:1;
347 /* Set if we want stubs on calls out of overlay regions to
348 non-overlay regions. */
349 unsigned int non_overlay_stubs
: 1;
352 unsigned int stub_err
: 1;
354 /* Set if stack size analysis should be done. */
355 unsigned int stack_analysis
: 1;
357 /* Set if __stack_* syms will be emitted. */
358 unsigned int emit_stack_syms
: 1;
361 /* Hijack the generic got fields for overlay stub accounting. */
365 struct got_entry
*next
;
371 #define spu_hash_table(p) \
372 ((struct spu_link_hash_table *) ((p)->hash))
374 /* Create a spu ELF linker hash table. */
376 static struct bfd_link_hash_table
*
377 spu_elf_link_hash_table_create (bfd
*abfd
)
379 struct spu_link_hash_table
*htab
;
381 htab
= bfd_malloc (sizeof (*htab
));
385 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
386 _bfd_elf_link_hash_newfunc
,
387 sizeof (struct elf_link_hash_entry
)))
393 memset (&htab
->ovtab
, 0,
394 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
396 htab
->elf
.init_got_refcount
.refcount
= 0;
397 htab
->elf
.init_got_refcount
.glist
= NULL
;
398 htab
->elf
.init_got_offset
.offset
= 0;
399 htab
->elf
.init_got_offset
.glist
= NULL
;
400 return &htab
->elf
.root
;
403 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
404 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
405 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
408 get_sym_h (struct elf_link_hash_entry
**hp
,
409 Elf_Internal_Sym
**symp
,
411 Elf_Internal_Sym
**locsymsp
,
412 unsigned long r_symndx
,
415 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
417 if (r_symndx
>= symtab_hdr
->sh_info
)
419 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
420 struct elf_link_hash_entry
*h
;
422 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
423 while (h
->root
.type
== bfd_link_hash_indirect
424 || h
->root
.type
== bfd_link_hash_warning
)
425 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
435 asection
*symsec
= NULL
;
436 if (h
->root
.type
== bfd_link_hash_defined
437 || h
->root
.type
== bfd_link_hash_defweak
)
438 symsec
= h
->root
.u
.def
.section
;
444 Elf_Internal_Sym
*sym
;
445 Elf_Internal_Sym
*locsyms
= *locsymsp
;
449 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
451 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
453 0, NULL
, NULL
, NULL
);
458 sym
= locsyms
+ r_symndx
;
467 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
473 /* Create the note section if not already present. This is done early so
474 that the linker maps the sections to the right place in the output. */
477 spu_elf_create_sections (struct bfd_link_info
*info
,
482 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
484 /* Stash some options away where we can get at them later. */
485 htab
->stack_analysis
= stack_analysis
;
486 htab
->emit_stack_syms
= emit_stack_syms
;
488 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
489 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
494 /* Make SPU_PTNOTE_SPUNAME section. */
501 ibfd
= info
->input_bfds
;
502 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
503 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
505 || !bfd_set_section_alignment (ibfd
, s
, 4))
508 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
509 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
510 size
+= (name_len
+ 3) & -4;
512 if (!bfd_set_section_size (ibfd
, s
, size
))
515 data
= bfd_zalloc (ibfd
, size
);
519 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
520 bfd_put_32 (ibfd
, name_len
, data
+ 4);
521 bfd_put_32 (ibfd
, 1, data
+ 8);
522 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
523 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
524 bfd_get_filename (info
->output_bfd
), name_len
);
531 /* qsort predicate to sort sections by vma. */
534 sort_sections (const void *a
, const void *b
)
536 const asection
*const *s1
= a
;
537 const asection
*const *s2
= b
;
538 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
541 return delta
< 0 ? -1 : 1;
543 return (*s1
)->index
- (*s2
)->index
;
546 /* Identify overlays in the output bfd, and number them. */
549 spu_elf_find_overlays (struct bfd_link_info
*info
)
551 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
552 asection
**alloc_sec
;
553 unsigned int i
, n
, ovl_index
, num_buf
;
557 if (info
->output_bfd
->section_count
< 2)
561 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
562 if (alloc_sec
== NULL
)
565 /* Pick out all the alloced sections. */
566 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
567 if ((s
->flags
& SEC_ALLOC
) != 0
568 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
578 /* Sort them by vma. */
579 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
581 /* Look for overlapping vmas. Any with overlap must be overlays.
582 Count them. Also count the number of overlay regions. */
583 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
584 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
587 if (s
->vma
< ovl_end
)
589 asection
*s0
= alloc_sec
[i
- 1];
591 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
593 alloc_sec
[ovl_index
] = s0
;
594 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
595 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= ++num_buf
;
597 alloc_sec
[ovl_index
] = s
;
598 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
599 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
600 if (s0
->vma
!= s
->vma
)
602 info
->callbacks
->einfo (_("%X%P: overlay sections %A and %A "
603 "do not start at the same address.\n"),
607 if (ovl_end
< s
->vma
+ s
->size
)
608 ovl_end
= s
->vma
+ s
->size
;
611 ovl_end
= s
->vma
+ s
->size
;
614 htab
->num_overlays
= ovl_index
;
615 htab
->num_buf
= num_buf
;
616 htab
->ovl_sec
= alloc_sec
;
617 htab
->ovly_load
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
618 FALSE
, FALSE
, FALSE
);
619 htab
->ovly_return
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return",
620 FALSE
, FALSE
, FALSE
);
621 return ovl_index
!= 0;
624 /* Support two sizes of overlay stubs, a slower more compact stub of two
625 intructions, and a faster stub of four instructions. */
626 #ifndef OVL_STUB_SIZE
627 /* Default to faster. */
628 #define OVL_STUB_SIZE 16
629 /* #define OVL_STUB_SIZE 8 */
631 #define BRSL 0x33000000
632 #define BR 0x32000000
633 #define NOP 0x40200000
634 #define LNOP 0x00200000
635 #define ILA 0x42000000
637 /* Return true for all relative and absolute branch instructions.
645 brhnz 00100011 0.. */
648 is_branch (const unsigned char *insn
)
650 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
653 /* Return true for all indirect branch instructions.
661 bihnz 00100101 011 */
664 is_indirect_branch (const unsigned char *insn
)
666 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
669 /* Return true for branch hint instructions.
674 is_hint (const unsigned char *insn
)
676 return (insn
[0] & 0xfc) == 0x10;
679 /* True if INPUT_SECTION might need overlay stubs. */
682 maybe_needs_stubs (asection
*input_section
, bfd
*output_bfd
)
684 /* No stubs for debug sections and suchlike. */
685 if ((input_section
->flags
& SEC_ALLOC
) == 0)
688 /* No stubs for link-once sections that will be discarded. */
689 if (input_section
->output_section
== NULL
690 || input_section
->output_section
->owner
!= output_bfd
)
693 /* Don't create stubs for .eh_frame references. */
694 if (strcmp (input_section
->name
, ".eh_frame") == 0)
708 /* Return non-zero if this reloc symbol should go via an overlay stub.
709 Return 2 if the stub must be in non-overlay area. */
711 static enum _stub_type
712 needs_ovl_stub (struct elf_link_hash_entry
*h
,
713 Elf_Internal_Sym
*sym
,
715 asection
*input_section
,
716 Elf_Internal_Rela
*irela
,
718 struct bfd_link_info
*info
)
720 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
721 enum elf_spu_reloc_type r_type
;
722 unsigned int sym_type
;
724 enum _stub_type ret
= no_stub
;
727 || sym_sec
->output_section
== NULL
728 || sym_sec
->output_section
->owner
!= info
->output_bfd
729 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
734 /* Ensure no stubs for user supplied overlay manager syms. */
735 if (h
== htab
->ovly_load
|| h
== htab
->ovly_return
)
738 /* setjmp always goes via an overlay stub, because then the return
739 and hence the longjmp goes via __ovly_return. That magically
740 makes setjmp/longjmp between overlays work. */
741 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
742 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
746 /* Usually, symbols in non-overlay sections don't need stubs. */
747 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
748 && !htab
->non_overlay_stubs
)
754 sym_type
= ELF_ST_TYPE (sym
->st_info
);
756 r_type
= ELF32_R_TYPE (irela
->r_info
);
758 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
762 if (contents
== NULL
)
765 if (!bfd_get_section_contents (input_section
->owner
,
772 contents
+= irela
->r_offset
;
774 if (is_branch (contents
) || is_hint (contents
))
777 if ((contents
[0] & 0xfd) == 0x31
778 && sym_type
!= STT_FUNC
781 /* It's common for people to write assembly and forget
782 to give function symbols the right type. Handle
783 calls to such symbols, but warn so that (hopefully)
784 people will fix their code. We need the symbol
785 type to be correct to distinguish function pointer
786 initialisation from other pointer initialisations. */
787 const char *sym_name
;
790 sym_name
= h
->root
.root
.string
;
793 Elf_Internal_Shdr
*symtab_hdr
;
794 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
795 sym_name
= bfd_elf_sym_name (input_section
->owner
,
800 (*_bfd_error_handler
) (_("warning: call to non-function"
801 " symbol %s defined in %B"),
802 sym_sec
->owner
, sym_name
);
808 if (sym_type
!= STT_FUNC
810 && (sym_sec
->flags
& SEC_CODE
) == 0)
813 /* A reference from some other section to a symbol in an overlay
814 section needs a stub. */
815 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
816 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
819 /* If this insn isn't a branch then we are possibly taking the
820 address of a function and passing it out somehow. */
821 return !branch
&& sym_type
== STT_FUNC
? nonovl_stub
: ret
;
825 count_stub (struct spu_link_hash_table
*htab
,
828 enum _stub_type stub_type
,
829 struct elf_link_hash_entry
*h
,
830 const Elf_Internal_Rela
*irela
)
832 unsigned int ovl
= 0;
833 struct got_entry
*g
, **head
;
836 /* If this instruction is a branch or call, we need a stub
837 for it. One stub per function per overlay.
838 If it isn't a branch, then we are taking the address of
839 this function so need a stub in the non-overlay area
840 for it. One stub per function. */
841 if (stub_type
!= nonovl_stub
)
842 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
845 head
= &h
->got
.glist
;
848 if (elf_local_got_ents (ibfd
) == NULL
)
850 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
851 * sizeof (*elf_local_got_ents (ibfd
)));
852 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
853 if (elf_local_got_ents (ibfd
) == NULL
)
856 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
861 addend
= irela
->r_addend
;
865 struct got_entry
*gnext
;
867 for (g
= *head
; g
!= NULL
; g
= g
->next
)
868 if (g
->addend
== addend
&& g
->ovl
== 0)
873 /* Need a new non-overlay area stub. Zap other stubs. */
874 for (g
= *head
; g
!= NULL
; g
= gnext
)
877 if (g
->addend
== addend
)
879 htab
->stub_count
[g
->ovl
] -= 1;
887 for (g
= *head
; g
!= NULL
; g
= g
->next
)
888 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
894 g
= bfd_malloc (sizeof *g
);
899 g
->stub_addr
= (bfd_vma
) -1;
903 htab
->stub_count
[ovl
] += 1;
909 /* Two instruction overlay stubs look like:
912 .word target_ovl_and_address
914 ovl_and_address is a word with the overlay number in the top 14 bits
915 and local store address in the bottom 18 bits.
917 Four instruction overlay stubs look like:
921 ila $79,target_address
925 build_stub (struct spu_link_hash_table
*htab
,
928 enum _stub_type stub_type
,
929 struct elf_link_hash_entry
*h
,
930 const Elf_Internal_Rela
*irela
,
935 struct got_entry
*g
, **head
;
937 bfd_vma addend
, val
, from
, to
;
940 if (stub_type
!= nonovl_stub
)
941 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
944 head
= &h
->got
.glist
;
946 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
950 addend
= irela
->r_addend
;
952 for (g
= *head
; g
!= NULL
; g
= g
->next
)
953 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
958 if (g
->ovl
== 0 && ovl
!= 0)
961 if (g
->stub_addr
!= (bfd_vma
) -1)
964 sec
= htab
->stub_sec
[ovl
];
965 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
966 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
968 to
= (htab
->ovly_load
->root
.u
.def
.value
969 + htab
->ovly_load
->root
.u
.def
.section
->output_offset
970 + htab
->ovly_load
->root
.u
.def
.section
->output_section
->vma
);
972 if (OVL_STUB_SIZE
== 16)
974 if (((dest
| to
| from
) & 3) != 0
975 || val
+ 0x20000 >= 0x40000)
980 ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
982 if (OVL_STUB_SIZE
== 16)
984 bfd_put_32 (sec
->owner
, ILA
+ ((ovl
<< 7) & 0x01ffff80) + 78,
985 sec
->contents
+ sec
->size
);
986 bfd_put_32 (sec
->owner
, LNOP
,
987 sec
->contents
+ sec
->size
+ 4);
988 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
989 sec
->contents
+ sec
->size
+ 8);
990 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
991 sec
->contents
+ sec
->size
+ 12);
993 else if (OVL_STUB_SIZE
== 8)
995 bfd_put_32 (sec
->owner
, BRSL
+ ((val
<< 5) & 0x007fff80) + 75,
996 sec
->contents
+ sec
->size
);
998 val
= (dest
& 0x3ffff) | (ovl
<< 18);
999 bfd_put_32 (sec
->owner
, val
,
1000 sec
->contents
+ sec
->size
+ 4);
1004 sec
->size
+= OVL_STUB_SIZE
;
1006 if (htab
->emit_stub_syms
)
1012 len
= 8 + sizeof (".ovl_call.") - 1;
1014 len
+= strlen (h
->root
.root
.string
);
1019 add
= (int) irela
->r_addend
& 0xffffffff;
1022 name
= bfd_malloc (len
);
1026 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1028 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1030 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1031 dest_sec
->id
& 0xffffffff,
1032 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1034 sprintf (name
+ len
- 9, "+%x", add
);
1036 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1040 if (h
->root
.type
== bfd_link_hash_new
)
1042 h
->root
.type
= bfd_link_hash_defined
;
1043 h
->root
.u
.def
.section
= sec
;
1044 h
->root
.u
.def
.value
= sec
->size
- OVL_STUB_SIZE
;
1045 h
->size
= OVL_STUB_SIZE
;
1049 h
->ref_regular_nonweak
= 1;
1050 h
->forced_local
= 1;
1058 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1062 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1064 /* Symbols starting with _SPUEAR_ need a stub because they may be
1065 invoked by the PPU. */
1066 struct bfd_link_info
*info
= inf
;
1067 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1070 if ((h
->root
.type
== bfd_link_hash_defined
1071 || h
->root
.type
== bfd_link_hash_defweak
)
1073 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1074 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1075 && sym_sec
->output_section
!= NULL
1076 && sym_sec
->output_section
->owner
== info
->output_bfd
1077 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1078 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1079 || htab
->non_overlay_stubs
))
1081 count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1088 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1090 /* Symbols starting with _SPUEAR_ need a stub because they may be
1091 invoked by the PPU. */
1092 struct bfd_link_info
*info
= inf
;
1093 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1096 if ((h
->root
.type
== bfd_link_hash_defined
1097 || h
->root
.type
== bfd_link_hash_defweak
)
1099 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1100 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1101 && sym_sec
->output_section
!= NULL
1102 && sym_sec
->output_section
->owner
== info
->output_bfd
1103 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1104 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1105 || htab
->non_overlay_stubs
))
1107 build_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1108 h
->root
.u
.def
.value
, sym_sec
);
1114 /* Size or build stubs. */
1117 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1119 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1122 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1124 extern const bfd_target bfd_elf32_spu_vec
;
1125 Elf_Internal_Shdr
*symtab_hdr
;
1127 Elf_Internal_Sym
*local_syms
= NULL
;
1129 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1132 /* We'll need the symbol table in a second. */
1133 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1134 if (symtab_hdr
->sh_info
== 0)
1137 /* Walk over each section attached to the input bfd. */
1138 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1140 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1142 /* If there aren't any relocs, then there's nothing more to do. */
1143 if ((isec
->flags
& SEC_RELOC
) == 0
1144 || isec
->reloc_count
== 0)
1147 if (!maybe_needs_stubs (isec
, info
->output_bfd
))
1150 /* Get the relocs. */
1151 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1153 if (internal_relocs
== NULL
)
1154 goto error_ret_free_local
;
1156 /* Now examine each relocation. */
1157 irela
= internal_relocs
;
1158 irelaend
= irela
+ isec
->reloc_count
;
1159 for (; irela
< irelaend
; irela
++)
1161 enum elf_spu_reloc_type r_type
;
1162 unsigned int r_indx
;
1164 Elf_Internal_Sym
*sym
;
1165 struct elf_link_hash_entry
*h
;
1166 enum _stub_type stub_type
;
1168 r_type
= ELF32_R_TYPE (irela
->r_info
);
1169 r_indx
= ELF32_R_SYM (irela
->r_info
);
1171 if (r_type
>= R_SPU_max
)
1173 bfd_set_error (bfd_error_bad_value
);
1174 error_ret_free_internal
:
1175 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1176 free (internal_relocs
);
1177 error_ret_free_local
:
1178 if (local_syms
!= NULL
1179 && (symtab_hdr
->contents
1180 != (unsigned char *) local_syms
))
1185 /* Determine the reloc target section. */
1186 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1187 goto error_ret_free_internal
;
1189 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1191 if (stub_type
== no_stub
)
1193 else if (stub_type
== stub_error
)
1194 goto error_ret_free_internal
;
1196 if (htab
->stub_count
== NULL
)
1199 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1200 htab
->stub_count
= bfd_zmalloc (amt
);
1201 if (htab
->stub_count
== NULL
)
1202 goto error_ret_free_internal
;
1207 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1208 goto error_ret_free_internal
;
1215 dest
= h
->root
.u
.def
.value
;
1217 dest
= sym
->st_value
;
1218 dest
+= irela
->r_addend
;
1219 if (!build_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
,
1221 goto error_ret_free_internal
;
1225 /* We're done with the internal relocs, free them. */
1226 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1227 free (internal_relocs
);
1230 if (local_syms
!= NULL
1231 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1233 if (!info
->keep_memory
)
1236 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1243 /* Allocate space for overlay call and return stubs. */
1246 spu_elf_size_stubs (struct bfd_link_info
*info
,
1247 void (*place_spu_section
) (asection
*, asection
*,
1249 int non_overlay_stubs
)
1251 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1258 htab
->non_overlay_stubs
= non_overlay_stubs
;
1259 if (!process_stubs (info
, FALSE
))
1262 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1266 if (htab
->stub_count
== NULL
)
1269 ibfd
= info
->input_bfds
;
1270 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1271 htab
->stub_sec
= bfd_zmalloc (amt
);
1272 if (htab
->stub_sec
== NULL
)
1275 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1276 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1277 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1278 htab
->stub_sec
[0] = stub
;
1280 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1282 stub
->size
= htab
->stub_count
[0] * OVL_STUB_SIZE
;
1283 (*place_spu_section
) (stub
, NULL
, ".text");
1285 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1287 asection
*osec
= htab
->ovl_sec
[i
];
1288 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1289 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1290 htab
->stub_sec
[ovl
] = stub
;
1292 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1294 stub
->size
= htab
->stub_count
[ovl
] * OVL_STUB_SIZE
;
1295 (*place_spu_section
) (stub
, osec
, NULL
);
1298 /* htab->ovtab consists of two arrays.
1308 . } _ovly_buf_table[];
1311 flags
= (SEC_ALLOC
| SEC_LOAD
1312 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1313 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1314 if (htab
->ovtab
== NULL
1315 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1318 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1319 (*place_spu_section
) (htab
->ovtab
, NULL
, ".data");
1321 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1322 if (htab
->toe
== NULL
1323 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1325 htab
->toe
->size
= 16;
1326 (*place_spu_section
) (htab
->toe
, NULL
, ".toe");
1331 /* Functions to handle embedded spu_ovl.o object. */
1334 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1340 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1346 struct _ovl_stream
*os
;
1350 os
= (struct _ovl_stream
*) stream
;
1351 max
= (const char *) os
->end
- (const char *) os
->start
;
1353 if ((ufile_ptr
) offset
>= max
)
1357 if (count
> max
- offset
)
1358 count
= max
- offset
;
1360 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1365 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1367 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1374 return *ovl_bfd
!= NULL
;
1377 /* Define an STT_OBJECT symbol. */
1379 static struct elf_link_hash_entry
*
1380 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1382 struct elf_link_hash_entry
*h
;
1384 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1388 if (h
->root
.type
!= bfd_link_hash_defined
1391 h
->root
.type
= bfd_link_hash_defined
;
1392 h
->root
.u
.def
.section
= htab
->ovtab
;
1393 h
->type
= STT_OBJECT
;
1396 h
->ref_regular_nonweak
= 1;
1401 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1402 h
->root
.u
.def
.section
->owner
,
1403 h
->root
.root
.string
);
1404 bfd_set_error (bfd_error_bad_value
);
1411 /* Fill in all stubs and the overlay tables. */
1414 spu_elf_build_stubs (struct bfd_link_info
*info
, int emit_syms
)
1416 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1417 struct elf_link_hash_entry
*h
;
1423 htab
->emit_stub_syms
= emit_syms
;
1424 if (htab
->stub_count
== NULL
)
1427 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1428 if (htab
->stub_sec
[i
]->size
!= 0)
1430 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1431 htab
->stub_sec
[i
]->size
);
1432 if (htab
->stub_sec
[i
]->contents
== NULL
)
1434 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1435 htab
->stub_sec
[i
]->size
= 0;
1438 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load", FALSE
, FALSE
, FALSE
);
1439 htab
->ovly_load
= h
;
1440 BFD_ASSERT (h
!= NULL
1441 && (h
->root
.type
== bfd_link_hash_defined
1442 || h
->root
.type
== bfd_link_hash_defweak
)
1445 s
= h
->root
.u
.def
.section
->output_section
;
1446 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1448 (*_bfd_error_handler
) (_("%s in overlay section"),
1449 h
->root
.root
.string
);
1450 bfd_set_error (bfd_error_bad_value
);
1454 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return", FALSE
, FALSE
, FALSE
);
1455 htab
->ovly_return
= h
;
1457 /* Fill in all the stubs. */
1458 process_stubs (info
, TRUE
);
1460 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1464 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1466 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1468 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1469 bfd_set_error (bfd_error_bad_value
);
1472 htab
->stub_sec
[i
]->rawsize
= 0;
1477 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1478 bfd_set_error (bfd_error_bad_value
);
1482 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1483 if (htab
->ovtab
->contents
== NULL
)
1486 /* Write out _ovly_table. */
1487 p
= htab
->ovtab
->contents
;
1488 /* set low bit of .size to mark non-overlay area as present. */
1490 obfd
= htab
->ovtab
->output_section
->owner
;
1491 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1493 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
1497 unsigned long off
= ovl_index
* 16;
1498 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
1500 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1501 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16, p
+ off
+ 4);
1502 /* file_off written later in spu_elf_modify_program_headers. */
1503 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
1507 h
= define_ovtab_symbol (htab
, "_ovly_table");
1510 h
->root
.u
.def
.value
= 16;
1511 h
->size
= htab
->num_overlays
* 16;
1513 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
1516 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1519 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
1522 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1523 h
->size
= htab
->num_buf
* 4;
1525 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
1528 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1531 h
= define_ovtab_symbol (htab
, "_EAR_");
1534 h
->root
.u
.def
.section
= htab
->toe
;
1535 h
->root
.u
.def
.value
= 0;
1541 /* Check that all loadable section VMAs lie in the range
1542 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
1545 spu_elf_check_vma (struct bfd_link_info
*info
,
1549 unsigned int overlay_fixed
,
1550 unsigned int reserved
,
1551 void (*spu_elf_load_ovl_mgr
) (void),
1552 FILE *(*spu_elf_open_overlay_script
) (void),
1553 void (*spu_elf_relink
) (void))
1555 struct elf_segment_map
*m
;
1557 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1558 bfd
*abfd
= info
->output_bfd
;
1560 if (auto_overlay
& AUTO_OVERLAY
)
1561 htab
->auto_overlay
= auto_overlay
;
1562 htab
->local_store
= hi
+ 1 - lo
;
1563 htab
->overlay_fixed
= overlay_fixed
;
1564 htab
->reserved
= reserved
;
1565 htab
->spu_elf_load_ovl_mgr
= spu_elf_load_ovl_mgr
;
1566 htab
->spu_elf_open_overlay_script
= spu_elf_open_overlay_script
;
1567 htab
->spu_elf_relink
= spu_elf_relink
;
1569 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
1570 if (m
->p_type
== PT_LOAD
)
1571 for (i
= 0; i
< m
->count
; i
++)
1572 if (m
->sections
[i
]->size
!= 0
1573 && (m
->sections
[i
]->vma
< lo
1574 || m
->sections
[i
]->vma
> hi
1575 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
1576 return m
->sections
[i
];
1578 /* No need for overlays if it all fits. */
1579 htab
->auto_overlay
= 0;
1583 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1584 Search for stack adjusting insns, and return the sp delta. */
1587 find_function_stack_adjust (asection
*sec
, bfd_vma offset
)
1592 memset (reg
, 0, sizeof (reg
));
1593 for (unrecog
= 0; offset
+ 4 <= sec
->size
&& unrecog
< 32; offset
+= 4)
1595 unsigned char buf
[4];
1599 /* Assume no relocs on stack adjusing insns. */
1600 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
1603 if (buf
[0] == 0x24 /* stqd */)
1607 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
1608 /* Partly decoded immediate field. */
1609 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
1611 if (buf
[0] == 0x1c /* ai */)
1614 imm
= (imm
^ 0x200) - 0x200;
1615 reg
[rt
] = reg
[ra
] + imm
;
1617 if (rt
== 1 /* sp */)
1624 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
1626 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
1628 reg
[rt
] = reg
[ra
] + reg
[rb
];
1632 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1634 if (buf
[0] >= 0x42 /* ila */)
1635 imm
|= (buf
[0] & 1) << 17;
1640 if (buf
[0] == 0x40 /* il */)
1642 if ((buf
[1] & 0x80) == 0)
1644 imm
= (imm
^ 0x8000) - 0x8000;
1646 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
1652 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
1654 reg
[rt
] |= imm
& 0xffff;
1657 else if (buf
[0] == 0x04 /* ori */)
1660 imm
= (imm
^ 0x200) - 0x200;
1661 reg
[rt
] = reg
[ra
] | imm
;
1664 else if ((buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
1665 || (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */))
1667 /* Used in pic reg load. Say rt is trashed. */
1671 else if (is_branch (buf
) || is_indirect_branch (buf
))
1672 /* If we hit a branch then we must be out of the prologue. */
1681 /* qsort predicate to sort symbols by section and value. */
1683 static Elf_Internal_Sym
*sort_syms_syms
;
1684 static asection
**sort_syms_psecs
;
1687 sort_syms (const void *a
, const void *b
)
1689 Elf_Internal_Sym
*const *s1
= a
;
1690 Elf_Internal_Sym
*const *s2
= b
;
1691 asection
*sec1
,*sec2
;
1692 bfd_signed_vma delta
;
1694 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
1695 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
1698 return sec1
->index
- sec2
->index
;
1700 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
1702 return delta
< 0 ? -1 : 1;
1704 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
1706 return delta
< 0 ? -1 : 1;
1708 return *s1
< *s2
? -1 : 1;
1713 struct function_info
*fun
;
1714 struct call_info
*next
;
1716 unsigned int max_depth
;
1717 unsigned int is_tail
: 1;
1718 unsigned int is_pasted
: 1;
1721 struct function_info
1723 /* List of functions called. Also branches to hot/cold part of
1725 struct call_info
*call_list
;
1726 /* For hot/cold part of function, point to owner. */
1727 struct function_info
*start
;
1728 /* Symbol at start of function. */
1730 Elf_Internal_Sym
*sym
;
1731 struct elf_link_hash_entry
*h
;
1733 /* Function section. */
1736 /* Where last called from, and number of sections called from. */
1737 asection
*last_caller
;
1738 unsigned int call_count
;
1739 /* Address range of (this part of) function. */
1743 /* Distance from root of call tree. Tail and hot/cold branches
1744 count as one deeper. We aren't counting stack frames here. */
1746 /* Set if global symbol. */
1747 unsigned int global
: 1;
1748 /* Set if known to be start of function (as distinct from a hunk
1749 in hot/cold section. */
1750 unsigned int is_func
: 1;
1751 /* Set if not a root node. */
1752 unsigned int non_root
: 1;
1753 /* Flags used during call tree traversal. It's cheaper to replicate
1754 the visit flags than have one which needs clearing after a traversal. */
1755 unsigned int visit1
: 1;
1756 unsigned int visit2
: 1;
1757 unsigned int marking
: 1;
1758 unsigned int visit3
: 1;
1759 unsigned int visit4
: 1;
1760 unsigned int visit5
: 1;
1761 unsigned int visit6
: 1;
1762 unsigned int visit7
: 1;
1765 struct spu_elf_stack_info
1769 /* Variable size array describing functions, one per contiguous
1770 address range belonging to a function. */
1771 struct function_info fun
[1];
1774 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1775 entries for section SEC. */
1777 static struct spu_elf_stack_info
*
1778 alloc_stack_info (asection
*sec
, int max_fun
)
1780 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1783 amt
= sizeof (struct spu_elf_stack_info
);
1784 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
1785 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
1786 if (sec_data
->u
.i
.stack_info
!= NULL
)
1787 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
1788 return sec_data
->u
.i
.stack_info
;
1791 /* Add a new struct function_info describing a (part of a) function
1792 starting at SYM_H. Keep the array sorted by address. */
1794 static struct function_info
*
1795 maybe_insert_function (asection
*sec
,
1798 bfd_boolean is_func
)
1800 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1801 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1807 sinfo
= alloc_stack_info (sec
, 20);
1814 Elf_Internal_Sym
*sym
= sym_h
;
1815 off
= sym
->st_value
;
1816 size
= sym
->st_size
;
1820 struct elf_link_hash_entry
*h
= sym_h
;
1821 off
= h
->root
.u
.def
.value
;
1825 for (i
= sinfo
->num_fun
; --i
>= 0; )
1826 if (sinfo
->fun
[i
].lo
<= off
)
1831 /* Don't add another entry for an alias, but do update some
1833 if (sinfo
->fun
[i
].lo
== off
)
1835 /* Prefer globals over local syms. */
1836 if (global
&& !sinfo
->fun
[i
].global
)
1838 sinfo
->fun
[i
].global
= TRUE
;
1839 sinfo
->fun
[i
].u
.h
= sym_h
;
1842 sinfo
->fun
[i
].is_func
= TRUE
;
1843 return &sinfo
->fun
[i
];
1845 /* Ignore a zero-size symbol inside an existing function. */
1846 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
1847 return &sinfo
->fun
[i
];
1850 if (sinfo
->num_fun
>= sinfo
->max_fun
)
1852 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
1853 bfd_size_type old
= amt
;
1855 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1856 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
1857 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1858 sinfo
= bfd_realloc (sinfo
, amt
);
1861 memset ((char *) sinfo
+ old
, 0, amt
- old
);
1862 sec_data
->u
.i
.stack_info
= sinfo
;
1865 if (++i
< sinfo
->num_fun
)
1866 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
1867 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
1868 sinfo
->fun
[i
].is_func
= is_func
;
1869 sinfo
->fun
[i
].global
= global
;
1870 sinfo
->fun
[i
].sec
= sec
;
1872 sinfo
->fun
[i
].u
.h
= sym_h
;
1874 sinfo
->fun
[i
].u
.sym
= sym_h
;
1875 sinfo
->fun
[i
].lo
= off
;
1876 sinfo
->fun
[i
].hi
= off
+ size
;
1877 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
);
1878 sinfo
->num_fun
+= 1;
1879 return &sinfo
->fun
[i
];
1882 /* Return the name of FUN. */
1885 func_name (struct function_info
*fun
)
1889 Elf_Internal_Shdr
*symtab_hdr
;
1891 while (fun
->start
!= NULL
)
1895 return fun
->u
.h
->root
.root
.string
;
1898 if (fun
->u
.sym
->st_name
== 0)
1900 size_t len
= strlen (sec
->name
);
1901 char *name
= bfd_malloc (len
+ 10);
1904 sprintf (name
, "%s+%lx", sec
->name
,
1905 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
1909 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1910 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
1913 /* Read the instruction at OFF in SEC. Return true iff the instruction
1914 is a nop, lnop, or stop 0 (all zero insn). */
1917 is_nop (asection
*sec
, bfd_vma off
)
1919 unsigned char insn
[4];
1921 if (off
+ 4 > sec
->size
1922 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
1924 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
1926 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
1931 /* Extend the range of FUN to cover nop padding up to LIMIT.
1932 Return TRUE iff some instruction other than a NOP was found. */
1935 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
1937 bfd_vma off
= (fun
->hi
+ 3) & -4;
1939 while (off
< limit
&& is_nop (fun
->sec
, off
))
1950 /* Check and fix overlapping function ranges. Return TRUE iff there
1951 are gaps in the current info we have about functions in SEC. */
1954 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
1956 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1957 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1959 bfd_boolean gaps
= FALSE
;
1964 for (i
= 1; i
< sinfo
->num_fun
; i
++)
1965 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
1967 /* Fix overlapping symbols. */
1968 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
1969 const char *f2
= func_name (&sinfo
->fun
[i
]);
1971 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
1972 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
1974 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
1977 if (sinfo
->num_fun
== 0)
1981 if (sinfo
->fun
[0].lo
!= 0)
1983 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
1985 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
1987 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
1988 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
1990 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
1996 /* Search current function info for a function that contains address
1997 OFFSET in section SEC. */
1999 static struct function_info
*
2000 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2002 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2003 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2007 hi
= sinfo
->num_fun
;
2010 mid
= (lo
+ hi
) / 2;
2011 if (offset
< sinfo
->fun
[mid
].lo
)
2013 else if (offset
>= sinfo
->fun
[mid
].hi
)
2016 return &sinfo
->fun
[mid
];
2018 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2023 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2024 if CALLEE was new. If this function return FALSE, CALLEE should
2028 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2030 struct call_info
**pp
, *p
;
2032 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2033 if (p
->fun
== callee
->fun
)
2035 /* Tail calls use less stack than normal calls. Retain entry
2036 for normal call over one for tail call. */
2037 p
->is_tail
&= callee
->is_tail
;
2040 p
->fun
->start
= NULL
;
2041 p
->fun
->is_func
= TRUE
;
2044 /* Reorder list so most recent call is first. */
2046 p
->next
= caller
->call_list
;
2047 caller
->call_list
= p
;
2050 callee
->next
= caller
->call_list
;
2052 caller
->call_list
= callee
;
2056 /* Copy CALL and insert the copy into CALLER. */
2059 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2061 struct call_info
*callee
;
2062 callee
= bfd_malloc (sizeof (*callee
));
2066 if (!insert_callee (caller
, callee
))
2071 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2072 overlay stub sections. */
2075 interesting_section (asection
*s
, bfd
*obfd
)
2077 return (s
->output_section
!= NULL
2078 && s
->output_section
->owner
== obfd
2079 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2080 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2084 /* Rummage through the relocs for SEC, looking for function calls.
2085 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2086 mark destination symbols on calls as being functions. Also
2087 look at branches, which may be tail calls or go to hot/cold
2088 section part of same function. */
2091 mark_functions_via_relocs (asection
*sec
,
2092 struct bfd_link_info
*info
,
2095 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2096 Elf_Internal_Shdr
*symtab_hdr
;
2098 static bfd_boolean warned
;
2100 if (!interesting_section (sec
, info
->output_bfd
)
2101 || sec
->reloc_count
== 0)
2104 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2106 if (internal_relocs
== NULL
)
2109 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2110 psyms
= &symtab_hdr
->contents
;
2111 irela
= internal_relocs
;
2112 irelaend
= irela
+ sec
->reloc_count
;
2113 for (; irela
< irelaend
; irela
++)
2115 enum elf_spu_reloc_type r_type
;
2116 unsigned int r_indx
;
2118 Elf_Internal_Sym
*sym
;
2119 struct elf_link_hash_entry
*h
;
2121 bfd_boolean reject
, is_call
;
2122 struct function_info
*caller
;
2123 struct call_info
*callee
;
2126 r_type
= ELF32_R_TYPE (irela
->r_info
);
2127 if (r_type
!= R_SPU_REL16
2128 && r_type
!= R_SPU_ADDR16
)
2131 if (!(call_tree
&& spu_hash_table (info
)->auto_overlay
))
2135 r_indx
= ELF32_R_SYM (irela
->r_info
);
2136 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2140 || sym_sec
->output_section
== NULL
2141 || sym_sec
->output_section
->owner
!= info
->output_bfd
)
2147 unsigned char insn
[4];
2149 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2150 irela
->r_offset
, 4))
2152 if (is_branch (insn
))
2154 is_call
= (insn
[0] & 0xfd) == 0x31;
2155 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2156 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2159 info
->callbacks
->einfo
2160 (_("%B(%A+0x%v): call to non-code section"
2161 " %B(%A), analysis incomplete\n"),
2162 sec
->owner
, sec
, irela
->r_offset
,
2163 sym_sec
->owner
, sym_sec
);
2171 if (!(call_tree
&& spu_hash_table (info
)->auto_overlay
)
2179 /* For --auto-overlay, count possible stubs we need for
2180 function pointer references. */
2181 unsigned int sym_type
;
2185 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2186 if (sym_type
== STT_FUNC
)
2187 spu_hash_table (info
)->non_ovly_stub
+= 1;
2192 val
= h
->root
.u
.def
.value
;
2194 val
= sym
->st_value
;
2195 val
+= irela
->r_addend
;
2199 struct function_info
*fun
;
2201 if (irela
->r_addend
!= 0)
2203 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2206 fake
->st_value
= val
;
2208 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2212 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2214 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2217 if (irela
->r_addend
!= 0
2218 && fun
->u
.sym
!= sym
)
2223 caller
= find_function (sec
, irela
->r_offset
, info
);
2226 callee
= bfd_malloc (sizeof *callee
);
2230 callee
->fun
= find_function (sym_sec
, val
, info
);
2231 if (callee
->fun
== NULL
)
2233 callee
->is_tail
= !is_call
;
2234 callee
->is_pasted
= FALSE
;
2236 if (callee
->fun
->last_caller
!= sec
)
2238 callee
->fun
->last_caller
= sec
;
2239 callee
->fun
->call_count
+= 1;
2241 if (!insert_callee (caller
, callee
))
2244 && !callee
->fun
->is_func
2245 && callee
->fun
->stack
== 0)
2247 /* This is either a tail call or a branch from one part of
2248 the function to another, ie. hot/cold section. If the
2249 destination has been called by some other function then
2250 it is a separate function. We also assume that functions
2251 are not split across input files. */
2252 if (sec
->owner
!= sym_sec
->owner
)
2254 callee
->fun
->start
= NULL
;
2255 callee
->fun
->is_func
= TRUE
;
2257 else if (callee
->fun
->start
== NULL
)
2258 callee
->fun
->start
= caller
;
2261 struct function_info
*callee_start
;
2262 struct function_info
*caller_start
;
2263 callee_start
= callee
->fun
;
2264 while (callee_start
->start
)
2265 callee_start
= callee_start
->start
;
2266 caller_start
= caller
;
2267 while (caller_start
->start
)
2268 caller_start
= caller_start
->start
;
2269 if (caller_start
!= callee_start
)
2271 callee
->fun
->start
= NULL
;
2272 callee
->fun
->is_func
= TRUE
;
2281 /* Handle something like .init or .fini, which has a piece of a function.
2282 These sections are pasted together to form a single function. */
2285 pasted_function (asection
*sec
, struct bfd_link_info
*info
)
2287 struct bfd_link_order
*l
;
2288 struct _spu_elf_section_data
*sec_data
;
2289 struct spu_elf_stack_info
*sinfo
;
2290 Elf_Internal_Sym
*fake
;
2291 struct function_info
*fun
, *fun_start
;
2293 fake
= bfd_zmalloc (sizeof (*fake
));
2297 fake
->st_size
= sec
->size
;
2299 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2300 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2304 /* Find a function immediately preceding this section. */
2306 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2308 if (l
->u
.indirect
.section
== sec
)
2310 if (fun_start
!= NULL
)
2312 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2316 fun
->start
= fun_start
;
2318 callee
->is_tail
= TRUE
;
2319 callee
->is_pasted
= TRUE
;
2321 if (!insert_callee (fun_start
, callee
))
2327 if (l
->type
== bfd_indirect_link_order
2328 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2329 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2330 && sinfo
->num_fun
!= 0)
2331 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2334 info
->callbacks
->einfo (_("%A link_order not found\n"), sec
);
2338 /* Map address ranges in code sections to functions. */
2341 discover_functions (struct bfd_link_info
*info
)
2345 Elf_Internal_Sym
***psym_arr
;
2346 asection
***sec_arr
;
2347 bfd_boolean gaps
= FALSE
;
2350 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2353 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2354 if (psym_arr
== NULL
)
2356 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2357 if (sec_arr
== NULL
)
2361 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2363 ibfd
= ibfd
->link_next
, bfd_idx
++)
2365 extern const bfd_target bfd_elf32_spu_vec
;
2366 Elf_Internal_Shdr
*symtab_hdr
;
2369 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2370 asection
**psecs
, **p
;
2372 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2375 /* Read all the symbols. */
2376 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2377 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2381 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2382 if (interesting_section (sec
, info
->output_bfd
))
2390 if (symtab_hdr
->contents
!= NULL
)
2392 /* Don't use cached symbols since the generic ELF linker
2393 code only reads local symbols, and we need globals too. */
2394 free (symtab_hdr
->contents
);
2395 symtab_hdr
->contents
= NULL
;
2397 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2399 symtab_hdr
->contents
= (void *) syms
;
2403 /* Select defined function symbols that are going to be output. */
2404 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2407 psym_arr
[bfd_idx
] = psyms
;
2408 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2411 sec_arr
[bfd_idx
] = psecs
;
2412 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2413 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2414 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2418 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2419 if (s
!= NULL
&& interesting_section (s
, info
->output_bfd
))
2422 symcount
= psy
- psyms
;
2425 /* Sort them by section and offset within section. */
2426 sort_syms_syms
= syms
;
2427 sort_syms_psecs
= psecs
;
2428 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2430 /* Now inspect the function symbols. */
2431 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2433 asection
*s
= psecs
[*psy
- syms
];
2434 Elf_Internal_Sym
**psy2
;
2436 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2437 if (psecs
[*psy2
- syms
] != s
)
2440 if (!alloc_stack_info (s
, psy2
- psy
))
2445 /* First install info about properly typed and sized functions.
2446 In an ideal world this will cover all code sections, except
2447 when partitioning functions into hot and cold sections,
2448 and the horrible pasted together .init and .fini functions. */
2449 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2452 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2454 asection
*s
= psecs
[sy
- syms
];
2455 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2460 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2461 if (interesting_section (sec
, info
->output_bfd
))
2462 gaps
|= check_function_ranges (sec
, info
);
2467 /* See if we can discover more function symbols by looking at
2469 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2471 ibfd
= ibfd
->link_next
, bfd_idx
++)
2475 if (psym_arr
[bfd_idx
] == NULL
)
2478 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2479 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2483 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2485 ibfd
= ibfd
->link_next
, bfd_idx
++)
2487 Elf_Internal_Shdr
*symtab_hdr
;
2489 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2492 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2495 psecs
= sec_arr
[bfd_idx
];
2497 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2498 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2501 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2502 if (interesting_section (sec
, info
->output_bfd
))
2503 gaps
|= check_function_ranges (sec
, info
);
2507 /* Finally, install all globals. */
2508 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2512 s
= psecs
[sy
- syms
];
2514 /* Global syms might be improperly typed functions. */
2515 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2516 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2518 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
2524 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2526 extern const bfd_target bfd_elf32_spu_vec
;
2529 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2532 /* Some of the symbols we've installed as marking the
2533 beginning of functions may have a size of zero. Extend
2534 the range of such functions to the beginning of the
2535 next symbol of interest. */
2536 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2537 if (interesting_section (sec
, info
->output_bfd
))
2539 struct _spu_elf_section_data
*sec_data
;
2540 struct spu_elf_stack_info
*sinfo
;
2542 sec_data
= spu_elf_section_data (sec
);
2543 sinfo
= sec_data
->u
.i
.stack_info
;
2547 bfd_vma hi
= sec
->size
;
2549 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
2551 sinfo
->fun
[fun_idx
].hi
= hi
;
2552 hi
= sinfo
->fun
[fun_idx
].lo
;
2555 /* No symbols in this section. Must be .init or .fini
2556 or something similar. */
2557 else if (!pasted_function (sec
, info
))
2563 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2565 ibfd
= ibfd
->link_next
, bfd_idx
++)
2567 if (psym_arr
[bfd_idx
] == NULL
)
2570 free (psym_arr
[bfd_idx
]);
2571 free (sec_arr
[bfd_idx
]);
2580 /* Iterate over all function_info we have collected, calling DOIT on
2581 each node if ROOT_ONLY is false. Only call DOIT on root nodes
2585 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
2586 struct bfd_link_info
*,
2588 struct bfd_link_info
*info
,
2594 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2596 extern const bfd_target bfd_elf32_spu_vec
;
2599 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2602 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2604 struct _spu_elf_section_data
*sec_data
;
2605 struct spu_elf_stack_info
*sinfo
;
2607 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2608 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
2611 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2612 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
2613 if (!doit (&sinfo
->fun
[i
], info
, param
))
2621 /* Transfer call info attached to struct function_info entries for
2622 all of a given function's sections to the first entry. */
2625 transfer_calls (struct function_info
*fun
,
2626 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2627 void *param ATTRIBUTE_UNUSED
)
2629 struct function_info
*start
= fun
->start
;
2633 struct call_info
*call
, *call_next
;
2635 while (start
->start
!= NULL
)
2636 start
= start
->start
;
2637 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
2639 call_next
= call
->next
;
2640 if (!insert_callee (start
, call
))
2643 fun
->call_list
= NULL
;
2648 /* Mark nodes in the call graph that are called by some other node. */
2651 mark_non_root (struct function_info
*fun
,
2652 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2653 void *param ATTRIBUTE_UNUSED
)
2655 struct call_info
*call
;
2660 for (call
= fun
->call_list
; call
; call
= call
->next
)
2662 call
->fun
->non_root
= TRUE
;
2663 mark_non_root (call
->fun
, 0, 0);
2668 /* Remove cycles from the call graph. Set depth of nodes. */
2671 remove_cycles (struct function_info
*fun
,
2672 struct bfd_link_info
*info
,
2675 struct call_info
**callp
, *call
;
2676 unsigned int depth
= *(unsigned int *) param
;
2677 unsigned int max_depth
= depth
;
2681 fun
->marking
= TRUE
;
2683 callp
= &fun
->call_list
;
2684 while ((call
= *callp
) != NULL
)
2686 if (!call
->fun
->visit2
)
2688 call
->max_depth
= depth
+ !call
->is_pasted
;
2689 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
2691 if (max_depth
< call
->max_depth
)
2692 max_depth
= call
->max_depth
;
2694 else if (call
->fun
->marking
)
2696 if (!spu_hash_table (info
)->auto_overlay
)
2698 const char *f1
= func_name (fun
);
2699 const char *f2
= func_name (call
->fun
);
2701 info
->callbacks
->info (_("Stack analysis will ignore the call "
2705 *callp
= call
->next
;
2709 callp
= &call
->next
;
2711 fun
->marking
= FALSE
;
2712 *(unsigned int *) param
= max_depth
;
2716 /* Populate call_list for each function. */
2719 build_call_tree (struct bfd_link_info
*info
)
2724 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2726 extern const bfd_target bfd_elf32_spu_vec
;
2729 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2732 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2733 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
2737 /* Transfer call info from hot/cold section part of function
2739 if (!spu_hash_table (info
)->auto_overlay
2740 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
2743 /* Find the call graph root(s). */
2744 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
2747 /* Remove cycles from the call graph. We start from the root node(s)
2748 so that we break cycles in a reasonable place. */
2750 return for_each_node (remove_cycles
, info
, &depth
, TRUE
);
2753 /* qsort predicate to sort calls by max_depth then count. */
2756 sort_calls (const void *a
, const void *b
)
2758 struct call_info
*const *c1
= a
;
2759 struct call_info
*const *c2
= b
;
2762 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
2766 delta
= (*c2
)->count
- (*c1
)->count
;
2774 unsigned int max_overlay_size
;
2777 /* Set linker_mark and gc_mark on any sections that we will put in
2778 overlays. These flags are used by the generic ELF linker, but we
2779 won't be continuing on to bfd_elf_final_link so it is OK to use
2780 them. linker_mark is clear before we get here. Set segment_mark
2781 on sections that are part of a pasted function (excluding the last
2784 Set up function rodata section if --overlay-rodata. We don't
2785 currently include merged string constant rodata sections since
2787 Sort the call graph so that the deepest nodes will be visited
2791 mark_overlay_section (struct function_info
*fun
,
2792 struct bfd_link_info
*info
,
2795 struct call_info
*call
;
2797 struct _mos_param
*mos_param
= param
;
2803 if (!fun
->sec
->linker_mark
)
2805 fun
->sec
->linker_mark
= 1;
2806 fun
->sec
->gc_mark
= 1;
2807 fun
->sec
->segment_mark
= 0;
2808 /* Ensure SEC_CODE is set on this text section (it ought to
2809 be!), and SEC_CODE is clear on rodata sections. We use
2810 this flag to differentiate the two overlay section types. */
2811 fun
->sec
->flags
|= SEC_CODE
;
2812 if (spu_hash_table (info
)->auto_overlay
& OVERLAY_RODATA
)
2817 /* Find the rodata section corresponding to this function's
2819 if (strcmp (fun
->sec
->name
, ".text") == 0)
2821 name
= bfd_malloc (sizeof (".rodata"));
2824 memcpy (name
, ".rodata", sizeof (".rodata"));
2826 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
2828 size_t len
= strlen (fun
->sec
->name
);
2829 name
= bfd_malloc (len
+ 3);
2832 memcpy (name
, ".rodata", sizeof (".rodata"));
2833 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
2835 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
2837 size_t len
= strlen (fun
->sec
->name
) + 1;
2838 name
= bfd_malloc (len
);
2841 memcpy (name
, fun
->sec
->name
, len
);
2847 asection
*rodata
= NULL
;
2848 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
2849 if (group_sec
== NULL
)
2850 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
2852 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
2854 if (strcmp (group_sec
->name
, name
) == 0)
2859 group_sec
= elf_section_data (group_sec
)->next_in_group
;
2861 fun
->rodata
= rodata
;
2864 fun
->rodata
->linker_mark
= 1;
2865 fun
->rodata
->gc_mark
= 1;
2866 fun
->rodata
->flags
&= ~SEC_CODE
;
2870 size
= fun
->sec
->size
;
2872 size
+= fun
->rodata
->size
;
2873 if (mos_param
->max_overlay_size
< size
)
2874 mos_param
->max_overlay_size
= size
;
2878 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2883 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
2887 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2888 calls
[count
++] = call
;
2890 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
2892 fun
->call_list
= NULL
;
2896 calls
[count
]->next
= fun
->call_list
;
2897 fun
->call_list
= calls
[count
];
2902 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2904 if (call
->is_pasted
)
2906 /* There can only be one is_pasted call per function_info. */
2907 BFD_ASSERT (!fun
->sec
->segment_mark
);
2908 fun
->sec
->segment_mark
= 1;
2910 if (!mark_overlay_section (call
->fun
, info
, param
))
2914 /* Don't put entry code into an overlay. The overlay manager needs
2916 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
2917 == info
->output_bfd
->start_address
)
2919 fun
->sec
->linker_mark
= 0;
2920 if (fun
->rodata
!= NULL
)
2921 fun
->rodata
->linker_mark
= 0;
2927 asection
*exclude_input_section
;
2928 asection
*exclude_output_section
;
2929 unsigned long clearing
;
2932 /* Undo some of mark_overlay_section's work. */
2935 unmark_overlay_section (struct function_info
*fun
,
2936 struct bfd_link_info
*info
,
2939 struct call_info
*call
;
2940 struct _uos_param
*uos_param
= param
;
2941 unsigned int excluded
= 0;
2949 if (fun
->sec
== uos_param
->exclude_input_section
2950 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
2953 uos_param
->clearing
+= excluded
;
2955 if (uos_param
->clearing
)
2957 fun
->sec
->linker_mark
= 0;
2959 fun
->rodata
->linker_mark
= 0;
2962 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2963 if (!unmark_overlay_section (call
->fun
, info
, param
))
2966 uos_param
->clearing
-= excluded
;
2971 unsigned int lib_size
;
2972 asection
**lib_sections
;
2975 /* Add sections we have marked as belonging to overlays to an array
2976 for consideration as non-overlay sections. The array consist of
2977 pairs of sections, (text,rodata), for functions in the call graph. */
2980 collect_lib_sections (struct function_info
*fun
,
2981 struct bfd_link_info
*info
,
2984 struct _cl_param
*lib_param
= param
;
2985 struct call_info
*call
;
2992 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
2995 size
= fun
->sec
->size
;
2997 size
+= fun
->rodata
->size
;
2998 if (size
> lib_param
->lib_size
)
3001 *lib_param
->lib_sections
++ = fun
->sec
;
3002 fun
->sec
->gc_mark
= 0;
3003 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3005 *lib_param
->lib_sections
++ = fun
->rodata
;
3006 fun
->rodata
->gc_mark
= 0;
3009 *lib_param
->lib_sections
++ = NULL
;
3011 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3012 collect_lib_sections (call
->fun
, info
, param
);
3017 /* qsort predicate to sort sections by call count. */
3020 sort_lib (const void *a
, const void *b
)
3022 asection
*const *s1
= a
;
3023 asection
*const *s2
= b
;
3024 struct _spu_elf_section_data
*sec_data
;
3025 struct spu_elf_stack_info
*sinfo
;
3029 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3030 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3033 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3034 delta
-= sinfo
->fun
[i
].call_count
;
3037 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3038 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3041 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3042 delta
+= sinfo
->fun
[i
].call_count
;
3051 /* Remove some sections from those marked to be in overlays. Choose
3052 those that are called from many places, likely library functions. */
3055 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3058 asection
**lib_sections
;
3059 unsigned int i
, lib_count
;
3060 struct _cl_param collect_lib_param
;
3061 struct function_info dummy_caller
;
3063 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3065 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3067 extern const bfd_target bfd_elf32_spu_vec
;
3070 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3073 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3074 if (sec
->linker_mark
3075 && sec
->size
< lib_size
3076 && (sec
->flags
& SEC_CODE
) != 0)
3079 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3080 if (lib_sections
== NULL
)
3081 return (unsigned int) -1;
3082 collect_lib_param
.lib_size
= lib_size
;
3083 collect_lib_param
.lib_sections
= lib_sections
;
3084 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3086 return (unsigned int) -1;
3087 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3089 /* Sort sections so that those with the most calls are first. */
3091 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3093 for (i
= 0; i
< lib_count
; i
++)
3095 unsigned int tmp
, stub_size
;
3097 struct _spu_elf_section_data
*sec_data
;
3098 struct spu_elf_stack_info
*sinfo
;
3100 sec
= lib_sections
[2 * i
];
3101 /* If this section is OK, its size must be less than lib_size. */
3103 /* If it has a rodata section, then add that too. */
3104 if (lib_sections
[2 * i
+ 1])
3105 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3106 /* Add any new overlay call stubs needed by the section. */
3109 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3110 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3113 struct call_info
*call
;
3115 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3116 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3117 if (call
->fun
->sec
->linker_mark
)
3119 struct call_info
*p
;
3120 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3121 if (p
->fun
== call
->fun
)
3124 stub_size
+= OVL_STUB_SIZE
;
3127 if (tmp
+ stub_size
< lib_size
)
3129 struct call_info
**pp
, *p
;
3131 /* This section fits. Mark it as non-overlay. */
3132 lib_sections
[2 * i
]->linker_mark
= 0;
3133 if (lib_sections
[2 * i
+ 1])
3134 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3135 lib_size
-= tmp
+ stub_size
;
3136 /* Call stubs to the section we just added are no longer
3138 pp
= &dummy_caller
.call_list
;
3139 while ((p
= *pp
) != NULL
)
3140 if (!p
->fun
->sec
->linker_mark
)
3142 lib_size
+= OVL_STUB_SIZE
;
3148 /* Add new call stubs to dummy_caller. */
3149 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3150 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3153 struct call_info
*call
;
3155 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3156 for (call
= sinfo
->fun
[k
].call_list
;
3159 if (call
->fun
->sec
->linker_mark
)
3161 struct call_info
*callee
;
3162 callee
= bfd_malloc (sizeof (*callee
));
3164 return (unsigned int) -1;
3166 if (!insert_callee (&dummy_caller
, callee
))
3172 while (dummy_caller
.call_list
!= NULL
)
3174 struct call_info
*call
= dummy_caller
.call_list
;
3175 dummy_caller
.call_list
= call
->next
;
3178 for (i
= 0; i
< 2 * lib_count
; i
++)
3179 if (lib_sections
[i
])
3180 lib_sections
[i
]->gc_mark
= 1;
3181 free (lib_sections
);
3185 /* Build an array of overlay sections. The deepest node's section is
3186 added first, then its parent node's section, then everything called
3187 from the parent section. The idea being to group sections to
3188 minimise calls between different overlays. */
3191 collect_overlays (struct function_info
*fun
,
3192 struct bfd_link_info
*info
,
3195 struct call_info
*call
;
3196 bfd_boolean added_fun
;
3197 asection
***ovly_sections
= param
;
3203 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3204 if (!call
->is_pasted
)
3206 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3212 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3214 fun
->sec
->gc_mark
= 0;
3215 *(*ovly_sections
)++ = fun
->sec
;
3216 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3218 fun
->rodata
->gc_mark
= 0;
3219 *(*ovly_sections
)++ = fun
->rodata
;
3222 *(*ovly_sections
)++ = NULL
;
3225 /* Pasted sections must stay with the first section. We don't
3226 put pasted sections in the array, just the first section.
3227 Mark subsequent sections as already considered. */
3228 if (fun
->sec
->segment_mark
)
3230 struct function_info
*call_fun
= fun
;
3233 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3234 if (call
->is_pasted
)
3236 call_fun
= call
->fun
;
3237 call_fun
->sec
->gc_mark
= 0;
3238 if (call_fun
->rodata
)
3239 call_fun
->rodata
->gc_mark
= 0;
3245 while (call_fun
->sec
->segment_mark
);
3249 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3250 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3255 struct _spu_elf_section_data
*sec_data
;
3256 struct spu_elf_stack_info
*sinfo
;
3258 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3259 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3262 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3263 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3271 struct _sum_stack_param
{
3273 size_t overall_stack
;
3274 bfd_boolean emit_stack_syms
;
3277 /* Descend the call graph for FUN, accumulating total stack required. */
3280 sum_stack (struct function_info
*fun
,
3281 struct bfd_link_info
*info
,
3284 struct call_info
*call
;
3285 struct function_info
*max
;
3286 size_t stack
, cum_stack
;
3288 bfd_boolean has_call
;
3289 struct _sum_stack_param
*sum_stack_param
= param
;
3290 struct spu_link_hash_table
*htab
;
3292 cum_stack
= fun
->stack
;
3293 sum_stack_param
->cum_stack
= cum_stack
;
3299 for (call
= fun
->call_list
; call
; call
= call
->next
)
3301 if (!call
->is_pasted
)
3303 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3305 stack
= sum_stack_param
->cum_stack
;
3306 /* Include caller stack for normal calls, don't do so for
3307 tail calls. fun->stack here is local stack usage for
3309 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3310 stack
+= fun
->stack
;
3311 if (cum_stack
< stack
)
3318 sum_stack_param
->cum_stack
= cum_stack
;
3320 /* Now fun->stack holds cumulative stack. */
3321 fun
->stack
= cum_stack
;
3325 && sum_stack_param
->overall_stack
< cum_stack
)
3326 sum_stack_param
->overall_stack
= cum_stack
;
3328 htab
= spu_hash_table (info
);
3329 if (htab
->auto_overlay
)
3332 f1
= func_name (fun
);
3334 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3335 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3336 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3340 info
->callbacks
->minfo (_(" calls:\n"));
3341 for (call
= fun
->call_list
; call
; call
= call
->next
)
3342 if (!call
->is_pasted
)
3344 const char *f2
= func_name (call
->fun
);
3345 const char *ann1
= call
->fun
== max
? "*" : " ";
3346 const char *ann2
= call
->is_tail
? "t" : " ";
3348 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
3352 if (sum_stack_param
->emit_stack_syms
)
3354 char *name
= bfd_malloc (18 + strlen (f1
));
3355 struct elf_link_hash_entry
*h
;
3360 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
3361 sprintf (name
, "__stack_%s", f1
);
3363 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
3365 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
3368 && (h
->root
.type
== bfd_link_hash_new
3369 || h
->root
.type
== bfd_link_hash_undefined
3370 || h
->root
.type
== bfd_link_hash_undefweak
))
3372 h
->root
.type
= bfd_link_hash_defined
;
3373 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
3374 h
->root
.u
.def
.value
= cum_stack
;
3379 h
->ref_regular_nonweak
= 1;
3380 h
->forced_local
= 1;
3388 /* SEC is part of a pasted function. Return the call_info for the
3389 next section of this function. */
3391 static struct call_info
*
3392 find_pasted_call (asection
*sec
)
3394 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
3395 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
3396 struct call_info
*call
;
3399 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3400 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
3401 if (call
->is_pasted
)
3407 /* qsort predicate to sort bfds by file name. */
3410 sort_bfds (const void *a
, const void *b
)
3412 bfd
*const *abfd1
= a
;
3413 bfd
*const *abfd2
= b
;
3415 return strcmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
3418 /* Handle --auto-overlay. */
3420 static void spu_elf_auto_overlay (struct bfd_link_info
*, void (*) (void))
3424 spu_elf_auto_overlay (struct bfd_link_info
*info
,
3425 void (*spu_elf_load_ovl_mgr
) (void))
3429 struct elf_segment_map
*m
;
3430 unsigned int fixed_size
, lo
, hi
;
3431 struct spu_link_hash_table
*htab
;
3432 unsigned int base
, i
, count
, bfd_count
;
3434 asection
**ovly_sections
, **ovly_p
;
3436 unsigned int total_overlay_size
, overlay_size
;
3437 struct elf_link_hash_entry
*h
;
3438 struct _mos_param mos_param
;
3439 struct _uos_param uos_param
;
3440 struct function_info dummy_caller
;
3442 /* Find the extents of our loadable image. */
3443 lo
= (unsigned int) -1;
3445 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
3446 if (m
->p_type
== PT_LOAD
)
3447 for (i
= 0; i
< m
->count
; i
++)
3448 if (m
->sections
[i
]->size
!= 0)
3450 if (m
->sections
[i
]->vma
< lo
)
3451 lo
= m
->sections
[i
]->vma
;
3452 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
3453 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
3455 fixed_size
= hi
+ 1 - lo
;
3457 if (!discover_functions (info
))
3460 if (!build_call_tree (info
))
3463 uos_param
.exclude_input_section
= 0;
3464 uos_param
.exclude_output_section
3465 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
3467 htab
= spu_hash_table (info
);
3468 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
3469 FALSE
, FALSE
, FALSE
);
3471 && (h
->root
.type
== bfd_link_hash_defined
3472 || h
->root
.type
== bfd_link_hash_defweak
)
3475 /* We have a user supplied overlay manager. */
3476 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
3480 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
3481 builtin version to .text, and will adjust .text size. */
3482 asection
*text
= bfd_get_section_by_name (info
->output_bfd
, ".text");
3484 fixed_size
-= text
->size
;
3485 spu_elf_load_ovl_mgr ();
3486 text
= bfd_get_section_by_name (info
->output_bfd
, ".text");
3488 fixed_size
+= text
->size
;
3491 /* Mark overlay sections, and find max overlay section size. */
3492 mos_param
.max_overlay_size
= 0;
3493 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
3496 /* We can't put the overlay manager or interrupt routines in
3498 uos_param
.clearing
= 0;
3499 if ((uos_param
.exclude_input_section
3500 || uos_param
.exclude_output_section
)
3501 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
3505 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3507 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
3508 if (bfd_arr
== NULL
)
3511 /* Count overlay sections, and subtract their sizes from "fixed_size". */
3514 total_overlay_size
= 0;
3515 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3517 extern const bfd_target bfd_elf32_spu_vec
;
3519 unsigned int old_count
;
3521 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3525 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3526 if (sec
->linker_mark
)
3528 if ((sec
->flags
& SEC_CODE
) != 0)
3530 fixed_size
-= sec
->size
;
3531 total_overlay_size
+= sec
->size
;
3533 if (count
!= old_count
)
3534 bfd_arr
[bfd_count
++] = ibfd
;
3537 /* Since the overlay link script selects sections by file name and
3538 section name, ensure that file names are unique. */
3541 bfd_boolean ok
= TRUE
;
3543 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
3544 for (i
= 1; i
< bfd_count
; ++i
)
3545 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
3547 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
3549 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
3550 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
3551 bfd_arr
[i
- 1]->filename
,
3552 bfd_arr
[i
- 1]->my_archive
->filename
);
3554 info
->callbacks
->einfo (_("%s in both %s and %s\n"),
3555 bfd_arr
[i
- 1]->filename
,
3556 bfd_arr
[i
- 1]->my_archive
->filename
,
3557 bfd_arr
[i
]->my_archive
->filename
);
3559 else if (bfd_arr
[i
- 1]->my_archive
)
3560 info
->callbacks
->einfo (_("%s in %s and as an object\n"),
3561 bfd_arr
[i
- 1]->filename
,
3562 bfd_arr
[i
- 1]->my_archive
->filename
);
3563 else if (bfd_arr
[i
]->my_archive
)
3564 info
->callbacks
->einfo (_("%s in %s and as an object\n"),
3565 bfd_arr
[i
]->filename
,
3566 bfd_arr
[i
]->my_archive
->filename
);
3568 info
->callbacks
->einfo (_("%s duplicated\n"),
3569 bfd_arr
[i
]->filename
);
3574 /* FIXME: modify plain object files from foo.o to ./foo.o
3575 and emit EXCLUDE_FILE to handle the duplicates in
3576 archives. There is a pathological case we can't handle:
3577 We may have duplicate file names within a single archive. */
3578 info
->callbacks
->einfo (_("sorry, no support for duplicate "
3579 "object files in auto-overlay script\n"));
3580 bfd_set_error (bfd_error_bad_value
);
3586 if (htab
->reserved
== 0)
3588 struct _sum_stack_param sum_stack_param
;
3590 sum_stack_param
.emit_stack_syms
= 0;
3591 sum_stack_param
.overall_stack
= 0;
3592 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3594 htab
->reserved
= sum_stack_param
.overall_stack
;
3596 fixed_size
+= htab
->reserved
;
3597 fixed_size
+= htab
->non_ovly_stub
* OVL_STUB_SIZE
;
3598 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
3600 /* Guess number of overlays. Assuming overlay buffer is on
3601 average only half full should be conservative. */
3602 ovlynum
= total_overlay_size
* 2 / (htab
->local_store
- fixed_size
);
3603 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
3604 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
3607 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
3608 info
->callbacks
->einfo (_("non-overlay plus maximum overlay size "
3609 "of 0x%x exceeds local store\n"),
3610 fixed_size
+ mos_param
.max_overlay_size
);
3612 /* Now see if we should put some functions in the non-overlay area. */
3613 if (fixed_size
< htab
->overlay_fixed
3614 && htab
->overlay_fixed
+ mos_param
.max_overlay_size
< htab
->local_store
)
3616 unsigned int lib_size
= htab
->overlay_fixed
- fixed_size
;
3617 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
3618 if (lib_size
== (unsigned int) -1)
3620 fixed_size
= htab
->overlay_fixed
- lib_size
;
3623 /* Build an array of sections, suitably sorted to place into
3625 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
3626 if (ovly_sections
== NULL
)
3628 ovly_p
= ovly_sections
;
3629 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
3631 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
3633 script
= htab
->spu_elf_open_overlay_script ();
3635 if (fprintf (script
, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
3638 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3639 overlay_size
= htab
->local_store
- fixed_size
;
3642 while (base
< count
)
3644 unsigned int size
= 0;
3647 for (i
= base
; i
< count
; i
++)
3651 unsigned int stub_size
;
3652 struct call_info
*call
, *pasty
;
3653 struct _spu_elf_section_data
*sec_data
;
3654 struct spu_elf_stack_info
*sinfo
;
3657 /* See whether we can add this section to the current
3658 overlay without overflowing our overlay buffer. */
3659 sec
= ovly_sections
[2 * i
];
3660 tmp
= size
+ sec
->size
;
3661 if (ovly_sections
[2 * i
+ 1])
3662 tmp
+= ovly_sections
[2 * i
+ 1]->size
;
3663 if (tmp
> overlay_size
)
3665 if (sec
->segment_mark
)
3667 /* Pasted sections must stay together, so add their
3669 struct call_info
*pasty
= find_pasted_call (sec
);
3670 while (pasty
!= NULL
)
3672 struct function_info
*call_fun
= pasty
->fun
;
3673 tmp
+= call_fun
->sec
->size
;
3674 if (call_fun
->rodata
)
3675 tmp
+= call_fun
->rodata
->size
;
3676 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
3677 if (pasty
->is_pasted
)
3681 if (tmp
> overlay_size
)
3684 /* If we add this section, we might need new overlay call
3685 stubs. Add any overlay section calls to dummy_call. */
3687 sec_data
= spu_elf_section_data (sec
);
3688 sinfo
= sec_data
->u
.i
.stack_info
;
3689 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3690 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3691 if (call
->is_pasted
)
3693 BFD_ASSERT (pasty
== NULL
);
3696 else if (call
->fun
->sec
->linker_mark
)
3698 if (!copy_callee (&dummy_caller
, call
))
3701 while (pasty
!= NULL
)
3703 struct function_info
*call_fun
= pasty
->fun
;
3705 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3706 if (call
->is_pasted
)
3708 BFD_ASSERT (pasty
== NULL
);
3711 else if (!copy_callee (&dummy_caller
, call
))
3715 /* Calculate call stub size. */
3717 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
3721 stub_size
+= OVL_STUB_SIZE
;
3722 /* If the call is within this overlay, we won't need a
3724 for (k
= base
; k
< i
+ 1; k
++)
3725 if (call
->fun
->sec
== ovly_sections
[2 * k
])
3727 stub_size
-= OVL_STUB_SIZE
;
3731 if (tmp
+ stub_size
> overlay_size
)
3739 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
3740 ovly_sections
[2 * i
]->owner
,
3741 ovly_sections
[2 * i
],
3742 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
3743 bfd_set_error (bfd_error_bad_value
);
3747 if (fprintf (script
, " .ovly%d {\n", ++ovlynum
) <= 0)
3749 for (j
= base
; j
< i
; j
++)
3751 asection
*sec
= ovly_sections
[2 * j
];
3753 if (fprintf (script
, " [%c]%s (%s)\n",
3754 sec
->owner
->filename
[0],
3755 sec
->owner
->filename
+ 1,
3758 if (sec
->segment_mark
)
3760 struct call_info
*call
= find_pasted_call (sec
);
3761 while (call
!= NULL
)
3763 struct function_info
*call_fun
= call
->fun
;
3764 sec
= call_fun
->sec
;
3765 if (fprintf (script
, " [%c]%s (%s)\n",
3766 sec
->owner
->filename
[0],
3767 sec
->owner
->filename
+ 1,
3770 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3771 if (call
->is_pasted
)
3777 for (j
= base
; j
< i
; j
++)
3779 asection
*sec
= ovly_sections
[2 * j
+ 1];
3780 if (sec
!= NULL
&& fprintf (script
, " [%c]%s (%s)\n",
3781 sec
->owner
->filename
[0],
3782 sec
->owner
->filename
+ 1,
3786 sec
= ovly_sections
[2 * j
];
3787 if (sec
->segment_mark
)
3789 struct call_info
*call
= find_pasted_call (sec
);
3790 while (call
!= NULL
)
3792 struct function_info
*call_fun
= call
->fun
;
3793 sec
= call_fun
->rodata
;
3794 if (sec
!= NULL
&& fprintf (script
, " [%c]%s (%s)\n",
3795 sec
->owner
->filename
[0],
3796 sec
->owner
->filename
+ 1,
3799 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3800 if (call
->is_pasted
)
3806 if (fprintf (script
, " }\n") <= 0)
3809 while (dummy_caller
.call_list
!= NULL
)
3811 struct call_info
*call
= dummy_caller
.call_list
;
3812 dummy_caller
.call_list
= call
->next
;
3818 free (ovly_sections
);
3820 if (fprintf (script
, " }\n}\nINSERT AFTER .text;\n") <= 0)
3822 if (fclose (script
) != 0)
3825 if (htab
->auto_overlay
& AUTO_RELINK
)
3826 htab
->spu_elf_relink ();
3831 bfd_set_error (bfd_error_system_call
);
3833 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
3837 /* Provide an estimate of total stack required. */
3840 spu_elf_stack_analysis (struct bfd_link_info
*info
, int emit_stack_syms
)
3842 struct _sum_stack_param sum_stack_param
;
3844 if (!discover_functions (info
))
3847 if (!build_call_tree (info
))
3850 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
3851 info
->callbacks
->minfo (_("\nStack size for functions. "
3852 "Annotations: '*' max stack, 't' tail call\n"));
3854 sum_stack_param
.emit_stack_syms
= emit_stack_syms
;
3855 sum_stack_param
.overall_stack
= 0;
3856 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3859 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
3860 (bfd_vma
) sum_stack_param
.overall_stack
);
3864 /* Perform a final link. */
3867 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
3869 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3871 if (htab
->auto_overlay
)
3872 spu_elf_auto_overlay (info
, htab
->spu_elf_load_ovl_mgr
);
3874 if (htab
->stack_analysis
3875 && !spu_elf_stack_analysis (info
, htab
->emit_stack_syms
))
3876 info
->callbacks
->einfo ("%X%P: stack analysis error: %E\n");
3878 return bfd_elf_final_link (output_bfd
, info
);
3881 /* Called when not normally emitting relocs, ie. !info->relocatable
3882 and !info->emitrelocations. Returns a count of special relocs
3883 that need to be emitted. */
3886 spu_elf_count_relocs (asection
*sec
, Elf_Internal_Rela
*relocs
)
3888 unsigned int count
= 0;
3889 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
3891 for (; relocs
< relend
; relocs
++)
3893 int r_type
= ELF32_R_TYPE (relocs
->r_info
);
3894 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
3901 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
3904 spu_elf_relocate_section (bfd
*output_bfd
,
3905 struct bfd_link_info
*info
,
3907 asection
*input_section
,
3909 Elf_Internal_Rela
*relocs
,
3910 Elf_Internal_Sym
*local_syms
,
3911 asection
**local_sections
)
3913 Elf_Internal_Shdr
*symtab_hdr
;
3914 struct elf_link_hash_entry
**sym_hashes
;
3915 Elf_Internal_Rela
*rel
, *relend
;
3916 struct spu_link_hash_table
*htab
;
3917 asection
*ea
= bfd_get_section_by_name (output_bfd
, "._ea");
3919 bfd_boolean emit_these_relocs
= FALSE
;
3920 bfd_boolean is_ea_sym
;
3923 htab
= spu_hash_table (info
);
3924 stubs
= (htab
->stub_sec
!= NULL
3925 && maybe_needs_stubs (input_section
, output_bfd
));
3926 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
3927 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
3930 relend
= relocs
+ input_section
->reloc_count
;
3931 for (; rel
< relend
; rel
++)
3934 reloc_howto_type
*howto
;
3935 unsigned int r_symndx
;
3936 Elf_Internal_Sym
*sym
;
3938 struct elf_link_hash_entry
*h
;
3939 const char *sym_name
;
3942 bfd_reloc_status_type r
;
3943 bfd_boolean unresolved_reloc
;
3945 enum _stub_type stub_type
;
3947 r_symndx
= ELF32_R_SYM (rel
->r_info
);
3948 r_type
= ELF32_R_TYPE (rel
->r_info
);
3949 howto
= elf_howto_table
+ r_type
;
3950 unresolved_reloc
= FALSE
;
3955 if (r_symndx
< symtab_hdr
->sh_info
)
3957 sym
= local_syms
+ r_symndx
;
3958 sec
= local_sections
[r_symndx
];
3959 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
3960 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
3964 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
3965 r_symndx
, symtab_hdr
, sym_hashes
,
3967 unresolved_reloc
, warned
);
3968 sym_name
= h
->root
.root
.string
;
3971 if (sec
!= NULL
&& elf_discarded_section (sec
))
3973 /* For relocs against symbols from removed linkonce sections,
3974 or sections discarded by a linker script, we just want the
3975 section contents zeroed. Avoid any special processing. */
3976 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
3982 if (info
->relocatable
)
3985 is_ea_sym
= (ea
!= NULL
3987 && sec
->output_section
== ea
);
3989 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
3993 /* ._ea is a special section that isn't allocated in SPU
3994 memory, but rather occupies space in PPU memory as
3995 part of an embedded ELF image. If this reloc is
3996 against a symbol defined in ._ea, then transform the
3997 reloc into an equivalent one without a symbol
3998 relative to the start of the ELF image. */
3999 rel
->r_addend
+= (relocation
4001 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4002 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4004 emit_these_relocs
= TRUE
;
4009 unresolved_reloc
= TRUE
;
4011 if (unresolved_reloc
)
4013 (*_bfd_error_handler
)
4014 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4016 bfd_get_section_name (input_bfd
, input_section
),
4017 (long) rel
->r_offset
,
4023 /* If this symbol is in an overlay area, we may need to relocate
4024 to the overlay stub. */
4025 addend
= rel
->r_addend
;
4027 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4028 contents
, info
)) != no_stub
)
4030 unsigned int ovl
= 0;
4031 struct got_entry
*g
, **head
;
4033 if (stub_type
!= nonovl_stub
)
4034 ovl
= (spu_elf_section_data (input_section
->output_section
)
4038 head
= &h
->got
.glist
;
4040 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4042 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4043 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4048 relocation
= g
->stub_addr
;
4052 r
= _bfd_final_link_relocate (howto
,
4056 rel
->r_offset
, relocation
, addend
);
4058 if (r
!= bfd_reloc_ok
)
4060 const char *msg
= (const char *) 0;
4064 case bfd_reloc_overflow
:
4065 if (!((*info
->callbacks
->reloc_overflow
)
4066 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
4067 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
4071 case bfd_reloc_undefined
:
4072 if (!((*info
->callbacks
->undefined_symbol
)
4073 (info
, sym_name
, input_bfd
, input_section
,
4074 rel
->r_offset
, TRUE
)))
4078 case bfd_reloc_outofrange
:
4079 msg
= _("internal error: out of range error");
4082 case bfd_reloc_notsupported
:
4083 msg
= _("internal error: unsupported relocation error");
4086 case bfd_reloc_dangerous
:
4087 msg
= _("internal error: dangerous error");
4091 msg
= _("internal error: unknown error");
4096 if (!((*info
->callbacks
->warning
)
4097 (info
, msg
, sym_name
, input_bfd
, input_section
,
4106 && emit_these_relocs
4107 && !info
->emitrelocations
)
4109 Elf_Internal_Rela
*wrel
;
4110 Elf_Internal_Shdr
*rel_hdr
;
4112 wrel
= rel
= relocs
;
4113 relend
= relocs
+ input_section
->reloc_count
;
4114 for (; rel
< relend
; rel
++)
4118 r_type
= ELF32_R_TYPE (rel
->r_info
);
4119 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4122 input_section
->reloc_count
= wrel
- relocs
;
4123 /* Backflips for _bfd_elf_link_output_relocs. */
4124 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
4125 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
4132 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4135 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
4136 const char *sym_name ATTRIBUTE_UNUSED
,
4137 Elf_Internal_Sym
*sym
,
4138 asection
*sym_sec ATTRIBUTE_UNUSED
,
4139 struct elf_link_hash_entry
*h
)
4141 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4143 if (!info
->relocatable
4144 && htab
->stub_sec
!= NULL
4146 && (h
->root
.type
== bfd_link_hash_defined
4147 || h
->root
.type
== bfd_link_hash_defweak
)
4149 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
4151 struct got_entry
*g
;
4153 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
4154 if (g
->addend
== 0 && g
->ovl
== 0)
4156 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
4157 (htab
->stub_sec
[0]->output_section
->owner
,
4158 htab
->stub_sec
[0]->output_section
));
4159 sym
->st_value
= g
->stub_addr
;
/* Non-zero when the output is to be marked as an SPU "plugin" image;
   consulted when the ELF header is written out.  */
static int spu_plugin = 0;

/* Record VAL in the file-scope plugin flag.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
4175 /* Set ELF header e_type for plugins. */
4178 spu_elf_post_process_headers (bfd
*abfd
,
4179 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
4183 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
4185 i_ehdrp
->e_type
= ET_DYN
;
4189 /* We may add an extra PT_LOAD segment for .toe. We also need extra
4190 segments for overlays. */
4193 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4195 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4196 int extra
= htab
->num_overlays
;
4202 sec
= bfd_get_section_by_name (abfd
, ".toe");
4203 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
4209 /* Remove .toe section from other PT_LOAD segments and put it in
4210 a segment of its own. Put overlays in separate segments too. */
4213 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
4216 struct elf_segment_map
*m
;
4222 toe
= bfd_get_section_by_name (abfd
, ".toe");
4223 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4224 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
4225 for (i
= 0; i
< m
->count
; i
++)
4226 if ((s
= m
->sections
[i
]) == toe
4227 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
4229 struct elf_segment_map
*m2
;
4232 if (i
+ 1 < m
->count
)
4234 amt
= sizeof (struct elf_segment_map
);
4235 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
4236 m2
= bfd_zalloc (abfd
, amt
);
4239 m2
->count
= m
->count
- (i
+ 1);
4240 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
4241 m2
->count
* sizeof (m
->sections
[0]));
4242 m2
->p_type
= PT_LOAD
;
4250 amt
= sizeof (struct elf_segment_map
);
4251 m2
= bfd_zalloc (abfd
, amt
);
4254 m2
->p_type
= PT_LOAD
;
4256 m2
->sections
[0] = s
;
4266 /* Tweak the section type of .note.spu_name. */
4269 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
4270 Elf_Internal_Shdr
*hdr
,
4273 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
4274 hdr
->sh_type
= SHT_NOTE
;
4278 /* Tweak phdrs before writing them out. */
4281 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4283 const struct elf_backend_data
*bed
;
4284 struct elf_obj_tdata
*tdata
;
4285 Elf_Internal_Phdr
*phdr
, *last
;
4286 struct spu_link_hash_table
*htab
;
4293 bed
= get_elf_backend_data (abfd
);
4294 tdata
= elf_tdata (abfd
);
4296 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
4297 htab
= spu_hash_table (info
);
4298 if (htab
->num_overlays
!= 0)
4300 struct elf_segment_map
*m
;
4303 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
4305 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
4307 /* Mark this as an overlay header. */
4308 phdr
[i
].p_flags
|= PF_OVERLAY
;
4310 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0)
4312 bfd_byte
*p
= htab
->ovtab
->contents
;
4313 unsigned int off
= o
* 16 + 8;
4315 /* Write file_off into _ovly_table. */
4316 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
4321 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
4322 of 16. This should always be possible when using the standard
4323 linker scripts, but don't create overlapping segments if
4324 someone is playing games with linker scripts. */
4326 for (i
= count
; i
-- != 0; )
4327 if (phdr
[i
].p_type
== PT_LOAD
)
4331 adjust
= -phdr
[i
].p_filesz
& 15;
4334 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
4337 adjust
= -phdr
[i
].p_memsz
& 15;
4340 && phdr
[i
].p_filesz
!= 0
4341 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
4342 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
4345 if (phdr
[i
].p_filesz
!= 0)
4349 if (i
== (unsigned int) -1)
4350 for (i
= count
; i
-- != 0; )
4351 if (phdr
[i
].p_type
== PT_LOAD
)
4355 adjust
= -phdr
[i
].p_filesz
& 15;
4356 phdr
[i
].p_filesz
+= adjust
;
4358 adjust
= -phdr
[i
].p_memsz
& 15;
4359 phdr
[i
].p_memsz
+= adjust
;
4365 #define TARGET_BIG_SYM bfd_elf32_spu_vec
4366 #define TARGET_BIG_NAME "elf32-spu"
4367 #define ELF_ARCH bfd_arch_spu
4368 #define ELF_MACHINE_CODE EM_SPU
4369 /* This matches the alignment need for DMA. */
4370 #define ELF_MAXPAGESIZE 0x80
4371 #define elf_backend_rela_normal 1
4372 #define elf_backend_can_gc_sections 1
4374 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
4375 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
4376 #define elf_info_to_howto spu_elf_info_to_howto
4377 #define elf_backend_count_relocs spu_elf_count_relocs
4378 #define elf_backend_relocate_section spu_elf_relocate_section
4379 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
4380 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
4381 #define elf_backend_object_p spu_elf_object_p
4382 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
4383 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
4385 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
4386 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
4387 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
4388 #define elf_backend_post_process_headers spu_elf_post_process_headers
4389 #define elf_backend_fake_sections spu_elf_fake_sections
4390 #define elf_backend_special_sections spu_elf_special_sections
4391 #define bfd_elf32_bfd_final_link spu_elf_final_link
4393 #include "elf32-target.h"