/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007, 2008 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
93 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
94 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
95 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
99 static enum elf_spu_reloc_type
100 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
106 case BFD_RELOC_SPU_IMM10W
:
108 case BFD_RELOC_SPU_IMM16W
:
110 case BFD_RELOC_SPU_LO16
:
111 return R_SPU_ADDR16_LO
;
112 case BFD_RELOC_SPU_HI16
:
113 return R_SPU_ADDR16_HI
;
114 case BFD_RELOC_SPU_IMM18
:
116 case BFD_RELOC_SPU_PCREL16
:
118 case BFD_RELOC_SPU_IMM7
:
120 case BFD_RELOC_SPU_IMM8
:
122 case BFD_RELOC_SPU_PCREL9a
:
124 case BFD_RELOC_SPU_PCREL9b
:
126 case BFD_RELOC_SPU_IMM10
:
127 return R_SPU_ADDR10I
;
128 case BFD_RELOC_SPU_IMM16
:
129 return R_SPU_ADDR16I
;
132 case BFD_RELOC_32_PCREL
:
134 case BFD_RELOC_SPU_PPU32
:
136 case BFD_RELOC_SPU_PPU64
:
142 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
144 Elf_Internal_Rela
*dst
)
146 enum elf_spu_reloc_type r_type
;
148 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
149 BFD_ASSERT (r_type
< R_SPU_max
);
150 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
153 static reloc_howto_type
*
154 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
155 bfd_reloc_code_real_type code
)
157 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
159 if (r_type
== R_SPU_NONE
)
162 return elf_howto_table
+ r_type
;
165 static reloc_howto_type
*
166 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
171 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
172 if (elf_howto_table
[i
].name
!= NULL
173 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
174 return &elf_howto_table
[i
];
179 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
181 static bfd_reloc_status_type
182 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
183 void *data
, asection
*input_section
,
184 bfd
*output_bfd
, char **error_message
)
186 bfd_size_type octets
;
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
193 if (output_bfd
!= NULL
)
194 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
195 input_section
, output_bfd
, error_message
);
197 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
198 return bfd_reloc_outofrange
;
199 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
201 /* Get symbol value. */
203 if (!bfd_is_com_section (symbol
->section
))
205 if (symbol
->section
->output_section
)
206 val
+= symbol
->section
->output_section
->vma
;
208 val
+= reloc_entry
->addend
;
210 /* Make it pc-relative. */
211 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
214 if (val
+ 256 >= 512)
215 return bfd_reloc_overflow
;
217 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
222 insn
&= ~reloc_entry
->howto
->dst_mask
;
223 insn
|= val
& reloc_entry
->howto
->dst_mask
;
224 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
229 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
231 if (!sec
->used_by_bfd
)
233 struct _spu_elf_section_data
*sdata
;
235 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
238 sec
->used_by_bfd
= sdata
;
241 return _bfd_elf_new_section_hook (abfd
, sec
);
244 /* Set up overlay info for executables. */
247 spu_elf_object_p (bfd
*abfd
)
249 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
251 unsigned int i
, num_ovl
, num_buf
;
252 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
253 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
254 Elf_Internal_Phdr
*last_phdr
= NULL
;
256 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
257 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
262 if (last_phdr
== NULL
263 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
266 for (j
= 1; j
< elf_numsections (abfd
); j
++)
268 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
270 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr
, phdr
))
272 asection
*sec
= shdr
->bfd_section
;
273 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
274 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
282 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
286 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
288 if (sym
->name
!= NULL
289 && sym
->section
!= bfd_abs_section_ptr
290 && strncmp (sym
->name
, "_EAR_", 5) == 0)
291 sym
->flags
|= BSF_KEEP
;
294 /* SPU ELF linker hash table. */
296 struct spu_link_hash_table
298 struct elf_link_hash_table elf
;
300 /* Shortcuts to overlay sections. */
305 /* Count of stubs in each overlay section. */
306 unsigned int *stub_count
;
308 /* The stub section for each overlay section. */
311 struct elf_link_hash_entry
*ovly_load
;
312 struct elf_link_hash_entry
*ovly_return
;
313 unsigned long ovly_load_r_symndx
;
315 /* Number of overlay buffers. */
316 unsigned int num_buf
;
318 /* Total number of overlays. */
319 unsigned int num_overlays
;
321 /* How much memory we have. */
322 unsigned int local_store
;
323 /* Local store --auto-overlay should reserve for non-overlay
324 functions and data. */
325 unsigned int overlay_fixed
;
326 /* Local store --auto-overlay should reserve for stack and heap. */
327 unsigned int reserved
;
328 /* Count of overlay stubs needed in non-overlay area. */
329 unsigned int non_ovly_stub
;
331 /* Stash various callbacks for --auto-overlay. */
332 void (*spu_elf_load_ovl_mgr
) (void);
333 FILE *(*spu_elf_open_overlay_script
) (void);
334 void (*spu_elf_relink
) (void);
336 /* Bit 0 set if --auto-overlay.
337 Bit 1 set if --auto-relink.
338 Bit 2 set if --overlay-rodata. */
339 unsigned int auto_overlay
: 3;
340 #define AUTO_OVERLAY 1
341 #define AUTO_RELINK 2
342 #define OVERLAY_RODATA 4
344 /* Set if we should emit symbols for stubs. */
345 unsigned int emit_stub_syms
:1;
347 /* Set if we want stubs on calls out of overlay regions to
348 non-overlay regions. */
349 unsigned int non_overlay_stubs
: 1;
352 unsigned int stub_err
: 1;
354 /* Set if stack size analysis should be done. */
355 unsigned int stack_analysis
: 1;
357 /* Set if __stack_* syms will be emitted. */
358 unsigned int emit_stack_syms
: 1;
361 /* Hijack the generic got fields for overlay stub accounting. */
365 struct got_entry
*next
;
/* Retrieve the SPU-specific hash table from generic linker info P.  */
#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
374 /* Create a spu ELF linker hash table. */
376 static struct bfd_link_hash_table
*
377 spu_elf_link_hash_table_create (bfd
*abfd
)
379 struct spu_link_hash_table
*htab
;
381 htab
= bfd_malloc (sizeof (*htab
));
385 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
386 _bfd_elf_link_hash_newfunc
,
387 sizeof (struct elf_link_hash_entry
)))
393 memset (&htab
->ovtab
, 0,
394 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
396 htab
->elf
.init_got_refcount
.refcount
= 0;
397 htab
->elf
.init_got_refcount
.glist
= NULL
;
398 htab
->elf
.init_got_offset
.offset
= 0;
399 htab
->elf
.init_got_offset
.glist
= NULL
;
400 return &htab
->elf
.root
;
403 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
404 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
405 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
408 get_sym_h (struct elf_link_hash_entry
**hp
,
409 Elf_Internal_Sym
**symp
,
411 Elf_Internal_Sym
**locsymsp
,
412 unsigned long r_symndx
,
415 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
417 if (r_symndx
>= symtab_hdr
->sh_info
)
419 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
420 struct elf_link_hash_entry
*h
;
422 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
423 while (h
->root
.type
== bfd_link_hash_indirect
424 || h
->root
.type
== bfd_link_hash_warning
)
425 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
435 asection
*symsec
= NULL
;
436 if (h
->root
.type
== bfd_link_hash_defined
437 || h
->root
.type
== bfd_link_hash_defweak
)
438 symsec
= h
->root
.u
.def
.section
;
444 Elf_Internal_Sym
*sym
;
445 Elf_Internal_Sym
*locsyms
= *locsymsp
;
449 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
452 size_t symcount
= symtab_hdr
->sh_info
;
454 /* If we are reading symbols into the contents, then
455 read the global syms too. This is done to cache
456 syms for later stack analysis. */
457 if ((unsigned char **) locsymsp
== &symtab_hdr
->contents
)
458 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
459 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
466 sym
= locsyms
+ r_symndx
;
475 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
481 /* Create the note section if not already present. This is done early so
482 that the linker maps the sections to the right place in the output. */
485 spu_elf_create_sections (struct bfd_link_info
*info
,
490 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
492 /* Stash some options away where we can get at them later. */
493 htab
->stack_analysis
= stack_analysis
;
494 htab
->emit_stack_syms
= emit_stack_syms
;
496 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
497 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
502 /* Make SPU_PTNOTE_SPUNAME section. */
509 ibfd
= info
->input_bfds
;
510 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
511 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
513 || !bfd_set_section_alignment (ibfd
, s
, 4))
516 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
517 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
518 size
+= (name_len
+ 3) & -4;
520 if (!bfd_set_section_size (ibfd
, s
, size
))
523 data
= bfd_zalloc (ibfd
, size
);
527 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
528 bfd_put_32 (ibfd
, name_len
, data
+ 4);
529 bfd_put_32 (ibfd
, 1, data
+ 8);
530 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
531 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
532 bfd_get_filename (info
->output_bfd
), name_len
);
539 /* qsort predicate to sort sections by vma. */
542 sort_sections (const void *a
, const void *b
)
544 const asection
*const *s1
= a
;
545 const asection
*const *s2
= b
;
546 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
549 return delta
< 0 ? -1 : 1;
551 return (*s1
)->index
- (*s2
)->index
;
554 /* Identify overlays in the output bfd, and number them. */
557 spu_elf_find_overlays (struct bfd_link_info
*info
)
559 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
560 asection
**alloc_sec
;
561 unsigned int i
, n
, ovl_index
, num_buf
;
565 if (info
->output_bfd
->section_count
< 2)
569 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
570 if (alloc_sec
== NULL
)
573 /* Pick out all the alloced sections. */
574 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
575 if ((s
->flags
& SEC_ALLOC
) != 0
576 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
586 /* Sort them by vma. */
587 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
589 /* Look for overlapping vmas. Any with overlap must be overlays.
590 Count them. Also count the number of overlay regions. */
591 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
592 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
595 if (s
->vma
< ovl_end
)
597 asection
*s0
= alloc_sec
[i
- 1];
599 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
601 alloc_sec
[ovl_index
] = s0
;
602 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
603 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= ++num_buf
;
605 alloc_sec
[ovl_index
] = s
;
606 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
607 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
608 if (s0
->vma
!= s
->vma
)
610 info
->callbacks
->einfo (_("%X%P: overlay sections %A and %A "
611 "do not start at the same address.\n"),
615 if (ovl_end
< s
->vma
+ s
->size
)
616 ovl_end
= s
->vma
+ s
->size
;
619 ovl_end
= s
->vma
+ s
->size
;
622 htab
->num_overlays
= ovl_index
;
623 htab
->num_buf
= num_buf
;
624 htab
->ovl_sec
= alloc_sec
;
625 htab
->ovly_load
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
626 FALSE
, FALSE
, FALSE
);
627 htab
->ovly_return
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return",
628 FALSE
, FALSE
, FALSE
);
629 return ovl_index
!= 0;
/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.  */
#ifndef OVL_STUB_SIZE
/* Default to faster.  */
#define OVL_STUB_SIZE 16
/* #define OVL_STUB_SIZE 8 */
#endif

/* SPU instruction opcodes used when emitting stubs.  */
#define BRSL 0x33000000
#define BR 0x32000000
#define NOP 0x40200000
#define LNOP 0x00200000
#define ILA 0x42000000
645 /* Return true for all relative and absolute branch instructions.
653 brhnz 00100011 0.. */
656 is_branch (const unsigned char *insn
)
658 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
661 /* Return true for all indirect branch instructions.
669 bihnz 00100101 011 */
672 is_indirect_branch (const unsigned char *insn
)
674 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
677 /* Return true for branch hint instructions.
682 is_hint (const unsigned char *insn
)
684 return (insn
[0] & 0xfc) == 0x10;
687 /* True if INPUT_SECTION might need overlay stubs. */
690 maybe_needs_stubs (asection
*input_section
, bfd
*output_bfd
)
692 /* No stubs for debug sections and suchlike. */
693 if ((input_section
->flags
& SEC_ALLOC
) == 0)
696 /* No stubs for link-once sections that will be discarded. */
697 if (input_section
->output_section
== NULL
698 || input_section
->output_section
->owner
!= output_bfd
)
701 /* Don't create stubs for .eh_frame references. */
702 if (strcmp (input_section
->name
, ".eh_frame") == 0)
716 /* Return non-zero if this reloc symbol should go via an overlay stub.
717 Return 2 if the stub must be in non-overlay area. */
719 static enum _stub_type
720 needs_ovl_stub (struct elf_link_hash_entry
*h
,
721 Elf_Internal_Sym
*sym
,
723 asection
*input_section
,
724 Elf_Internal_Rela
*irela
,
726 struct bfd_link_info
*info
)
728 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
729 enum elf_spu_reloc_type r_type
;
730 unsigned int sym_type
;
732 enum _stub_type ret
= no_stub
;
735 || sym_sec
->output_section
== NULL
736 || sym_sec
->output_section
->owner
!= info
->output_bfd
737 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
742 /* Ensure no stubs for user supplied overlay manager syms. */
743 if (h
== htab
->ovly_load
|| h
== htab
->ovly_return
)
746 /* setjmp always goes via an overlay stub, because then the return
747 and hence the longjmp goes via __ovly_return. That magically
748 makes setjmp/longjmp between overlays work. */
749 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
750 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
754 /* Usually, symbols in non-overlay sections don't need stubs. */
755 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
756 && !htab
->non_overlay_stubs
)
762 sym_type
= ELF_ST_TYPE (sym
->st_info
);
764 r_type
= ELF32_R_TYPE (irela
->r_info
);
766 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
770 if (contents
== NULL
)
773 if (!bfd_get_section_contents (input_section
->owner
,
780 contents
+= irela
->r_offset
;
782 if (is_branch (contents
) || is_hint (contents
))
785 if ((contents
[0] & 0xfd) == 0x31
786 && sym_type
!= STT_FUNC
789 /* It's common for people to write assembly and forget
790 to give function symbols the right type. Handle
791 calls to such symbols, but warn so that (hopefully)
792 people will fix their code. We need the symbol
793 type to be correct to distinguish function pointer
794 initialisation from other pointer initialisations. */
795 const char *sym_name
;
798 sym_name
= h
->root
.root
.string
;
801 Elf_Internal_Shdr
*symtab_hdr
;
802 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
803 sym_name
= bfd_elf_sym_name (input_section
->owner
,
808 (*_bfd_error_handler
) (_("warning: call to non-function"
809 " symbol %s defined in %B"),
810 sym_sec
->owner
, sym_name
);
816 if (sym_type
!= STT_FUNC
818 && (sym_sec
->flags
& SEC_CODE
) == 0)
821 /* A reference from some other section to a symbol in an overlay
822 section needs a stub. */
823 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
824 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
827 /* If this insn isn't a branch then we are possibly taking the
828 address of a function and passing it out somehow. */
829 return !branch
&& sym_type
== STT_FUNC
? nonovl_stub
: ret
;
833 count_stub (struct spu_link_hash_table
*htab
,
836 enum _stub_type stub_type
,
837 struct elf_link_hash_entry
*h
,
838 const Elf_Internal_Rela
*irela
)
840 unsigned int ovl
= 0;
841 struct got_entry
*g
, **head
;
844 /* If this instruction is a branch or call, we need a stub
845 for it. One stub per function per overlay.
846 If it isn't a branch, then we are taking the address of
847 this function so need a stub in the non-overlay area
848 for it. One stub per function. */
849 if (stub_type
!= nonovl_stub
)
850 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
853 head
= &h
->got
.glist
;
856 if (elf_local_got_ents (ibfd
) == NULL
)
858 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
859 * sizeof (*elf_local_got_ents (ibfd
)));
860 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
861 if (elf_local_got_ents (ibfd
) == NULL
)
864 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
869 addend
= irela
->r_addend
;
873 struct got_entry
*gnext
;
875 for (g
= *head
; g
!= NULL
; g
= g
->next
)
876 if (g
->addend
== addend
&& g
->ovl
== 0)
881 /* Need a new non-overlay area stub. Zap other stubs. */
882 for (g
= *head
; g
!= NULL
; g
= gnext
)
885 if (g
->addend
== addend
)
887 htab
->stub_count
[g
->ovl
] -= 1;
895 for (g
= *head
; g
!= NULL
; g
= g
->next
)
896 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
902 g
= bfd_malloc (sizeof *g
);
907 g
->stub_addr
= (bfd_vma
) -1;
911 htab
->stub_count
[ovl
] += 1;
917 /* Two instruction overlay stubs look like:
920 .word target_ovl_and_address
922 ovl_and_address is a word with the overlay number in the top 14 bits
923 and local store address in the bottom 18 bits.
925 Four instruction overlay stubs look like:
929 ila $79,target_address
933 build_stub (struct spu_link_hash_table
*htab
,
936 enum _stub_type stub_type
,
937 struct elf_link_hash_entry
*h
,
938 const Elf_Internal_Rela
*irela
,
943 struct got_entry
*g
, **head
;
945 bfd_vma addend
, val
, from
, to
;
948 if (stub_type
!= nonovl_stub
)
949 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
952 head
= &h
->got
.glist
;
954 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
958 addend
= irela
->r_addend
;
960 for (g
= *head
; g
!= NULL
; g
= g
->next
)
961 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
966 if (g
->ovl
== 0 && ovl
!= 0)
969 if (g
->stub_addr
!= (bfd_vma
) -1)
972 sec
= htab
->stub_sec
[ovl
];
973 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
974 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
976 to
= (htab
->ovly_load
->root
.u
.def
.value
977 + htab
->ovly_load
->root
.u
.def
.section
->output_offset
978 + htab
->ovly_load
->root
.u
.def
.section
->output_section
->vma
);
980 if (OVL_STUB_SIZE
== 16)
982 if (((dest
| to
| from
) & 3) != 0
983 || val
+ 0x20000 >= 0x40000)
988 ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
990 if (OVL_STUB_SIZE
== 16)
992 bfd_put_32 (sec
->owner
, ILA
+ ((ovl
<< 7) & 0x01ffff80) + 78,
993 sec
->contents
+ sec
->size
);
994 bfd_put_32 (sec
->owner
, LNOP
,
995 sec
->contents
+ sec
->size
+ 4);
996 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
997 sec
->contents
+ sec
->size
+ 8);
998 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
999 sec
->contents
+ sec
->size
+ 12);
1001 else if (OVL_STUB_SIZE
== 8)
1003 bfd_put_32 (sec
->owner
, BRSL
+ ((val
<< 5) & 0x007fff80) + 75,
1004 sec
->contents
+ sec
->size
);
1006 val
= (dest
& 0x3ffff) | (ovl
<< 18);
1007 bfd_put_32 (sec
->owner
, val
,
1008 sec
->contents
+ sec
->size
+ 4);
1012 sec
->size
+= OVL_STUB_SIZE
;
1014 if (htab
->emit_stub_syms
)
1020 len
= 8 + sizeof (".ovl_call.") - 1;
1022 len
+= strlen (h
->root
.root
.string
);
1027 add
= (int) irela
->r_addend
& 0xffffffff;
1030 name
= bfd_malloc (len
);
1034 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1036 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1038 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1039 dest_sec
->id
& 0xffffffff,
1040 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1042 sprintf (name
+ len
- 9, "+%x", add
);
1044 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1048 if (h
->root
.type
== bfd_link_hash_new
)
1050 h
->root
.type
= bfd_link_hash_defined
;
1051 h
->root
.u
.def
.section
= sec
;
1052 h
->root
.u
.def
.value
= sec
->size
- OVL_STUB_SIZE
;
1053 h
->size
= OVL_STUB_SIZE
;
1057 h
->ref_regular_nonweak
= 1;
1058 h
->forced_local
= 1;
1066 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1070 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1072 /* Symbols starting with _SPUEAR_ need a stub because they may be
1073 invoked by the PPU. */
1074 struct bfd_link_info
*info
= inf
;
1075 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1078 if ((h
->root
.type
== bfd_link_hash_defined
1079 || h
->root
.type
== bfd_link_hash_defweak
)
1081 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1082 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1083 && sym_sec
->output_section
!= NULL
1084 && sym_sec
->output_section
->owner
== info
->output_bfd
1085 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1086 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1087 || htab
->non_overlay_stubs
))
1089 count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1096 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1098 /* Symbols starting with _SPUEAR_ need a stub because they may be
1099 invoked by the PPU. */
1100 struct bfd_link_info
*info
= inf
;
1101 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1104 if ((h
->root
.type
== bfd_link_hash_defined
1105 || h
->root
.type
== bfd_link_hash_defweak
)
1107 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1108 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1109 && sym_sec
->output_section
!= NULL
1110 && sym_sec
->output_section
->owner
== info
->output_bfd
1111 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1112 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1113 || htab
->non_overlay_stubs
))
1115 build_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1116 h
->root
.u
.def
.value
, sym_sec
);
1122 /* Size or build stubs. */
1125 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1127 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1130 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1132 extern const bfd_target bfd_elf32_spu_vec
;
1133 Elf_Internal_Shdr
*symtab_hdr
;
1135 Elf_Internal_Sym
*local_syms
= NULL
;
1138 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1141 /* We'll need the symbol table in a second. */
1142 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1143 if (symtab_hdr
->sh_info
== 0)
1146 /* Arrange to read and keep global syms for later stack analysis. */
1147 psyms
= &local_syms
;
1148 if (htab
->stack_analysis
)
1149 psyms
= &symtab_hdr
->contents
;
1151 /* Walk over each section attached to the input bfd. */
1152 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1154 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1156 /* If there aren't any relocs, then there's nothing more to do. */
1157 if ((isec
->flags
& SEC_RELOC
) == 0
1158 || isec
->reloc_count
== 0)
1161 if (!maybe_needs_stubs (isec
, info
->output_bfd
))
1164 /* Get the relocs. */
1165 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1167 if (internal_relocs
== NULL
)
1168 goto error_ret_free_local
;
1170 /* Now examine each relocation. */
1171 irela
= internal_relocs
;
1172 irelaend
= irela
+ isec
->reloc_count
;
1173 for (; irela
< irelaend
; irela
++)
1175 enum elf_spu_reloc_type r_type
;
1176 unsigned int r_indx
;
1178 Elf_Internal_Sym
*sym
;
1179 struct elf_link_hash_entry
*h
;
1180 enum _stub_type stub_type
;
1182 r_type
= ELF32_R_TYPE (irela
->r_info
);
1183 r_indx
= ELF32_R_SYM (irela
->r_info
);
1185 if (r_type
>= R_SPU_max
)
1187 bfd_set_error (bfd_error_bad_value
);
1188 error_ret_free_internal
:
1189 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1190 free (internal_relocs
);
1191 error_ret_free_local
:
1192 if (local_syms
!= NULL
1193 && (symtab_hdr
->contents
1194 != (unsigned char *) local_syms
))
1199 /* Determine the reloc target section. */
1200 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, ibfd
))
1201 goto error_ret_free_internal
;
1203 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1205 if (stub_type
== no_stub
)
1207 else if (stub_type
== stub_error
)
1208 goto error_ret_free_internal
;
1210 if (htab
->stub_count
== NULL
)
1213 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1214 htab
->stub_count
= bfd_zmalloc (amt
);
1215 if (htab
->stub_count
== NULL
)
1216 goto error_ret_free_internal
;
1221 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1222 goto error_ret_free_internal
;
1229 dest
= h
->root
.u
.def
.value
;
1231 dest
= sym
->st_value
;
1232 dest
+= irela
->r_addend
;
1233 if (!build_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
,
1235 goto error_ret_free_internal
;
1239 /* We're done with the internal relocs, free them. */
1240 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1241 free (internal_relocs
);
1244 if (local_syms
!= NULL
1245 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1247 if (!info
->keep_memory
)
1250 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1257 /* Allocate space for overlay call and return stubs. */
1260 spu_elf_size_stubs (struct bfd_link_info
*info
,
1261 void (*place_spu_section
) (asection
*, asection
*,
1263 int non_overlay_stubs
)
1265 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1272 htab
->non_overlay_stubs
= non_overlay_stubs
;
1273 if (!process_stubs (info
, FALSE
))
1276 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1280 if (htab
->stub_count
== NULL
)
1283 ibfd
= info
->input_bfds
;
1284 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1285 htab
->stub_sec
= bfd_zmalloc (amt
);
1286 if (htab
->stub_sec
== NULL
)
1289 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1290 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1291 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1292 htab
->stub_sec
[0] = stub
;
1294 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1296 stub
->size
= htab
->stub_count
[0] * OVL_STUB_SIZE
;
1297 (*place_spu_section
) (stub
, NULL
, ".text");
1299 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1301 asection
*osec
= htab
->ovl_sec
[i
];
1302 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1303 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1304 htab
->stub_sec
[ovl
] = stub
;
1306 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1308 stub
->size
= htab
->stub_count
[ovl
] * OVL_STUB_SIZE
;
1309 (*place_spu_section
) (stub
, osec
, NULL
);
1312 /* htab->ovtab consists of two arrays.
1322 . } _ovly_buf_table[];
1325 flags
= (SEC_ALLOC
| SEC_LOAD
1326 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1327 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1328 if (htab
->ovtab
== NULL
1329 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1332 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1333 (*place_spu_section
) (htab
->ovtab
, NULL
, ".data");
1335 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1336 if (htab
->toe
== NULL
1337 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1339 htab
->toe
->size
= 16;
1340 (*place_spu_section
) (htab
->toe
, NULL
, ".toe");
1345 /* Functions to handle embedded spu_ovl.o object. */
1348 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1354 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1360 struct _ovl_stream
*os
;
1364 os
= (struct _ovl_stream
*) stream
;
1365 max
= (const char *) os
->end
- (const char *) os
->start
;
1367 if ((ufile_ptr
) offset
>= max
)
1371 if (count
> max
- offset
)
1372 count
= max
- offset
;
1374 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1379 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1381 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1388 return *ovl_bfd
!= NULL
;
1391 /* Define an STT_OBJECT symbol. */
1393 static struct elf_link_hash_entry
*
1394 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1396 struct elf_link_hash_entry
*h
;
1398 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1402 if (h
->root
.type
!= bfd_link_hash_defined
1405 h
->root
.type
= bfd_link_hash_defined
;
1406 h
->root
.u
.def
.section
= htab
->ovtab
;
1407 h
->type
= STT_OBJECT
;
1410 h
->ref_regular_nonweak
= 1;
1415 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1416 h
->root
.u
.def
.section
->owner
,
1417 h
->root
.root
.string
);
1418 bfd_set_error (bfd_error_bad_value
);
1425 /* Fill in all stubs and the overlay tables. */
1428 spu_elf_build_stubs (struct bfd_link_info
*info
, int emit_syms
)
1430 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1431 struct elf_link_hash_entry
*h
;
1437 htab
->emit_stub_syms
= emit_syms
;
1438 if (htab
->stub_count
== NULL
)
1441 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1442 if (htab
->stub_sec
[i
]->size
!= 0)
1444 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1445 htab
->stub_sec
[i
]->size
);
1446 if (htab
->stub_sec
[i
]->contents
== NULL
)
1448 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1449 htab
->stub_sec
[i
]->size
= 0;
1452 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load", FALSE
, FALSE
, FALSE
);
1453 htab
->ovly_load
= h
;
1454 BFD_ASSERT (h
!= NULL
1455 && (h
->root
.type
== bfd_link_hash_defined
1456 || h
->root
.type
== bfd_link_hash_defweak
)
1459 s
= h
->root
.u
.def
.section
->output_section
;
1460 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1462 (*_bfd_error_handler
) (_("%s in overlay section"),
1463 h
->root
.root
.string
);
1464 bfd_set_error (bfd_error_bad_value
);
1468 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return", FALSE
, FALSE
, FALSE
);
1469 htab
->ovly_return
= h
;
1471 /* Fill in all the stubs. */
1472 process_stubs (info
, TRUE
);
1474 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1478 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1480 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1482 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1483 bfd_set_error (bfd_error_bad_value
);
1486 htab
->stub_sec
[i
]->rawsize
= 0;
1491 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1492 bfd_set_error (bfd_error_bad_value
);
1496 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1497 if (htab
->ovtab
->contents
== NULL
)
1500 /* Write out _ovly_table. */
1501 p
= htab
->ovtab
->contents
;
1502 /* set low bit of .size to mark non-overlay area as present. */
1504 obfd
= htab
->ovtab
->output_section
->owner
;
1505 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1507 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
1511 unsigned long off
= ovl_index
* 16;
1512 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
1514 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1515 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16, p
+ off
+ 4);
1516 /* file_off written later in spu_elf_modify_program_headers. */
1517 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
1521 h
= define_ovtab_symbol (htab
, "_ovly_table");
1524 h
->root
.u
.def
.value
= 16;
1525 h
->size
= htab
->num_overlays
* 16;
1527 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
1530 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1533 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
1536 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1537 h
->size
= htab
->num_buf
* 4;
1539 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
1542 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1545 h
= define_ovtab_symbol (htab
, "_EAR_");
1548 h
->root
.u
.def
.section
= htab
->toe
;
1549 h
->root
.u
.def
.value
= 0;
1555 /* Check that all loadable section VMAs lie in the range
1556 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
1559 spu_elf_check_vma (struct bfd_link_info
*info
,
1563 unsigned int overlay_fixed
,
1564 unsigned int reserved
,
1565 void (*spu_elf_load_ovl_mgr
) (void),
1566 FILE *(*spu_elf_open_overlay_script
) (void),
1567 void (*spu_elf_relink
) (void))
1569 struct elf_segment_map
*m
;
1571 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1572 bfd
*abfd
= info
->output_bfd
;
1574 if (auto_overlay
& AUTO_OVERLAY
)
1575 htab
->auto_overlay
= auto_overlay
;
1576 htab
->local_store
= hi
+ 1 - lo
;
1577 htab
->overlay_fixed
= overlay_fixed
;
1578 htab
->reserved
= reserved
;
1579 htab
->spu_elf_load_ovl_mgr
= spu_elf_load_ovl_mgr
;
1580 htab
->spu_elf_open_overlay_script
= spu_elf_open_overlay_script
;
1581 htab
->spu_elf_relink
= spu_elf_relink
;
1583 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
1584 if (m
->p_type
== PT_LOAD
)
1585 for (i
= 0; i
< m
->count
; i
++)
1586 if (m
->sections
[i
]->size
!= 0
1587 && (m
->sections
[i
]->vma
< lo
1588 || m
->sections
[i
]->vma
> hi
1589 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
1590 return m
->sections
[i
];
1592 /* No need for overlays if it all fits. */
1593 htab
->auto_overlay
= 0;
1597 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1598 Search for stack adjusting insns, and return the sp delta. */
1601 find_function_stack_adjust (asection
*sec
, bfd_vma offset
)
1606 memset (reg
, 0, sizeof (reg
));
1607 for (unrecog
= 0; offset
+ 4 <= sec
->size
&& unrecog
< 32; offset
+= 4)
1609 unsigned char buf
[4];
1613 /* Assume no relocs on stack adjusing insns. */
1614 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
1617 if (buf
[0] == 0x24 /* stqd */)
1621 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
1622 /* Partly decoded immediate field. */
1623 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
1625 if (buf
[0] == 0x1c /* ai */)
1628 imm
= (imm
^ 0x200) - 0x200;
1629 reg
[rt
] = reg
[ra
] + imm
;
1631 if (rt
== 1 /* sp */)
1638 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
1640 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
1642 reg
[rt
] = reg
[ra
] + reg
[rb
];
1646 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1648 if (buf
[0] >= 0x42 /* ila */)
1649 imm
|= (buf
[0] & 1) << 17;
1654 if (buf
[0] == 0x40 /* il */)
1656 if ((buf
[1] & 0x80) == 0)
1658 imm
= (imm
^ 0x8000) - 0x8000;
1660 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
1666 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
1668 reg
[rt
] |= imm
& 0xffff;
1671 else if (buf
[0] == 0x04 /* ori */)
1674 imm
= (imm
^ 0x200) - 0x200;
1675 reg
[rt
] = reg
[ra
] | imm
;
1678 else if ((buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
1679 || (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */))
1681 /* Used in pic reg load. Say rt is trashed. */
1685 else if (is_branch (buf
) || is_indirect_branch (buf
))
1686 /* If we hit a branch then we must be out of the prologue. */
1695 /* qsort predicate to sort symbols by section and value. */
1697 static Elf_Internal_Sym
*sort_syms_syms
;
1698 static asection
**sort_syms_psecs
;
1701 sort_syms (const void *a
, const void *b
)
1703 Elf_Internal_Sym
*const *s1
= a
;
1704 Elf_Internal_Sym
*const *s2
= b
;
1705 asection
*sec1
,*sec2
;
1706 bfd_signed_vma delta
;
1708 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
1709 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
1712 return sec1
->index
- sec2
->index
;
1714 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
1716 return delta
< 0 ? -1 : 1;
1718 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
1720 return delta
< 0 ? -1 : 1;
1722 return *s1
< *s2
? -1 : 1;
1727 struct function_info
*fun
;
1728 struct call_info
*next
;
1730 unsigned int max_depth
;
1731 unsigned int is_tail
: 1;
1732 unsigned int is_pasted
: 1;
1735 struct function_info
1737 /* List of functions called. Also branches to hot/cold part of
1739 struct call_info
*call_list
;
1740 /* For hot/cold part of function, point to owner. */
1741 struct function_info
*start
;
1742 /* Symbol at start of function. */
1744 Elf_Internal_Sym
*sym
;
1745 struct elf_link_hash_entry
*h
;
1747 /* Function section. */
1750 /* Where last called from, and number of sections called from. */
1751 asection
*last_caller
;
1752 unsigned int call_count
;
1753 /* Address range of (this part of) function. */
1757 /* Distance from root of call tree. Tail and hot/cold branches
1758 count as one deeper. We aren't counting stack frames here. */
1760 /* Set if global symbol. */
1761 unsigned int global
: 1;
1762 /* Set if known to be start of function (as distinct from a hunk
1763 in hot/cold section. */
1764 unsigned int is_func
: 1;
1765 /* Set if not a root node. */
1766 unsigned int non_root
: 1;
1767 /* Flags used during call tree traversal. It's cheaper to replicate
1768 the visit flags than have one which needs clearing after a traversal. */
1769 unsigned int visit1
: 1;
1770 unsigned int visit2
: 1;
1771 unsigned int marking
: 1;
1772 unsigned int visit3
: 1;
1773 unsigned int visit4
: 1;
1774 unsigned int visit5
: 1;
1775 unsigned int visit6
: 1;
1776 unsigned int visit7
: 1;
1779 struct spu_elf_stack_info
1783 /* Variable size array describing functions, one per contiguous
1784 address range belonging to a function. */
1785 struct function_info fun
[1];
1788 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1789 entries for section SEC. */
1791 static struct spu_elf_stack_info
*
1792 alloc_stack_info (asection
*sec
, int max_fun
)
1794 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1797 amt
= sizeof (struct spu_elf_stack_info
);
1798 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
1799 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
1800 if (sec_data
->u
.i
.stack_info
!= NULL
)
1801 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
1802 return sec_data
->u
.i
.stack_info
;
1805 /* Add a new struct function_info describing a (part of a) function
1806 starting at SYM_H. Keep the array sorted by address. */
1808 static struct function_info
*
1809 maybe_insert_function (asection
*sec
,
1812 bfd_boolean is_func
)
1814 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1815 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1821 sinfo
= alloc_stack_info (sec
, 20);
1828 Elf_Internal_Sym
*sym
= sym_h
;
1829 off
= sym
->st_value
;
1830 size
= sym
->st_size
;
1834 struct elf_link_hash_entry
*h
= sym_h
;
1835 off
= h
->root
.u
.def
.value
;
1839 for (i
= sinfo
->num_fun
; --i
>= 0; )
1840 if (sinfo
->fun
[i
].lo
<= off
)
1845 /* Don't add another entry for an alias, but do update some
1847 if (sinfo
->fun
[i
].lo
== off
)
1849 /* Prefer globals over local syms. */
1850 if (global
&& !sinfo
->fun
[i
].global
)
1852 sinfo
->fun
[i
].global
= TRUE
;
1853 sinfo
->fun
[i
].u
.h
= sym_h
;
1856 sinfo
->fun
[i
].is_func
= TRUE
;
1857 return &sinfo
->fun
[i
];
1859 /* Ignore a zero-size symbol inside an existing function. */
1860 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
1861 return &sinfo
->fun
[i
];
1864 if (++i
< sinfo
->num_fun
)
1865 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
1866 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
1867 else if (i
>= sinfo
->max_fun
)
1869 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
1870 bfd_size_type old
= amt
;
1872 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1873 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
1874 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1875 sinfo
= bfd_realloc (sinfo
, amt
);
1878 memset ((char *) sinfo
+ old
, 0, amt
- old
);
1879 sec_data
->u
.i
.stack_info
= sinfo
;
1881 sinfo
->fun
[i
].is_func
= is_func
;
1882 sinfo
->fun
[i
].global
= global
;
1883 sinfo
->fun
[i
].sec
= sec
;
1885 sinfo
->fun
[i
].u
.h
= sym_h
;
1887 sinfo
->fun
[i
].u
.sym
= sym_h
;
1888 sinfo
->fun
[i
].lo
= off
;
1889 sinfo
->fun
[i
].hi
= off
+ size
;
1890 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
);
1891 sinfo
->num_fun
+= 1;
1892 return &sinfo
->fun
[i
];
1895 /* Return the name of FUN. */
1898 func_name (struct function_info
*fun
)
1902 Elf_Internal_Shdr
*symtab_hdr
;
1904 while (fun
->start
!= NULL
)
1908 return fun
->u
.h
->root
.root
.string
;
1911 if (fun
->u
.sym
->st_name
== 0)
1913 size_t len
= strlen (sec
->name
);
1914 char *name
= bfd_malloc (len
+ 10);
1917 sprintf (name
, "%s+%lx", sec
->name
,
1918 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
1922 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1923 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
1926 /* Read the instruction at OFF in SEC. Return true iff the instruction
1927 is a nop, lnop, or stop 0 (all zero insn). */
1930 is_nop (asection
*sec
, bfd_vma off
)
1932 unsigned char insn
[4];
1934 if (off
+ 4 > sec
->size
1935 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
1937 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
1939 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
1944 /* Extend the range of FUN to cover nop padding up to LIMIT.
1945 Return TRUE iff some instruction other than a NOP was found. */
1948 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
1950 bfd_vma off
= (fun
->hi
+ 3) & -4;
1952 while (off
< limit
&& is_nop (fun
->sec
, off
))
1963 /* Check and fix overlapping function ranges. Return TRUE iff there
1964 are gaps in the current info we have about functions in SEC. */
1967 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
1969 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1970 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1972 bfd_boolean gaps
= FALSE
;
1977 for (i
= 1; i
< sinfo
->num_fun
; i
++)
1978 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
1980 /* Fix overlapping symbols. */
1981 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
1982 const char *f2
= func_name (&sinfo
->fun
[i
]);
1984 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
1985 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
1987 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
1990 if (sinfo
->num_fun
== 0)
1994 if (sinfo
->fun
[0].lo
!= 0)
1996 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
1998 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
2000 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
2001 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
2003 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2009 /* Search current function info for a function that contains address
2010 OFFSET in section SEC. */
2012 static struct function_info
*
2013 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2015 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2016 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2020 hi
= sinfo
->num_fun
;
2023 mid
= (lo
+ hi
) / 2;
2024 if (offset
< sinfo
->fun
[mid
].lo
)
2026 else if (offset
>= sinfo
->fun
[mid
].hi
)
2029 return &sinfo
->fun
[mid
];
2031 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2036 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2037 if CALLEE was new. If this function return FALSE, CALLEE should
2041 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2043 struct call_info
**pp
, *p
;
2045 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2046 if (p
->fun
== callee
->fun
)
2048 /* Tail calls use less stack than normal calls. Retain entry
2049 for normal call over one for tail call. */
2050 p
->is_tail
&= callee
->is_tail
;
2053 p
->fun
->start
= NULL
;
2054 p
->fun
->is_func
= TRUE
;
2057 /* Reorder list so most recent call is first. */
2059 p
->next
= caller
->call_list
;
2060 caller
->call_list
= p
;
2063 callee
->next
= caller
->call_list
;
2065 caller
->call_list
= callee
;
2069 /* Copy CALL and insert the copy into CALLER. */
2072 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2074 struct call_info
*callee
;
2075 callee
= bfd_malloc (sizeof (*callee
));
2079 if (!insert_callee (caller
, callee
))
2084 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2085 overlay stub sections. */
2088 interesting_section (asection
*s
, bfd
*obfd
)
2090 return (s
->output_section
!= NULL
2091 && s
->output_section
->owner
== obfd
2092 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2093 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2097 /* Rummage through the relocs for SEC, looking for function calls.
2098 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2099 mark destination symbols on calls as being functions. Also
2100 look at branches, which may be tail calls or go to hot/cold
2101 section part of same function. */
2104 mark_functions_via_relocs (asection
*sec
,
2105 struct bfd_link_info
*info
,
2108 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2109 Elf_Internal_Shdr
*symtab_hdr
;
2110 Elf_Internal_Sym
*syms
;
2112 static bfd_boolean warned
;
2114 if (!interesting_section (sec
, info
->output_bfd
)
2115 || sec
->reloc_count
== 0)
2118 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2120 if (internal_relocs
== NULL
)
2123 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2124 psyms
= &symtab_hdr
->contents
;
2125 syms
= *(Elf_Internal_Sym
**) psyms
;
2126 irela
= internal_relocs
;
2127 irelaend
= irela
+ sec
->reloc_count
;
2128 for (; irela
< irelaend
; irela
++)
2130 enum elf_spu_reloc_type r_type
;
2131 unsigned int r_indx
;
2133 Elf_Internal_Sym
*sym
;
2134 struct elf_link_hash_entry
*h
;
2136 bfd_boolean reject
, is_call
;
2137 struct function_info
*caller
;
2138 struct call_info
*callee
;
2141 r_type
= ELF32_R_TYPE (irela
->r_info
);
2142 if (r_type
!= R_SPU_REL16
2143 && r_type
!= R_SPU_ADDR16
)
2146 if (!(call_tree
&& spu_hash_table (info
)->auto_overlay
))
2150 r_indx
= ELF32_R_SYM (irela
->r_info
);
2151 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2155 || sym_sec
->output_section
== NULL
2156 || sym_sec
->output_section
->owner
!= info
->output_bfd
)
2162 unsigned char insn
[4];
2164 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2165 irela
->r_offset
, 4))
2167 if (is_branch (insn
))
2169 is_call
= (insn
[0] & 0xfd) == 0x31;
2170 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2171 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2174 info
->callbacks
->einfo
2175 (_("%B(%A+0x%v): call to non-code section"
2176 " %B(%A), analysis incomplete\n"),
2177 sec
->owner
, sec
, irela
->r_offset
,
2178 sym_sec
->owner
, sym_sec
);
2186 if (!(call_tree
&& spu_hash_table (info
)->auto_overlay
)
2194 /* For --auto-overlay, count possible stubs we need for
2195 function pointer references. */
2196 unsigned int sym_type
;
2200 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2201 if (sym_type
== STT_FUNC
)
2202 spu_hash_table (info
)->non_ovly_stub
+= 1;
2207 val
= h
->root
.u
.def
.value
;
2209 val
= sym
->st_value
;
2210 val
+= irela
->r_addend
;
2214 struct function_info
*fun
;
2216 if (irela
->r_addend
!= 0)
2218 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2221 fake
->st_value
= val
;
2223 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2227 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2229 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2232 if (irela
->r_addend
!= 0
2233 && fun
->u
.sym
!= sym
)
2238 caller
= find_function (sec
, irela
->r_offset
, info
);
2241 callee
= bfd_malloc (sizeof *callee
);
2245 callee
->fun
= find_function (sym_sec
, val
, info
);
2246 if (callee
->fun
== NULL
)
2248 callee
->is_tail
= !is_call
;
2249 callee
->is_pasted
= FALSE
;
2251 if (callee
->fun
->last_caller
!= sec
)
2253 callee
->fun
->last_caller
= sec
;
2254 callee
->fun
->call_count
+= 1;
2256 if (!insert_callee (caller
, callee
))
2259 && !callee
->fun
->is_func
2260 && callee
->fun
->stack
== 0)
2262 /* This is either a tail call or a branch from one part of
2263 the function to another, ie. hot/cold section. If the
2264 destination has been called by some other function then
2265 it is a separate function. We also assume that functions
2266 are not split across input files. */
2267 if (sec
->owner
!= sym_sec
->owner
)
2269 callee
->fun
->start
= NULL
;
2270 callee
->fun
->is_func
= TRUE
;
2272 else if (callee
->fun
->start
== NULL
)
2273 callee
->fun
->start
= caller
;
2276 struct function_info
*callee_start
;
2277 struct function_info
*caller_start
;
2278 callee_start
= callee
->fun
;
2279 while (callee_start
->start
)
2280 callee_start
= callee_start
->start
;
2281 caller_start
= caller
;
2282 while (caller_start
->start
)
2283 caller_start
= caller_start
->start
;
2284 if (caller_start
!= callee_start
)
2286 callee
->fun
->start
= NULL
;
2287 callee
->fun
->is_func
= TRUE
;
2296 /* Handle something like .init or .fini, which has a piece of a function.
2297 These sections are pasted together to form a single function. */
2300 pasted_function (asection
*sec
, struct bfd_link_info
*info
)
2302 struct bfd_link_order
*l
;
2303 struct _spu_elf_section_data
*sec_data
;
2304 struct spu_elf_stack_info
*sinfo
;
2305 Elf_Internal_Sym
*fake
;
2306 struct function_info
*fun
, *fun_start
;
2308 fake
= bfd_zmalloc (sizeof (*fake
));
2312 fake
->st_size
= sec
->size
;
2314 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2315 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2319 /* Find a function immediately preceding this section. */
2321 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2323 if (l
->u
.indirect
.section
== sec
)
2325 if (fun_start
!= NULL
)
2327 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2331 fun
->start
= fun_start
;
2333 callee
->is_tail
= TRUE
;
2334 callee
->is_pasted
= TRUE
;
2336 if (!insert_callee (fun_start
, callee
))
2342 if (l
->type
== bfd_indirect_link_order
2343 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2344 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2345 && sinfo
->num_fun
!= 0)
2346 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2349 info
->callbacks
->einfo (_("%A link_order not found\n"), sec
);
2353 /* Map address ranges in code sections to functions. */
2356 discover_functions (struct bfd_link_info
*info
)
2360 Elf_Internal_Sym
***psym_arr
;
2361 asection
***sec_arr
;
2362 bfd_boolean gaps
= FALSE
;
2365 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2368 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2369 if (psym_arr
== NULL
)
2371 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2372 if (sec_arr
== NULL
)
2376 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2378 ibfd
= ibfd
->link_next
, bfd_idx
++)
2380 extern const bfd_target bfd_elf32_spu_vec
;
2381 Elf_Internal_Shdr
*symtab_hdr
;
2384 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2385 asection
**psecs
, **p
;
2387 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2390 /* Read all the symbols. */
2391 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2392 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2396 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2397 if (interesting_section (sec
, info
->output_bfd
))
2405 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2408 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2410 symtab_hdr
->contents
= (void *) syms
;
2415 /* Select defined function symbols that are going to be output. */
2416 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2419 psym_arr
[bfd_idx
] = psyms
;
2420 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2423 sec_arr
[bfd_idx
] = psecs
;
2424 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2425 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2426 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2430 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2431 if (s
!= NULL
&& interesting_section (s
, info
->output_bfd
))
2434 symcount
= psy
- psyms
;
2437 /* Sort them by section and offset within section. */
2438 sort_syms_syms
= syms
;
2439 sort_syms_psecs
= psecs
;
2440 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2442 /* Now inspect the function symbols. */
2443 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2445 asection
*s
= psecs
[*psy
- syms
];
2446 Elf_Internal_Sym
**psy2
;
2448 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2449 if (psecs
[*psy2
- syms
] != s
)
2452 if (!alloc_stack_info (s
, psy2
- psy
))
2457 /* First install info about properly typed and sized functions.
2458 In an ideal world this will cover all code sections, except
2459 when partitioning functions into hot and cold sections,
2460 and the horrible pasted together .init and .fini functions. */
2461 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2464 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2466 asection
*s
= psecs
[sy
- syms
];
2467 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2472 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2473 if (interesting_section (sec
, info
->output_bfd
))
2474 gaps
|= check_function_ranges (sec
, info
);
2479 /* See if we can discover more function symbols by looking at
2481 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2483 ibfd
= ibfd
->link_next
, bfd_idx
++)
2487 if (psym_arr
[bfd_idx
] == NULL
)
2490 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2491 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2495 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2497 ibfd
= ibfd
->link_next
, bfd_idx
++)
2499 Elf_Internal_Shdr
*symtab_hdr
;
2501 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2504 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2507 psecs
= sec_arr
[bfd_idx
];
2509 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2510 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2513 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2514 if (interesting_section (sec
, info
->output_bfd
))
2515 gaps
|= check_function_ranges (sec
, info
);
2519 /* Finally, install all globals. */
2520 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2524 s
= psecs
[sy
- syms
];
2526 /* Global syms might be improperly typed functions. */
2527 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2528 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2530 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
2536 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2538 extern const bfd_target bfd_elf32_spu_vec
;
2541 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2544 /* Some of the symbols we've installed as marking the
2545 beginning of functions may have a size of zero. Extend
2546 the range of such functions to the beginning of the
2547 next symbol of interest. */
2548 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2549 if (interesting_section (sec
, info
->output_bfd
))
2551 struct _spu_elf_section_data
*sec_data
;
2552 struct spu_elf_stack_info
*sinfo
;
2554 sec_data
= spu_elf_section_data (sec
);
2555 sinfo
= sec_data
->u
.i
.stack_info
;
2559 bfd_vma hi
= sec
->size
;
2561 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
2563 sinfo
->fun
[fun_idx
].hi
= hi
;
2564 hi
= sinfo
->fun
[fun_idx
].lo
;
2567 /* No symbols in this section. Must be .init or .fini
2568 or something similar. */
2569 else if (!pasted_function (sec
, info
))
2575 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2577 ibfd
= ibfd
->link_next
, bfd_idx
++)
2579 if (psym_arr
[bfd_idx
] == NULL
)
2582 free (psym_arr
[bfd_idx
]);
2583 free (sec_arr
[bfd_idx
]);
2592 /* Iterate over all function_info we have collected, calling DOIT on
2593 each node if ROOT_ONLY is false. Only call DOIT on root nodes
2597 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
2598 struct bfd_link_info
*,
2600 struct bfd_link_info
*info
,
2606 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2608 extern const bfd_target bfd_elf32_spu_vec
;
2611 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2614 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2616 struct _spu_elf_section_data
*sec_data
;
2617 struct spu_elf_stack_info
*sinfo
;
2619 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2620 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
2623 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2624 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
2625 if (!doit (&sinfo
->fun
[i
], info
, param
))
2633 /* Transfer call info attached to struct function_info entries for
2634 all of a given function's sections to the first entry. */
2637 transfer_calls (struct function_info
*fun
,
2638 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2639 void *param ATTRIBUTE_UNUSED
)
2641 struct function_info
*start
= fun
->start
;
2645 struct call_info
*call
, *call_next
;
2647 while (start
->start
!= NULL
)
2648 start
= start
->start
;
2649 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
2651 call_next
= call
->next
;
2652 if (!insert_callee (start
, call
))
2655 fun
->call_list
= NULL
;
2660 /* Mark nodes in the call graph that are called by some other node. */
2663 mark_non_root (struct function_info
*fun
,
2664 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2665 void *param ATTRIBUTE_UNUSED
)
2667 struct call_info
*call
;
2672 for (call
= fun
->call_list
; call
; call
= call
->next
)
2674 call
->fun
->non_root
= TRUE
;
2675 mark_non_root (call
->fun
, 0, 0);
2680 /* Remove cycles from the call graph. Set depth of nodes. */
2683 remove_cycles (struct function_info
*fun
,
2684 struct bfd_link_info
*info
,
2687 struct call_info
**callp
, *call
;
2688 unsigned int depth
= *(unsigned int *) param
;
2689 unsigned int max_depth
= depth
;
2693 fun
->marking
= TRUE
;
2695 callp
= &fun
->call_list
;
2696 while ((call
= *callp
) != NULL
)
2698 if (!call
->fun
->visit2
)
2700 call
->max_depth
= depth
+ !call
->is_pasted
;
2701 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
2703 if (max_depth
< call
->max_depth
)
2704 max_depth
= call
->max_depth
;
2706 else if (call
->fun
->marking
)
2708 if (!spu_hash_table (info
)->auto_overlay
)
2710 const char *f1
= func_name (fun
);
2711 const char *f2
= func_name (call
->fun
);
2713 info
->callbacks
->info (_("Stack analysis will ignore the call "
2717 *callp
= call
->next
;
2721 callp
= &call
->next
;
2723 fun
->marking
= FALSE
;
2724 *(unsigned int *) param
= max_depth
;
2728 /* Populate call_list for each function. */
2731 build_call_tree (struct bfd_link_info
*info
)
2736 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2738 extern const bfd_target bfd_elf32_spu_vec
;
2741 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2744 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2745 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
2749 /* Transfer call info from hot/cold section part of function
2751 if (!spu_hash_table (info
)->auto_overlay
2752 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
2755 /* Find the call graph root(s). */
2756 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
2759 /* Remove cycles from the call graph. We start from the root node(s)
2760 so that we break cycles in a reasonable place. */
2762 return for_each_node (remove_cycles
, info
, &depth
, TRUE
);
2765 /* qsort predicate to sort calls by max_depth then count. */
2768 sort_calls (const void *a
, const void *b
)
2770 struct call_info
*const *c1
= a
;
2771 struct call_info
*const *c2
= b
;
2774 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
2778 delta
= (*c2
)->count
- (*c1
)->count
;
2786 unsigned int max_overlay_size
;
2789 /* Set linker_mark and gc_mark on any sections that we will put in
2790 overlays. These flags are used by the generic ELF linker, but we
2791 won't be continuing on to bfd_elf_final_link so it is OK to use
2792 them. linker_mark is clear before we get here. Set segment_mark
2793 on sections that are part of a pasted function (excluding the last
2796 Set up function rodata section if --overlay-rodata. We don't
2797 currently include merged string constant rodata sections since
2799 Sort the call graph so that the deepest nodes will be visited
2803 mark_overlay_section (struct function_info
*fun
,
2804 struct bfd_link_info
*info
,
2807 struct call_info
*call
;
2809 struct _mos_param
*mos_param
= param
;
2815 if (!fun
->sec
->linker_mark
)
2817 fun
->sec
->linker_mark
= 1;
2818 fun
->sec
->gc_mark
= 1;
2819 fun
->sec
->segment_mark
= 0;
2820 /* Ensure SEC_CODE is set on this text section (it ought to
2821 be!), and SEC_CODE is clear on rodata sections. We use
2822 this flag to differentiate the two overlay section types. */
2823 fun
->sec
->flags
|= SEC_CODE
;
2824 if (spu_hash_table (info
)->auto_overlay
& OVERLAY_RODATA
)
2829 /* Find the rodata section corresponding to this function's
2831 if (strcmp (fun
->sec
->name
, ".text") == 0)
2833 name
= bfd_malloc (sizeof (".rodata"));
2836 memcpy (name
, ".rodata", sizeof (".rodata"));
2838 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
2840 size_t len
= strlen (fun
->sec
->name
);
2841 name
= bfd_malloc (len
+ 3);
2844 memcpy (name
, ".rodata", sizeof (".rodata"));
2845 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
2847 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
2849 size_t len
= strlen (fun
->sec
->name
) + 1;
2850 name
= bfd_malloc (len
);
2853 memcpy (name
, fun
->sec
->name
, len
);
2859 asection
*rodata
= NULL
;
2860 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
2861 if (group_sec
== NULL
)
2862 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
2864 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
2866 if (strcmp (group_sec
->name
, name
) == 0)
2871 group_sec
= elf_section_data (group_sec
)->next_in_group
;
2873 fun
->rodata
= rodata
;
2876 fun
->rodata
->linker_mark
= 1;
2877 fun
->rodata
->gc_mark
= 1;
2878 fun
->rodata
->flags
&= ~SEC_CODE
;
2882 size
= fun
->sec
->size
;
2884 size
+= fun
->rodata
->size
;
2885 if (mos_param
->max_overlay_size
< size
)
2886 mos_param
->max_overlay_size
= size
;
2890 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2895 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
2899 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2900 calls
[count
++] = call
;
2902 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
2904 fun
->call_list
= NULL
;
2908 calls
[count
]->next
= fun
->call_list
;
2909 fun
->call_list
= calls
[count
];
2914 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2916 if (call
->is_pasted
)
2918 /* There can only be one is_pasted call per function_info. */
2919 BFD_ASSERT (!fun
->sec
->segment_mark
);
2920 fun
->sec
->segment_mark
= 1;
2922 if (!mark_overlay_section (call
->fun
, info
, param
))
2926 /* Don't put entry code into an overlay. The overlay manager needs
2928 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
2929 == info
->output_bfd
->start_address
)
2931 fun
->sec
->linker_mark
= 0;
2932 if (fun
->rodata
!= NULL
)
2933 fun
->rodata
->linker_mark
= 0;
2939 asection
*exclude_input_section
;
2940 asection
*exclude_output_section
;
2941 unsigned long clearing
;
2944 /* Undo some of mark_overlay_section's work. */
2947 unmark_overlay_section (struct function_info
*fun
,
2948 struct bfd_link_info
*info
,
2951 struct call_info
*call
;
2952 struct _uos_param
*uos_param
= param
;
2953 unsigned int excluded
= 0;
2961 if (fun
->sec
== uos_param
->exclude_input_section
2962 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
2965 uos_param
->clearing
+= excluded
;
2967 if (uos_param
->clearing
)
2969 fun
->sec
->linker_mark
= 0;
2971 fun
->rodata
->linker_mark
= 0;
2974 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2975 if (!unmark_overlay_section (call
->fun
, info
, param
))
2978 uos_param
->clearing
-= excluded
;
2983 unsigned int lib_size
;
2984 asection
**lib_sections
;
2987 /* Add sections we have marked as belonging to overlays to an array
2988 for consideration as non-overlay sections. The array consist of
2989 pairs of sections, (text,rodata), for functions in the call graph. */
2992 collect_lib_sections (struct function_info
*fun
,
2993 struct bfd_link_info
*info
,
2996 struct _cl_param
*lib_param
= param
;
2997 struct call_info
*call
;
3004 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3007 size
= fun
->sec
->size
;
3009 size
+= fun
->rodata
->size
;
3010 if (size
> lib_param
->lib_size
)
3013 *lib_param
->lib_sections
++ = fun
->sec
;
3014 fun
->sec
->gc_mark
= 0;
3015 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3017 *lib_param
->lib_sections
++ = fun
->rodata
;
3018 fun
->rodata
->gc_mark
= 0;
3021 *lib_param
->lib_sections
++ = NULL
;
3023 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3024 collect_lib_sections (call
->fun
, info
, param
);
3029 /* qsort predicate to sort sections by call count. */
3032 sort_lib (const void *a
, const void *b
)
3034 asection
*const *s1
= a
;
3035 asection
*const *s2
= b
;
3036 struct _spu_elf_section_data
*sec_data
;
3037 struct spu_elf_stack_info
*sinfo
;
3041 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3042 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3045 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3046 delta
-= sinfo
->fun
[i
].call_count
;
3049 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3050 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3053 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3054 delta
+= sinfo
->fun
[i
].call_count
;
3063 /* Remove some sections from those marked to be in overlays. Choose
3064 those that are called from many places, likely library functions. */
3067 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3070 asection
**lib_sections
;
3071 unsigned int i
, lib_count
;
3072 struct _cl_param collect_lib_param
;
3073 struct function_info dummy_caller
;
3075 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3077 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3079 extern const bfd_target bfd_elf32_spu_vec
;
3082 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3085 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3086 if (sec
->linker_mark
3087 && sec
->size
< lib_size
3088 && (sec
->flags
& SEC_CODE
) != 0)
3091 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3092 if (lib_sections
== NULL
)
3093 return (unsigned int) -1;
3094 collect_lib_param
.lib_size
= lib_size
;
3095 collect_lib_param
.lib_sections
= lib_sections
;
3096 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3098 return (unsigned int) -1;
3099 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3101 /* Sort sections so that those with the most calls are first. */
3103 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3105 for (i
= 0; i
< lib_count
; i
++)
3107 unsigned int tmp
, stub_size
;
3109 struct _spu_elf_section_data
*sec_data
;
3110 struct spu_elf_stack_info
*sinfo
;
3112 sec
= lib_sections
[2 * i
];
3113 /* If this section is OK, its size must be less than lib_size. */
3115 /* If it has a rodata section, then add that too. */
3116 if (lib_sections
[2 * i
+ 1])
3117 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3118 /* Add any new overlay call stubs needed by the section. */
3121 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3122 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3125 struct call_info
*call
;
3127 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3128 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3129 if (call
->fun
->sec
->linker_mark
)
3131 struct call_info
*p
;
3132 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3133 if (p
->fun
== call
->fun
)
3136 stub_size
+= OVL_STUB_SIZE
;
3139 if (tmp
+ stub_size
< lib_size
)
3141 struct call_info
**pp
, *p
;
3143 /* This section fits. Mark it as non-overlay. */
3144 lib_sections
[2 * i
]->linker_mark
= 0;
3145 if (lib_sections
[2 * i
+ 1])
3146 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3147 lib_size
-= tmp
+ stub_size
;
3148 /* Call stubs to the section we just added are no longer
3150 pp
= &dummy_caller
.call_list
;
3151 while ((p
= *pp
) != NULL
)
3152 if (!p
->fun
->sec
->linker_mark
)
3154 lib_size
+= OVL_STUB_SIZE
;
3160 /* Add new call stubs to dummy_caller. */
3161 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3162 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3165 struct call_info
*call
;
3167 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3168 for (call
= sinfo
->fun
[k
].call_list
;
3171 if (call
->fun
->sec
->linker_mark
)
3173 struct call_info
*callee
;
3174 callee
= bfd_malloc (sizeof (*callee
));
3176 return (unsigned int) -1;
3178 if (!insert_callee (&dummy_caller
, callee
))
3184 while (dummy_caller
.call_list
!= NULL
)
3186 struct call_info
*call
= dummy_caller
.call_list
;
3187 dummy_caller
.call_list
= call
->next
;
3190 for (i
= 0; i
< 2 * lib_count
; i
++)
3191 if (lib_sections
[i
])
3192 lib_sections
[i
]->gc_mark
= 1;
3193 free (lib_sections
);
3197 /* Build an array of overlay sections. The deepest node's section is
3198 added first, then its parent node's section, then everything called
3199 from the parent section. The idea being to group sections to
3200 minimise calls between different overlays. */
3203 collect_overlays (struct function_info
*fun
,
3204 struct bfd_link_info
*info
,
3207 struct call_info
*call
;
3208 bfd_boolean added_fun
;
3209 asection
***ovly_sections
= param
;
3215 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3216 if (!call
->is_pasted
)
3218 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3224 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3226 fun
->sec
->gc_mark
= 0;
3227 *(*ovly_sections
)++ = fun
->sec
;
3228 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3230 fun
->rodata
->gc_mark
= 0;
3231 *(*ovly_sections
)++ = fun
->rodata
;
3234 *(*ovly_sections
)++ = NULL
;
3237 /* Pasted sections must stay with the first section. We don't
3238 put pasted sections in the array, just the first section.
3239 Mark subsequent sections as already considered. */
3240 if (fun
->sec
->segment_mark
)
3242 struct function_info
*call_fun
= fun
;
3245 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3246 if (call
->is_pasted
)
3248 call_fun
= call
->fun
;
3249 call_fun
->sec
->gc_mark
= 0;
3250 if (call_fun
->rodata
)
3251 call_fun
->rodata
->gc_mark
= 0;
3257 while (call_fun
->sec
->segment_mark
);
3261 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3262 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3267 struct _spu_elf_section_data
*sec_data
;
3268 struct spu_elf_stack_info
*sinfo
;
3270 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3271 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3274 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3275 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3283 struct _sum_stack_param
{
3285 size_t overall_stack
;
3286 bfd_boolean emit_stack_syms
;
3289 /* Descend the call graph for FUN, accumulating total stack required. */
3292 sum_stack (struct function_info
*fun
,
3293 struct bfd_link_info
*info
,
3296 struct call_info
*call
;
3297 struct function_info
*max
;
3298 size_t stack
, cum_stack
;
3300 bfd_boolean has_call
;
3301 struct _sum_stack_param
*sum_stack_param
= param
;
3302 struct spu_link_hash_table
*htab
;
3304 cum_stack
= fun
->stack
;
3305 sum_stack_param
->cum_stack
= cum_stack
;
3311 for (call
= fun
->call_list
; call
; call
= call
->next
)
3313 if (!call
->is_pasted
)
3315 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3317 stack
= sum_stack_param
->cum_stack
;
3318 /* Include caller stack for normal calls, don't do so for
3319 tail calls. fun->stack here is local stack usage for
3321 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3322 stack
+= fun
->stack
;
3323 if (cum_stack
< stack
)
3330 sum_stack_param
->cum_stack
= cum_stack
;
3332 /* Now fun->stack holds cumulative stack. */
3333 fun
->stack
= cum_stack
;
3337 && sum_stack_param
->overall_stack
< cum_stack
)
3338 sum_stack_param
->overall_stack
= cum_stack
;
3340 htab
= spu_hash_table (info
);
3341 if (htab
->auto_overlay
)
3344 f1
= func_name (fun
);
3346 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3347 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3348 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3352 info
->callbacks
->minfo (_(" calls:\n"));
3353 for (call
= fun
->call_list
; call
; call
= call
->next
)
3354 if (!call
->is_pasted
)
3356 const char *f2
= func_name (call
->fun
);
3357 const char *ann1
= call
->fun
== max
? "*" : " ";
3358 const char *ann2
= call
->is_tail
? "t" : " ";
3360 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
3364 if (sum_stack_param
->emit_stack_syms
)
3366 char *name
= bfd_malloc (18 + strlen (f1
));
3367 struct elf_link_hash_entry
*h
;
3372 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
3373 sprintf (name
, "__stack_%s", f1
);
3375 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
3377 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
3380 && (h
->root
.type
== bfd_link_hash_new
3381 || h
->root
.type
== bfd_link_hash_undefined
3382 || h
->root
.type
== bfd_link_hash_undefweak
))
3384 h
->root
.type
= bfd_link_hash_defined
;
3385 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
3386 h
->root
.u
.def
.value
= cum_stack
;
3391 h
->ref_regular_nonweak
= 1;
3392 h
->forced_local
= 1;
3400 /* SEC is part of a pasted function. Return the call_info for the
3401 next section of this function. */
3403 static struct call_info
*
3404 find_pasted_call (asection
*sec
)
3406 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
3407 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
3408 struct call_info
*call
;
3411 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3412 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
3413 if (call
->is_pasted
)
3419 /* qsort predicate to sort bfds by file name. */
3422 sort_bfds (const void *a
, const void *b
)
3424 bfd
*const *abfd1
= a
;
3425 bfd
*const *abfd2
= b
;
3427 return strcmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
3430 /* Handle --auto-overlay. */
3432 static void spu_elf_auto_overlay (struct bfd_link_info
*, void (*) (void))
3436 spu_elf_auto_overlay (struct bfd_link_info
*info
,
3437 void (*spu_elf_load_ovl_mgr
) (void))
3441 struct elf_segment_map
*m
;
3442 unsigned int fixed_size
, lo
, hi
;
3443 struct spu_link_hash_table
*htab
;
3444 unsigned int base
, i
, count
, bfd_count
;
3446 asection
**ovly_sections
, **ovly_p
;
3448 unsigned int total_overlay_size
, overlay_size
;
3449 struct elf_link_hash_entry
*h
;
3450 struct _mos_param mos_param
;
3451 struct _uos_param uos_param
;
3452 struct function_info dummy_caller
;
3454 /* Find the extents of our loadable image. */
3455 lo
= (unsigned int) -1;
3457 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
3458 if (m
->p_type
== PT_LOAD
)
3459 for (i
= 0; i
< m
->count
; i
++)
3460 if (m
->sections
[i
]->size
!= 0)
3462 if (m
->sections
[i
]->vma
< lo
)
3463 lo
= m
->sections
[i
]->vma
;
3464 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
3465 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
3467 fixed_size
= hi
+ 1 - lo
;
3469 if (!discover_functions (info
))
3472 if (!build_call_tree (info
))
3475 uos_param
.exclude_input_section
= 0;
3476 uos_param
.exclude_output_section
3477 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
3479 htab
= spu_hash_table (info
);
3480 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
3481 FALSE
, FALSE
, FALSE
);
3483 && (h
->root
.type
== bfd_link_hash_defined
3484 || h
->root
.type
== bfd_link_hash_defweak
)
3487 /* We have a user supplied overlay manager. */
3488 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
3492 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
3493 builtin version to .text, and will adjust .text size. */
3494 asection
*text
= bfd_get_section_by_name (info
->output_bfd
, ".text");
3496 fixed_size
-= text
->size
;
3497 spu_elf_load_ovl_mgr ();
3498 text
= bfd_get_section_by_name (info
->output_bfd
, ".text");
3500 fixed_size
+= text
->size
;
3503 /* Mark overlay sections, and find max overlay section size. */
3504 mos_param
.max_overlay_size
= 0;
3505 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
3508 /* We can't put the overlay manager or interrupt routines in
3510 uos_param
.clearing
= 0;
3511 if ((uos_param
.exclude_input_section
3512 || uos_param
.exclude_output_section
)
3513 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
3517 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3519 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
3520 if (bfd_arr
== NULL
)
3523 /* Count overlay sections, and subtract their sizes from "fixed_size". */
3526 total_overlay_size
= 0;
3527 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3529 extern const bfd_target bfd_elf32_spu_vec
;
3531 unsigned int old_count
;
3533 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3537 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3538 if (sec
->linker_mark
)
3540 if ((sec
->flags
& SEC_CODE
) != 0)
3542 fixed_size
-= sec
->size
;
3543 total_overlay_size
+= sec
->size
;
3545 if (count
!= old_count
)
3546 bfd_arr
[bfd_count
++] = ibfd
;
3549 /* Since the overlay link script selects sections by file name and
3550 section name, ensure that file names are unique. */
3553 bfd_boolean ok
= TRUE
;
3555 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
3556 for (i
= 1; i
< bfd_count
; ++i
)
3557 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
3559 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
3561 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
3562 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
3563 bfd_arr
[i
- 1]->filename
,
3564 bfd_arr
[i
- 1]->my_archive
->filename
);
3566 info
->callbacks
->einfo (_("%s in both %s and %s\n"),
3567 bfd_arr
[i
- 1]->filename
,
3568 bfd_arr
[i
- 1]->my_archive
->filename
,
3569 bfd_arr
[i
]->my_archive
->filename
);
3571 else if (bfd_arr
[i
- 1]->my_archive
)
3572 info
->callbacks
->einfo (_("%s in %s and as an object\n"),
3573 bfd_arr
[i
- 1]->filename
,
3574 bfd_arr
[i
- 1]->my_archive
->filename
);
3575 else if (bfd_arr
[i
]->my_archive
)
3576 info
->callbacks
->einfo (_("%s in %s and as an object\n"),
3577 bfd_arr
[i
]->filename
,
3578 bfd_arr
[i
]->my_archive
->filename
);
3580 info
->callbacks
->einfo (_("%s duplicated\n"),
3581 bfd_arr
[i
]->filename
);
3586 /* FIXME: modify plain object files from foo.o to ./foo.o
3587 and emit EXCLUDE_FILE to handle the duplicates in
3588 archives. There is a pathological case we can't handle:
3589 We may have duplicate file names within a single archive. */
3590 info
->callbacks
->einfo (_("sorry, no support for duplicate "
3591 "object files in auto-overlay script\n"));
3592 bfd_set_error (bfd_error_bad_value
);
3598 if (htab
->reserved
== 0)
3600 struct _sum_stack_param sum_stack_param
;
3602 sum_stack_param
.emit_stack_syms
= 0;
3603 sum_stack_param
.overall_stack
= 0;
3604 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3606 htab
->reserved
= sum_stack_param
.overall_stack
;
3608 fixed_size
+= htab
->reserved
;
3609 fixed_size
+= htab
->non_ovly_stub
* OVL_STUB_SIZE
;
3610 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
3612 /* Guess number of overlays. Assuming overlay buffer is on
3613 average only half full should be conservative. */
3614 ovlynum
= total_overlay_size
* 2 / (htab
->local_store
- fixed_size
);
3615 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
3616 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
3619 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
3620 info
->callbacks
->einfo (_("non-overlay plus maximum overlay size "
3621 "of 0x%x exceeds local store\n"),
3622 fixed_size
+ mos_param
.max_overlay_size
);
3624 /* Now see if we should put some functions in the non-overlay area. */
3625 if (fixed_size
< htab
->overlay_fixed
3626 && htab
->overlay_fixed
+ mos_param
.max_overlay_size
< htab
->local_store
)
3628 unsigned int lib_size
= htab
->overlay_fixed
- fixed_size
;
3629 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
3630 if (lib_size
== (unsigned int) -1)
3632 fixed_size
= htab
->overlay_fixed
- lib_size
;
3635 /* Build an array of sections, suitably sorted to place into
3637 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
3638 if (ovly_sections
== NULL
)
3640 ovly_p
= ovly_sections
;
3641 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
3643 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
3645 script
= htab
->spu_elf_open_overlay_script ();
3647 if (fprintf (script
, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
3650 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3651 overlay_size
= htab
->local_store
- fixed_size
;
3654 while (base
< count
)
3656 unsigned int size
= 0;
3659 for (i
= base
; i
< count
; i
++)
3663 unsigned int stub_size
;
3664 struct call_info
*call
, *pasty
;
3665 struct _spu_elf_section_data
*sec_data
;
3666 struct spu_elf_stack_info
*sinfo
;
3669 /* See whether we can add this section to the current
3670 overlay without overflowing our overlay buffer. */
3671 sec
= ovly_sections
[2 * i
];
3672 tmp
= size
+ sec
->size
;
3673 if (ovly_sections
[2 * i
+ 1])
3674 tmp
+= ovly_sections
[2 * i
+ 1]->size
;
3675 if (tmp
> overlay_size
)
3677 if (sec
->segment_mark
)
3679 /* Pasted sections must stay together, so add their
3681 struct call_info
*pasty
= find_pasted_call (sec
);
3682 while (pasty
!= NULL
)
3684 struct function_info
*call_fun
= pasty
->fun
;
3685 tmp
+= call_fun
->sec
->size
;
3686 if (call_fun
->rodata
)
3687 tmp
+= call_fun
->rodata
->size
;
3688 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
3689 if (pasty
->is_pasted
)
3693 if (tmp
> overlay_size
)
3696 /* If we add this section, we might need new overlay call
3697 stubs. Add any overlay section calls to dummy_call. */
3699 sec_data
= spu_elf_section_data (sec
);
3700 sinfo
= sec_data
->u
.i
.stack_info
;
3701 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3702 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3703 if (call
->is_pasted
)
3705 BFD_ASSERT (pasty
== NULL
);
3708 else if (call
->fun
->sec
->linker_mark
)
3710 if (!copy_callee (&dummy_caller
, call
))
3713 while (pasty
!= NULL
)
3715 struct function_info
*call_fun
= pasty
->fun
;
3717 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3718 if (call
->is_pasted
)
3720 BFD_ASSERT (pasty
== NULL
);
3723 else if (!copy_callee (&dummy_caller
, call
))
3727 /* Calculate call stub size. */
3729 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
3733 stub_size
+= OVL_STUB_SIZE
;
3734 /* If the call is within this overlay, we won't need a
3736 for (k
= base
; k
< i
+ 1; k
++)
3737 if (call
->fun
->sec
== ovly_sections
[2 * k
])
3739 stub_size
-= OVL_STUB_SIZE
;
3743 if (tmp
+ stub_size
> overlay_size
)
3751 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
3752 ovly_sections
[2 * i
]->owner
,
3753 ovly_sections
[2 * i
],
3754 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
3755 bfd_set_error (bfd_error_bad_value
);
3759 if (fprintf (script
, " .ovly%d {\n", ++ovlynum
) <= 0)
3761 for (j
= base
; j
< i
; j
++)
3763 asection
*sec
= ovly_sections
[2 * j
];
3765 if (fprintf (script
, " [%c]%s (%s)\n",
3766 sec
->owner
->filename
[0],
3767 sec
->owner
->filename
+ 1,
3770 if (sec
->segment_mark
)
3772 struct call_info
*call
= find_pasted_call (sec
);
3773 while (call
!= NULL
)
3775 struct function_info
*call_fun
= call
->fun
;
3776 sec
= call_fun
->sec
;
3777 if (fprintf (script
, " [%c]%s (%s)\n",
3778 sec
->owner
->filename
[0],
3779 sec
->owner
->filename
+ 1,
3782 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3783 if (call
->is_pasted
)
3789 for (j
= base
; j
< i
; j
++)
3791 asection
*sec
= ovly_sections
[2 * j
+ 1];
3792 if (sec
!= NULL
&& fprintf (script
, " [%c]%s (%s)\n",
3793 sec
->owner
->filename
[0],
3794 sec
->owner
->filename
+ 1,
3798 sec
= ovly_sections
[2 * j
];
3799 if (sec
->segment_mark
)
3801 struct call_info
*call
= find_pasted_call (sec
);
3802 while (call
!= NULL
)
3804 struct function_info
*call_fun
= call
->fun
;
3805 sec
= call_fun
->rodata
;
3806 if (sec
!= NULL
&& fprintf (script
, " [%c]%s (%s)\n",
3807 sec
->owner
->filename
[0],
3808 sec
->owner
->filename
+ 1,
3811 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3812 if (call
->is_pasted
)
3818 if (fprintf (script
, " }\n") <= 0)
3821 while (dummy_caller
.call_list
!= NULL
)
3823 struct call_info
*call
= dummy_caller
.call_list
;
3824 dummy_caller
.call_list
= call
->next
;
3830 free (ovly_sections
);
3832 if (fprintf (script
, " }\n}\nINSERT AFTER .text;\n") <= 0)
3834 if (fclose (script
) != 0)
3837 if (htab
->auto_overlay
& AUTO_RELINK
)
3838 htab
->spu_elf_relink ();
3843 bfd_set_error (bfd_error_system_call
);
3845 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
3849 /* Provide an estimate of total stack required. */
3852 spu_elf_stack_analysis (struct bfd_link_info
*info
, int emit_stack_syms
)
3854 struct _sum_stack_param sum_stack_param
;
3856 if (!discover_functions (info
))
3859 if (!build_call_tree (info
))
3862 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
3863 info
->callbacks
->minfo (_("\nStack size for functions. "
3864 "Annotations: '*' max stack, 't' tail call\n"));
3866 sum_stack_param
.emit_stack_syms
= emit_stack_syms
;
3867 sum_stack_param
.overall_stack
= 0;
3868 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3871 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
3872 (bfd_vma
) sum_stack_param
.overall_stack
);
3876 /* Perform a final link. */
3879 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
3881 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3883 if (htab
->auto_overlay
)
3884 spu_elf_auto_overlay (info
, htab
->spu_elf_load_ovl_mgr
);
3886 if (htab
->stack_analysis
3887 && !spu_elf_stack_analysis (info
, htab
->emit_stack_syms
))
3888 info
->callbacks
->einfo ("%X%P: stack analysis error: %E\n");
3890 return bfd_elf_final_link (output_bfd
, info
);
3893 /* Called when not normally emitting relocs, ie. !info->relocatable
3894 and !info->emitrelocations. Returns a count of special relocs
3895 that need to be emitted. */
3898 spu_elf_count_relocs (asection
*sec
, Elf_Internal_Rela
*relocs
)
3900 unsigned int count
= 0;
3901 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
3903 for (; relocs
< relend
; relocs
++)
3905 int r_type
= ELF32_R_TYPE (relocs
->r_info
);
3906 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
3913 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
3916 spu_elf_relocate_section (bfd
*output_bfd
,
3917 struct bfd_link_info
*info
,
3919 asection
*input_section
,
3921 Elf_Internal_Rela
*relocs
,
3922 Elf_Internal_Sym
*local_syms
,
3923 asection
**local_sections
)
3925 Elf_Internal_Shdr
*symtab_hdr
;
3926 struct elf_link_hash_entry
**sym_hashes
;
3927 Elf_Internal_Rela
*rel
, *relend
;
3928 struct spu_link_hash_table
*htab
;
3929 asection
*ea
= bfd_get_section_by_name (output_bfd
, "._ea");
3931 bfd_boolean emit_these_relocs
= FALSE
;
3932 bfd_boolean is_ea_sym
;
3935 htab
= spu_hash_table (info
);
3936 stubs
= (htab
->stub_sec
!= NULL
3937 && maybe_needs_stubs (input_section
, output_bfd
));
3938 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
3939 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
3942 relend
= relocs
+ input_section
->reloc_count
;
3943 for (; rel
< relend
; rel
++)
3946 reloc_howto_type
*howto
;
3947 unsigned int r_symndx
;
3948 Elf_Internal_Sym
*sym
;
3950 struct elf_link_hash_entry
*h
;
3951 const char *sym_name
;
3954 bfd_reloc_status_type r
;
3955 bfd_boolean unresolved_reloc
;
3957 enum _stub_type stub_type
;
3959 r_symndx
= ELF32_R_SYM (rel
->r_info
);
3960 r_type
= ELF32_R_TYPE (rel
->r_info
);
3961 howto
= elf_howto_table
+ r_type
;
3962 unresolved_reloc
= FALSE
;
3967 if (r_symndx
< symtab_hdr
->sh_info
)
3969 sym
= local_syms
+ r_symndx
;
3970 sec
= local_sections
[r_symndx
];
3971 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
3972 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
3976 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
3977 r_symndx
, symtab_hdr
, sym_hashes
,
3979 unresolved_reloc
, warned
);
3980 sym_name
= h
->root
.root
.string
;
3983 if (sec
!= NULL
&& elf_discarded_section (sec
))
3985 /* For relocs against symbols from removed linkonce sections,
3986 or sections discarded by a linker script, we just want the
3987 section contents zeroed. Avoid any special processing. */
3988 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
3994 if (info
->relocatable
)
3997 is_ea_sym
= (ea
!= NULL
3999 && sec
->output_section
== ea
);
4001 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4005 /* ._ea is a special section that isn't allocated in SPU
4006 memory, but rather occupies space in PPU memory as
4007 part of an embedded ELF image. If this reloc is
4008 against a symbol defined in ._ea, then transform the
4009 reloc into an equivalent one without a symbol
4010 relative to the start of the ELF image. */
4011 rel
->r_addend
+= (relocation
4013 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4014 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4016 emit_these_relocs
= TRUE
;
4021 unresolved_reloc
= TRUE
;
4023 if (unresolved_reloc
)
4025 (*_bfd_error_handler
)
4026 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4028 bfd_get_section_name (input_bfd
, input_section
),
4029 (long) rel
->r_offset
,
4035 /* If this symbol is in an overlay area, we may need to relocate
4036 to the overlay stub. */
4037 addend
= rel
->r_addend
;
4039 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4040 contents
, info
)) != no_stub
)
4042 unsigned int ovl
= 0;
4043 struct got_entry
*g
, **head
;
4045 if (stub_type
!= nonovl_stub
)
4046 ovl
= (spu_elf_section_data (input_section
->output_section
)
4050 head
= &h
->got
.glist
;
4052 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4054 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4055 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4060 relocation
= g
->stub_addr
;
4064 r
= _bfd_final_link_relocate (howto
,
4068 rel
->r_offset
, relocation
, addend
);
4070 if (r
!= bfd_reloc_ok
)
4072 const char *msg
= (const char *) 0;
4076 case bfd_reloc_overflow
:
4077 if (!((*info
->callbacks
->reloc_overflow
)
4078 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
4079 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
4083 case bfd_reloc_undefined
:
4084 if (!((*info
->callbacks
->undefined_symbol
)
4085 (info
, sym_name
, input_bfd
, input_section
,
4086 rel
->r_offset
, TRUE
)))
4090 case bfd_reloc_outofrange
:
4091 msg
= _("internal error: out of range error");
4094 case bfd_reloc_notsupported
:
4095 msg
= _("internal error: unsupported relocation error");
4098 case bfd_reloc_dangerous
:
4099 msg
= _("internal error: dangerous error");
4103 msg
= _("internal error: unknown error");
4108 if (!((*info
->callbacks
->warning
)
4109 (info
, msg
, sym_name
, input_bfd
, input_section
,
4118 && emit_these_relocs
4119 && !info
->emitrelocations
)
4121 Elf_Internal_Rela
*wrel
;
4122 Elf_Internal_Shdr
*rel_hdr
;
4124 wrel
= rel
= relocs
;
4125 relend
= relocs
+ input_section
->reloc_count
;
4126 for (; rel
< relend
; rel
++)
4130 r_type
= ELF32_R_TYPE (rel
->r_info
);
4131 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4134 input_section
->reloc_count
= wrel
- relocs
;
4135 /* Backflips for _bfd_elf_link_output_relocs. */
4136 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
4137 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
4144 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4147 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
4148 const char *sym_name ATTRIBUTE_UNUSED
,
4149 Elf_Internal_Sym
*sym
,
4150 asection
*sym_sec ATTRIBUTE_UNUSED
,
4151 struct elf_link_hash_entry
*h
)
4153 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4155 if (!info
->relocatable
4156 && htab
->stub_sec
!= NULL
4158 && (h
->root
.type
== bfd_link_hash_defined
4159 || h
->root
.type
== bfd_link_hash_defweak
)
4161 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
4163 struct got_entry
*g
;
4165 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
4166 if (g
->addend
== 0 && g
->ovl
== 0)
4168 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
4169 (htab
->stub_sec
[0]->output_section
->owner
,
4170 htab
->stub_sec
[0]->output_section
));
4171 sym
->st_value
= g
->stub_addr
;
/* Non-zero when the output is an SPU "plugin" image embedded in a
   PPU executable; set from the linker command line.  */
static int spu_plugin = 0;

/* Record whether we are linking a plugin (VAL non-zero) for use by
   spu_elf_post_process_headers.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
4187 /* Set ELF header e_type for plugins. */
4190 spu_elf_post_process_headers (bfd
*abfd
,
4191 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
4195 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
4197 i_ehdrp
->e_type
= ET_DYN
;
4201 /* We may add an extra PT_LOAD segment for .toe. We also need extra
4202 segments for overlays. */
4205 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4207 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4208 int extra
= htab
->num_overlays
;
4214 sec
= bfd_get_section_by_name (abfd
, ".toe");
4215 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
4221 /* Remove .toe section from other PT_LOAD segments and put it in
4222 a segment of its own. Put overlays in separate segments too. */
4225 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
4228 struct elf_segment_map
*m
;
4234 toe
= bfd_get_section_by_name (abfd
, ".toe");
4235 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4236 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
4237 for (i
= 0; i
< m
->count
; i
++)
4238 if ((s
= m
->sections
[i
]) == toe
4239 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
4241 struct elf_segment_map
*m2
;
4244 if (i
+ 1 < m
->count
)
4246 amt
= sizeof (struct elf_segment_map
);
4247 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
4248 m2
= bfd_zalloc (abfd
, amt
);
4251 m2
->count
= m
->count
- (i
+ 1);
4252 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
4253 m2
->count
* sizeof (m
->sections
[0]));
4254 m2
->p_type
= PT_LOAD
;
4262 amt
= sizeof (struct elf_segment_map
);
4263 m2
= bfd_zalloc (abfd
, amt
);
4266 m2
->p_type
= PT_LOAD
;
4268 m2
->sections
[0] = s
;
4278 /* Tweak the section type of .note.spu_name. */
4281 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
4282 Elf_Internal_Shdr
*hdr
,
4285 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
4286 hdr
->sh_type
= SHT_NOTE
;
4290 /* Tweak phdrs before writing them out. */
4293 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4295 const struct elf_backend_data
*bed
;
4296 struct elf_obj_tdata
*tdata
;
4297 Elf_Internal_Phdr
*phdr
, *last
;
4298 struct spu_link_hash_table
*htab
;
4305 bed
= get_elf_backend_data (abfd
);
4306 tdata
= elf_tdata (abfd
);
4308 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
4309 htab
= spu_hash_table (info
);
4310 if (htab
->num_overlays
!= 0)
4312 struct elf_segment_map
*m
;
4315 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
4317 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
4319 /* Mark this as an overlay header. */
4320 phdr
[i
].p_flags
|= PF_OVERLAY
;
4322 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0)
4324 bfd_byte
*p
= htab
->ovtab
->contents
;
4325 unsigned int off
= o
* 16 + 8;
4327 /* Write file_off into _ovly_table. */
4328 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
4333 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
4334 of 16. This should always be possible when using the standard
4335 linker scripts, but don't create overlapping segments if
4336 someone is playing games with linker scripts. */
4338 for (i
= count
; i
-- != 0; )
4339 if (phdr
[i
].p_type
== PT_LOAD
)
4343 adjust
= -phdr
[i
].p_filesz
& 15;
4346 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
4349 adjust
= -phdr
[i
].p_memsz
& 15;
4352 && phdr
[i
].p_filesz
!= 0
4353 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
4354 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
4357 if (phdr
[i
].p_filesz
!= 0)
4361 if (i
== (unsigned int) -1)
4362 for (i
= count
; i
-- != 0; )
4363 if (phdr
[i
].p_type
== PT_LOAD
)
4367 adjust
= -phdr
[i
].p_filesz
& 15;
4368 phdr
[i
].p_filesz
+= adjust
;
4370 adjust
= -phdr
[i
].p_memsz
& 15;
4371 phdr
[i
].p_memsz
+= adjust
;
4377 #define TARGET_BIG_SYM bfd_elf32_spu_vec
4378 #define TARGET_BIG_NAME "elf32-spu"
4379 #define ELF_ARCH bfd_arch_spu
4380 #define ELF_MACHINE_CODE EM_SPU
4381 /* This matches the alignment need for DMA. */
4382 #define ELF_MAXPAGESIZE 0x80
4383 #define elf_backend_rela_normal 1
4384 #define elf_backend_can_gc_sections 1
4386 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
4387 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
4388 #define elf_info_to_howto spu_elf_info_to_howto
4389 #define elf_backend_count_relocs spu_elf_count_relocs
4390 #define elf_backend_relocate_section spu_elf_relocate_section
4391 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
4392 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
4393 #define elf_backend_object_p spu_elf_object_p
4394 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
4395 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
4397 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
4398 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
4399 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
4400 #define elf_backend_post_process_headers spu_elf_post_process_headers
4401 #define elf_backend_fake_sections spu_elf_fake_sections
4402 #define elf_backend_special_sections spu_elf_special_sections
4403 #define bfd_elf32_bfd_final_link spu_elf_final_link
4405 #include "elf32-target.h"