1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
93 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
94 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
95 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
99 static enum elf_spu_reloc_type
100 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
106 case BFD_RELOC_SPU_IMM10W
:
108 case BFD_RELOC_SPU_IMM16W
:
110 case BFD_RELOC_SPU_LO16
:
111 return R_SPU_ADDR16_LO
;
112 case BFD_RELOC_SPU_HI16
:
113 return R_SPU_ADDR16_HI
;
114 case BFD_RELOC_SPU_IMM18
:
116 case BFD_RELOC_SPU_PCREL16
:
118 case BFD_RELOC_SPU_IMM7
:
120 case BFD_RELOC_SPU_IMM8
:
122 case BFD_RELOC_SPU_PCREL9a
:
124 case BFD_RELOC_SPU_PCREL9b
:
126 case BFD_RELOC_SPU_IMM10
:
127 return R_SPU_ADDR10I
;
128 case BFD_RELOC_SPU_IMM16
:
129 return R_SPU_ADDR16I
;
132 case BFD_RELOC_32_PCREL
:
134 case BFD_RELOC_SPU_PPU32
:
136 case BFD_RELOC_SPU_PPU64
:
142 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
144 Elf_Internal_Rela
*dst
)
146 enum elf_spu_reloc_type r_type
;
148 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
149 BFD_ASSERT (r_type
< R_SPU_max
);
150 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
153 static reloc_howto_type
*
154 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
155 bfd_reloc_code_real_type code
)
157 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
159 if (r_type
== R_SPU_NONE
)
162 return elf_howto_table
+ r_type
;
165 static reloc_howto_type
*
166 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
171 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
172 if (elf_howto_table
[i
].name
!= NULL
173 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
174 return &elf_howto_table
[i
];
179 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
181 static bfd_reloc_status_type
182 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
183 void *data
, asection
*input_section
,
184 bfd
*output_bfd
, char **error_message
)
186 bfd_size_type octets
;
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
193 if (output_bfd
!= NULL
)
194 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
195 input_section
, output_bfd
, error_message
);
197 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
198 return bfd_reloc_outofrange
;
199 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
201 /* Get symbol value. */
203 if (!bfd_is_com_section (symbol
->section
))
205 if (symbol
->section
->output_section
)
206 val
+= symbol
->section
->output_section
->vma
;
208 val
+= reloc_entry
->addend
;
210 /* Make it pc-relative. */
211 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
214 if (val
+ 256 >= 512)
215 return bfd_reloc_overflow
;
217 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
222 insn
&= ~reloc_entry
->howto
->dst_mask
;
223 insn
|= val
& reloc_entry
->howto
->dst_mask
;
224 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
229 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
231 if (!sec
->used_by_bfd
)
233 struct _spu_elf_section_data
*sdata
;
235 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
238 sec
->used_by_bfd
= sdata
;
241 return _bfd_elf_new_section_hook (abfd
, sec
);
244 /* Set up overlay info for executables. */
247 spu_elf_object_p (bfd
*abfd
)
249 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
251 unsigned int i
, num_ovl
, num_buf
;
252 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
253 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
254 Elf_Internal_Phdr
*last_phdr
= NULL
;
256 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
257 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
262 if (last_phdr
== NULL
263 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
266 for (j
= 1; j
< elf_numsections (abfd
); j
++)
268 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
270 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr
, phdr
))
272 asection
*sec
= shdr
->bfd_section
;
273 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
274 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
282 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
286 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
288 if (sym
->name
!= NULL
289 && sym
->section
!= bfd_abs_section_ptr
290 && strncmp (sym
->name
, "_EAR_", 5) == 0)
291 sym
->flags
|= BSF_KEEP
;
294 /* SPU ELF linker hash table. */
296 struct spu_link_hash_table
298 struct elf_link_hash_table elf
;
300 /* Shortcuts to overlay sections. */
305 /* Count of stubs in each overlay section. */
306 unsigned int *stub_count
;
308 /* The stub section for each overlay section. */
311 struct elf_link_hash_entry
*ovly_load
;
312 struct elf_link_hash_entry
*ovly_return
;
313 unsigned long ovly_load_r_symndx
;
315 /* Number of overlay buffers. */
316 unsigned int num_buf
;
318 /* Total number of overlays. */
319 unsigned int num_overlays
;
321 /* How much memory we have. */
322 unsigned int local_store
;
323 /* Local store --auto-overlay should reserve for non-overlay
324 functions and data. */
325 unsigned int overlay_fixed
;
326 /* Local store --auto-overlay should reserve for stack and heap. */
327 unsigned int reserved
;
328 /* If reserved is not specified, stack analysis will calculate a value
329 for the stack. This parameter adjusts that value to allow for
330 negative sp access (the ABI says 2000 bytes below sp are valid,
331 and the overlay manager uses some of this area). */
332 int extra_stack_space
;
333 /* Count of overlay stubs needed in non-overlay area. */
334 unsigned int non_ovly_stub
;
336 /* Stash various callbacks for --auto-overlay. */
337 void (*spu_elf_load_ovl_mgr
) (void);
338 FILE *(*spu_elf_open_overlay_script
) (void);
339 void (*spu_elf_relink
) (void);
341 /* Bit 0 set if --auto-overlay.
342 Bit 1 set if --auto-relink.
343 Bit 2 set if --overlay-rodata. */
344 unsigned int auto_overlay
: 3;
345 #define AUTO_OVERLAY 1
346 #define AUTO_RELINK 2
347 #define OVERLAY_RODATA 4
349 /* Set if we should emit symbols for stubs. */
350 unsigned int emit_stub_syms
:1;
352 /* Set if we want stubs on calls out of overlay regions to
353 non-overlay regions. */
354 unsigned int non_overlay_stubs
: 1;
357 unsigned int stub_err
: 1;
359 /* Set if stack size analysis should be done. */
360 unsigned int stack_analysis
: 1;
362 /* Set if __stack_* syms will be emitted. */
363 unsigned int emit_stack_syms
: 1;
366 /* Hijack the generic got fields for overlay stub accounting. */
370 struct got_entry
*next
;
376 #define spu_hash_table(p) \
377 ((struct spu_link_hash_table *) ((p)->hash))
379 /* Create a spu ELF linker hash table. */
381 static struct bfd_link_hash_table
*
382 spu_elf_link_hash_table_create (bfd
*abfd
)
384 struct spu_link_hash_table
*htab
;
386 htab
= bfd_malloc (sizeof (*htab
));
390 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
391 _bfd_elf_link_hash_newfunc
,
392 sizeof (struct elf_link_hash_entry
)))
398 memset (&htab
->ovtab
, 0,
399 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
401 htab
->elf
.init_got_refcount
.refcount
= 0;
402 htab
->elf
.init_got_refcount
.glist
= NULL
;
403 htab
->elf
.init_got_offset
.offset
= 0;
404 htab
->elf
.init_got_offset
.glist
= NULL
;
405 return &htab
->elf
.root
;
408 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
409 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
410 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
413 get_sym_h (struct elf_link_hash_entry
**hp
,
414 Elf_Internal_Sym
**symp
,
416 Elf_Internal_Sym
**locsymsp
,
417 unsigned long r_symndx
,
420 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
422 if (r_symndx
>= symtab_hdr
->sh_info
)
424 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
425 struct elf_link_hash_entry
*h
;
427 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
428 while (h
->root
.type
== bfd_link_hash_indirect
429 || h
->root
.type
== bfd_link_hash_warning
)
430 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
440 asection
*symsec
= NULL
;
441 if (h
->root
.type
== bfd_link_hash_defined
442 || h
->root
.type
== bfd_link_hash_defweak
)
443 symsec
= h
->root
.u
.def
.section
;
449 Elf_Internal_Sym
*sym
;
450 Elf_Internal_Sym
*locsyms
= *locsymsp
;
454 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
456 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
458 0, NULL
, NULL
, NULL
);
463 sym
= locsyms
+ r_symndx
;
472 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
478 /* Create the note section if not already present. This is done early so
479 that the linker maps the sections to the right place in the output. */
482 spu_elf_create_sections (struct bfd_link_info
*info
,
487 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
489 /* Stash some options away where we can get at them later. */
490 htab
->stack_analysis
= stack_analysis
;
491 htab
->emit_stack_syms
= emit_stack_syms
;
493 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
494 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
499 /* Make SPU_PTNOTE_SPUNAME section. */
506 ibfd
= info
->input_bfds
;
507 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
508 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
510 || !bfd_set_section_alignment (ibfd
, s
, 4))
513 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
514 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
515 size
+= (name_len
+ 3) & -4;
517 if (!bfd_set_section_size (ibfd
, s
, size
))
520 data
= bfd_zalloc (ibfd
, size
);
524 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
525 bfd_put_32 (ibfd
, name_len
, data
+ 4);
526 bfd_put_32 (ibfd
, 1, data
+ 8);
527 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
528 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
529 bfd_get_filename (info
->output_bfd
), name_len
);
536 /* qsort predicate to sort sections by vma. */
539 sort_sections (const void *a
, const void *b
)
541 const asection
*const *s1
= a
;
542 const asection
*const *s2
= b
;
543 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
546 return delta
< 0 ? -1 : 1;
548 return (*s1
)->index
- (*s2
)->index
;
551 /* Identify overlays in the output bfd, and number them. */
554 spu_elf_find_overlays (struct bfd_link_info
*info
)
556 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
557 asection
**alloc_sec
;
558 unsigned int i
, n
, ovl_index
, num_buf
;
562 if (info
->output_bfd
->section_count
< 2)
566 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
567 if (alloc_sec
== NULL
)
570 /* Pick out all the alloced sections. */
571 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
572 if ((s
->flags
& SEC_ALLOC
) != 0
573 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
583 /* Sort them by vma. */
584 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
586 /* Look for overlapping vmas. Any with overlap must be overlays.
587 Count them. Also count the number of overlay regions. */
588 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
589 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
592 if (s
->vma
< ovl_end
)
594 asection
*s0
= alloc_sec
[i
- 1];
596 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
598 alloc_sec
[ovl_index
] = s0
;
599 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
600 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= ++num_buf
;
602 alloc_sec
[ovl_index
] = s
;
603 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
604 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
605 if (s0
->vma
!= s
->vma
)
607 info
->callbacks
->einfo (_("%X%P: overlay sections %A and %A "
608 "do not start at the same address.\n"),
612 if (ovl_end
< s
->vma
+ s
->size
)
613 ovl_end
= s
->vma
+ s
->size
;
616 ovl_end
= s
->vma
+ s
->size
;
619 htab
->num_overlays
= ovl_index
;
620 htab
->num_buf
= num_buf
;
621 htab
->ovl_sec
= alloc_sec
;
622 htab
->ovly_load
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
623 FALSE
, FALSE
, FALSE
);
624 htab
->ovly_return
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return",
625 FALSE
, FALSE
, FALSE
);
626 return ovl_index
!= 0;
/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.  */
#ifndef OVL_STUB_SIZE
/* Default to faster.  */
#define OVL_STUB_SIZE 16
/* #define OVL_STUB_SIZE 8 */
#endif

/* SPU instruction opcodes used when emitting stubs.  */
#define BRSL 0x33000000
#define BR 0x32000000
#define NOP 0x40200000
#define LNOP 0x00200000
#define ILA 0x42000000
642 /* Return true for all relative and absolute branch instructions.
650 brhnz 00100011 0.. */
653 is_branch (const unsigned char *insn
)
655 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
658 /* Return true for all indirect branch instructions.
666 bihnz 00100101 011 */
669 is_indirect_branch (const unsigned char *insn
)
671 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
674 /* Return true for branch hint instructions.
679 is_hint (const unsigned char *insn
)
681 return (insn
[0] & 0xfc) == 0x10;
684 /* True if INPUT_SECTION might need overlay stubs. */
687 maybe_needs_stubs (asection
*input_section
, bfd
*output_bfd
)
689 /* No stubs for debug sections and suchlike. */
690 if ((input_section
->flags
& SEC_ALLOC
) == 0)
693 /* No stubs for link-once sections that will be discarded. */
694 if (input_section
->output_section
== NULL
695 || input_section
->output_section
->owner
!= output_bfd
)
698 /* Don't create stubs for .eh_frame references. */
699 if (strcmp (input_section
->name
, ".eh_frame") == 0)
713 /* Return non-zero if this reloc symbol should go via an overlay stub.
714 Return 2 if the stub must be in non-overlay area. */
716 static enum _stub_type
717 needs_ovl_stub (struct elf_link_hash_entry
*h
,
718 Elf_Internal_Sym
*sym
,
720 asection
*input_section
,
721 Elf_Internal_Rela
*irela
,
723 struct bfd_link_info
*info
)
725 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
726 enum elf_spu_reloc_type r_type
;
727 unsigned int sym_type
;
729 enum _stub_type ret
= no_stub
;
732 || sym_sec
->output_section
== NULL
733 || sym_sec
->output_section
->owner
!= info
->output_bfd
734 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
739 /* Ensure no stubs for user supplied overlay manager syms. */
740 if (h
== htab
->ovly_load
|| h
== htab
->ovly_return
)
743 /* setjmp always goes via an overlay stub, because then the return
744 and hence the longjmp goes via __ovly_return. That magically
745 makes setjmp/longjmp between overlays work. */
746 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
747 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
751 /* Usually, symbols in non-overlay sections don't need stubs. */
752 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
753 && !htab
->non_overlay_stubs
)
759 sym_type
= ELF_ST_TYPE (sym
->st_info
);
761 r_type
= ELF32_R_TYPE (irela
->r_info
);
763 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
767 if (contents
== NULL
)
770 if (!bfd_get_section_contents (input_section
->owner
,
777 contents
+= irela
->r_offset
;
779 if (is_branch (contents
) || is_hint (contents
))
782 if ((contents
[0] & 0xfd) == 0x31
783 && sym_type
!= STT_FUNC
786 /* It's common for people to write assembly and forget
787 to give function symbols the right type. Handle
788 calls to such symbols, but warn so that (hopefully)
789 people will fix their code. We need the symbol
790 type to be correct to distinguish function pointer
791 initialisation from other pointer initialisations. */
792 const char *sym_name
;
795 sym_name
= h
->root
.root
.string
;
798 Elf_Internal_Shdr
*symtab_hdr
;
799 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
800 sym_name
= bfd_elf_sym_name (input_section
->owner
,
805 (*_bfd_error_handler
) (_("warning: call to non-function"
806 " symbol %s defined in %B"),
807 sym_sec
->owner
, sym_name
);
813 if (sym_type
!= STT_FUNC
815 && (sym_sec
->flags
& SEC_CODE
) == 0)
818 /* A reference from some other section to a symbol in an overlay
819 section needs a stub. */
820 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
821 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
824 /* If this insn isn't a branch then we are possibly taking the
825 address of a function and passing it out somehow. */
826 return !branch
&& sym_type
== STT_FUNC
? nonovl_stub
: ret
;
830 count_stub (struct spu_link_hash_table
*htab
,
833 enum _stub_type stub_type
,
834 struct elf_link_hash_entry
*h
,
835 const Elf_Internal_Rela
*irela
)
837 unsigned int ovl
= 0;
838 struct got_entry
*g
, **head
;
841 /* If this instruction is a branch or call, we need a stub
842 for it. One stub per function per overlay.
843 If it isn't a branch, then we are taking the address of
844 this function so need a stub in the non-overlay area
845 for it. One stub per function. */
846 if (stub_type
!= nonovl_stub
)
847 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
850 head
= &h
->got
.glist
;
853 if (elf_local_got_ents (ibfd
) == NULL
)
855 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
856 * sizeof (*elf_local_got_ents (ibfd
)));
857 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
858 if (elf_local_got_ents (ibfd
) == NULL
)
861 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
866 addend
= irela
->r_addend
;
870 struct got_entry
*gnext
;
872 for (g
= *head
; g
!= NULL
; g
= g
->next
)
873 if (g
->addend
== addend
&& g
->ovl
== 0)
878 /* Need a new non-overlay area stub. Zap other stubs. */
879 for (g
= *head
; g
!= NULL
; g
= gnext
)
882 if (g
->addend
== addend
)
884 htab
->stub_count
[g
->ovl
] -= 1;
892 for (g
= *head
; g
!= NULL
; g
= g
->next
)
893 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
899 g
= bfd_malloc (sizeof *g
);
904 g
->stub_addr
= (bfd_vma
) -1;
908 htab
->stub_count
[ovl
] += 1;
914 /* Two instruction overlay stubs look like:
917 .word target_ovl_and_address
919 ovl_and_address is a word with the overlay number in the top 14 bits
920 and local store address in the bottom 18 bits.
922 Four instruction overlay stubs look like:
926 ila $79,target_address
930 build_stub (struct spu_link_hash_table
*htab
,
933 enum _stub_type stub_type
,
934 struct elf_link_hash_entry
*h
,
935 const Elf_Internal_Rela
*irela
,
940 struct got_entry
*g
, **head
;
942 bfd_vma addend
, val
, from
, to
;
945 if (stub_type
!= nonovl_stub
)
946 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
949 head
= &h
->got
.glist
;
951 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
955 addend
= irela
->r_addend
;
957 for (g
= *head
; g
!= NULL
; g
= g
->next
)
958 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
963 if (g
->ovl
== 0 && ovl
!= 0)
966 if (g
->stub_addr
!= (bfd_vma
) -1)
969 sec
= htab
->stub_sec
[ovl
];
970 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
971 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
973 to
= (htab
->ovly_load
->root
.u
.def
.value
974 + htab
->ovly_load
->root
.u
.def
.section
->output_offset
975 + htab
->ovly_load
->root
.u
.def
.section
->output_section
->vma
);
977 if (OVL_STUB_SIZE
== 16)
979 if (((dest
| to
| from
) & 3) != 0
980 || val
+ 0x40000 >= 0x80000)
985 ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
987 if (OVL_STUB_SIZE
== 16)
989 bfd_put_32 (sec
->owner
, ILA
+ ((ovl
<< 7) & 0x01ffff80) + 78,
990 sec
->contents
+ sec
->size
);
991 bfd_put_32 (sec
->owner
, LNOP
,
992 sec
->contents
+ sec
->size
+ 4);
993 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
994 sec
->contents
+ sec
->size
+ 8);
995 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
996 sec
->contents
+ sec
->size
+ 12);
998 else if (OVL_STUB_SIZE
== 8)
1000 bfd_put_32 (sec
->owner
, BRSL
+ ((val
<< 5) & 0x007fff80) + 75,
1001 sec
->contents
+ sec
->size
);
1003 val
= (dest
& 0x3ffff) | (ovl
<< 18);
1004 bfd_put_32 (sec
->owner
, val
,
1005 sec
->contents
+ sec
->size
+ 4);
1009 sec
->size
+= OVL_STUB_SIZE
;
1011 if (htab
->emit_stub_syms
)
1017 len
= 8 + sizeof (".ovl_call.") - 1;
1019 len
+= strlen (h
->root
.root
.string
);
1024 add
= (int) irela
->r_addend
& 0xffffffff;
1027 name
= bfd_malloc (len
);
1031 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1033 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1035 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1036 dest_sec
->id
& 0xffffffff,
1037 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1039 sprintf (name
+ len
- 9, "+%x", add
);
1041 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1045 if (h
->root
.type
== bfd_link_hash_new
)
1047 h
->root
.type
= bfd_link_hash_defined
;
1048 h
->root
.u
.def
.section
= sec
;
1049 h
->root
.u
.def
.value
= sec
->size
- OVL_STUB_SIZE
;
1050 h
->size
= OVL_STUB_SIZE
;
1054 h
->ref_regular_nonweak
= 1;
1055 h
->forced_local
= 1;
1063 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1067 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1069 /* Symbols starting with _SPUEAR_ need a stub because they may be
1070 invoked by the PPU. */
1071 struct bfd_link_info
*info
= inf
;
1072 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1075 if ((h
->root
.type
== bfd_link_hash_defined
1076 || h
->root
.type
== bfd_link_hash_defweak
)
1078 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1079 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1080 && sym_sec
->output_section
!= NULL
1081 && sym_sec
->output_section
->owner
== info
->output_bfd
1082 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1083 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1084 || htab
->non_overlay_stubs
))
1086 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1093 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1095 /* Symbols starting with _SPUEAR_ need a stub because they may be
1096 invoked by the PPU. */
1097 struct bfd_link_info
*info
= inf
;
1098 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1101 if ((h
->root
.type
== bfd_link_hash_defined
1102 || h
->root
.type
== bfd_link_hash_defweak
)
1104 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1105 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1106 && sym_sec
->output_section
!= NULL
1107 && sym_sec
->output_section
->owner
== info
->output_bfd
1108 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1109 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1110 || htab
->non_overlay_stubs
))
1112 return build_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1113 h
->root
.u
.def
.value
, sym_sec
);
1119 /* Size or build stubs. */
1122 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1124 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1127 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1129 extern const bfd_target bfd_elf32_spu_vec
;
1130 Elf_Internal_Shdr
*symtab_hdr
;
1132 Elf_Internal_Sym
*local_syms
= NULL
;
1134 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1137 /* We'll need the symbol table in a second. */
1138 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1139 if (symtab_hdr
->sh_info
== 0)
1142 /* Walk over each section attached to the input bfd. */
1143 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1145 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1147 /* If there aren't any relocs, then there's nothing more to do. */
1148 if ((isec
->flags
& SEC_RELOC
) == 0
1149 || isec
->reloc_count
== 0)
1152 if (!maybe_needs_stubs (isec
, info
->output_bfd
))
1155 /* Get the relocs. */
1156 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1158 if (internal_relocs
== NULL
)
1159 goto error_ret_free_local
;
1161 /* Now examine each relocation. */
1162 irela
= internal_relocs
;
1163 irelaend
= irela
+ isec
->reloc_count
;
1164 for (; irela
< irelaend
; irela
++)
1166 enum elf_spu_reloc_type r_type
;
1167 unsigned int r_indx
;
1169 Elf_Internal_Sym
*sym
;
1170 struct elf_link_hash_entry
*h
;
1171 enum _stub_type stub_type
;
1173 r_type
= ELF32_R_TYPE (irela
->r_info
);
1174 r_indx
= ELF32_R_SYM (irela
->r_info
);
1176 if (r_type
>= R_SPU_max
)
1178 bfd_set_error (bfd_error_bad_value
);
1179 error_ret_free_internal
:
1180 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1181 free (internal_relocs
);
1182 error_ret_free_local
:
1183 if (local_syms
!= NULL
1184 && (symtab_hdr
->contents
1185 != (unsigned char *) local_syms
))
1190 /* Determine the reloc target section. */
1191 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1192 goto error_ret_free_internal
;
1194 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1196 if (stub_type
== no_stub
)
1198 else if (stub_type
== stub_error
)
1199 goto error_ret_free_internal
;
1201 if (htab
->stub_count
== NULL
)
1204 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1205 htab
->stub_count
= bfd_zmalloc (amt
);
1206 if (htab
->stub_count
== NULL
)
1207 goto error_ret_free_internal
;
1212 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1213 goto error_ret_free_internal
;
1220 dest
= h
->root
.u
.def
.value
;
1222 dest
= sym
->st_value
;
1223 dest
+= irela
->r_addend
;
1224 if (!build_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
,
1226 goto error_ret_free_internal
;
1230 /* We're done with the internal relocs, free them. */
1231 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1232 free (internal_relocs
);
1235 if (local_syms
!= NULL
1236 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1238 if (!info
->keep_memory
)
1241 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1248 /* Allocate space for overlay call and return stubs. */
1251 spu_elf_size_stubs (struct bfd_link_info
*info
,
1252 void (*place_spu_section
) (asection
*, asection
*,
1254 int non_overlay_stubs
)
1256 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1263 htab
->non_overlay_stubs
= non_overlay_stubs
;
1264 if (!process_stubs (info
, FALSE
))
1267 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1271 if (htab
->stub_count
== NULL
)
1274 ibfd
= info
->input_bfds
;
1275 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1276 htab
->stub_sec
= bfd_zmalloc (amt
);
1277 if (htab
->stub_sec
== NULL
)
1280 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1281 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1282 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1283 htab
->stub_sec
[0] = stub
;
1285 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1287 stub
->size
= htab
->stub_count
[0] * OVL_STUB_SIZE
;
1288 (*place_spu_section
) (stub
, NULL
, ".text");
1290 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1292 asection
*osec
= htab
->ovl_sec
[i
];
1293 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1294 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1295 htab
->stub_sec
[ovl
] = stub
;
1297 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1299 stub
->size
= htab
->stub_count
[ovl
] * OVL_STUB_SIZE
;
1300 (*place_spu_section
) (stub
, osec
, NULL
);
1303 /* htab->ovtab consists of two arrays.
1313 . } _ovly_buf_table[];
1316 flags
= (SEC_ALLOC
| SEC_LOAD
1317 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1318 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1319 if (htab
->ovtab
== NULL
1320 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1323 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1324 (*place_spu_section
) (htab
->ovtab
, NULL
, ".data");
1326 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1327 if (htab
->toe
== NULL
1328 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1330 htab
->toe
->size
= 16;
1331 (*place_spu_section
) (htab
->toe
, NULL
, ".toe");
1336 /* Functions to handle embedded spu_ovl.o object. */
1339 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1345 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1351 struct _ovl_stream
*os
;
1355 os
= (struct _ovl_stream
*) stream
;
1356 max
= (const char *) os
->end
- (const char *) os
->start
;
1358 if ((ufile_ptr
) offset
>= max
)
1362 if (count
> max
- offset
)
1363 count
= max
- offset
;
1365 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1370 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1372 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1379 return *ovl_bfd
!= NULL
;
1382 /* Define an STT_OBJECT symbol. */
1384 static struct elf_link_hash_entry
*
1385 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1387 struct elf_link_hash_entry
*h
;
1389 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1393 if (h
->root
.type
!= bfd_link_hash_defined
1396 h
->root
.type
= bfd_link_hash_defined
;
1397 h
->root
.u
.def
.section
= htab
->ovtab
;
1398 h
->type
= STT_OBJECT
;
1401 h
->ref_regular_nonweak
= 1;
1406 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1407 h
->root
.u
.def
.section
->owner
,
1408 h
->root
.root
.string
);
1409 bfd_set_error (bfd_error_bad_value
);
1416 /* Fill in all stubs and the overlay tables. */
1419 spu_elf_build_stubs (struct bfd_link_info
*info
, int emit_syms
)
1421 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1422 struct elf_link_hash_entry
*h
;
1428 htab
->emit_stub_syms
= emit_syms
;
1429 if (htab
->stub_count
== NULL
)
1432 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1433 if (htab
->stub_sec
[i
]->size
!= 0)
1435 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1436 htab
->stub_sec
[i
]->size
);
1437 if (htab
->stub_sec
[i
]->contents
== NULL
)
1439 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1440 htab
->stub_sec
[i
]->size
= 0;
1443 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load", FALSE
, FALSE
, FALSE
);
1444 htab
->ovly_load
= h
;
1445 BFD_ASSERT (h
!= NULL
1446 && (h
->root
.type
== bfd_link_hash_defined
1447 || h
->root
.type
== bfd_link_hash_defweak
)
1450 s
= h
->root
.u
.def
.section
->output_section
;
1451 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1453 (*_bfd_error_handler
) (_("%s in overlay section"),
1454 h
->root
.root
.string
);
1455 bfd_set_error (bfd_error_bad_value
);
1459 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return", FALSE
, FALSE
, FALSE
);
1460 htab
->ovly_return
= h
;
1462 /* Fill in all the stubs. */
1463 process_stubs (info
, TRUE
);
1464 if (!htab
->stub_err
)
1465 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1469 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1470 bfd_set_error (bfd_error_bad_value
);
1474 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1476 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1478 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1479 bfd_set_error (bfd_error_bad_value
);
1482 htab
->stub_sec
[i
]->rawsize
= 0;
1485 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1486 if (htab
->ovtab
->contents
== NULL
)
1489 /* Write out _ovly_table. */
1490 p
= htab
->ovtab
->contents
;
1491 /* set low bit of .size to mark non-overlay area as present. */
1493 obfd
= htab
->ovtab
->output_section
->owner
;
1494 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1496 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
1500 unsigned long off
= ovl_index
* 16;
1501 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
1503 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1504 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16, p
+ off
+ 4);
1505 /* file_off written later in spu_elf_modify_program_headers. */
1506 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
1510 h
= define_ovtab_symbol (htab
, "_ovly_table");
1513 h
->root
.u
.def
.value
= 16;
1514 h
->size
= htab
->num_overlays
* 16;
1516 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
1519 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1522 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
1525 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1526 h
->size
= htab
->num_buf
* 4;
1528 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
1531 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1534 h
= define_ovtab_symbol (htab
, "_EAR_");
1537 h
->root
.u
.def
.section
= htab
->toe
;
1538 h
->root
.u
.def
.value
= 0;
1544 /* Check that all loadable section VMAs lie in the range
1545 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
1548 spu_elf_check_vma (struct bfd_link_info
*info
,
1552 unsigned int overlay_fixed
,
1553 unsigned int reserved
,
1554 int extra_stack_space
,
1555 void (*spu_elf_load_ovl_mgr
) (void),
1556 FILE *(*spu_elf_open_overlay_script
) (void),
1557 void (*spu_elf_relink
) (void))
1559 struct elf_segment_map
*m
;
1561 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1562 bfd
*abfd
= info
->output_bfd
;
1564 if (auto_overlay
& AUTO_OVERLAY
)
1565 htab
->auto_overlay
= auto_overlay
;
1566 htab
->local_store
= hi
+ 1 - lo
;
1567 htab
->overlay_fixed
= overlay_fixed
;
1568 htab
->reserved
= reserved
;
1569 htab
->extra_stack_space
= extra_stack_space
;
1570 htab
->spu_elf_load_ovl_mgr
= spu_elf_load_ovl_mgr
;
1571 htab
->spu_elf_open_overlay_script
= spu_elf_open_overlay_script
;
1572 htab
->spu_elf_relink
= spu_elf_relink
;
1574 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
1575 if (m
->p_type
== PT_LOAD
)
1576 for (i
= 0; i
< m
->count
; i
++)
1577 if (m
->sections
[i
]->size
!= 0
1578 && (m
->sections
[i
]->vma
< lo
1579 || m
->sections
[i
]->vma
> hi
1580 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
1581 return m
->sections
[i
];
1583 /* No need for overlays if it all fits. */
1584 htab
->auto_overlay
= 0;
1588 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1589 Search for stack adjusting insns, and return the sp delta. */
1592 find_function_stack_adjust (asection
*sec
, bfd_vma offset
)
1596 memset (reg
, 0, sizeof (reg
));
1597 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
1599 unsigned char buf
[4];
1603 /* Assume no relocs on stack adjusing insns. */
1604 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
1607 if (buf
[0] == 0x24 /* stqd */)
1611 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
1612 /* Partly decoded immediate field. */
1613 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
1615 if (buf
[0] == 0x1c /* ai */)
1618 imm
= (imm
^ 0x200) - 0x200;
1619 reg
[rt
] = reg
[ra
] + imm
;
1621 if (rt
== 1 /* sp */)
1628 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
1630 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
1632 reg
[rt
] = reg
[ra
] + reg
[rb
];
1640 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1642 if (buf
[0] >= 0x42 /* ila */)
1643 imm
|= (buf
[0] & 1) << 17;
1648 if (buf
[0] == 0x40 /* il */)
1650 if ((buf
[1] & 0x80) == 0)
1652 imm
= (imm
^ 0x8000) - 0x8000;
1654 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
1660 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
1662 reg
[rt
] |= imm
& 0xffff;
1665 else if (buf
[0] == 0x04 /* ori */)
1668 imm
= (imm
^ 0x200) - 0x200;
1669 reg
[rt
] = reg
[ra
] | imm
;
1672 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
1674 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
1675 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
1676 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
1677 | ((imm
& 0x1000) ? 0x000000ff : 0));
1680 else if (buf
[0] == 0x16 /* andbi */)
1686 reg
[rt
] = reg
[ra
] & imm
;
1689 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
1691 /* Used in pic reg load. Say rt is trashed. Won't be used
1692 in stack adjust, but we need to continue past this branch. */
1696 else if (is_branch (buf
) || is_indirect_branch (buf
))
1697 /* If we hit a branch then we must be out of the prologue. */
1704 /* qsort predicate to sort symbols by section and value. */
1706 static Elf_Internal_Sym
*sort_syms_syms
;
1707 static asection
**sort_syms_psecs
;
1710 sort_syms (const void *a
, const void *b
)
1712 Elf_Internal_Sym
*const *s1
= a
;
1713 Elf_Internal_Sym
*const *s2
= b
;
1714 asection
*sec1
,*sec2
;
1715 bfd_signed_vma delta
;
1717 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
1718 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
1721 return sec1
->index
- sec2
->index
;
1723 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
1725 return delta
< 0 ? -1 : 1;
1727 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
1729 return delta
< 0 ? -1 : 1;
1731 return *s1
< *s2
? -1 : 1;
1736 struct function_info
*fun
;
1737 struct call_info
*next
;
1739 unsigned int max_depth
;
1740 unsigned int is_tail
: 1;
1741 unsigned int is_pasted
: 1;
1744 struct function_info
1746 /* List of functions called. Also branches to hot/cold part of
1748 struct call_info
*call_list
;
1749 /* For hot/cold part of function, point to owner. */
1750 struct function_info
*start
;
1751 /* Symbol at start of function. */
1753 Elf_Internal_Sym
*sym
;
1754 struct elf_link_hash_entry
*h
;
1756 /* Function section. */
1759 /* Where last called from, and number of sections called from. */
1760 asection
*last_caller
;
1761 unsigned int call_count
;
1762 /* Address range of (this part of) function. */
1766 /* Distance from root of call tree. Tail and hot/cold branches
1767 count as one deeper. We aren't counting stack frames here. */
1769 /* Set if global symbol. */
1770 unsigned int global
: 1;
1771 /* Set if known to be start of function (as distinct from a hunk
1772 in hot/cold section. */
1773 unsigned int is_func
: 1;
1774 /* Set if not a root node. */
1775 unsigned int non_root
: 1;
1776 /* Flags used during call tree traversal. It's cheaper to replicate
1777 the visit flags than have one which needs clearing after a traversal. */
1778 unsigned int visit1
: 1;
1779 unsigned int visit2
: 1;
1780 unsigned int marking
: 1;
1781 unsigned int visit3
: 1;
1782 unsigned int visit4
: 1;
1783 unsigned int visit5
: 1;
1784 unsigned int visit6
: 1;
1785 unsigned int visit7
: 1;
1788 struct spu_elf_stack_info
1792 /* Variable size array describing functions, one per contiguous
1793 address range belonging to a function. */
1794 struct function_info fun
[1];
1797 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1798 entries for section SEC. */
1800 static struct spu_elf_stack_info
*
1801 alloc_stack_info (asection
*sec
, int max_fun
)
1803 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1806 amt
= sizeof (struct spu_elf_stack_info
);
1807 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
1808 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
1809 if (sec_data
->u
.i
.stack_info
!= NULL
)
1810 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
1811 return sec_data
->u
.i
.stack_info
;
1814 /* Add a new struct function_info describing a (part of a) function
1815 starting at SYM_H. Keep the array sorted by address. */
1817 static struct function_info
*
1818 maybe_insert_function (asection
*sec
,
1821 bfd_boolean is_func
)
1823 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1824 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1830 sinfo
= alloc_stack_info (sec
, 20);
1837 Elf_Internal_Sym
*sym
= sym_h
;
1838 off
= sym
->st_value
;
1839 size
= sym
->st_size
;
1843 struct elf_link_hash_entry
*h
= sym_h
;
1844 off
= h
->root
.u
.def
.value
;
1848 for (i
= sinfo
->num_fun
; --i
>= 0; )
1849 if (sinfo
->fun
[i
].lo
<= off
)
1854 /* Don't add another entry for an alias, but do update some
1856 if (sinfo
->fun
[i
].lo
== off
)
1858 /* Prefer globals over local syms. */
1859 if (global
&& !sinfo
->fun
[i
].global
)
1861 sinfo
->fun
[i
].global
= TRUE
;
1862 sinfo
->fun
[i
].u
.h
= sym_h
;
1865 sinfo
->fun
[i
].is_func
= TRUE
;
1866 return &sinfo
->fun
[i
];
1868 /* Ignore a zero-size symbol inside an existing function. */
1869 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
1870 return &sinfo
->fun
[i
];
1873 if (sinfo
->num_fun
>= sinfo
->max_fun
)
1875 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
1876 bfd_size_type old
= amt
;
1878 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1879 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
1880 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1881 sinfo
= bfd_realloc (sinfo
, amt
);
1884 memset ((char *) sinfo
+ old
, 0, amt
- old
);
1885 sec_data
->u
.i
.stack_info
= sinfo
;
1888 if (++i
< sinfo
->num_fun
)
1889 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
1890 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
1891 sinfo
->fun
[i
].is_func
= is_func
;
1892 sinfo
->fun
[i
].global
= global
;
1893 sinfo
->fun
[i
].sec
= sec
;
1895 sinfo
->fun
[i
].u
.h
= sym_h
;
1897 sinfo
->fun
[i
].u
.sym
= sym_h
;
1898 sinfo
->fun
[i
].lo
= off
;
1899 sinfo
->fun
[i
].hi
= off
+ size
;
1900 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
);
1901 sinfo
->num_fun
+= 1;
1902 return &sinfo
->fun
[i
];
1905 /* Return the name of FUN. */
1908 func_name (struct function_info
*fun
)
1912 Elf_Internal_Shdr
*symtab_hdr
;
1914 while (fun
->start
!= NULL
)
1918 return fun
->u
.h
->root
.root
.string
;
1921 if (fun
->u
.sym
->st_name
== 0)
1923 size_t len
= strlen (sec
->name
);
1924 char *name
= bfd_malloc (len
+ 10);
1927 sprintf (name
, "%s+%lx", sec
->name
,
1928 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
1932 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1933 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
1936 /* Read the instruction at OFF in SEC. Return true iff the instruction
1937 is a nop, lnop, or stop 0 (all zero insn). */
1940 is_nop (asection
*sec
, bfd_vma off
)
1942 unsigned char insn
[4];
1944 if (off
+ 4 > sec
->size
1945 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
1947 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
1949 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
1954 /* Extend the range of FUN to cover nop padding up to LIMIT.
1955 Return TRUE iff some instruction other than a NOP was found. */
1958 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
1960 bfd_vma off
= (fun
->hi
+ 3) & -4;
1962 while (off
< limit
&& is_nop (fun
->sec
, off
))
1973 /* Check and fix overlapping function ranges. Return TRUE iff there
1974 are gaps in the current info we have about functions in SEC. */
1977 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
1979 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1980 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1982 bfd_boolean gaps
= FALSE
;
1987 for (i
= 1; i
< sinfo
->num_fun
; i
++)
1988 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
1990 /* Fix overlapping symbols. */
1991 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
1992 const char *f2
= func_name (&sinfo
->fun
[i
]);
1994 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
1995 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
1997 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
2000 if (sinfo
->num_fun
== 0)
2004 if (sinfo
->fun
[0].lo
!= 0)
2006 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
2008 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
2010 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
2011 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
2013 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2019 /* Search current function info for a function that contains address
2020 OFFSET in section SEC. */
2022 static struct function_info
*
2023 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2025 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2026 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2030 hi
= sinfo
->num_fun
;
2033 mid
= (lo
+ hi
) / 2;
2034 if (offset
< sinfo
->fun
[mid
].lo
)
2036 else if (offset
>= sinfo
->fun
[mid
].hi
)
2039 return &sinfo
->fun
[mid
];
2041 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2046 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2047 if CALLEE was new. If this function return FALSE, CALLEE should
2051 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2053 struct call_info
**pp
, *p
;
2055 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2056 if (p
->fun
== callee
->fun
)
2058 /* Tail calls use less stack than normal calls. Retain entry
2059 for normal call over one for tail call. */
2060 p
->is_tail
&= callee
->is_tail
;
2063 p
->fun
->start
= NULL
;
2064 p
->fun
->is_func
= TRUE
;
2067 /* Reorder list so most recent call is first. */
2069 p
->next
= caller
->call_list
;
2070 caller
->call_list
= p
;
2073 callee
->next
= caller
->call_list
;
2075 caller
->call_list
= callee
;
2079 /* Copy CALL and insert the copy into CALLER. */
2082 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2084 struct call_info
*callee
;
2085 callee
= bfd_malloc (sizeof (*callee
));
2089 if (!insert_callee (caller
, callee
))
2094 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2095 overlay stub sections. */
2098 interesting_section (asection
*s
, bfd
*obfd
)
2100 return (s
->output_section
!= NULL
2101 && s
->output_section
->owner
== obfd
2102 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2103 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2107 /* Rummage through the relocs for SEC, looking for function calls.
2108 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2109 mark destination symbols on calls as being functions. Also
2110 look at branches, which may be tail calls or go to hot/cold
2111 section part of same function. */
2114 mark_functions_via_relocs (asection
*sec
,
2115 struct bfd_link_info
*info
,
2118 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2119 Elf_Internal_Shdr
*symtab_hdr
;
2121 static bfd_boolean warned
;
2123 if (!interesting_section (sec
, info
->output_bfd
)
2124 || sec
->reloc_count
== 0)
2127 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2129 if (internal_relocs
== NULL
)
2132 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2133 psyms
= &symtab_hdr
->contents
;
2134 irela
= internal_relocs
;
2135 irelaend
= irela
+ sec
->reloc_count
;
2136 for (; irela
< irelaend
; irela
++)
2138 enum elf_spu_reloc_type r_type
;
2139 unsigned int r_indx
;
2141 Elf_Internal_Sym
*sym
;
2142 struct elf_link_hash_entry
*h
;
2144 bfd_boolean reject
, is_call
;
2145 struct function_info
*caller
;
2146 struct call_info
*callee
;
2149 r_type
= ELF32_R_TYPE (irela
->r_info
);
2150 if (r_type
!= R_SPU_REL16
2151 && r_type
!= R_SPU_ADDR16
)
2154 if (!(call_tree
&& spu_hash_table (info
)->auto_overlay
))
2158 r_indx
= ELF32_R_SYM (irela
->r_info
);
2159 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2163 || sym_sec
->output_section
== NULL
2164 || sym_sec
->output_section
->owner
!= info
->output_bfd
)
2170 unsigned char insn
[4];
2172 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2173 irela
->r_offset
, 4))
2175 if (is_branch (insn
))
2177 is_call
= (insn
[0] & 0xfd) == 0x31;
2178 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2179 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2182 info
->callbacks
->einfo
2183 (_("%B(%A+0x%v): call to non-code section"
2184 " %B(%A), analysis incomplete\n"),
2185 sec
->owner
, sec
, irela
->r_offset
,
2186 sym_sec
->owner
, sym_sec
);
2194 if (!(call_tree
&& spu_hash_table (info
)->auto_overlay
)
2202 /* For --auto-overlay, count possible stubs we need for
2203 function pointer references. */
2204 unsigned int sym_type
;
2208 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2209 if (sym_type
== STT_FUNC
)
2210 spu_hash_table (info
)->non_ovly_stub
+= 1;
2215 val
= h
->root
.u
.def
.value
;
2217 val
= sym
->st_value
;
2218 val
+= irela
->r_addend
;
2222 struct function_info
*fun
;
2224 if (irela
->r_addend
!= 0)
2226 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2229 fake
->st_value
= val
;
2231 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2235 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2237 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2240 if (irela
->r_addend
!= 0
2241 && fun
->u
.sym
!= sym
)
2246 caller
= find_function (sec
, irela
->r_offset
, info
);
2249 callee
= bfd_malloc (sizeof *callee
);
2253 callee
->fun
= find_function (sym_sec
, val
, info
);
2254 if (callee
->fun
== NULL
)
2256 callee
->is_tail
= !is_call
;
2257 callee
->is_pasted
= FALSE
;
2259 if (callee
->fun
->last_caller
!= sec
)
2261 callee
->fun
->last_caller
= sec
;
2262 callee
->fun
->call_count
+= 1;
2264 if (!insert_callee (caller
, callee
))
2267 && !callee
->fun
->is_func
2268 && callee
->fun
->stack
== 0)
2270 /* This is either a tail call or a branch from one part of
2271 the function to another, ie. hot/cold section. If the
2272 destination has been called by some other function then
2273 it is a separate function. We also assume that functions
2274 are not split across input files. */
2275 if (sec
->owner
!= sym_sec
->owner
)
2277 callee
->fun
->start
= NULL
;
2278 callee
->fun
->is_func
= TRUE
;
2280 else if (callee
->fun
->start
== NULL
)
2281 callee
->fun
->start
= caller
;
2284 struct function_info
*callee_start
;
2285 struct function_info
*caller_start
;
2286 callee_start
= callee
->fun
;
2287 while (callee_start
->start
)
2288 callee_start
= callee_start
->start
;
2289 caller_start
= caller
;
2290 while (caller_start
->start
)
2291 caller_start
= caller_start
->start
;
2292 if (caller_start
!= callee_start
)
2294 callee
->fun
->start
= NULL
;
2295 callee
->fun
->is_func
= TRUE
;
2304 /* Handle something like .init or .fini, which has a piece of a function.
2305 These sections are pasted together to form a single function. */
2308 pasted_function (asection
*sec
, struct bfd_link_info
*info
)
2310 struct bfd_link_order
*l
;
2311 struct _spu_elf_section_data
*sec_data
;
2312 struct spu_elf_stack_info
*sinfo
;
2313 Elf_Internal_Sym
*fake
;
2314 struct function_info
*fun
, *fun_start
;
2316 fake
= bfd_zmalloc (sizeof (*fake
));
2320 fake
->st_size
= sec
->size
;
2322 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2323 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2327 /* Find a function immediately preceding this section. */
2329 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2331 if (l
->u
.indirect
.section
== sec
)
2333 if (fun_start
!= NULL
)
2335 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2339 fun
->start
= fun_start
;
2341 callee
->is_tail
= TRUE
;
2342 callee
->is_pasted
= TRUE
;
2344 if (!insert_callee (fun_start
, callee
))
2350 if (l
->type
== bfd_indirect_link_order
2351 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2352 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2353 && sinfo
->num_fun
!= 0)
2354 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2357 info
->callbacks
->einfo (_("%A link_order not found\n"), sec
);
2361 /* Map address ranges in code sections to functions. */
2364 discover_functions (struct bfd_link_info
*info
)
2368 Elf_Internal_Sym
***psym_arr
;
2369 asection
***sec_arr
;
2370 bfd_boolean gaps
= FALSE
;
2373 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2376 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2377 if (psym_arr
== NULL
)
2379 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2380 if (sec_arr
== NULL
)
2384 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2386 ibfd
= ibfd
->link_next
, bfd_idx
++)
2388 extern const bfd_target bfd_elf32_spu_vec
;
2389 Elf_Internal_Shdr
*symtab_hdr
;
2392 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2393 asection
**psecs
, **p
;
2395 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2398 /* Read all the symbols. */
2399 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2400 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2404 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2405 if (interesting_section (sec
, info
->output_bfd
))
2413 if (symtab_hdr
->contents
!= NULL
)
2415 /* Don't use cached symbols since the generic ELF linker
2416 code only reads local symbols, and we need globals too. */
2417 free (symtab_hdr
->contents
);
2418 symtab_hdr
->contents
= NULL
;
2420 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2422 symtab_hdr
->contents
= (void *) syms
;
2426 /* Select defined function symbols that are going to be output. */
2427 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2430 psym_arr
[bfd_idx
] = psyms
;
2431 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2434 sec_arr
[bfd_idx
] = psecs
;
2435 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2436 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2437 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2441 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2442 if (s
!= NULL
&& interesting_section (s
, info
->output_bfd
))
2445 symcount
= psy
- psyms
;
2448 /* Sort them by section and offset within section. */
2449 sort_syms_syms
= syms
;
2450 sort_syms_psecs
= psecs
;
2451 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2453 /* Now inspect the function symbols. */
2454 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2456 asection
*s
= psecs
[*psy
- syms
];
2457 Elf_Internal_Sym
**psy2
;
2459 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2460 if (psecs
[*psy2
- syms
] != s
)
2463 if (!alloc_stack_info (s
, psy2
- psy
))
2468 /* First install info about properly typed and sized functions.
2469 In an ideal world this will cover all code sections, except
2470 when partitioning functions into hot and cold sections,
2471 and the horrible pasted together .init and .fini functions. */
2472 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2475 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2477 asection
*s
= psecs
[sy
- syms
];
2478 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2483 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2484 if (interesting_section (sec
, info
->output_bfd
))
2485 gaps
|= check_function_ranges (sec
, info
);
2490 /* See if we can discover more function symbols by looking at
2492 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2494 ibfd
= ibfd
->link_next
, bfd_idx
++)
2498 if (psym_arr
[bfd_idx
] == NULL
)
2501 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2502 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2506 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2508 ibfd
= ibfd
->link_next
, bfd_idx
++)
2510 Elf_Internal_Shdr
*symtab_hdr
;
2512 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2515 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2518 psecs
= sec_arr
[bfd_idx
];
2520 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2521 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2524 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2525 if (interesting_section (sec
, info
->output_bfd
))
2526 gaps
|= check_function_ranges (sec
, info
);
2530 /* Finally, install all globals. */
2531 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2535 s
= psecs
[sy
- syms
];
2537 /* Global syms might be improperly typed functions. */
2538 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2539 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2541 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
2547 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2549 extern const bfd_target bfd_elf32_spu_vec
;
2552 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2555 /* Some of the symbols we've installed as marking the
2556 beginning of functions may have a size of zero. Extend
2557 the range of such functions to the beginning of the
2558 next symbol of interest. */
2559 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2560 if (interesting_section (sec
, info
->output_bfd
))
2562 struct _spu_elf_section_data
*sec_data
;
2563 struct spu_elf_stack_info
*sinfo
;
2565 sec_data
= spu_elf_section_data (sec
);
2566 sinfo
= sec_data
->u
.i
.stack_info
;
2570 bfd_vma hi
= sec
->size
;
2572 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
2574 sinfo
->fun
[fun_idx
].hi
= hi
;
2575 hi
= sinfo
->fun
[fun_idx
].lo
;
2578 /* No symbols in this section. Must be .init or .fini
2579 or something similar. */
2580 else if (!pasted_function (sec
, info
))
2586 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2588 ibfd
= ibfd
->link_next
, bfd_idx
++)
2590 if (psym_arr
[bfd_idx
] == NULL
)
2593 free (psym_arr
[bfd_idx
]);
2594 free (sec_arr
[bfd_idx
]);
2603 /* Iterate over all function_info we have collected, calling DOIT on
2604 each node if ROOT_ONLY is false. Only call DOIT on root nodes
2608 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
2609 struct bfd_link_info
*,
2611 struct bfd_link_info
*info
,
2617 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2619 extern const bfd_target bfd_elf32_spu_vec
;
2622 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2625 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2627 struct _spu_elf_section_data
*sec_data
;
2628 struct spu_elf_stack_info
*sinfo
;
2630 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2631 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
2634 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2635 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
2636 if (!doit (&sinfo
->fun
[i
], info
, param
))
2644 /* Transfer call info attached to struct function_info entries for
2645 all of a given function's sections to the first entry. */
2648 transfer_calls (struct function_info
*fun
,
2649 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2650 void *param ATTRIBUTE_UNUSED
)
2652 struct function_info
*start
= fun
->start
;
2656 struct call_info
*call
, *call_next
;
2658 while (start
->start
!= NULL
)
2659 start
= start
->start
;
2660 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
2662 call_next
= call
->next
;
2663 if (!insert_callee (start
, call
))
2666 fun
->call_list
= NULL
;
2671 /* Mark nodes in the call graph that are called by some other node. */
2674 mark_non_root (struct function_info
*fun
,
2675 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2676 void *param ATTRIBUTE_UNUSED
)
2678 struct call_info
*call
;
2683 for (call
= fun
->call_list
; call
; call
= call
->next
)
2685 call
->fun
->non_root
= TRUE
;
2686 mark_non_root (call
->fun
, 0, 0);
2691 /* Remove cycles from the call graph. Set depth of nodes. */
2694 remove_cycles (struct function_info
*fun
,
2695 struct bfd_link_info
*info
,
2698 struct call_info
**callp
, *call
;
2699 unsigned int depth
= *(unsigned int *) param
;
2700 unsigned int max_depth
= depth
;
2704 fun
->marking
= TRUE
;
2706 callp
= &fun
->call_list
;
2707 while ((call
= *callp
) != NULL
)
2709 if (!call
->fun
->visit2
)
2711 call
->max_depth
= depth
+ !call
->is_pasted
;
2712 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
2714 if (max_depth
< call
->max_depth
)
2715 max_depth
= call
->max_depth
;
2717 else if (call
->fun
->marking
)
2719 if (!spu_hash_table (info
)->auto_overlay
)
2721 const char *f1
= func_name (fun
);
2722 const char *f2
= func_name (call
->fun
);
2724 info
->callbacks
->info (_("Stack analysis will ignore the call "
2728 *callp
= call
->next
;
2732 callp
= &call
->next
;
2734 fun
->marking
= FALSE
;
2735 *(unsigned int *) param
= max_depth
;
2739 /* Check that we actually visited all nodes in remove_cycles. If we
2740 didn't, then there is some cycle in the call graph not attached to
2741 any root node. Arbitrarily choose a node in the cycle as a new
2742 root and break the cycle. */
2745 mark_detached_root (struct function_info
*fun
,
2746 struct bfd_link_info
*info
,
2751 fun
->non_root
= FALSE
;
2752 *(unsigned int *) param
= 0;
2753 return remove_cycles (fun
, info
, param
);
2756 /* Populate call_list for each function. */
2759 build_call_tree (struct bfd_link_info
*info
)
2764 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2766 extern const bfd_target bfd_elf32_spu_vec
;
2769 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2772 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2773 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
2777 /* Transfer call info from hot/cold section part of function
2779 if (!spu_hash_table (info
)->auto_overlay
2780 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
2783 /* Find the call graph root(s). */
2784 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
2787 /* Remove cycles from the call graph. We start from the root node(s)
2788 so that we break cycles in a reasonable place. */
2790 if (!for_each_node (remove_cycles
, info
, &depth
, TRUE
))
2793 return for_each_node (mark_detached_root
, info
, &depth
, FALSE
);
2796 /* qsort predicate to sort calls by max_depth then count. */
2799 sort_calls (const void *a
, const void *b
)
2801 struct call_info
*const *c1
= a
;
2802 struct call_info
*const *c2
= b
;
2805 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
2809 delta
= (*c2
)->count
- (*c1
)->count
;
2813 return (char *) c1
- (char *) c2
;
/* Parameter block for mark_overlay_section: tracks the largest
   single overlay (text + rodata) seen so far.  */
struct _mos_param {
  unsigned int max_overlay_size;
};
2820 /* Set linker_mark and gc_mark on any sections that we will put in
2821 overlays. These flags are used by the generic ELF linker, but we
2822 won't be continuing on to bfd_elf_final_link so it is OK to use
2823 them. linker_mark is clear before we get here. Set segment_mark
2824 on sections that are part of a pasted function (excluding the last
2827 Set up function rodata section if --overlay-rodata. We don't
2828 currently include merged string constant rodata sections since
2830 Sort the call graph so that the deepest nodes will be visited
2834 mark_overlay_section (struct function_info
*fun
,
2835 struct bfd_link_info
*info
,
2838 struct call_info
*call
;
2840 struct _mos_param
*mos_param
= param
;
2846 if (!fun
->sec
->linker_mark
)
2850 fun
->sec
->linker_mark
= 1;
2851 fun
->sec
->gc_mark
= 1;
2852 fun
->sec
->segment_mark
= 0;
2853 /* Ensure SEC_CODE is set on this text section (it ought to
2854 be!), and SEC_CODE is clear on rodata sections. We use
2855 this flag to differentiate the two overlay section types. */
2856 fun
->sec
->flags
|= SEC_CODE
;
2858 if (spu_hash_table (info
)->auto_overlay
& OVERLAY_RODATA
)
2862 /* Find the rodata section corresponding to this function's
2864 if (strcmp (fun
->sec
->name
, ".text") == 0)
2866 name
= bfd_malloc (sizeof (".rodata"));
2869 memcpy (name
, ".rodata", sizeof (".rodata"));
2871 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
2873 size_t len
= strlen (fun
->sec
->name
);
2874 name
= bfd_malloc (len
+ 3);
2877 memcpy (name
, ".rodata", sizeof (".rodata"));
2878 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
2880 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
2882 size_t len
= strlen (fun
->sec
->name
) + 1;
2883 name
= bfd_malloc (len
);
2886 memcpy (name
, fun
->sec
->name
, len
);
2892 asection
*rodata
= NULL
;
2893 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
2894 if (group_sec
== NULL
)
2895 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
2897 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
2899 if (strcmp (group_sec
->name
, name
) == 0)
2904 group_sec
= elf_section_data (group_sec
)->next_in_group
;
2906 fun
->rodata
= rodata
;
2909 fun
->rodata
->linker_mark
= 1;
2910 fun
->rodata
->gc_mark
= 1;
2911 fun
->rodata
->flags
&= ~SEC_CODE
;
2916 size
= fun
->sec
->size
;
2918 size
+= fun
->rodata
->size
;
2919 if (mos_param
->max_overlay_size
< size
)
2920 mos_param
->max_overlay_size
= size
;
2923 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2928 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
2932 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2933 calls
[count
++] = call
;
2935 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
2937 fun
->call_list
= NULL
;
2941 calls
[count
]->next
= fun
->call_list
;
2942 fun
->call_list
= calls
[count
];
2947 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2949 if (call
->is_pasted
)
2951 /* There can only be one is_pasted call per function_info. */
2952 BFD_ASSERT (!fun
->sec
->segment_mark
);
2953 fun
->sec
->segment_mark
= 1;
2955 if (!mark_overlay_section (call
->fun
, info
, param
))
2959 /* Don't put entry code into an overlay. The overlay manager needs
2961 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
2962 == info
->output_bfd
->start_address
)
2964 fun
->sec
->linker_mark
= 0;
2965 if (fun
->rodata
!= NULL
)
2966 fun
->rodata
->linker_mark
= 0;
2971 /* If non-zero then unmark functions called from those within sections
2972 that we need to unmark. Unfortunately this isn't reliable since the
2973 call graph cannot know the destination of function pointer calls. */
2974 #define RECURSE_UNMARK 0
2977 asection
*exclude_input_section
;
2978 asection
*exclude_output_section
;
2979 unsigned long clearing
;
2982 /* Undo some of mark_overlay_section's work. */
2985 unmark_overlay_section (struct function_info
*fun
,
2986 struct bfd_link_info
*info
,
2989 struct call_info
*call
;
2990 struct _uos_param
*uos_param
= param
;
2991 unsigned int excluded
= 0;
2999 if (fun
->sec
== uos_param
->exclude_input_section
3000 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3004 uos_param
->clearing
+= excluded
;
3006 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3008 fun
->sec
->linker_mark
= 0;
3010 fun
->rodata
->linker_mark
= 0;
3013 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3014 if (!unmark_overlay_section (call
->fun
, info
, param
))
3018 uos_param
->clearing
-= excluded
;
3023 unsigned int lib_size
;
3024 asection
**lib_sections
;
3027 /* Add sections we have marked as belonging to overlays to an array
3028 for consideration as non-overlay sections. The array consist of
3029 pairs of sections, (text,rodata), for functions in the call graph. */
3032 collect_lib_sections (struct function_info
*fun
,
3033 struct bfd_link_info
*info
,
3036 struct _cl_param
*lib_param
= param
;
3037 struct call_info
*call
;
3044 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3047 size
= fun
->sec
->size
;
3049 size
+= fun
->rodata
->size
;
3050 if (size
> lib_param
->lib_size
)
3053 *lib_param
->lib_sections
++ = fun
->sec
;
3054 fun
->sec
->gc_mark
= 0;
3055 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3057 *lib_param
->lib_sections
++ = fun
->rodata
;
3058 fun
->rodata
->gc_mark
= 0;
3061 *lib_param
->lib_sections
++ = NULL
;
3063 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3064 collect_lib_sections (call
->fun
, info
, param
);
3069 /* qsort predicate to sort sections by call count. */
3072 sort_lib (const void *a
, const void *b
)
3074 asection
*const *s1
= a
;
3075 asection
*const *s2
= b
;
3076 struct _spu_elf_section_data
*sec_data
;
3077 struct spu_elf_stack_info
*sinfo
;
3081 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3082 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3085 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3086 delta
-= sinfo
->fun
[i
].call_count
;
3089 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3090 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3093 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3094 delta
+= sinfo
->fun
[i
].call_count
;
3103 /* Remove some sections from those marked to be in overlays. Choose
3104 those that are called from many places, likely library functions. */
3107 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3110 asection
**lib_sections
;
3111 unsigned int i
, lib_count
;
3112 struct _cl_param collect_lib_param
;
3113 struct function_info dummy_caller
;
3115 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3117 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3119 extern const bfd_target bfd_elf32_spu_vec
;
3122 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3125 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3126 if (sec
->linker_mark
3127 && sec
->size
< lib_size
3128 && (sec
->flags
& SEC_CODE
) != 0)
3131 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3132 if (lib_sections
== NULL
)
3133 return (unsigned int) -1;
3134 collect_lib_param
.lib_size
= lib_size
;
3135 collect_lib_param
.lib_sections
= lib_sections
;
3136 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3138 return (unsigned int) -1;
3139 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3141 /* Sort sections so that those with the most calls are first. */
3143 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3145 for (i
= 0; i
< lib_count
; i
++)
3147 unsigned int tmp
, stub_size
;
3149 struct _spu_elf_section_data
*sec_data
;
3150 struct spu_elf_stack_info
*sinfo
;
3152 sec
= lib_sections
[2 * i
];
3153 /* If this section is OK, its size must be less than lib_size. */
3155 /* If it has a rodata section, then add that too. */
3156 if (lib_sections
[2 * i
+ 1])
3157 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3158 /* Add any new overlay call stubs needed by the section. */
3161 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3162 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3165 struct call_info
*call
;
3167 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3168 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3169 if (call
->fun
->sec
->linker_mark
)
3171 struct call_info
*p
;
3172 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3173 if (p
->fun
== call
->fun
)
3176 stub_size
+= OVL_STUB_SIZE
;
3179 if (tmp
+ stub_size
< lib_size
)
3181 struct call_info
**pp
, *p
;
3183 /* This section fits. Mark it as non-overlay. */
3184 lib_sections
[2 * i
]->linker_mark
= 0;
3185 if (lib_sections
[2 * i
+ 1])
3186 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3187 lib_size
-= tmp
+ stub_size
;
3188 /* Call stubs to the section we just added are no longer
3190 pp
= &dummy_caller
.call_list
;
3191 while ((p
= *pp
) != NULL
)
3192 if (!p
->fun
->sec
->linker_mark
)
3194 lib_size
+= OVL_STUB_SIZE
;
3200 /* Add new call stubs to dummy_caller. */
3201 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3202 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3205 struct call_info
*call
;
3207 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3208 for (call
= sinfo
->fun
[k
].call_list
;
3211 if (call
->fun
->sec
->linker_mark
)
3213 struct call_info
*callee
;
3214 callee
= bfd_malloc (sizeof (*callee
));
3216 return (unsigned int) -1;
3218 if (!insert_callee (&dummy_caller
, callee
))
3224 while (dummy_caller
.call_list
!= NULL
)
3226 struct call_info
*call
= dummy_caller
.call_list
;
3227 dummy_caller
.call_list
= call
->next
;
3230 for (i
= 0; i
< 2 * lib_count
; i
++)
3231 if (lib_sections
[i
])
3232 lib_sections
[i
]->gc_mark
= 1;
3233 free (lib_sections
);
3237 /* Build an array of overlay sections. The deepest node's section is
3238 added first, then its parent node's section, then everything called
3239 from the parent section. The idea being to group sections to
3240 minimise calls between different overlays. */
3243 collect_overlays (struct function_info
*fun
,
3244 struct bfd_link_info
*info
,
3247 struct call_info
*call
;
3248 bfd_boolean added_fun
;
3249 asection
***ovly_sections
= param
;
3255 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3256 if (!call
->is_pasted
)
3258 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3264 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3266 fun
->sec
->gc_mark
= 0;
3267 *(*ovly_sections
)++ = fun
->sec
;
3268 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3270 fun
->rodata
->gc_mark
= 0;
3271 *(*ovly_sections
)++ = fun
->rodata
;
3274 *(*ovly_sections
)++ = NULL
;
3277 /* Pasted sections must stay with the first section. We don't
3278 put pasted sections in the array, just the first section.
3279 Mark subsequent sections as already considered. */
3280 if (fun
->sec
->segment_mark
)
3282 struct function_info
*call_fun
= fun
;
3285 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3286 if (call
->is_pasted
)
3288 call_fun
= call
->fun
;
3289 call_fun
->sec
->gc_mark
= 0;
3290 if (call_fun
->rodata
)
3291 call_fun
->rodata
->gc_mark
= 0;
3297 while (call_fun
->sec
->segment_mark
);
3301 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3302 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3307 struct _spu_elf_section_data
*sec_data
;
3308 struct spu_elf_stack_info
*sinfo
;
3310 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3311 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3314 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3315 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3323 struct _sum_stack_param
{
3325 size_t overall_stack
;
3326 bfd_boolean emit_stack_syms
;
3329 /* Descend the call graph for FUN, accumulating total stack required. */
3332 sum_stack (struct function_info
*fun
,
3333 struct bfd_link_info
*info
,
3336 struct call_info
*call
;
3337 struct function_info
*max
;
3338 size_t stack
, cum_stack
;
3340 bfd_boolean has_call
;
3341 struct _sum_stack_param
*sum_stack_param
= param
;
3342 struct spu_link_hash_table
*htab
;
3344 cum_stack
= fun
->stack
;
3345 sum_stack_param
->cum_stack
= cum_stack
;
3351 for (call
= fun
->call_list
; call
; call
= call
->next
)
3353 if (!call
->is_pasted
)
3355 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3357 stack
= sum_stack_param
->cum_stack
;
3358 /* Include caller stack for normal calls, don't do so for
3359 tail calls. fun->stack here is local stack usage for
3361 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3362 stack
+= fun
->stack
;
3363 if (cum_stack
< stack
)
3370 sum_stack_param
->cum_stack
= cum_stack
;
3372 /* Now fun->stack holds cumulative stack. */
3373 fun
->stack
= cum_stack
;
3377 && sum_stack_param
->overall_stack
< cum_stack
)
3378 sum_stack_param
->overall_stack
= cum_stack
;
3380 htab
= spu_hash_table (info
);
3381 if (htab
->auto_overlay
)
3384 f1
= func_name (fun
);
3386 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3387 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3388 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3392 info
->callbacks
->minfo (_(" calls:\n"));
3393 for (call
= fun
->call_list
; call
; call
= call
->next
)
3394 if (!call
->is_pasted
)
3396 const char *f2
= func_name (call
->fun
);
3397 const char *ann1
= call
->fun
== max
? "*" : " ";
3398 const char *ann2
= call
->is_tail
? "t" : " ";
3400 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
3404 if (sum_stack_param
->emit_stack_syms
)
3406 char *name
= bfd_malloc (18 + strlen (f1
));
3407 struct elf_link_hash_entry
*h
;
3412 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
3413 sprintf (name
, "__stack_%s", f1
);
3415 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
3417 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
3420 && (h
->root
.type
== bfd_link_hash_new
3421 || h
->root
.type
== bfd_link_hash_undefined
3422 || h
->root
.type
== bfd_link_hash_undefweak
))
3424 h
->root
.type
= bfd_link_hash_defined
;
3425 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
3426 h
->root
.u
.def
.value
= cum_stack
;
3431 h
->ref_regular_nonweak
= 1;
3432 h
->forced_local
= 1;
3440 /* SEC is part of a pasted function. Return the call_info for the
3441 next section of this function. */
3443 static struct call_info
*
3444 find_pasted_call (asection
*sec
)
3446 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
3447 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
3448 struct call_info
*call
;
3451 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3452 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
3453 if (call
->is_pasted
)
3459 /* qsort predicate to sort bfds by file name. */
3462 sort_bfds (const void *a
, const void *b
)
3464 bfd
*const *abfd1
= a
;
3465 bfd
*const *abfd2
= b
;
3467 return strcmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
3470 /* Handle --auto-overlay. */
3472 static void spu_elf_auto_overlay (struct bfd_link_info
*, void (*) (void))
3476 spu_elf_auto_overlay (struct bfd_link_info
*info
,
3477 void (*spu_elf_load_ovl_mgr
) (void))
3481 struct elf_segment_map
*m
;
3482 unsigned int fixed_size
, lo
, hi
;
3483 struct spu_link_hash_table
*htab
;
3484 unsigned int base
, i
, count
, bfd_count
;
3486 asection
**ovly_sections
, **ovly_p
;
3488 unsigned int total_overlay_size
, overlay_size
;
3489 struct elf_link_hash_entry
*h
;
3490 struct _mos_param mos_param
;
3491 struct _uos_param uos_param
;
3492 struct function_info dummy_caller
;
3494 /* Find the extents of our loadable image. */
3495 lo
= (unsigned int) -1;
3497 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
3498 if (m
->p_type
== PT_LOAD
)
3499 for (i
= 0; i
< m
->count
; i
++)
3500 if (m
->sections
[i
]->size
!= 0)
3502 if (m
->sections
[i
]->vma
< lo
)
3503 lo
= m
->sections
[i
]->vma
;
3504 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
3505 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
3507 fixed_size
= hi
+ 1 - lo
;
3509 if (!discover_functions (info
))
3512 if (!build_call_tree (info
))
3515 uos_param
.exclude_input_section
= 0;
3516 uos_param
.exclude_output_section
3517 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
3519 htab
= spu_hash_table (info
);
3520 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
3521 FALSE
, FALSE
, FALSE
);
3523 && (h
->root
.type
== bfd_link_hash_defined
3524 || h
->root
.type
== bfd_link_hash_defweak
)
3527 /* We have a user supplied overlay manager. */
3528 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
3532 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
3533 builtin version to .text, and will adjust .text size. */
3534 asection
*text
= bfd_get_section_by_name (info
->output_bfd
, ".text");
3536 fixed_size
-= text
->size
;
3537 spu_elf_load_ovl_mgr ();
3538 text
= bfd_get_section_by_name (info
->output_bfd
, ".text");
3540 fixed_size
+= text
->size
;
3543 /* Mark overlay sections, and find max overlay section size. */
3544 mos_param
.max_overlay_size
= 0;
3545 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
3548 /* We can't put the overlay manager or interrupt routines in
3550 uos_param
.clearing
= 0;
3551 if ((uos_param
.exclude_input_section
3552 || uos_param
.exclude_output_section
)
3553 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
3557 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3559 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
3560 if (bfd_arr
== NULL
)
3563 /* Count overlay sections, and subtract their sizes from "fixed_size". */
3566 total_overlay_size
= 0;
3567 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3569 extern const bfd_target bfd_elf32_spu_vec
;
3571 unsigned int old_count
;
3573 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3577 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3578 if (sec
->linker_mark
)
3580 if ((sec
->flags
& SEC_CODE
) != 0)
3582 fixed_size
-= sec
->size
;
3583 total_overlay_size
+= sec
->size
;
3585 if (count
!= old_count
)
3586 bfd_arr
[bfd_count
++] = ibfd
;
3589 /* Since the overlay link script selects sections by file name and
3590 section name, ensure that file names are unique. */
3593 bfd_boolean ok
= TRUE
;
3595 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
3596 for (i
= 1; i
< bfd_count
; ++i
)
3597 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
3599 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
3601 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
3602 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
3603 bfd_arr
[i
]->filename
,
3604 bfd_arr
[i
]->my_archive
->filename
);
3606 info
->callbacks
->einfo (_("%s duplicated\n"),
3607 bfd_arr
[i
]->filename
);
3613 info
->callbacks
->einfo (_("sorry, no support for duplicate "
3614 "object files in auto-overlay script\n"));
3615 bfd_set_error (bfd_error_bad_value
);
3621 if (htab
->reserved
== 0)
3623 struct _sum_stack_param sum_stack_param
;
3625 sum_stack_param
.emit_stack_syms
= 0;
3626 sum_stack_param
.overall_stack
= 0;
3627 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3629 htab
->reserved
= sum_stack_param
.overall_stack
+ htab
->extra_stack_space
;
3631 fixed_size
+= htab
->reserved
;
3632 fixed_size
+= htab
->non_ovly_stub
* OVL_STUB_SIZE
;
3633 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
3635 /* Guess number of overlays. Assuming overlay buffer is on
3636 average only half full should be conservative. */
3637 ovlynum
= total_overlay_size
* 2 / (htab
->local_store
- fixed_size
);
3638 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
3639 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
3642 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
3643 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
3644 "size of 0x%v exceeds local store\n"),
3645 (bfd_vma
) fixed_size
,
3646 (bfd_vma
) mos_param
.max_overlay_size
);
3648 /* Now see if we should put some functions in the non-overlay area. */
3649 else if (fixed_size
< htab
->overlay_fixed
)
3651 unsigned int max_fixed
, lib_size
;
3653 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
3654 if (max_fixed
> htab
->overlay_fixed
)
3655 max_fixed
= htab
->overlay_fixed
;
3656 lib_size
= max_fixed
- fixed_size
;
3657 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
3658 if (lib_size
== (unsigned int) -1)
3660 fixed_size
= max_fixed
- lib_size
;
3663 /* Build an array of sections, suitably sorted to place into
3665 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
3666 if (ovly_sections
== NULL
)
3668 ovly_p
= ovly_sections
;
3669 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
3671 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
3673 script
= htab
->spu_elf_open_overlay_script ();
3675 if (fprintf (script
, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
3678 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3679 overlay_size
= htab
->local_store
- fixed_size
;
3682 while (base
< count
)
3684 unsigned int size
= 0;
3687 for (i
= base
; i
< count
; i
++)
3691 unsigned int stub_size
;
3692 struct call_info
*call
, *pasty
;
3693 struct _spu_elf_section_data
*sec_data
;
3694 struct spu_elf_stack_info
*sinfo
;
3697 /* See whether we can add this section to the current
3698 overlay without overflowing our overlay buffer. */
3699 sec
= ovly_sections
[2 * i
];
3700 tmp
= size
+ sec
->size
;
3701 if (ovly_sections
[2 * i
+ 1])
3702 tmp
+= ovly_sections
[2 * i
+ 1]->size
;
3703 if (tmp
> overlay_size
)
3705 if (sec
->segment_mark
)
3707 /* Pasted sections must stay together, so add their
3709 struct call_info
*pasty
= find_pasted_call (sec
);
3710 while (pasty
!= NULL
)
3712 struct function_info
*call_fun
= pasty
->fun
;
3713 tmp
+= call_fun
->sec
->size
;
3714 if (call_fun
->rodata
)
3715 tmp
+= call_fun
->rodata
->size
;
3716 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
3717 if (pasty
->is_pasted
)
3721 if (tmp
> overlay_size
)
3724 /* If we add this section, we might need new overlay call
3725 stubs. Add any overlay section calls to dummy_call. */
3727 sec_data
= spu_elf_section_data (sec
);
3728 sinfo
= sec_data
->u
.i
.stack_info
;
3729 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3730 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3731 if (call
->is_pasted
)
3733 BFD_ASSERT (pasty
== NULL
);
3736 else if (call
->fun
->sec
->linker_mark
)
3738 if (!copy_callee (&dummy_caller
, call
))
3741 while (pasty
!= NULL
)
3743 struct function_info
*call_fun
= pasty
->fun
;
3745 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3746 if (call
->is_pasted
)
3748 BFD_ASSERT (pasty
== NULL
);
3751 else if (!copy_callee (&dummy_caller
, call
))
3755 /* Calculate call stub size. */
3757 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
3761 stub_size
+= OVL_STUB_SIZE
;
3762 /* If the call is within this overlay, we won't need a
3764 for (k
= base
; k
< i
+ 1; k
++)
3765 if (call
->fun
->sec
== ovly_sections
[2 * k
])
3767 stub_size
-= OVL_STUB_SIZE
;
3771 if (tmp
+ stub_size
> overlay_size
)
3779 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
3780 ovly_sections
[2 * i
]->owner
,
3781 ovly_sections
[2 * i
],
3782 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
3783 bfd_set_error (bfd_error_bad_value
);
3787 if (fprintf (script
, " .ovly%d {\n", ++ovlynum
) <= 0)
3789 for (j
= base
; j
< i
; j
++)
3791 asection
*sec
= ovly_sections
[2 * j
];
3793 if (fprintf (script
, " %s%c%s (%s)\n",
3794 (sec
->owner
->my_archive
!= NULL
3795 ? sec
->owner
->my_archive
->filename
: ""),
3796 info
->path_separator
,
3797 sec
->owner
->filename
,
3800 if (sec
->segment_mark
)
3802 struct call_info
*call
= find_pasted_call (sec
);
3803 while (call
!= NULL
)
3805 struct function_info
*call_fun
= call
->fun
;
3806 sec
= call_fun
->sec
;
3807 if (fprintf (script
, " %s%c%s (%s)\n",
3808 (sec
->owner
->my_archive
!= NULL
3809 ? sec
->owner
->my_archive
->filename
: ""),
3810 info
->path_separator
,
3811 sec
->owner
->filename
,
3814 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3815 if (call
->is_pasted
)
3821 for (j
= base
; j
< i
; j
++)
3823 asection
*sec
= ovly_sections
[2 * j
+ 1];
3825 && fprintf (script
, " %s%c%s (%s)\n",
3826 (sec
->owner
->my_archive
!= NULL
3827 ? sec
->owner
->my_archive
->filename
: ""),
3828 info
->path_separator
,
3829 sec
->owner
->filename
,
3833 sec
= ovly_sections
[2 * j
];
3834 if (sec
->segment_mark
)
3836 struct call_info
*call
= find_pasted_call (sec
);
3837 while (call
!= NULL
)
3839 struct function_info
*call_fun
= call
->fun
;
3840 sec
= call_fun
->rodata
;
3842 && fprintf (script
, " %s%c%s (%s)\n",
3843 (sec
->owner
->my_archive
!= NULL
3844 ? sec
->owner
->my_archive
->filename
: ""),
3845 info
->path_separator
,
3846 sec
->owner
->filename
,
3849 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3850 if (call
->is_pasted
)
3856 if (fprintf (script
, " }\n") <= 0)
3859 while (dummy_caller
.call_list
!= NULL
)
3861 struct call_info
*call
= dummy_caller
.call_list
;
3862 dummy_caller
.call_list
= call
->next
;
3868 free (ovly_sections
);
3870 if (fprintf (script
, " }\n}\nINSERT AFTER .text;\n") <= 0)
3872 if (fclose (script
) != 0)
3875 if (htab
->auto_overlay
& AUTO_RELINK
)
3876 htab
->spu_elf_relink ();
3881 bfd_set_error (bfd_error_system_call
);
3883 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
3887 /* Provide an estimate of total stack required. */
3890 spu_elf_stack_analysis (struct bfd_link_info
*info
, int emit_stack_syms
)
3892 struct _sum_stack_param sum_stack_param
;
3894 if (!discover_functions (info
))
3897 if (!build_call_tree (info
))
3900 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
3901 info
->callbacks
->minfo (_("\nStack size for functions. "
3902 "Annotations: '*' max stack, 't' tail call\n"));
3904 sum_stack_param
.emit_stack_syms
= emit_stack_syms
;
3905 sum_stack_param
.overall_stack
= 0;
3906 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3909 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
3910 (bfd_vma
) sum_stack_param
.overall_stack
);
3914 /* Perform a final link. */
3917 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
3919 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3921 if (htab
->auto_overlay
)
3922 spu_elf_auto_overlay (info
, htab
->spu_elf_load_ovl_mgr
);
3924 if (htab
->stack_analysis
3925 && !spu_elf_stack_analysis (info
, htab
->emit_stack_syms
))
3926 info
->callbacks
->einfo ("%X%P: stack analysis error: %E\n");
3928 return bfd_elf_final_link (output_bfd
, info
);
3931 /* Called when not normally emitting relocs, ie. !info->relocatable
3932 and !info->emitrelocations. Returns a count of special relocs
3933 that need to be emitted. */
3936 spu_elf_count_relocs (struct bfd_link_info
*info
, asection
*sec
)
3938 Elf_Internal_Rela
*relocs
;
3939 unsigned int count
= 0;
3941 relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
3945 Elf_Internal_Rela
*rel
;
3946 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
3948 for (rel
= relocs
; rel
< relend
; rel
++)
3950 int r_type
= ELF32_R_TYPE (rel
->r_info
);
3951 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
3955 if (elf_section_data (sec
)->relocs
!= relocs
)
3962 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
3965 spu_elf_relocate_section (bfd
*output_bfd
,
3966 struct bfd_link_info
*info
,
3968 asection
*input_section
,
3970 Elf_Internal_Rela
*relocs
,
3971 Elf_Internal_Sym
*local_syms
,
3972 asection
**local_sections
)
3974 Elf_Internal_Shdr
*symtab_hdr
;
3975 struct elf_link_hash_entry
**sym_hashes
;
3976 Elf_Internal_Rela
*rel
, *relend
;
3977 struct spu_link_hash_table
*htab
;
3978 asection
*ea
= bfd_get_section_by_name (output_bfd
, "._ea");
3980 bfd_boolean emit_these_relocs
= FALSE
;
3981 bfd_boolean is_ea_sym
;
3984 htab
= spu_hash_table (info
);
3985 stubs
= (htab
->stub_sec
!= NULL
3986 && maybe_needs_stubs (input_section
, output_bfd
));
3987 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
3988 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
3991 relend
= relocs
+ input_section
->reloc_count
;
3992 for (; rel
< relend
; rel
++)
3995 reloc_howto_type
*howto
;
3996 unsigned int r_symndx
;
3997 Elf_Internal_Sym
*sym
;
3999 struct elf_link_hash_entry
*h
;
4000 const char *sym_name
;
4003 bfd_reloc_status_type r
;
4004 bfd_boolean unresolved_reloc
;
4006 enum _stub_type stub_type
;
4008 r_symndx
= ELF32_R_SYM (rel
->r_info
);
4009 r_type
= ELF32_R_TYPE (rel
->r_info
);
4010 howto
= elf_howto_table
+ r_type
;
4011 unresolved_reloc
= FALSE
;
4016 if (r_symndx
< symtab_hdr
->sh_info
)
4018 sym
= local_syms
+ r_symndx
;
4019 sec
= local_sections
[r_symndx
];
4020 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
4021 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4025 if (sym_hashes
== NULL
)
4028 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
4030 while (h
->root
.type
== bfd_link_hash_indirect
4031 || h
->root
.type
== bfd_link_hash_warning
)
4032 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
4035 if (h
->root
.type
== bfd_link_hash_defined
4036 || h
->root
.type
== bfd_link_hash_defweak
)
4038 sec
= h
->root
.u
.def
.section
;
4040 || sec
->output_section
== NULL
)
4041 /* Set a flag that will be cleared later if we find a
4042 relocation value for this symbol. output_section
4043 is typically NULL for symbols satisfied by a shared
4045 unresolved_reloc
= TRUE
;
4047 relocation
= (h
->root
.u
.def
.value
4048 + sec
->output_section
->vma
4049 + sec
->output_offset
);
4051 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4053 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4054 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4056 else if (!info
->relocatable
4057 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4060 err
= (info
->unresolved_syms_in_objects
== RM_GENERATE_ERROR
4061 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
);
4062 if (!info
->callbacks
->undefined_symbol (info
,
4063 h
->root
.root
.string
,
4066 rel
->r_offset
, err
))
4070 sym_name
= h
->root
.root
.string
;
4073 if (sec
!= NULL
&& elf_discarded_section (sec
))
4075 /* For relocs against symbols from removed linkonce sections,
4076 or sections discarded by a linker script, we just want the
4077 section contents zeroed. Avoid any special processing. */
4078 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
4084 if (info
->relocatable
)
4087 is_ea_sym
= (ea
!= NULL
4089 && sec
->output_section
== ea
);
4091 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4095 /* ._ea is a special section that isn't allocated in SPU
4096 memory, but rather occupies space in PPU memory as
4097 part of an embedded ELF image. If this reloc is
4098 against a symbol defined in ._ea, then transform the
4099 reloc into an equivalent one without a symbol
4100 relative to the start of the ELF image. */
4101 rel
->r_addend
+= (relocation
4103 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4104 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4106 emit_these_relocs
= TRUE
;
4111 unresolved_reloc
= TRUE
;
4113 if (unresolved_reloc
)
4115 (*_bfd_error_handler
)
4116 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4118 bfd_get_section_name (input_bfd
, input_section
),
4119 (long) rel
->r_offset
,
4125 /* If this symbol is in an overlay area, we may need to relocate
4126 to the overlay stub. */
4127 addend
= rel
->r_addend
;
4129 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4130 contents
, info
)) != no_stub
)
4132 unsigned int ovl
= 0;
4133 struct got_entry
*g
, **head
;
4135 if (stub_type
!= nonovl_stub
)
4136 ovl
= (spu_elf_section_data (input_section
->output_section
)
4140 head
= &h
->got
.glist
;
4142 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4144 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4145 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4150 relocation
= g
->stub_addr
;
4154 r
= _bfd_final_link_relocate (howto
,
4158 rel
->r_offset
, relocation
, addend
);
4160 if (r
!= bfd_reloc_ok
)
4162 const char *msg
= (const char *) 0;
4166 case bfd_reloc_overflow
:
4167 if (!((*info
->callbacks
->reloc_overflow
)
4168 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
4169 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
4173 case bfd_reloc_undefined
:
4174 if (!((*info
->callbacks
->undefined_symbol
)
4175 (info
, sym_name
, input_bfd
, input_section
,
4176 rel
->r_offset
, TRUE
)))
4180 case bfd_reloc_outofrange
:
4181 msg
= _("internal error: out of range error");
4184 case bfd_reloc_notsupported
:
4185 msg
= _("internal error: unsupported relocation error");
4188 case bfd_reloc_dangerous
:
4189 msg
= _("internal error: dangerous error");
4193 msg
= _("internal error: unknown error");
4198 if (!((*info
->callbacks
->warning
)
4199 (info
, msg
, sym_name
, input_bfd
, input_section
,
4208 && emit_these_relocs
4209 && !info
->emitrelocations
)
4211 Elf_Internal_Rela
*wrel
;
4212 Elf_Internal_Shdr
*rel_hdr
;
4214 wrel
= rel
= relocs
;
4215 relend
= relocs
+ input_section
->reloc_count
;
4216 for (; rel
< relend
; rel
++)
4220 r_type
= ELF32_R_TYPE (rel
->r_info
);
4221 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4224 input_section
->reloc_count
= wrel
- relocs
;
4225 /* Backflips for _bfd_elf_link_output_relocs. */
4226 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
4227 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
4234 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4237 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
4238 const char *sym_name ATTRIBUTE_UNUSED
,
4239 Elf_Internal_Sym
*sym
,
4240 asection
*sym_sec ATTRIBUTE_UNUSED
,
4241 struct elf_link_hash_entry
*h
)
4243 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4245 if (!info
->relocatable
4246 && htab
->stub_sec
!= NULL
4248 && (h
->root
.type
== bfd_link_hash_defined
4249 || h
->root
.type
== bfd_link_hash_defweak
)
4251 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
4253 struct got_entry
*g
;
4255 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
4256 if (g
->addend
== 0 && g
->ovl
== 0)
4258 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
4259 (htab
->stub_sec
[0]->output_section
->owner
,
4260 htab
->stub_sec
[0]->output_section
));
4261 sym
->st_value
= g
->stub_addr
;
/* Non-zero when "plugin" (embedded ET_DYN) output has been requested;
   consulted by spu_elf_post_process_headers.  */
static int spu_plugin = 0;

/* Record whether plugin-flavoured output is wanted.  VAL is treated as
   a boolean flag.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
4277 /* Set ELF header e_type for plugins. */
4280 spu_elf_post_process_headers (bfd
*abfd
,
4281 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
4285 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
4287 i_ehdrp
->e_type
= ET_DYN
;
4291 /* We may add an extra PT_LOAD segment for .toe. We also need extra
4292 segments for overlays. */
4295 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4302 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4303 extra
= htab
->num_overlays
;
4309 sec
= bfd_get_section_by_name (abfd
, ".toe");
4310 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
4316 /* Remove .toe section from other PT_LOAD segments and put it in
4317 a segment of its own. Put overlays in separate segments too. */
4320 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
4323 struct elf_segment_map
*m
;
4329 toe
= bfd_get_section_by_name (abfd
, ".toe");
4330 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4331 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
4332 for (i
= 0; i
< m
->count
; i
++)
4333 if ((s
= m
->sections
[i
]) == toe
4334 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
4336 struct elf_segment_map
*m2
;
4339 if (i
+ 1 < m
->count
)
4341 amt
= sizeof (struct elf_segment_map
);
4342 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
4343 m2
= bfd_zalloc (abfd
, amt
);
4346 m2
->count
= m
->count
- (i
+ 1);
4347 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
4348 m2
->count
* sizeof (m
->sections
[0]));
4349 m2
->p_type
= PT_LOAD
;
4357 amt
= sizeof (struct elf_segment_map
);
4358 m2
= bfd_zalloc (abfd
, amt
);
4361 m2
->p_type
= PT_LOAD
;
4363 m2
->sections
[0] = s
;
4373 /* Tweak the section type of .note.spu_name. */
4376 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
4377 Elf_Internal_Shdr
*hdr
,
4380 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
4381 hdr
->sh_type
= SHT_NOTE
;
4385 /* Tweak phdrs before writing them out. */
4388 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4390 const struct elf_backend_data
*bed
;
4391 struct elf_obj_tdata
*tdata
;
4392 Elf_Internal_Phdr
*phdr
, *last
;
4393 struct spu_link_hash_table
*htab
;
4400 bed
= get_elf_backend_data (abfd
);
4401 tdata
= elf_tdata (abfd
);
4403 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
4404 htab
= spu_hash_table (info
);
4405 if (htab
->num_overlays
!= 0)
4407 struct elf_segment_map
*m
;
4410 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
4412 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
4414 /* Mark this as an overlay header. */
4415 phdr
[i
].p_flags
|= PF_OVERLAY
;
4417 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0)
4419 bfd_byte
*p
= htab
->ovtab
->contents
;
4420 unsigned int off
= o
* 16 + 8;
4422 /* Write file_off into _ovly_table. */
4423 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
4428 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
4429 of 16. This should always be possible when using the standard
4430 linker scripts, but don't create overlapping segments if
4431 someone is playing games with linker scripts. */
4433 for (i
= count
; i
-- != 0; )
4434 if (phdr
[i
].p_type
== PT_LOAD
)
4438 adjust
= -phdr
[i
].p_filesz
& 15;
4441 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
4444 adjust
= -phdr
[i
].p_memsz
& 15;
4447 && phdr
[i
].p_filesz
!= 0
4448 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
4449 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
4452 if (phdr
[i
].p_filesz
!= 0)
4456 if (i
== (unsigned int) -1)
4457 for (i
= count
; i
-- != 0; )
4458 if (phdr
[i
].p_type
== PT_LOAD
)
4462 adjust
= -phdr
[i
].p_filesz
& 15;
4463 phdr
[i
].p_filesz
+= adjust
;
4465 adjust
= -phdr
[i
].p_memsz
& 15;
4466 phdr
[i
].p_memsz
+= adjust
;
4472 #define TARGET_BIG_SYM bfd_elf32_spu_vec
4473 #define TARGET_BIG_NAME "elf32-spu"
4474 #define ELF_ARCH bfd_arch_spu
4475 #define ELF_MACHINE_CODE EM_SPU
4476 /* This matches the alignment need for DMA. */
4477 #define ELF_MAXPAGESIZE 0x80
4478 #define elf_backend_rela_normal 1
4479 #define elf_backend_can_gc_sections 1
4481 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
4482 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
4483 #define elf_info_to_howto spu_elf_info_to_howto
4484 #define elf_backend_count_relocs spu_elf_count_relocs
4485 #define elf_backend_relocate_section spu_elf_relocate_section
4486 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
4487 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
4488 #define elf_backend_object_p spu_elf_object_p
4489 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
4490 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
4492 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
4493 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
4494 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
4495 #define elf_backend_post_process_headers spu_elf_post_process_headers
4496 #define elf_backend_fake_sections spu_elf_fake_sections
4497 #define elf_backend_special_sections spu_elf_special_sections
4498 #define bfd_elf32_bfd_final_link spu_elf_final_link
4500 #include "elf32-target.h"