1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
93 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
94 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
95 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
99 static enum elf_spu_reloc_type
100 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
106 case BFD_RELOC_SPU_IMM10W
:
108 case BFD_RELOC_SPU_IMM16W
:
110 case BFD_RELOC_SPU_LO16
:
111 return R_SPU_ADDR16_LO
;
112 case BFD_RELOC_SPU_HI16
:
113 return R_SPU_ADDR16_HI
;
114 case BFD_RELOC_SPU_IMM18
:
116 case BFD_RELOC_SPU_PCREL16
:
118 case BFD_RELOC_SPU_IMM7
:
120 case BFD_RELOC_SPU_IMM8
:
122 case BFD_RELOC_SPU_PCREL9a
:
124 case BFD_RELOC_SPU_PCREL9b
:
126 case BFD_RELOC_SPU_IMM10
:
127 return R_SPU_ADDR10I
;
128 case BFD_RELOC_SPU_IMM16
:
129 return R_SPU_ADDR16I
;
132 case BFD_RELOC_32_PCREL
:
134 case BFD_RELOC_SPU_PPU32
:
136 case BFD_RELOC_SPU_PPU64
:
142 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
144 Elf_Internal_Rela
*dst
)
146 enum elf_spu_reloc_type r_type
;
148 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
149 BFD_ASSERT (r_type
< R_SPU_max
);
150 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
153 static reloc_howto_type
*
154 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
155 bfd_reloc_code_real_type code
)
157 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
159 if (r_type
== R_SPU_NONE
)
162 return elf_howto_table
+ r_type
;
165 static reloc_howto_type
*
166 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
171 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
172 if (elf_howto_table
[i
].name
!= NULL
173 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
174 return &elf_howto_table
[i
];
179 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
181 static bfd_reloc_status_type
182 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
183 void *data
, asection
*input_section
,
184 bfd
*output_bfd
, char **error_message
)
186 bfd_size_type octets
;
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
193 if (output_bfd
!= NULL
)
194 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
195 input_section
, output_bfd
, error_message
);
197 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
198 return bfd_reloc_outofrange
;
199 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
201 /* Get symbol value. */
203 if (!bfd_is_com_section (symbol
->section
))
205 if (symbol
->section
->output_section
)
206 val
+= symbol
->section
->output_section
->vma
;
208 val
+= reloc_entry
->addend
;
210 /* Make it pc-relative. */
211 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
214 if (val
+ 256 >= 512)
215 return bfd_reloc_overflow
;
217 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
222 insn
&= ~reloc_entry
->howto
->dst_mask
;
223 insn
|= val
& reloc_entry
->howto
->dst_mask
;
224 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
229 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
231 if (!sec
->used_by_bfd
)
233 struct _spu_elf_section_data
*sdata
;
235 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
238 sec
->used_by_bfd
= sdata
;
241 return _bfd_elf_new_section_hook (abfd
, sec
);
244 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
245 strip --strip-unneeded will not remove them. */
248 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
250 if (sym
->name
!= NULL
251 && sym
->section
!= bfd_abs_section_ptr
252 && strncmp (sym
->name
, "_EAR_", 5) == 0)
253 sym
->flags
|= BSF_KEEP
;
/* NOTE(review): this region was garbled by extraction and several lines
   appear to be missing -- fields referenced later in the file
   (htab->ovl_sec, htab->stub_sec, htab->ovtab, htab->toe) and most of
   struct got_entry's members (addend, ovl, stub_addr are used below) do
   not appear here.  Recover the full text from upstream bfd/elf32-spu.c
   before editing this declaration.  */
256 /* SPU ELF linker hash table. */
258 struct spu_link_hash_table
260 struct elf_link_hash_table elf
;
262 /* Shortcuts to overlay sections. */
267 /* Count of stubs in each overlay section. */
268 unsigned int *stub_count
;
270 /* The stub section for each overlay section. */
273 struct elf_link_hash_entry
*ovly_load
;
274 struct elf_link_hash_entry
*ovly_return
;
275 unsigned long ovly_load_r_symndx
;
277 /* Number of overlay buffers. */
278 unsigned int num_buf
;
280 /* Total number of overlays. */
281 unsigned int num_overlays
;
283 /* How much memory we have. */
284 unsigned int local_store
;
285 /* Local store --auto-overlay should reserve for non-overlay
286 functions and data. */
287 unsigned int overlay_fixed
;
288 /* Local store --auto-overlay should reserve for stack and heap. */
289 unsigned int reserved
;
290 /* Count of overlay stubs needed in non-overlay area. */
291 unsigned int non_ovly_stub
;
293 /* Stash various callbacks for --auto-overlay. */
294 void (*spu_elf_load_ovl_mgr
) (void);
295 FILE *(*spu_elf_open_overlay_script
) (void);
296 void (*spu_elf_relink
) (void);
298 /* Bit 0 set if --auto-overlay.
299 Bit 1 set if --auto-relink.
300 Bit 2 set if --overlay-rodata. */
301 unsigned int auto_overlay
: 3;
302 #define AUTO_OVERLAY 1
303 #define AUTO_RELINK 2
304 #define OVERLAY_RODATA 4
306 /* Set if we should emit symbols for stubs. */
307 unsigned int emit_stub_syms
:1;
309 /* Set if we want stubs on calls out of overlay regions to
310 non-overlay regions. */
311 unsigned int non_overlay_stubs
: 1;
314 unsigned int stub_err
: 1;
316 /* Set if stack size analysis should be done. */
317 unsigned int stack_analysis
: 1;
319 /* Set if __stack_* syms will be emitted. */
320 unsigned int emit_stack_syms
: 1;
323 /* Hijack the generic got fields for overlay stub accounting. */
327 struct got_entry
*next
;
333 #define spu_hash_table(p) \
334 ((struct spu_link_hash_table *) ((p)->hash))
336 /* Create a spu ELF linker hash table. */
338 static struct bfd_link_hash_table
*
339 spu_elf_link_hash_table_create (bfd
*abfd
)
341 struct spu_link_hash_table
*htab
;
343 htab
= bfd_malloc (sizeof (*htab
));
347 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
348 _bfd_elf_link_hash_newfunc
,
349 sizeof (struct elf_link_hash_entry
)))
355 memset (&htab
->ovtab
, 0,
356 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
358 htab
->elf
.init_got_refcount
.refcount
= 0;
359 htab
->elf
.init_got_refcount
.glist
= NULL
;
360 htab
->elf
.init_got_offset
.offset
= 0;
361 htab
->elf
.init_got_offset
.glist
= NULL
;
362 return &htab
->elf
.root
;
365 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
366 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
367 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
370 get_sym_h (struct elf_link_hash_entry
**hp
,
371 Elf_Internal_Sym
**symp
,
373 Elf_Internal_Sym
**locsymsp
,
374 unsigned long r_symndx
,
377 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
379 if (r_symndx
>= symtab_hdr
->sh_info
)
381 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
382 struct elf_link_hash_entry
*h
;
384 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
385 while (h
->root
.type
== bfd_link_hash_indirect
386 || h
->root
.type
== bfd_link_hash_warning
)
387 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
397 asection
*symsec
= NULL
;
398 if (h
->root
.type
== bfd_link_hash_defined
399 || h
->root
.type
== bfd_link_hash_defweak
)
400 symsec
= h
->root
.u
.def
.section
;
406 Elf_Internal_Sym
*sym
;
407 Elf_Internal_Sym
*locsyms
= *locsymsp
;
411 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
414 size_t symcount
= symtab_hdr
->sh_info
;
416 /* If we are reading symbols into the contents, then
417 read the global syms too. This is done to cache
418 syms for later stack analysis. */
419 if ((unsigned char **) locsymsp
== &symtab_hdr
->contents
)
420 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
421 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
428 sym
= locsyms
+ r_symndx
;
437 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
443 /* Create the note section if not already present. This is done early so
444 that the linker maps the sections to the right place in the output. */
447 spu_elf_create_sections (struct bfd_link_info
*info
,
452 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
454 /* Stash some options away where we can get at them later. */
455 htab
->stack_analysis
= stack_analysis
;
456 htab
->emit_stack_syms
= emit_stack_syms
;
458 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
459 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
464 /* Make SPU_PTNOTE_SPUNAME section. */
471 ibfd
= info
->input_bfds
;
472 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
473 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
475 || !bfd_set_section_alignment (ibfd
, s
, 4))
478 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
479 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
480 size
+= (name_len
+ 3) & -4;
482 if (!bfd_set_section_size (ibfd
, s
, size
))
485 data
= bfd_zalloc (ibfd
, size
);
489 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
490 bfd_put_32 (ibfd
, name_len
, data
+ 4);
491 bfd_put_32 (ibfd
, 1, data
+ 8);
492 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
493 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
494 bfd_get_filename (info
->output_bfd
), name_len
);
501 /* qsort predicate to sort sections by vma. */
504 sort_sections (const void *a
, const void *b
)
506 const asection
*const *s1
= a
;
507 const asection
*const *s2
= b
;
508 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
511 return delta
< 0 ? -1 : 1;
513 return (*s1
)->index
- (*s2
)->index
;
516 /* Identify overlays in the output bfd, and number them. */
519 spu_elf_find_overlays (struct bfd_link_info
*info
)
521 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
522 asection
**alloc_sec
;
523 unsigned int i
, n
, ovl_index
, num_buf
;
527 if (info
->output_bfd
->section_count
< 2)
531 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
532 if (alloc_sec
== NULL
)
535 /* Pick out all the alloced sections. */
536 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
537 if ((s
->flags
& SEC_ALLOC
) != 0
538 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
548 /* Sort them by vma. */
549 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
551 /* Look for overlapping vmas. Any with overlap must be overlays.
552 Count them. Also count the number of overlay regions. */
553 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
554 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
557 if (s
->vma
< ovl_end
)
559 asection
*s0
= alloc_sec
[i
- 1];
561 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
563 alloc_sec
[ovl_index
] = s0
;
564 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
565 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= ++num_buf
;
567 alloc_sec
[ovl_index
] = s
;
568 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
569 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
570 if (s0
->vma
!= s
->vma
)
572 info
->callbacks
->einfo (_("%X%P: overlay sections %A and %A "
573 "do not start at the same address.\n"),
577 if (ovl_end
< s
->vma
+ s
->size
)
578 ovl_end
= s
->vma
+ s
->size
;
581 ovl_end
= s
->vma
+ s
->size
;
584 htab
->num_overlays
= ovl_index
;
585 htab
->num_buf
= num_buf
;
586 htab
->ovl_sec
= alloc_sec
;
587 htab
->ovly_load
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
588 FALSE
, FALSE
, FALSE
);
589 htab
->ovly_return
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return",
590 FALSE
, FALSE
, FALSE
);
591 return ovl_index
!= 0;
/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.  */
#ifndef OVL_STUB_SIZE
/* Default to faster.  */
#define OVL_STUB_SIZE 16
/* #define OVL_STUB_SIZE 8 */
#endif

/* SPU instruction opcodes used when emitting stubs.  */
#define BRSL	0x33000000
#define BR	0x32000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
607 /* Return true for all relative and absolute branch instructions.
615 brhnz 00100011 0.. */
618 is_branch (const unsigned char *insn
)
620 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
623 /* Return true for all indirect branch instructions.
631 bihnz 00100101 011 */
634 is_indirect_branch (const unsigned char *insn
)
636 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
639 /* Return true for branch hint instructions.
644 is_hint (const unsigned char *insn
)
646 return (insn
[0] & 0xfc) == 0x10;
649 /* True if INPUT_SECTION might need overlay stubs. */
652 maybe_needs_stubs (asection
*input_section
, bfd
*output_bfd
)
654 /* No stubs for debug sections and suchlike. */
655 if ((input_section
->flags
& SEC_ALLOC
) == 0)
658 /* No stubs for link-once sections that will be discarded. */
659 if (input_section
->output_section
== NULL
660 || input_section
->output_section
->owner
!= output_bfd
)
663 /* Don't create stubs for .eh_frame references. */
664 if (strcmp (input_section
->name
, ".eh_frame") == 0)
678 /* Return non-zero if this reloc symbol should go via an overlay stub.
679 Return 2 if the stub must be in non-overlay area. */
681 static enum _stub_type
682 needs_ovl_stub (struct elf_link_hash_entry
*h
,
683 Elf_Internal_Sym
*sym
,
685 asection
*input_section
,
686 Elf_Internal_Rela
*irela
,
688 struct bfd_link_info
*info
)
690 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
691 enum elf_spu_reloc_type r_type
;
692 unsigned int sym_type
;
694 enum _stub_type ret
= no_stub
;
697 || sym_sec
->output_section
== NULL
698 || sym_sec
->output_section
->owner
!= info
->output_bfd
699 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
704 /* Ensure no stubs for user supplied overlay manager syms. */
705 if (h
== htab
->ovly_load
|| h
== htab
->ovly_return
)
708 /* setjmp always goes via an overlay stub, because then the return
709 and hence the longjmp goes via __ovly_return. That magically
710 makes setjmp/longjmp between overlays work. */
711 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
712 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
716 /* Usually, symbols in non-overlay sections don't need stubs. */
717 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
718 && !htab
->non_overlay_stubs
)
724 sym_type
= ELF_ST_TYPE (sym
->st_info
);
726 r_type
= ELF32_R_TYPE (irela
->r_info
);
728 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
732 if (contents
== NULL
)
735 if (!bfd_get_section_contents (input_section
->owner
,
742 contents
+= irela
->r_offset
;
744 if (is_branch (contents
) || is_hint (contents
))
747 if ((contents
[0] & 0xfd) == 0x31
748 && sym_type
!= STT_FUNC
751 /* It's common for people to write assembly and forget
752 to give function symbols the right type. Handle
753 calls to such symbols, but warn so that (hopefully)
754 people will fix their code. We need the symbol
755 type to be correct to distinguish function pointer
756 initialisation from other pointer initialisations. */
757 const char *sym_name
;
760 sym_name
= h
->root
.root
.string
;
763 Elf_Internal_Shdr
*symtab_hdr
;
764 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
765 sym_name
= bfd_elf_sym_name (input_section
->owner
,
770 (*_bfd_error_handler
) (_("warning: call to non-function"
771 " symbol %s defined in %B"),
772 sym_sec
->owner
, sym_name
);
778 if (sym_type
!= STT_FUNC
780 && (sym_sec
->flags
& SEC_CODE
) == 0)
783 /* A reference from some other section to a symbol in an overlay
784 section needs a stub. */
785 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
786 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
789 /* If this insn isn't a branch then we are possibly taking the
790 address of a function and passing it out somehow. */
791 return !branch
&& sym_type
== STT_FUNC
? nonovl_stub
: ret
;
795 count_stub (struct spu_link_hash_table
*htab
,
798 enum _stub_type stub_type
,
799 struct elf_link_hash_entry
*h
,
800 const Elf_Internal_Rela
*irela
)
802 unsigned int ovl
= 0;
803 struct got_entry
*g
, **head
;
806 /* If this instruction is a branch or call, we need a stub
807 for it. One stub per function per overlay.
808 If it isn't a branch, then we are taking the address of
809 this function so need a stub in the non-overlay area
810 for it. One stub per function. */
811 if (stub_type
!= nonovl_stub
)
812 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
815 head
= &h
->got
.glist
;
818 if (elf_local_got_ents (ibfd
) == NULL
)
820 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
821 * sizeof (*elf_local_got_ents (ibfd
)));
822 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
823 if (elf_local_got_ents (ibfd
) == NULL
)
826 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
831 addend
= irela
->r_addend
;
835 struct got_entry
*gnext
;
837 for (g
= *head
; g
!= NULL
; g
= g
->next
)
838 if (g
->addend
== addend
&& g
->ovl
== 0)
843 /* Need a new non-overlay area stub. Zap other stubs. */
844 for (g
= *head
; g
!= NULL
; g
= gnext
)
847 if (g
->addend
== addend
)
849 htab
->stub_count
[g
->ovl
] -= 1;
857 for (g
= *head
; g
!= NULL
; g
= g
->next
)
858 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
864 g
= bfd_malloc (sizeof *g
);
869 g
->stub_addr
= (bfd_vma
) -1;
873 htab
->stub_count
[ovl
] += 1;
879 /* Two instruction overlay stubs look like:
882 .word target_ovl_and_address
884 ovl_and_address is a word with the overlay number in the top 14 bits
885 and local store address in the bottom 18 bits.
887 Four instruction overlay stubs look like:
891 ila $79,target_address
895 build_stub (struct spu_link_hash_table
*htab
,
898 enum _stub_type stub_type
,
899 struct elf_link_hash_entry
*h
,
900 const Elf_Internal_Rela
*irela
,
905 struct got_entry
*g
, **head
;
907 bfd_vma addend
, val
, from
, to
;
910 if (stub_type
!= nonovl_stub
)
911 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
914 head
= &h
->got
.glist
;
916 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
920 addend
= irela
->r_addend
;
922 for (g
= *head
; g
!= NULL
; g
= g
->next
)
923 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
928 if (g
->ovl
== 0 && ovl
!= 0)
931 if (g
->stub_addr
!= (bfd_vma
) -1)
934 sec
= htab
->stub_sec
[ovl
];
935 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
936 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
938 to
= (htab
->ovly_load
->root
.u
.def
.value
939 + htab
->ovly_load
->root
.u
.def
.section
->output_offset
940 + htab
->ovly_load
->root
.u
.def
.section
->output_section
->vma
);
942 if (OVL_STUB_SIZE
== 16)
944 if (((dest
| to
| from
) & 3) != 0
945 || val
+ 0x20000 >= 0x40000)
950 ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
952 if (OVL_STUB_SIZE
== 16)
954 bfd_put_32 (sec
->owner
, ILA
+ ((ovl
<< 7) & 0x01ffff80) + 78,
955 sec
->contents
+ sec
->size
);
956 bfd_put_32 (sec
->owner
, LNOP
,
957 sec
->contents
+ sec
->size
+ 4);
958 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
959 sec
->contents
+ sec
->size
+ 8);
960 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
961 sec
->contents
+ sec
->size
+ 12);
963 else if (OVL_STUB_SIZE
== 8)
965 bfd_put_32 (sec
->owner
, BRSL
+ ((val
<< 5) & 0x007fff80) + 75,
966 sec
->contents
+ sec
->size
);
968 val
= (dest
& 0x3ffff) | (ovl
<< 14);
969 bfd_put_32 (sec
->owner
, val
,
970 sec
->contents
+ sec
->size
+ 4);
974 sec
->size
+= OVL_STUB_SIZE
;
976 if (htab
->emit_stub_syms
)
982 len
= 8 + sizeof (".ovl_call.") - 1;
984 len
+= strlen (h
->root
.root
.string
);
989 add
= (int) irela
->r_addend
& 0xffffffff;
992 name
= bfd_malloc (len
);
996 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
998 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1000 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1001 dest_sec
->id
& 0xffffffff,
1002 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1004 sprintf (name
+ len
- 9, "+%x", add
);
1006 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1010 if (h
->root
.type
== bfd_link_hash_new
)
1012 h
->root
.type
= bfd_link_hash_defined
;
1013 h
->root
.u
.def
.section
= sec
;
1014 h
->root
.u
.def
.value
= sec
->size
- OVL_STUB_SIZE
;
1015 h
->size
= OVL_STUB_SIZE
;
1019 h
->ref_regular_nonweak
= 1;
1020 h
->forced_local
= 1;
1028 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1032 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1034 /* Symbols starting with _SPUEAR_ need a stub because they may be
1035 invoked by the PPU. */
1036 struct bfd_link_info
*info
= inf
;
1037 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1040 if ((h
->root
.type
== bfd_link_hash_defined
1041 || h
->root
.type
== bfd_link_hash_defweak
)
1043 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1044 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1045 && sym_sec
->output_section
!= NULL
1046 && sym_sec
->output_section
->owner
== info
->output_bfd
1047 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1048 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1049 || htab
->non_overlay_stubs
))
1051 count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1058 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1060 /* Symbols starting with _SPUEAR_ need a stub because they may be
1061 invoked by the PPU. */
1062 struct bfd_link_info
*info
= inf
;
1063 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1066 if ((h
->root
.type
== bfd_link_hash_defined
1067 || h
->root
.type
== bfd_link_hash_defweak
)
1069 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1070 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1071 && sym_sec
->output_section
!= NULL
1072 && sym_sec
->output_section
->owner
== info
->output_bfd
1073 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1074 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1075 || htab
->non_overlay_stubs
))
1077 build_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1078 h
->root
.u
.def
.value
, sym_sec
);
1084 /* Size or build stubs. */
1087 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1089 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1092 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1094 extern const bfd_target bfd_elf32_spu_vec
;
1095 Elf_Internal_Shdr
*symtab_hdr
;
1097 Elf_Internal_Sym
*local_syms
= NULL
;
1100 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1103 /* We'll need the symbol table in a second. */
1104 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1105 if (symtab_hdr
->sh_info
== 0)
1108 /* Arrange to read and keep global syms for later stack analysis. */
1109 psyms
= &local_syms
;
1110 if (htab
->stack_analysis
)
1111 psyms
= &symtab_hdr
->contents
;
1113 /* Walk over each section attached to the input bfd. */
1114 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1116 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1118 /* If there aren't any relocs, then there's nothing more to do. */
1119 if ((isec
->flags
& SEC_RELOC
) == 0
1120 || isec
->reloc_count
== 0)
1123 if (!maybe_needs_stubs (isec
, info
->output_bfd
))
1126 /* Get the relocs. */
1127 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1129 if (internal_relocs
== NULL
)
1130 goto error_ret_free_local
;
1132 /* Now examine each relocation. */
1133 irela
= internal_relocs
;
1134 irelaend
= irela
+ isec
->reloc_count
;
1135 for (; irela
< irelaend
; irela
++)
1137 enum elf_spu_reloc_type r_type
;
1138 unsigned int r_indx
;
1140 Elf_Internal_Sym
*sym
;
1141 struct elf_link_hash_entry
*h
;
1142 enum _stub_type stub_type
;
1144 r_type
= ELF32_R_TYPE (irela
->r_info
);
1145 r_indx
= ELF32_R_SYM (irela
->r_info
);
1147 if (r_type
>= R_SPU_max
)
1149 bfd_set_error (bfd_error_bad_value
);
1150 error_ret_free_internal
:
1151 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1152 free (internal_relocs
);
1153 error_ret_free_local
:
1154 if (local_syms
!= NULL
1155 && (symtab_hdr
->contents
1156 != (unsigned char *) local_syms
))
1161 /* Determine the reloc target section. */
1162 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, ibfd
))
1163 goto error_ret_free_internal
;
1165 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1167 if (stub_type
== no_stub
)
1169 else if (stub_type
== stub_error
)
1170 goto error_ret_free_internal
;
1172 if (htab
->stub_count
== NULL
)
1175 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1176 htab
->stub_count
= bfd_zmalloc (amt
);
1177 if (htab
->stub_count
== NULL
)
1178 goto error_ret_free_internal
;
1183 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1184 goto error_ret_free_internal
;
1191 dest
= h
->root
.u
.def
.value
;
1193 dest
= sym
->st_value
;
1194 dest
+= irela
->r_addend
;
1195 if (!build_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
,
1197 goto error_ret_free_internal
;
1201 /* We're done with the internal relocs, free them. */
1202 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1203 free (internal_relocs
);
1206 if (local_syms
!= NULL
1207 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1209 if (!info
->keep_memory
)
1212 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1219 /* Allocate space for overlay call and return stubs. */
1222 spu_elf_size_stubs (struct bfd_link_info
*info
,
1223 void (*place_spu_section
) (asection
*, asection
*,
1225 int non_overlay_stubs
)
1227 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1234 htab
->non_overlay_stubs
= non_overlay_stubs
;
1235 if (!process_stubs (info
, FALSE
))
1238 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1242 if (htab
->stub_count
== NULL
)
1245 ibfd
= info
->input_bfds
;
1246 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1247 htab
->stub_sec
= bfd_zmalloc (amt
);
1248 if (htab
->stub_sec
== NULL
)
1251 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1252 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1253 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1254 htab
->stub_sec
[0] = stub
;
1256 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1258 stub
->size
= htab
->stub_count
[0] * OVL_STUB_SIZE
;
1259 (*place_spu_section
) (stub
, NULL
, ".text");
1261 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1263 asection
*osec
= htab
->ovl_sec
[i
];
1264 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1265 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1266 htab
->stub_sec
[ovl
] = stub
;
1268 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1270 stub
->size
= htab
->stub_count
[ovl
] * OVL_STUB_SIZE
;
1271 (*place_spu_section
) (stub
, osec
, NULL
);
1274 /* htab->ovtab consists of two arrays.
1284 . } _ovly_buf_table[];
1287 flags
= (SEC_ALLOC
| SEC_LOAD
1288 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1289 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1290 if (htab
->ovtab
== NULL
1291 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1294 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1295 (*place_spu_section
) (htab
->ovtab
, NULL
, ".data");
1297 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1298 if (htab
->toe
== NULL
1299 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1301 htab
->toe
->size
= 16;
1302 (*place_spu_section
) (htab
->toe
, NULL
, ".toe");
1307 /* Functions to handle embedded spu_ovl.o object. */
1310 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1316 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1322 struct _ovl_stream
*os
;
1326 os
= (struct _ovl_stream
*) stream
;
1327 max
= (const char *) os
->end
- (const char *) os
->start
;
1329 if ((ufile_ptr
) offset
>= max
)
1333 if (count
> max
- offset
)
1334 count
= max
- offset
;
1336 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1341 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1343 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1350 return *ovl_bfd
!= NULL
;
1353 /* Define an STT_OBJECT symbol. */
1355 static struct elf_link_hash_entry
*
1356 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1358 struct elf_link_hash_entry
*h
;
1360 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1364 if (h
->root
.type
!= bfd_link_hash_defined
1367 h
->root
.type
= bfd_link_hash_defined
;
1368 h
->root
.u
.def
.section
= htab
->ovtab
;
1369 h
->type
= STT_OBJECT
;
1372 h
->ref_regular_nonweak
= 1;
1377 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1378 h
->root
.u
.def
.section
->owner
,
1379 h
->root
.root
.string
);
1380 bfd_set_error (bfd_error_bad_value
);
1387 /* Fill in all stubs and the overlay tables. */
1390 spu_elf_build_stubs (struct bfd_link_info
*info
, int emit_syms
)
1392 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1393 struct elf_link_hash_entry
*h
;
1399 htab
->emit_stub_syms
= emit_syms
;
1400 if (htab
->stub_count
== NULL
)
1403 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1404 if (htab
->stub_sec
[i
]->size
!= 0)
1406 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1407 htab
->stub_sec
[i
]->size
);
1408 if (htab
->stub_sec
[i
]->contents
== NULL
)
1410 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1411 htab
->stub_sec
[i
]->size
= 0;
1414 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load", FALSE
, FALSE
, FALSE
);
1415 htab
->ovly_load
= h
;
1416 BFD_ASSERT (h
!= NULL
1417 && (h
->root
.type
== bfd_link_hash_defined
1418 || h
->root
.type
== bfd_link_hash_defweak
)
1421 s
= h
->root
.u
.def
.section
->output_section
;
1422 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1424 (*_bfd_error_handler
) (_("%s in overlay section"),
1425 h
->root
.root
.string
);
1426 bfd_set_error (bfd_error_bad_value
);
1430 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return", FALSE
, FALSE
, FALSE
);
1431 htab
->ovly_return
= h
;
1433 /* Fill in all the stubs. */
1434 process_stubs (info
, TRUE
);
1436 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1440 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1442 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1444 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1445 bfd_set_error (bfd_error_bad_value
);
1448 htab
->stub_sec
[i
]->rawsize
= 0;
1453 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1454 bfd_set_error (bfd_error_bad_value
);
1458 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1459 if (htab
->ovtab
->contents
== NULL
)
1462 /* Write out _ovly_table. */
1463 p
= htab
->ovtab
->contents
;
1464 /* set low bit of .size to mark non-overlay area as present. */
1466 obfd
= htab
->ovtab
->output_section
->owner
;
1467 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1469 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
1473 unsigned long off
= ovl_index
* 16;
1474 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
1476 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1477 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16, p
+ off
+ 4);
1478 /* file_off written later in spu_elf_modify_program_headers. */
1479 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
1483 h
= define_ovtab_symbol (htab
, "_ovly_table");
1486 h
->root
.u
.def
.value
= 16;
1487 h
->size
= htab
->num_overlays
* 16;
1489 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
1492 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1495 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
1498 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1499 h
->size
= htab
->num_buf
* 4;
1501 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
1504 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1507 h
= define_ovtab_symbol (htab
, "_EAR_");
1510 h
->root
.u
.def
.section
= htab
->toe
;
1511 h
->root
.u
.def
.value
= 0;
1517 /* Check that all loadable section VMAs lie in the range
1518 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
1521 spu_elf_check_vma (struct bfd_link_info
*info
,
1525 unsigned int overlay_fixed
,
1526 unsigned int reserved
,
1527 void (*spu_elf_load_ovl_mgr
) (void),
1528 FILE *(*spu_elf_open_overlay_script
) (void),
1529 void (*spu_elf_relink
) (void))
1531 struct elf_segment_map
*m
;
1533 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1534 bfd
*abfd
= info
->output_bfd
;
1536 if (auto_overlay
& AUTO_OVERLAY
)
1537 htab
->auto_overlay
= auto_overlay
;
1538 htab
->local_store
= hi
+ 1 - lo
;
1539 htab
->overlay_fixed
= overlay_fixed
;
1540 htab
->reserved
= reserved
;
1541 htab
->spu_elf_load_ovl_mgr
= spu_elf_load_ovl_mgr
;
1542 htab
->spu_elf_open_overlay_script
= spu_elf_open_overlay_script
;
1543 htab
->spu_elf_relink
= spu_elf_relink
;
1545 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
1546 if (m
->p_type
== PT_LOAD
)
1547 for (i
= 0; i
< m
->count
; i
++)
1548 if (m
->sections
[i
]->size
!= 0
1549 && (m
->sections
[i
]->vma
< lo
1550 || m
->sections
[i
]->vma
> hi
1551 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
1552 return m
->sections
[i
];
1554 /* No need for overlays if it all fits. */
1555 htab
->auto_overlay
= 0;
1559 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1560 Search for stack adjusting insns, and return the sp delta. */
1563 find_function_stack_adjust (asection
*sec
, bfd_vma offset
)
1568 memset (reg
, 0, sizeof (reg
));
1569 for (unrecog
= 0; offset
+ 4 <= sec
->size
&& unrecog
< 32; offset
+= 4)
1571 unsigned char buf
[4];
1575 /* Assume no relocs on stack adjusing insns. */
1576 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
1579 if (buf
[0] == 0x24 /* stqd */)
1583 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
1584 /* Partly decoded immediate field. */
1585 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
1587 if (buf
[0] == 0x1c /* ai */)
1590 imm
= (imm
^ 0x200) - 0x200;
1591 reg
[rt
] = reg
[ra
] + imm
;
1593 if (rt
== 1 /* sp */)
1600 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
1602 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
1604 reg
[rt
] = reg
[ra
] + reg
[rb
];
1608 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1610 if (buf
[0] >= 0x42 /* ila */)
1611 imm
|= (buf
[0] & 1) << 17;
1616 if (buf
[0] == 0x40 /* il */)
1618 if ((buf
[1] & 0x80) == 0)
1620 imm
= (imm
^ 0x8000) - 0x8000;
1622 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
1628 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
1630 reg
[rt
] |= imm
& 0xffff;
1633 else if (buf
[0] == 0x04 /* ori */)
1636 imm
= (imm
^ 0x200) - 0x200;
1637 reg
[rt
] = reg
[ra
] | imm
;
1640 else if ((buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
1641 || (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */))
1643 /* Used in pic reg load. Say rt is trashed. */
1647 else if (is_branch (buf
) || is_indirect_branch (buf
))
1648 /* If we hit a branch then we must be out of the prologue. */
1657 /* qsort predicate to sort symbols by section and value. */
1659 static Elf_Internal_Sym
*sort_syms_syms
;
1660 static asection
**sort_syms_psecs
;
1663 sort_syms (const void *a
, const void *b
)
1665 Elf_Internal_Sym
*const *s1
= a
;
1666 Elf_Internal_Sym
*const *s2
= b
;
1667 asection
*sec1
,*sec2
;
1668 bfd_signed_vma delta
;
1670 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
1671 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
1674 return sec1
->index
- sec2
->index
;
1676 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
1678 return delta
< 0 ? -1 : 1;
1680 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
1682 return delta
< 0 ? -1 : 1;
1684 return *s1
< *s2
? -1 : 1;
1689 struct function_info
*fun
;
1690 struct call_info
*next
;
1692 unsigned int max_depth
;
1693 unsigned int is_tail
: 1;
1694 unsigned int is_pasted
: 1;
1697 struct function_info
1699 /* List of functions called. Also branches to hot/cold part of
1701 struct call_info
*call_list
;
1702 /* For hot/cold part of function, point to owner. */
1703 struct function_info
*start
;
1704 /* Symbol at start of function. */
1706 Elf_Internal_Sym
*sym
;
1707 struct elf_link_hash_entry
*h
;
1709 /* Function section. */
1712 /* Where last called from, and number of sections called from. */
1713 asection
*last_caller
;
1714 unsigned int call_count
;
1715 /* Address range of (this part of) function. */
1719 /* Distance from root of call tree. Tail and hot/cold branches
1720 count as one deeper. We aren't counting stack frames here. */
1722 /* Set if global symbol. */
1723 unsigned int global
: 1;
1724 /* Set if known to be start of function (as distinct from a hunk
1725 in hot/cold section. */
1726 unsigned int is_func
: 1;
1727 /* Set if not a root node. */
1728 unsigned int non_root
: 1;
1729 /* Flags used during call tree traversal. It's cheaper to replicate
1730 the visit flags than have one which needs clearing after a traversal. */
1731 unsigned int visit1
: 1;
1732 unsigned int visit2
: 1;
1733 unsigned int marking
: 1;
1734 unsigned int visit3
: 1;
1735 unsigned int visit4
: 1;
1736 unsigned int visit5
: 1;
1737 unsigned int visit6
: 1;
1738 unsigned int visit7
: 1;
1741 struct spu_elf_stack_info
1745 /* Variable size array describing functions, one per contiguous
1746 address range belonging to a function. */
1747 struct function_info fun
[1];
1750 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1751 entries for section SEC. */
1753 static struct spu_elf_stack_info
*
1754 alloc_stack_info (asection
*sec
, int max_fun
)
1756 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1759 amt
= sizeof (struct spu_elf_stack_info
);
1760 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
1761 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
1762 if (sec_data
->u
.i
.stack_info
!= NULL
)
1763 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
1764 return sec_data
->u
.i
.stack_info
;
1767 /* Add a new struct function_info describing a (part of a) function
1768 starting at SYM_H. Keep the array sorted by address. */
1770 static struct function_info
*
1771 maybe_insert_function (asection
*sec
,
1774 bfd_boolean is_func
)
1776 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1777 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1783 sinfo
= alloc_stack_info (sec
, 20);
1790 Elf_Internal_Sym
*sym
= sym_h
;
1791 off
= sym
->st_value
;
1792 size
= sym
->st_size
;
1796 struct elf_link_hash_entry
*h
= sym_h
;
1797 off
= h
->root
.u
.def
.value
;
1801 for (i
= sinfo
->num_fun
; --i
>= 0; )
1802 if (sinfo
->fun
[i
].lo
<= off
)
1807 /* Don't add another entry for an alias, but do update some
1809 if (sinfo
->fun
[i
].lo
== off
)
1811 /* Prefer globals over local syms. */
1812 if (global
&& !sinfo
->fun
[i
].global
)
1814 sinfo
->fun
[i
].global
= TRUE
;
1815 sinfo
->fun
[i
].u
.h
= sym_h
;
1818 sinfo
->fun
[i
].is_func
= TRUE
;
1819 return &sinfo
->fun
[i
];
1821 /* Ignore a zero-size symbol inside an existing function. */
1822 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
1823 return &sinfo
->fun
[i
];
1826 if (++i
< sinfo
->num_fun
)
1827 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
1828 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
1829 else if (i
>= sinfo
->max_fun
)
1831 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
1832 bfd_size_type old
= amt
;
1834 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1835 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
1836 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1837 sinfo
= bfd_realloc (sinfo
, amt
);
1840 memset ((char *) sinfo
+ old
, 0, amt
- old
);
1841 sec_data
->u
.i
.stack_info
= sinfo
;
1843 sinfo
->fun
[i
].is_func
= is_func
;
1844 sinfo
->fun
[i
].global
= global
;
1845 sinfo
->fun
[i
].sec
= sec
;
1847 sinfo
->fun
[i
].u
.h
= sym_h
;
1849 sinfo
->fun
[i
].u
.sym
= sym_h
;
1850 sinfo
->fun
[i
].lo
= off
;
1851 sinfo
->fun
[i
].hi
= off
+ size
;
1852 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
);
1853 sinfo
->num_fun
+= 1;
1854 return &sinfo
->fun
[i
];
1857 /* Return the name of FUN. */
1860 func_name (struct function_info
*fun
)
1864 Elf_Internal_Shdr
*symtab_hdr
;
1866 while (fun
->start
!= NULL
)
1870 return fun
->u
.h
->root
.root
.string
;
1873 if (fun
->u
.sym
->st_name
== 0)
1875 size_t len
= strlen (sec
->name
);
1876 char *name
= bfd_malloc (len
+ 10);
1879 sprintf (name
, "%s+%lx", sec
->name
,
1880 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
1884 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1885 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
1888 /* Read the instruction at OFF in SEC. Return true iff the instruction
1889 is a nop, lnop, or stop 0 (all zero insn). */
1892 is_nop (asection
*sec
, bfd_vma off
)
1894 unsigned char insn
[4];
1896 if (off
+ 4 > sec
->size
1897 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
1899 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
1901 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
1906 /* Extend the range of FUN to cover nop padding up to LIMIT.
1907 Return TRUE iff some instruction other than a NOP was found. */
1910 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
1912 bfd_vma off
= (fun
->hi
+ 3) & -4;
1914 while (off
< limit
&& is_nop (fun
->sec
, off
))
1925 /* Check and fix overlapping function ranges. Return TRUE iff there
1926 are gaps in the current info we have about functions in SEC. */
1929 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
1931 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1932 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1934 bfd_boolean gaps
= FALSE
;
1939 for (i
= 1; i
< sinfo
->num_fun
; i
++)
1940 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
1942 /* Fix overlapping symbols. */
1943 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
1944 const char *f2
= func_name (&sinfo
->fun
[i
]);
1946 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
1947 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
1949 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
1952 if (sinfo
->num_fun
== 0)
1956 if (sinfo
->fun
[0].lo
!= 0)
1958 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
1960 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
1962 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
1963 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
1965 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
1971 /* Search current function info for a function that contains address
1972 OFFSET in section SEC. */
1974 static struct function_info
*
1975 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
1977 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1978 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1982 hi
= sinfo
->num_fun
;
1985 mid
= (lo
+ hi
) / 2;
1986 if (offset
< sinfo
->fun
[mid
].lo
)
1988 else if (offset
>= sinfo
->fun
[mid
].hi
)
1991 return &sinfo
->fun
[mid
];
1993 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
1998 /* Add CALLEE to CALLER call list if not already present. Return TRUE
1999 if CALLEE was new. If this function return FALSE, CALLEE should
2003 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2005 struct call_info
**pp
, *p
;
2007 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2008 if (p
->fun
== callee
->fun
)
2010 /* Tail calls use less stack than normal calls. Retain entry
2011 for normal call over one for tail call. */
2012 p
->is_tail
&= callee
->is_tail
;
2015 p
->fun
->start
= NULL
;
2016 p
->fun
->is_func
= TRUE
;
2019 /* Reorder list so most recent call is first. */
2021 p
->next
= caller
->call_list
;
2022 caller
->call_list
= p
;
2025 callee
->next
= caller
->call_list
;
2027 caller
->call_list
= callee
;
2031 /* Copy CALL and insert the copy into CALLER. */
2034 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2036 struct call_info
*callee
;
2037 callee
= bfd_malloc (sizeof (*callee
));
2041 if (!insert_callee (caller
, callee
))
2046 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2047 overlay stub sections. */
2050 interesting_section (asection
*s
, bfd
*obfd
)
2052 return (s
->output_section
!= NULL
2053 && s
->output_section
->owner
== obfd
2054 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2055 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2059 /* Rummage through the relocs for SEC, looking for function calls.
2060 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2061 mark destination symbols on calls as being functions. Also
2062 look at branches, which may be tail calls or go to hot/cold
2063 section part of same function. */
2066 mark_functions_via_relocs (asection
*sec
,
2067 struct bfd_link_info
*info
,
2070 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2071 Elf_Internal_Shdr
*symtab_hdr
;
2072 Elf_Internal_Sym
*syms
;
2074 static bfd_boolean warned
;
2076 if (!interesting_section (sec
, info
->output_bfd
)
2077 || sec
->reloc_count
== 0)
2080 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2082 if (internal_relocs
== NULL
)
2085 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2086 psyms
= &symtab_hdr
->contents
;
2087 syms
= *(Elf_Internal_Sym
**) psyms
;
2088 irela
= internal_relocs
;
2089 irelaend
= irela
+ sec
->reloc_count
;
2090 for (; irela
< irelaend
; irela
++)
2092 enum elf_spu_reloc_type r_type
;
2093 unsigned int r_indx
;
2095 Elf_Internal_Sym
*sym
;
2096 struct elf_link_hash_entry
*h
;
2098 bfd_boolean reject
, is_call
;
2099 struct function_info
*caller
;
2100 struct call_info
*callee
;
2103 r_type
= ELF32_R_TYPE (irela
->r_info
);
2104 if (r_type
!= R_SPU_REL16
2105 && r_type
!= R_SPU_ADDR16
)
2108 if (!(call_tree
&& spu_hash_table (info
)->auto_overlay
))
2112 r_indx
= ELF32_R_SYM (irela
->r_info
);
2113 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2117 || sym_sec
->output_section
== NULL
2118 || sym_sec
->output_section
->owner
!= info
->output_bfd
)
2124 unsigned char insn
[4];
2126 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2127 irela
->r_offset
, 4))
2129 if (is_branch (insn
))
2131 is_call
= (insn
[0] & 0xfd) == 0x31;
2132 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2133 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2136 info
->callbacks
->einfo
2137 (_("%B(%A+0x%v): call to non-code section"
2138 " %B(%A), analysis incomplete\n"),
2139 sec
->owner
, sec
, irela
->r_offset
,
2140 sym_sec
->owner
, sym_sec
);
2148 if (!(call_tree
&& spu_hash_table (info
)->auto_overlay
)
2156 /* For --auto-overlay, count possible stubs we need for
2157 function pointer references. */
2158 unsigned int sym_type
;
2162 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2163 if (sym_type
== STT_FUNC
)
2164 spu_hash_table (info
)->non_ovly_stub
+= 1;
2169 val
= h
->root
.u
.def
.value
;
2171 val
= sym
->st_value
;
2172 val
+= irela
->r_addend
;
2176 struct function_info
*fun
;
2178 if (irela
->r_addend
!= 0)
2180 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2183 fake
->st_value
= val
;
2185 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2189 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2191 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2194 if (irela
->r_addend
!= 0
2195 && fun
->u
.sym
!= sym
)
2200 caller
= find_function (sec
, irela
->r_offset
, info
);
2203 callee
= bfd_malloc (sizeof *callee
);
2207 callee
->fun
= find_function (sym_sec
, val
, info
);
2208 if (callee
->fun
== NULL
)
2210 callee
->is_tail
= !is_call
;
2211 callee
->is_pasted
= FALSE
;
2213 if (callee
->fun
->last_caller
!= sec
)
2215 callee
->fun
->last_caller
= sec
;
2216 callee
->fun
->call_count
+= 1;
2218 if (!insert_callee (caller
, callee
))
2221 && !callee
->fun
->is_func
2222 && callee
->fun
->stack
== 0)
2224 /* This is either a tail call or a branch from one part of
2225 the function to another, ie. hot/cold section. If the
2226 destination has been called by some other function then
2227 it is a separate function. We also assume that functions
2228 are not split across input files. */
2229 if (sec
->owner
!= sym_sec
->owner
)
2231 callee
->fun
->start
= NULL
;
2232 callee
->fun
->is_func
= TRUE
;
2234 else if (callee
->fun
->start
== NULL
)
2235 callee
->fun
->start
= caller
;
2238 struct function_info
*callee_start
;
2239 struct function_info
*caller_start
;
2240 callee_start
= callee
->fun
;
2241 while (callee_start
->start
)
2242 callee_start
= callee_start
->start
;
2243 caller_start
= caller
;
2244 while (caller_start
->start
)
2245 caller_start
= caller_start
->start
;
2246 if (caller_start
!= callee_start
)
2248 callee
->fun
->start
= NULL
;
2249 callee
->fun
->is_func
= TRUE
;
2258 /* Handle something like .init or .fini, which has a piece of a function.
2259 These sections are pasted together to form a single function. */
2262 pasted_function (asection
*sec
, struct bfd_link_info
*info
)
2264 struct bfd_link_order
*l
;
2265 struct _spu_elf_section_data
*sec_data
;
2266 struct spu_elf_stack_info
*sinfo
;
2267 Elf_Internal_Sym
*fake
;
2268 struct function_info
*fun
, *fun_start
;
2270 fake
= bfd_zmalloc (sizeof (*fake
));
2274 fake
->st_size
= sec
->size
;
2276 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2277 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2281 /* Find a function immediately preceding this section. */
2283 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2285 if (l
->u
.indirect
.section
== sec
)
2287 if (fun_start
!= NULL
)
2289 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2293 fun
->start
= fun_start
;
2295 callee
->is_tail
= TRUE
;
2296 callee
->is_pasted
= TRUE
;
2298 if (!insert_callee (fun_start
, callee
))
2304 if (l
->type
== bfd_indirect_link_order
2305 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2306 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2307 && sinfo
->num_fun
!= 0)
2308 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2311 info
->callbacks
->einfo (_("%A link_order not found\n"), sec
);
2315 /* Map address ranges in code sections to functions. */
2318 discover_functions (struct bfd_link_info
*info
)
2322 Elf_Internal_Sym
***psym_arr
;
2323 asection
***sec_arr
;
2324 bfd_boolean gaps
= FALSE
;
2327 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2330 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2331 if (psym_arr
== NULL
)
2333 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2334 if (sec_arr
== NULL
)
2338 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2340 ibfd
= ibfd
->link_next
, bfd_idx
++)
2342 extern const bfd_target bfd_elf32_spu_vec
;
2343 Elf_Internal_Shdr
*symtab_hdr
;
2346 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2347 asection
**psecs
, **p
;
2349 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2352 /* Read all the symbols. */
2353 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2354 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2358 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2359 if (interesting_section (sec
, info
->output_bfd
))
2367 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2370 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2372 symtab_hdr
->contents
= (void *) syms
;
2377 /* Select defined function symbols that are going to be output. */
2378 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2381 psym_arr
[bfd_idx
] = psyms
;
2382 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2385 sec_arr
[bfd_idx
] = psecs
;
2386 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2387 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2388 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2392 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2393 if (s
!= NULL
&& interesting_section (s
, info
->output_bfd
))
2396 symcount
= psy
- psyms
;
2399 /* Sort them by section and offset within section. */
2400 sort_syms_syms
= syms
;
2401 sort_syms_psecs
= psecs
;
2402 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2404 /* Now inspect the function symbols. */
2405 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2407 asection
*s
= psecs
[*psy
- syms
];
2408 Elf_Internal_Sym
**psy2
;
2410 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2411 if (psecs
[*psy2
- syms
] != s
)
2414 if (!alloc_stack_info (s
, psy2
- psy
))
2419 /* First install info about properly typed and sized functions.
2420 In an ideal world this will cover all code sections, except
2421 when partitioning functions into hot and cold sections,
2422 and the horrible pasted together .init and .fini functions. */
2423 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2426 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2428 asection
*s
= psecs
[sy
- syms
];
2429 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2434 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2435 if (interesting_section (sec
, info
->output_bfd
))
2436 gaps
|= check_function_ranges (sec
, info
);
2441 /* See if we can discover more function symbols by looking at
2443 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2445 ibfd
= ibfd
->link_next
, bfd_idx
++)
2449 if (psym_arr
[bfd_idx
] == NULL
)
2452 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2453 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2457 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2459 ibfd
= ibfd
->link_next
, bfd_idx
++)
2461 Elf_Internal_Shdr
*symtab_hdr
;
2463 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2466 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2469 psecs
= sec_arr
[bfd_idx
];
2471 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2472 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2475 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2476 if (interesting_section (sec
, info
->output_bfd
))
2477 gaps
|= check_function_ranges (sec
, info
);
2481 /* Finally, install all globals. */
2482 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2486 s
= psecs
[sy
- syms
];
2488 /* Global syms might be improperly typed functions. */
2489 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2490 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2492 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
2498 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2500 extern const bfd_target bfd_elf32_spu_vec
;
2503 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2506 /* Some of the symbols we've installed as marking the
2507 beginning of functions may have a size of zero. Extend
2508 the range of such functions to the beginning of the
2509 next symbol of interest. */
2510 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2511 if (interesting_section (sec
, info
->output_bfd
))
2513 struct _spu_elf_section_data
*sec_data
;
2514 struct spu_elf_stack_info
*sinfo
;
2516 sec_data
= spu_elf_section_data (sec
);
2517 sinfo
= sec_data
->u
.i
.stack_info
;
2521 bfd_vma hi
= sec
->size
;
2523 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
2525 sinfo
->fun
[fun_idx
].hi
= hi
;
2526 hi
= sinfo
->fun
[fun_idx
].lo
;
2529 /* No symbols in this section. Must be .init or .fini
2530 or something similar. */
2531 else if (!pasted_function (sec
, info
))
2537 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2539 ibfd
= ibfd
->link_next
, bfd_idx
++)
2541 if (psym_arr
[bfd_idx
] == NULL
)
2544 free (psym_arr
[bfd_idx
]);
2545 free (sec_arr
[bfd_idx
]);
2554 /* Iterate over all function_info we have collected, calling DOIT on
2555 each node if ROOT_ONLY is false. Only call DOIT on root nodes
2559 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
2560 struct bfd_link_info
*,
2562 struct bfd_link_info
*info
,
2568 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2570 extern const bfd_target bfd_elf32_spu_vec
;
2573 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2576 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2578 struct _spu_elf_section_data
*sec_data
;
2579 struct spu_elf_stack_info
*sinfo
;
2581 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2582 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
2585 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2586 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
2587 if (!doit (&sinfo
->fun
[i
], info
, param
))
2595 /* Transfer call info attached to struct function_info entries for
2596 all of a given function's sections to the first entry. */
2599 transfer_calls (struct function_info
*fun
,
2600 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2601 void *param ATTRIBUTE_UNUSED
)
2603 struct function_info
*start
= fun
->start
;
2607 struct call_info
*call
, *call_next
;
2609 while (start
->start
!= NULL
)
2610 start
= start
->start
;
2611 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
2613 call_next
= call
->next
;
2614 if (!insert_callee (start
, call
))
2617 fun
->call_list
= NULL
;
2622 /* Mark nodes in the call graph that are called by some other node. */
2625 mark_non_root (struct function_info
*fun
,
2626 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2627 void *param ATTRIBUTE_UNUSED
)
2629 struct call_info
*call
;
2634 for (call
= fun
->call_list
; call
; call
= call
->next
)
2636 call
->fun
->non_root
= TRUE
;
2637 mark_non_root (call
->fun
, 0, 0);
2642 /* Remove cycles from the call graph. Set depth of nodes. */
2645 remove_cycles (struct function_info
*fun
,
2646 struct bfd_link_info
*info
,
2649 struct call_info
**callp
, *call
;
2650 unsigned int depth
= *(unsigned int *) param
;
2651 unsigned int max_depth
= depth
;
2655 fun
->marking
= TRUE
;
2657 callp
= &fun
->call_list
;
2658 while ((call
= *callp
) != NULL
)
2660 if (!call
->fun
->visit2
)
2662 call
->max_depth
= depth
+ !call
->is_pasted
;
2663 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
2665 if (max_depth
< call
->max_depth
)
2666 max_depth
= call
->max_depth
;
2668 else if (call
->fun
->marking
)
2670 if (!spu_hash_table (info
)->auto_overlay
)
2672 const char *f1
= func_name (fun
);
2673 const char *f2
= func_name (call
->fun
);
2675 info
->callbacks
->info (_("Stack analysis will ignore the call "
2679 *callp
= call
->next
;
2683 callp
= &call
->next
;
2685 fun
->marking
= FALSE
;
2686 *(unsigned int *) param
= max_depth
;
2690 /* Populate call_list for each function. */
2693 build_call_tree (struct bfd_link_info
*info
)
2698 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2700 extern const bfd_target bfd_elf32_spu_vec
;
2703 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2706 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2707 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
2711 /* Transfer call info from hot/cold section part of function
2713 if (!spu_hash_table (info
)->auto_overlay
2714 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
2717 /* Find the call graph root(s). */
2718 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
2721 /* Remove cycles from the call graph. We start from the root node(s)
2722 so that we break cycles in a reasonable place. */
2724 return for_each_node (remove_cycles
, info
, &depth
, TRUE
);
2727 /* qsort predicate to sort calls by max_depth then count. */
2730 sort_calls (const void *a
, const void *b
)
2732 struct call_info
*const *c1
= a
;
2733 struct call_info
*const *c2
= b
;
2736 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
2740 delta
= (*c2
)->count
- (*c1
)->count
;
2748 unsigned int max_overlay_size
;
2751 /* Set linker_mark and gc_mark on any sections that we will put in
2752 overlays. These flags are used by the generic ELF linker, but we
2753 won't be continuing on to bfd_elf_final_link so it is OK to use
2754 them. linker_mark is clear before we get here. Set segment_mark
2755 on sections that are part of a pasted function (excluding the last
2758 Set up function rodata section if --overlay-rodata. We don't
2759 currently include merged string constant rodata sections since
2761 Sort the call graph so that the deepest nodes will be visited
2765 mark_overlay_section (struct function_info
*fun
,
2766 struct bfd_link_info
*info
,
2769 struct call_info
*call
;
2771 struct _mos_param
*mos_param
= param
;
2777 if (!fun
->sec
->linker_mark
)
2779 fun
->sec
->linker_mark
= 1;
2780 fun
->sec
->gc_mark
= 1;
2781 fun
->sec
->segment_mark
= 0;
2782 /* Ensure SEC_CODE is set on this text section (it ought to
2783 be!), and SEC_CODE is clear on rodata sections. We use
2784 this flag to differentiate the two overlay section types. */
2785 fun
->sec
->flags
|= SEC_CODE
;
2786 if (spu_hash_table (info
)->auto_overlay
& OVERLAY_RODATA
)
2791 /* Find the rodata section corresponding to this function's
2793 if (strcmp (fun
->sec
->name
, ".text") == 0)
2795 name
= bfd_malloc (sizeof (".rodata"));
2798 memcpy (name
, ".rodata", sizeof (".rodata"));
2800 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
2802 size_t len
= strlen (fun
->sec
->name
);
2803 name
= bfd_malloc (len
+ 3);
2806 memcpy (name
, ".rodata", sizeof (".rodata"));
2807 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
2809 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
2811 size_t len
= strlen (fun
->sec
->name
) + 1;
2812 name
= bfd_malloc (len
);
2815 memcpy (name
, fun
->sec
->name
, len
);
2821 asection
*rodata
= NULL
;
2822 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
2823 if (group_sec
== NULL
)
2824 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
2826 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
2828 if (strcmp (group_sec
->name
, name
) == 0)
2833 group_sec
= elf_section_data (group_sec
)->next_in_group
;
2835 fun
->rodata
= rodata
;
2838 fun
->rodata
->linker_mark
= 1;
2839 fun
->rodata
->gc_mark
= 1;
2840 fun
->rodata
->flags
&= ~SEC_CODE
;
2844 size
= fun
->sec
->size
;
2846 size
+= fun
->rodata
->size
;
2847 if (mos_param
->max_overlay_size
< size
)
2848 mos_param
->max_overlay_size
= size
;
2852 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2857 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
2861 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2862 calls
[count
++] = call
;
2864 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
2866 fun
->call_list
= NULL
;
2870 calls
[count
]->next
= fun
->call_list
;
2871 fun
->call_list
= calls
[count
];
2876 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2878 if (call
->is_pasted
)
2880 /* There can only be one is_pasted call per function_info. */
2881 BFD_ASSERT (!fun
->sec
->segment_mark
);
2882 fun
->sec
->segment_mark
= 1;
2884 if (!mark_overlay_section (call
->fun
, info
, param
))
2888 /* Don't put entry code into an overlay. The overlay manager needs
2890 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
2891 == info
->output_bfd
->start_address
)
2893 fun
->sec
->linker_mark
= 0;
2894 if (fun
->rodata
!= NULL
)
2895 fun
->rodata
->linker_mark
= 0;
2901 asection
*exclude_input_section
;
2902 asection
*exclude_output_section
;
2903 unsigned long clearing
;
2906 /* Undo some of mark_overlay_section's work. */
2909 unmark_overlay_section (struct function_info
*fun
,
2910 struct bfd_link_info
*info
,
2913 struct call_info
*call
;
2914 struct _uos_param
*uos_param
= param
;
2915 unsigned int excluded
= 0;
2923 if (fun
->sec
== uos_param
->exclude_input_section
2924 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
2927 uos_param
->clearing
+= excluded
;
2929 if (uos_param
->clearing
)
2931 fun
->sec
->linker_mark
= 0;
2933 fun
->rodata
->linker_mark
= 0;
2936 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2937 if (!unmark_overlay_section (call
->fun
, info
, param
))
2940 uos_param
->clearing
-= excluded
;
2945 unsigned int lib_size
;
2946 asection
**lib_sections
;
2949 /* Add sections we have marked as belonging to overlays to an array
2950 for consideration as non-overlay sections. The array consist of
2951 pairs of sections, (text,rodata), for functions in the call graph. */
2954 collect_lib_sections (struct function_info
*fun
,
2955 struct bfd_link_info
*info
,
2958 struct _cl_param
*lib_param
= param
;
2959 struct call_info
*call
;
2966 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
2969 size
= fun
->sec
->size
;
2971 size
+= fun
->rodata
->size
;
2972 if (size
> lib_param
->lib_size
)
2975 *lib_param
->lib_sections
++ = fun
->sec
;
2976 fun
->sec
->gc_mark
= 0;
2977 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
2979 *lib_param
->lib_sections
++ = fun
->rodata
;
2980 fun
->rodata
->gc_mark
= 0;
2983 *lib_param
->lib_sections
++ = NULL
;
2985 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2986 collect_lib_sections (call
->fun
, info
, param
);
2991 /* qsort predicate to sort sections by call count. */
2994 sort_lib (const void *a
, const void *b
)
2996 asection
*const *s1
= a
;
2997 asection
*const *s2
= b
;
2998 struct _spu_elf_section_data
*sec_data
;
2999 struct spu_elf_stack_info
*sinfo
;
3003 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3004 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3007 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3008 delta
-= sinfo
->fun
[i
].call_count
;
3011 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3012 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3015 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3016 delta
+= sinfo
->fun
[i
].call_count
;
3025 /* Remove some sections from those marked to be in overlays. Choose
3026 those that are called from many places, likely library functions. */
3029 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3032 asection
**lib_sections
;
3033 unsigned int i
, lib_count
;
3034 struct _cl_param collect_lib_param
;
3035 struct function_info dummy_caller
;
3037 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3039 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3041 extern const bfd_target bfd_elf32_spu_vec
;
3044 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3047 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3048 if (sec
->linker_mark
3049 && sec
->size
< lib_size
3050 && (sec
->flags
& SEC_CODE
) != 0)
3053 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3054 if (lib_sections
== NULL
)
3055 return (unsigned int) -1;
3056 collect_lib_param
.lib_size
= lib_size
;
3057 collect_lib_param
.lib_sections
= lib_sections
;
3058 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3060 return (unsigned int) -1;
3061 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3063 /* Sort sections so that those with the most calls are first. */
3065 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3067 for (i
= 0; i
< lib_count
; i
++)
3069 unsigned int tmp
, stub_size
;
3071 struct _spu_elf_section_data
*sec_data
;
3072 struct spu_elf_stack_info
*sinfo
;
3074 sec
= lib_sections
[2 * i
];
3075 /* If this section is OK, its size must be less than lib_size. */
3077 /* If it has a rodata section, then add that too. */
3078 if (lib_sections
[2 * i
+ 1])
3079 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3080 /* Add any new overlay call stubs needed by the section. */
3083 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3084 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3087 struct call_info
*call
;
3089 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3090 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3091 if (call
->fun
->sec
->linker_mark
)
3093 struct call_info
*p
;
3094 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3095 if (p
->fun
== call
->fun
)
3098 stub_size
+= OVL_STUB_SIZE
;
3101 if (tmp
+ stub_size
< lib_size
)
3103 struct call_info
**pp
, *p
;
3105 /* This section fits. Mark it as non-overlay. */
3106 lib_sections
[2 * i
]->linker_mark
= 0;
3107 if (lib_sections
[2 * i
+ 1])
3108 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3109 lib_size
-= tmp
+ stub_size
;
3110 /* Call stubs to the section we just added are no longer
3112 pp
= &dummy_caller
.call_list
;
3113 while ((p
= *pp
) != NULL
)
3114 if (!p
->fun
->sec
->linker_mark
)
3116 lib_size
+= OVL_STUB_SIZE
;
3122 /* Add new call stubs to dummy_caller. */
3123 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3124 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3127 struct call_info
*call
;
3129 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3130 for (call
= sinfo
->fun
[k
].call_list
;
3133 if (call
->fun
->sec
->linker_mark
)
3135 struct call_info
*callee
;
3136 callee
= bfd_malloc (sizeof (*callee
));
3138 return (unsigned int) -1;
3140 if (!insert_callee (&dummy_caller
, callee
))
3146 while (dummy_caller
.call_list
!= NULL
)
3148 struct call_info
*call
= dummy_caller
.call_list
;
3149 dummy_caller
.call_list
= call
->next
;
3152 for (i
= 0; i
< 2 * lib_count
; i
++)
3153 if (lib_sections
[i
])
3154 lib_sections
[i
]->gc_mark
= 1;
3155 free (lib_sections
);
3159 /* Build an array of overlay sections. The deepest node's section is
3160 added first, then its parent node's section, then everything called
3161 from the parent section. The idea being to group sections to
3162 minimise calls between different overlays. */
3165 collect_overlays (struct function_info
*fun
,
3166 struct bfd_link_info
*info
,
3169 struct call_info
*call
;
3170 bfd_boolean added_fun
;
3171 asection
***ovly_sections
= param
;
3177 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3178 if (!call
->is_pasted
)
3180 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3186 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3188 fun
->sec
->gc_mark
= 0;
3189 *(*ovly_sections
)++ = fun
->sec
;
3190 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3192 fun
->rodata
->gc_mark
= 0;
3193 *(*ovly_sections
)++ = fun
->rodata
;
3196 *(*ovly_sections
)++ = NULL
;
3199 /* Pasted sections must stay with the first section. We don't
3200 put pasted sections in the array, just the first section.
3201 Mark subsequent sections as already considered. */
3202 if (fun
->sec
->segment_mark
)
3204 struct function_info
*call_fun
= fun
;
3207 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3208 if (call
->is_pasted
)
3210 call_fun
= call
->fun
;
3211 call_fun
->sec
->gc_mark
= 0;
3212 if (call_fun
->rodata
)
3213 call_fun
->rodata
->gc_mark
= 0;
3219 while (call_fun
->sec
->segment_mark
);
3223 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3224 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3229 struct _spu_elf_section_data
*sec_data
;
3230 struct spu_elf_stack_info
*sinfo
;
3232 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3233 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3236 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3237 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3245 struct _sum_stack_param
{
3247 size_t overall_stack
;
3248 bfd_boolean emit_stack_syms
;
3251 /* Descend the call graph for FUN, accumulating total stack required. */
3254 sum_stack (struct function_info
*fun
,
3255 struct bfd_link_info
*info
,
3258 struct call_info
*call
;
3259 struct function_info
*max
;
3260 size_t stack
, cum_stack
;
3262 bfd_boolean has_call
;
3263 struct _sum_stack_param
*sum_stack_param
= param
;
3264 struct spu_link_hash_table
*htab
;
3266 cum_stack
= fun
->stack
;
3267 sum_stack_param
->cum_stack
= cum_stack
;
3273 for (call
= fun
->call_list
; call
; call
= call
->next
)
3275 if (!call
->is_pasted
)
3277 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3279 stack
= sum_stack_param
->cum_stack
;
3280 /* Include caller stack for normal calls, don't do so for
3281 tail calls. fun->stack here is local stack usage for
3283 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3284 stack
+= fun
->stack
;
3285 if (cum_stack
< stack
)
3292 sum_stack_param
->cum_stack
= cum_stack
;
3294 /* Now fun->stack holds cumulative stack. */
3295 fun
->stack
= cum_stack
;
3299 && sum_stack_param
->overall_stack
< cum_stack
)
3300 sum_stack_param
->overall_stack
= cum_stack
;
3302 htab
= spu_hash_table (info
);
3303 if (htab
->auto_overlay
)
3306 f1
= func_name (fun
);
3308 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3309 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3310 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3314 info
->callbacks
->minfo (_(" calls:\n"));
3315 for (call
= fun
->call_list
; call
; call
= call
->next
)
3316 if (!call
->is_pasted
)
3318 const char *f2
= func_name (call
->fun
);
3319 const char *ann1
= call
->fun
== max
? "*" : " ";
3320 const char *ann2
= call
->is_tail
? "t" : " ";
3322 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
3326 if (sum_stack_param
->emit_stack_syms
)
3328 char *name
= bfd_malloc (18 + strlen (f1
));
3329 struct elf_link_hash_entry
*h
;
3334 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
3335 sprintf (name
, "__stack_%s", f1
);
3337 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
3339 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
3342 && (h
->root
.type
== bfd_link_hash_new
3343 || h
->root
.type
== bfd_link_hash_undefined
3344 || h
->root
.type
== bfd_link_hash_undefweak
))
3346 h
->root
.type
= bfd_link_hash_defined
;
3347 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
3348 h
->root
.u
.def
.value
= cum_stack
;
3353 h
->ref_regular_nonweak
= 1;
3354 h
->forced_local
= 1;
3362 /* SEC is part of a pasted function. Return the call_info for the
3363 next section of this function. */
3365 static struct call_info
*
3366 find_pasted_call (asection
*sec
)
3368 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
3369 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
3370 struct call_info
*call
;
3373 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3374 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
3375 if (call
->is_pasted
)
3381 /* qsort predicate to sort bfds by file name. */
3384 sort_bfds (const void *a
, const void *b
)
3386 bfd
*const *abfd1
= a
;
3387 bfd
*const *abfd2
= b
;
3389 return strcmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
3392 /* Handle --auto-overlay. */
3394 static void spu_elf_auto_overlay (struct bfd_link_info
*, void (*) (void))
3398 spu_elf_auto_overlay (struct bfd_link_info
*info
,
3399 void (*spu_elf_load_ovl_mgr
) (void))
3403 struct elf_segment_map
*m
;
3404 unsigned int fixed_size
, lo
, hi
;
3405 struct spu_link_hash_table
*htab
;
3406 unsigned int base
, i
, count
, bfd_count
;
3408 asection
**ovly_sections
, **ovly_p
;
3410 unsigned int total_overlay_size
, overlay_size
;
3411 struct elf_link_hash_entry
*h
;
3412 struct _mos_param mos_param
;
3413 struct _uos_param uos_param
;
3414 struct function_info dummy_caller
;
3416 /* Find the extents of our loadable image. */
3417 lo
= (unsigned int) -1;
3419 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
3420 if (m
->p_type
== PT_LOAD
)
3421 for (i
= 0; i
< m
->count
; i
++)
3422 if (m
->sections
[i
]->size
!= 0)
3424 if (m
->sections
[i
]->vma
< lo
)
3425 lo
= m
->sections
[i
]->vma
;
3426 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
3427 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
3429 fixed_size
= hi
+ 1 - lo
;
3431 if (!discover_functions (info
))
3434 if (!build_call_tree (info
))
3437 uos_param
.exclude_input_section
= 0;
3438 uos_param
.exclude_output_section
3439 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
3441 htab
= spu_hash_table (info
);
3442 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
3443 FALSE
, FALSE
, FALSE
);
3445 && (h
->root
.type
== bfd_link_hash_defined
3446 || h
->root
.type
== bfd_link_hash_defweak
)
3449 /* We have a user supplied overlay manager. */
3450 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
3454 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
3455 builtin version to .text, and will adjust .text size. */
3456 asection
*text
= bfd_get_section_by_name (info
->output_bfd
, ".text");
3458 fixed_size
-= text
->size
;
3459 spu_elf_load_ovl_mgr ();
3460 text
= bfd_get_section_by_name (info
->output_bfd
, ".text");
3462 fixed_size
+= text
->size
;
3465 /* Mark overlay sections, and find max overlay section size. */
3466 mos_param
.max_overlay_size
= 0;
3467 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
3470 /* We can't put the overlay manager or interrupt routines in
3472 uos_param
.clearing
= 0;
3473 if ((uos_param
.exclude_input_section
3474 || uos_param
.exclude_output_section
)
3475 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
3479 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3481 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
3482 if (bfd_arr
== NULL
)
3485 /* Count overlay sections, and subtract their sizes from "fixed_size". */
3488 total_overlay_size
= 0;
3489 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3491 extern const bfd_target bfd_elf32_spu_vec
;
3493 unsigned int old_count
;
3495 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3499 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3500 if (sec
->linker_mark
)
3502 if ((sec
->flags
& SEC_CODE
) != 0)
3504 fixed_size
-= sec
->size
;
3505 total_overlay_size
+= sec
->size
;
3507 if (count
!= old_count
)
3508 bfd_arr
[bfd_count
++] = ibfd
;
3511 /* Since the overlay link script selects sections by file name and
3512 section name, ensure that file names are unique. */
3515 bfd_boolean ok
= TRUE
;
3517 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
3518 for (i
= 1; i
< bfd_count
; ++i
)
3519 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
3521 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
3523 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
3524 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
3525 bfd_arr
[i
- 1]->filename
,
3526 bfd_arr
[i
- 1]->my_archive
->filename
);
3528 info
->callbacks
->einfo (_("%s in both %s and %s\n"),
3529 bfd_arr
[i
- 1]->filename
,
3530 bfd_arr
[i
- 1]->my_archive
->filename
,
3531 bfd_arr
[i
]->my_archive
->filename
);
3533 else if (bfd_arr
[i
- 1]->my_archive
)
3534 info
->callbacks
->einfo (_("%s in %s and as an object\n"),
3535 bfd_arr
[i
- 1]->filename
,
3536 bfd_arr
[i
- 1]->my_archive
->filename
);
3537 else if (bfd_arr
[i
]->my_archive
)
3538 info
->callbacks
->einfo (_("%s in %s and as an object\n"),
3539 bfd_arr
[i
]->filename
,
3540 bfd_arr
[i
]->my_archive
->filename
);
3542 info
->callbacks
->einfo (_("%s duplicated\n"),
3543 bfd_arr
[i
]->filename
);
3548 /* FIXME: modify plain object files from foo.o to ./foo.o
3549 and emit EXCLUDE_FILE to handle the duplicates in
3550 archives. There is a pathological case we can't handle:
3551 We may have duplicate file names within a single archive. */
3552 info
->callbacks
->einfo (_("sorry, no support for duplicate "
3553 "object files in auto-overlay script\n"));
3554 bfd_set_error (bfd_error_bad_value
);
3560 if (htab
->reserved
== 0)
3562 struct _sum_stack_param sum_stack_param
;
3564 sum_stack_param
.emit_stack_syms
= 0;
3565 sum_stack_param
.overall_stack
= 0;
3566 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3568 htab
->reserved
= sum_stack_param
.overall_stack
;
3570 fixed_size
+= htab
->reserved
;
3571 fixed_size
+= htab
->non_ovly_stub
* OVL_STUB_SIZE
;
3572 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
3574 /* Guess number of overlays. Assuming overlay buffer is on
3575 average only half full should be conservative. */
3576 ovlynum
= total_overlay_size
* 2 / (htab
->local_store
- fixed_size
);
3577 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
3578 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
3581 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
3582 info
->callbacks
->einfo (_("non-overlay plus maximum overlay size "
3583 "of 0x%x exceeds local store\n"),
3584 fixed_size
+ mos_param
.max_overlay_size
);
3586 /* Now see if we should put some functions in the non-overlay area. */
3587 if (fixed_size
< htab
->overlay_fixed
3588 && htab
->overlay_fixed
+ mos_param
.max_overlay_size
< htab
->local_store
)
3590 unsigned int lib_size
= htab
->overlay_fixed
- fixed_size
;
3591 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
3592 if (lib_size
== (unsigned int) -1)
3594 fixed_size
= htab
->overlay_fixed
- lib_size
;
3597 /* Build an array of sections, suitably sorted to place into
3599 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
3600 if (ovly_sections
== NULL
)
3602 ovly_p
= ovly_sections
;
3603 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
3605 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
3607 script
= htab
->spu_elf_open_overlay_script ();
3609 if (fprintf (script
, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
3612 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3613 overlay_size
= htab
->local_store
- fixed_size
;
3616 while (base
< count
)
3618 unsigned int size
= 0;
3621 for (i
= base
; i
< count
; i
++)
3625 unsigned int stub_size
;
3626 struct call_info
*call
, *pasty
;
3627 struct _spu_elf_section_data
*sec_data
;
3628 struct spu_elf_stack_info
*sinfo
;
3631 /* See whether we can add this section to the current
3632 overlay without overflowing our overlay buffer. */
3633 sec
= ovly_sections
[2 * i
];
3634 tmp
= size
+ sec
->size
;
3635 if (ovly_sections
[2 * i
+ 1])
3636 tmp
+= ovly_sections
[2 * i
+ 1]->size
;
3637 if (tmp
> overlay_size
)
3639 if (sec
->segment_mark
)
3641 /* Pasted sections must stay together, so add their
3643 struct call_info
*pasty
= find_pasted_call (sec
);
3644 while (pasty
!= NULL
)
3646 struct function_info
*call_fun
= pasty
->fun
;
3647 tmp
+= call_fun
->sec
->size
;
3648 if (call_fun
->rodata
)
3649 tmp
+= call_fun
->rodata
->size
;
3650 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
3651 if (pasty
->is_pasted
)
3655 if (tmp
> overlay_size
)
3658 /* If we add this section, we might need new overlay call
3659 stubs. Add any overlay section calls to dummy_call. */
3661 sec_data
= spu_elf_section_data (sec
);
3662 sinfo
= sec_data
->u
.i
.stack_info
;
3663 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3664 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3665 if (call
->is_pasted
)
3667 BFD_ASSERT (pasty
== NULL
);
3670 else if (call
->fun
->sec
->linker_mark
)
3672 if (!copy_callee (&dummy_caller
, call
))
3675 while (pasty
!= NULL
)
3677 struct function_info
*call_fun
= pasty
->fun
;
3679 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3680 if (call
->is_pasted
)
3682 BFD_ASSERT (pasty
== NULL
);
3685 else if (!copy_callee (&dummy_caller
, call
))
3689 /* Calculate call stub size. */
3691 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
3695 stub_size
+= OVL_STUB_SIZE
;
3696 /* If the call is within this overlay, we won't need a
3698 for (k
= base
; k
< i
+ 1; k
++)
3699 if (call
->fun
->sec
== ovly_sections
[2 * k
])
3701 stub_size
-= OVL_STUB_SIZE
;
3705 if (tmp
+ stub_size
> overlay_size
)
3713 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
3714 ovly_sections
[2 * i
]->owner
,
3715 ovly_sections
[2 * i
],
3716 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
3717 bfd_set_error (bfd_error_bad_value
);
3721 if (fprintf (script
, " .ovly%d {\n", ++ovlynum
) <= 0)
3723 for (j
= base
; j
< i
; j
++)
3725 asection
*sec
= ovly_sections
[2 * j
];
3727 if (fprintf (script
, " [%c]%s (%s)\n",
3728 sec
->owner
->filename
[0],
3729 sec
->owner
->filename
+ 1,
3732 if (sec
->segment_mark
)
3734 struct call_info
*call
= find_pasted_call (sec
);
3735 while (call
!= NULL
)
3737 struct function_info
*call_fun
= call
->fun
;
3738 sec
= call_fun
->sec
;
3739 if (fprintf (script
, " [%c]%s (%s)\n",
3740 sec
->owner
->filename
[0],
3741 sec
->owner
->filename
+ 1,
3744 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3745 if (call
->is_pasted
)
3751 for (j
= base
; j
< i
; j
++)
3753 asection
*sec
= ovly_sections
[2 * j
+ 1];
3754 if (sec
!= NULL
&& fprintf (script
, " [%c]%s (%s)\n",
3755 sec
->owner
->filename
[0],
3756 sec
->owner
->filename
+ 1,
3760 sec
= ovly_sections
[2 * j
];
3761 if (sec
->segment_mark
)
3763 struct call_info
*call
= find_pasted_call (sec
);
3764 while (call
!= NULL
)
3766 struct function_info
*call_fun
= call
->fun
;
3767 sec
= call_fun
->rodata
;
3768 if (sec
!= NULL
&& fprintf (script
, " [%c]%s (%s)\n",
3769 sec
->owner
->filename
[0],
3770 sec
->owner
->filename
+ 1,
3773 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3774 if (call
->is_pasted
)
3780 if (fprintf (script
, " }\n") <= 0)
3783 while (dummy_caller
.call_list
!= NULL
)
3785 struct call_info
*call
= dummy_caller
.call_list
;
3786 dummy_caller
.call_list
= call
->next
;
3792 free (ovly_sections
);
3794 if (fprintf (script
, " }\n}\nINSERT AFTER .text;\n") <= 0)
3796 if (fclose (script
) != 0)
3799 if (htab
->auto_overlay
& AUTO_RELINK
)
3800 htab
->spu_elf_relink ();
3805 bfd_set_error (bfd_error_system_call
);
3807 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
3811 /* Provide an estimate of total stack required. */
3814 spu_elf_stack_analysis (struct bfd_link_info
*info
, int emit_stack_syms
)
3816 struct _sum_stack_param sum_stack_param
;
3818 if (!discover_functions (info
))
3821 if (!build_call_tree (info
))
3824 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
3825 info
->callbacks
->minfo (_("\nStack size for functions. "
3826 "Annotations: '*' max stack, 't' tail call\n"));
3828 sum_stack_param
.emit_stack_syms
= emit_stack_syms
;
3829 sum_stack_param
.overall_stack
= 0;
3830 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3833 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
3834 (bfd_vma
) sum_stack_param
.overall_stack
);
3838 /* Perform a final link. */
3841 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
3843 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3845 if (htab
->auto_overlay
)
3846 spu_elf_auto_overlay (info
, htab
->spu_elf_load_ovl_mgr
);
3848 if (htab
->stack_analysis
3849 && !spu_elf_stack_analysis (info
, htab
->emit_stack_syms
))
3850 info
->callbacks
->einfo ("%X%P: stack analysis error: %E\n");
3852 return bfd_elf_final_link (output_bfd
, info
);
3855 /* Called when not normally emitting relocs, ie. !info->relocatable
3856 and !info->emitrelocations. Returns a count of special relocs
3857 that need to be emitted. */
3860 spu_elf_count_relocs (asection
*sec
, Elf_Internal_Rela
*relocs
)
3862 unsigned int count
= 0;
3863 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
3865 for (; relocs
< relend
; relocs
++)
3867 int r_type
= ELF32_R_TYPE (relocs
->r_info
);
3868 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
3875 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
3878 spu_elf_relocate_section (bfd
*output_bfd
,
3879 struct bfd_link_info
*info
,
3881 asection
*input_section
,
3883 Elf_Internal_Rela
*relocs
,
3884 Elf_Internal_Sym
*local_syms
,
3885 asection
**local_sections
)
3887 Elf_Internal_Shdr
*symtab_hdr
;
3888 struct elf_link_hash_entry
**sym_hashes
;
3889 Elf_Internal_Rela
*rel
, *relend
;
3890 struct spu_link_hash_table
*htab
;
3891 asection
*ea
= bfd_get_section_by_name (output_bfd
, "._ea");
3893 bfd_boolean emit_these_relocs
= FALSE
;
3894 bfd_boolean is_ea_sym
;
3897 htab
= spu_hash_table (info
);
3898 stubs
= (htab
->stub_sec
!= NULL
3899 && maybe_needs_stubs (input_section
, output_bfd
));
3900 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
3901 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
3904 relend
= relocs
+ input_section
->reloc_count
;
3905 for (; rel
< relend
; rel
++)
3908 reloc_howto_type
*howto
;
3909 unsigned int r_symndx
;
3910 Elf_Internal_Sym
*sym
;
3912 struct elf_link_hash_entry
*h
;
3913 const char *sym_name
;
3916 bfd_reloc_status_type r
;
3917 bfd_boolean unresolved_reloc
;
3920 r_symndx
= ELF32_R_SYM (rel
->r_info
);
3921 r_type
= ELF32_R_TYPE (rel
->r_info
);
3922 howto
= elf_howto_table
+ r_type
;
3923 unresolved_reloc
= FALSE
;
3928 if (r_symndx
< symtab_hdr
->sh_info
)
3930 sym
= local_syms
+ r_symndx
;
3931 sec
= local_sections
[r_symndx
];
3932 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
3933 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
3937 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
3938 r_symndx
, symtab_hdr
, sym_hashes
,
3940 unresolved_reloc
, warned
);
3941 sym_name
= h
->root
.root
.string
;
3944 if (sec
!= NULL
&& elf_discarded_section (sec
))
3946 /* For relocs against symbols from removed linkonce sections,
3947 or sections discarded by a linker script, we just want the
3948 section contents zeroed. Avoid any special processing. */
3949 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
3955 if (info
->relocatable
)
3958 is_ea_sym
= (ea
!= NULL
3960 && sec
->output_section
== ea
);
3962 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
3966 /* ._ea is a special section that isn't allocated in SPU
3967 memory, but rather occupies space in PPU memory as
3968 part of an embedded ELF image. If this reloc is
3969 against a symbol defined in ._ea, then transform the
3970 reloc into an equivalent one without a symbol
3971 relative to the start of the ELF image. */
3972 rel
->r_addend
+= (relocation
3974 + elf_section_data (ea
)->this_hdr
.sh_offset
);
3975 rel
->r_info
= ELF32_R_INFO (0, r_type
);
3977 emit_these_relocs
= TRUE
;
3982 unresolved_reloc
= TRUE
;
3984 if (unresolved_reloc
)
3986 (*_bfd_error_handler
)
3987 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
3989 bfd_get_section_name (input_bfd
, input_section
),
3990 (long) rel
->r_offset
,
3996 /* If this symbol is in an overlay area, we may need to relocate
3997 to the overlay stub. */
3998 addend
= rel
->r_addend
;
4001 enum _stub_type stub_type
;
4003 stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4005 if (stub_type
!= no_stub
)
4007 unsigned int ovl
= 0;
4008 struct got_entry
*g
, **head
;
4010 if (stub_type
!= nonovl_stub
)
4011 ovl
= (spu_elf_section_data (input_section
->output_section
)
4015 head
= &h
->got
.glist
;
4017 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4019 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4020 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4025 relocation
= g
->stub_addr
;
4030 r
= _bfd_final_link_relocate (howto
,
4034 rel
->r_offset
, relocation
, addend
);
4036 if (r
!= bfd_reloc_ok
)
4038 const char *msg
= (const char *) 0;
4042 case bfd_reloc_overflow
:
4043 if (!((*info
->callbacks
->reloc_overflow
)
4044 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
4045 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
4049 case bfd_reloc_undefined
:
4050 if (!((*info
->callbacks
->undefined_symbol
)
4051 (info
, sym_name
, input_bfd
, input_section
,
4052 rel
->r_offset
, TRUE
)))
4056 case bfd_reloc_outofrange
:
4057 msg
= _("internal error: out of range error");
4060 case bfd_reloc_notsupported
:
4061 msg
= _("internal error: unsupported relocation error");
4064 case bfd_reloc_dangerous
:
4065 msg
= _("internal error: dangerous error");
4069 msg
= _("internal error: unknown error");
4074 if (!((*info
->callbacks
->warning
)
4075 (info
, msg
, sym_name
, input_bfd
, input_section
,
4084 && emit_these_relocs
4085 && !info
->emitrelocations
)
4087 Elf_Internal_Rela
*wrel
;
4088 Elf_Internal_Shdr
*rel_hdr
;
4090 wrel
= rel
= relocs
;
4091 relend
= relocs
+ input_section
->reloc_count
;
4092 for (; rel
< relend
; rel
++)
4096 r_type
= ELF32_R_TYPE (rel
->r_info
);
4097 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4100 input_section
->reloc_count
= wrel
- relocs
;
4101 /* Backflips for _bfd_elf_link_output_relocs. */
4102 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
4103 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static bfd_boolean
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Only rewrite entry symbols in a final link that actually built
     overlay stubs, and only those following the "_SPUEAR_" naming
     convention for SPU entry points callable from the PPU side.  */
  if (!info->relocatable
      && htab->stub_sec != NULL
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct got_entry *g;

      /* Find the non-overlay (ovl == 0), zero-addend stub for this
	 symbol; that is the one an external caller should reach.  */
      for (g = h->got.glist; g != NULL; g = g->next)
	if (g->addend == 0 && g->ovl == 0)
	  {
	    /* Redirect the output symbol to the stub: section index of
	       the first stub section's output section, value of the
	       stub itself.  */
	    sym->st_shndx = (_bfd_elf_section_from_bfd_section
			     (htab->stub_sec[0]->output_section->owner,
			      htab->stub_sec[0]->output_section));
	    sym->st_value = g->stub_addr;
	    break;
	  }
    }

  return TRUE;
}
/* Non-zero when the output is being produced for the SPU linker
   plugin flow (an SPU image embedded in a PPU object).  */
static int spu_plugin = 0;

/* Record whether plugin-style output was requested; VAL is treated
   as a boolean flag.  Called by the linker front end.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
4153 /* Set ELF header e_type for plugins. */
4156 spu_elf_post_process_headers (bfd
*abfd
,
4157 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
4161 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
4163 i_ehdrp
->e_type
= ET_DYN
;
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */

static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  /* One extra program header per overlay region.  */
  int extra = htab->num_overlays;
  asection *sec;

  /* When overlays exist, one more header is needed for the
     non-overlay part split off by spu_elf_modify_segment_map.  */
  if (extra)
    ++extra;

  /* A loaded .toe section gets a PT_LOAD segment of its own.  */
  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  /* Nothing to do for a relocatable link; segments are not laid
     out in that case.  */
  if (info != NULL && info->relocatable)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      /* Scan a multi-section PT_LOAD for the first section that must
	 live in its own segment: .toe, or any overlay section.  */
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    if (i + 1 < m->count)
	      {
		/* Split the sections following S into a new PT_LOAD
		   map linked after M.  The elf_segment_map struct
		   already holds one sections[] slot, hence the
		   (i + 2) in the size calculation.  */
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		/* S was not first in M: keep the leading sections in
		   M and give S a fresh single-section PT_LOAD map.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* The newly created maps will be visited by the outer
	       loop via m->next, so restart scanning from them.  */
	    break;
	  }

  return TRUE;
}
4244 /* Tweak the section type of .note.spu_name. */
4247 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
4248 Elf_Internal_Shdr
*hdr
,
4251 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
4252 hdr
->sh_type
= SHT_NOTE
;
/* Tweak phdrs before writing them out.  */

static bfd_boolean
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count, i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* Walk segment maps in parallel with the phdr array; I indexes
	 the corresponding program header.  */
      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* Each _ovly_table entry is 16 bytes; file_off lives
		   at offset 8 within entry O (entries are 1-based).  */
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	/* -x & 15 is the pad needed to reach the next multiple of
	   16 (the DMA alignment requirement).  Bail out if padding
	   this segment's file image would run into the next one.  */
	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	/* Likewise for the memory image.  */
	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* I wrapped to (unsigned int) -1 only if the scan above finished
     without hitting a conflict, so rounding is safe.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
/* Target identification for the big-endian 32-bit SPU ELF vector.  */
#define TARGET_BIG_SYM		bfd_elf32_spu_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE		0x80
#define elf_backend_rela_normal		1
#define elf_backend_can_gc_sections	1

/* Relocation handling hooks.  */
#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create

/* Layout and header hooks (overlay segments, .toe, plugin e_type).  */
#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_program_headers	spu_elf_modify_program_headers
#define elf_backend_post_process_headers	spu_elf_post_process_headers
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link

#include "elf32-target.h"