1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
27 #include "elf32-spu.h"
29 /* We use RELA style relocs. Don't define USE_REL. */
31 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
35 /* Values of type 'enum elf_spu_reloc_type' are used to index this
36 array, so it must be declared in the order of that type. */
/* HOWTO argument order: type, rightshift, size, bitsize, pc_relative,
   bitpos, complain_on_overflow, special_function, name, partial_inplace,
   src_mask, dst_mask, pcrel_offset.  The two REL9 variants use the
   spu_elf_rel9 special function; everything else uses the generic
   bfd_elf_generic_reloc.  NOTE(review): the closing "};" of this table
   is not visible here — lost in extraction.  */
38 static reloc_howto_type elf_howto_table
[] = {
39 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
40 bfd_elf_generic_reloc
, "SPU_NONE",
41 FALSE
, 0, 0x00000000, FALSE
),
42 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
43 bfd_elf_generic_reloc
, "SPU_ADDR10",
44 FALSE
, 0, 0x00ffc000, FALSE
),
45 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
46 bfd_elf_generic_reloc
, "SPU_ADDR16",
47 FALSE
, 0, 0x007fff80, FALSE
),
48 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
49 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
50 FALSE
, 0, 0x007fff80, FALSE
),
51 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
52 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
53 FALSE
, 0, 0x007fff80, FALSE
),
54 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
55 bfd_elf_generic_reloc
, "SPU_ADDR18",
56 FALSE
, 0, 0x01ffff80, FALSE
),
57 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
58 bfd_elf_generic_reloc
, "SPU_ADDR32",
59 FALSE
, 0, 0xffffffff, FALSE
),
60 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
61 bfd_elf_generic_reloc
, "SPU_REL16",
62 FALSE
, 0, 0x007fff80, TRUE
),
63 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
64 bfd_elf_generic_reloc
, "SPU_ADDR7",
65 FALSE
, 0, 0x001fc000, FALSE
),
66 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
67 spu_elf_rel9
, "SPU_REL9",
68 FALSE
, 0, 0x0180007f, TRUE
),
69 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
70 spu_elf_rel9
, "SPU_REL9I",
71 FALSE
, 0, 0x0000c07f, TRUE
),
72 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
73 bfd_elf_generic_reloc
, "SPU_ADDR10I",
74 FALSE
, 0, 0x00ffc000, FALSE
),
75 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
76 bfd_elf_generic_reloc
, "SPU_ADDR16I",
77 FALSE
, 0, 0x007fff80, FALSE
),
78 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
79 bfd_elf_generic_reloc
, "SPU_REL32",
80 FALSE
, 0, 0xffffffff, TRUE
),
/* SPU-specific special sections: .toe is a nobits, allocated section.
   NOTE(review): the usual all-zero terminator entry and closing "};"
   are not visible here — presumed lost in extraction.  */
83 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
84 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
/* Map a BFD reloc code to the corresponding SPU ELF reloc type.
   NOTE(review): the switch statement opener, several return statements
   and the default case are missing from this extraction; only some
   case labels survive.  */
88 static enum elf_spu_reloc_type
89 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
95 case BFD_RELOC_SPU_IMM10W
:
97 case BFD_RELOC_SPU_IMM16W
:
99 case BFD_RELOC_SPU_LO16
:
100 return R_SPU_ADDR16_LO
;
101 case BFD_RELOC_SPU_HI16
:
102 return R_SPU_ADDR16_HI
;
103 case BFD_RELOC_SPU_IMM18
:
105 case BFD_RELOC_SPU_PCREL16
:
107 case BFD_RELOC_SPU_IMM7
:
109 case BFD_RELOC_SPU_IMM8
:
111 case BFD_RELOC_SPU_PCREL9a
:
113 case BFD_RELOC_SPU_PCREL9b
:
115 case BFD_RELOC_SPU_IMM10
:
116 return R_SPU_ADDR10I
;
117 case BFD_RELOC_SPU_IMM16
:
118 return R_SPU_ADDR16I
;
121 case BFD_RELOC_32_PCREL
:
/* Set the howto pointer in CACHE_PTR from the ELF reloc type stored in
   DST->r_info, asserting that the type is within range of the table.  */
127 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
129 Elf_Internal_Rela
*dst
)
131 enum elf_spu_reloc_type r_type
;
133 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
134 BFD_ASSERT (r_type
< R_SPU_max
);
135 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
/* Translate a BFD reloc code into a howto table entry, or (for
   R_SPU_NONE, the "no mapping" result) fall through to the elided
   failure path.  */
138 static reloc_howto_type
*
139 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
140 bfd_reloc_code_real_type code
)
142 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
144 if (r_type
== R_SPU_NONE
)
147 return elf_howto_table
+ r_type
;
/* Look up a howto table entry by reloc name (case-insensitive linear
   scan over the table).  */
150 static reloc_howto_type
*
151 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
156 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
157 if (elf_howto_table
[i
].name
!= NULL
158 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
159 return &elf_howto_table
[i
];
164 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
/* Special reloc function: computes the pc-relative value, range-checks
   it, splits its two high bits into the positions used by both the
   REL9 and REL9I instruction encodings, and patches the 32-bit insn
   in place (the howto dst_mask selects the right field).
   NOTE(review): several statements (local declarations, the shift that
   scales VAL before the range check) are elided in this extraction.  */
166 static bfd_reloc_status_type
167 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
168 void *data
, asection
*input_section
,
169 bfd
*output_bfd
, char **error_message
)
171 bfd_size_type octets
;
175 /* If this is a relocatable link (output_bfd test tells us), just
176 call the generic function. Any adjustment will be done at final
178 if (output_bfd
!= NULL
)
179 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
180 input_section
, output_bfd
, error_message
);
182 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
183 return bfd_reloc_outofrange
;
184 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
186 /* Get symbol value. */
188 if (!bfd_is_com_section (symbol
->section
))
190 if (symbol
->section
->output_section
)
191 val
+= symbol
->section
->output_section
->vma
;
193 val
+= reloc_entry
->addend
;
195 /* Make it pc-relative. */
196 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
199 if (val
+ 256 >= 512)
200 return bfd_reloc_overflow
;
202 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
204 /* Move two high bits of value to REL9I and REL9 position.
205 The mask will take care of selecting the right field. */
206 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
207 insn
&= ~reloc_entry
->howto
->dst_mask
;
208 insn
|= val
& reloc_entry
->howto
->dst_mask
;
209 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
/* Section hook: attach a zero-initialized _spu_elf_section_data to SEC
   (if not already present), then chain to the generic ELF hook.  */
214 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
216 if (!sec
->used_by_bfd
)
218 struct _spu_elf_section_data
*sdata
;
220 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
223 sec
->used_by_bfd
= sdata
;
226 return _bfd_elf_new_section_hook (abfd
, sec
);
229 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
230 strip --strip-unneeded will not remove them. */
233 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
235 if (sym
->name
!= NULL
236 && sym
->section
!= bfd_abs_section_ptr
237 && strncmp (sym
->name
, "_EAR_", 5) == 0)
238 sym
->flags
|= BSF_KEEP
;
241 /* SPU ELF linker hash table. */
/* Extends the generic ELF link hash table with overlay-stub state.
   The spu_hash_table() macro below casts info->hash to this type.
   NOTE(review): some members (e.g. the "stub"/"ovtab" sections used
   elsewhere via memset/offsetof tricks) are elided in this view.  */
243 struct spu_link_hash_table
245 struct elf_link_hash_table elf
;
247 /* The stub hash table. */
248 struct bfd_hash_table stub_hash_table
;
250 /* Shortcuts to overlay sections. */
254 struct elf_link_hash_entry
*ovly_load
;
256 /* An array of two output sections per overlay region, chosen such that
257 the first section vma is the overlay buffer vma (ie. the section has
258 the lowest vma in the group that occupy the region), and the second
259 section vma+size specifies the end of the region. We keep pointers
260 to sections like this because section vmas may change when laying
262 asection
**ovl_region
;
264 /* Number of overlay buffers. */
265 unsigned int num_buf
;
267 /* Total number of overlays. */
268 unsigned int num_overlays
;
270 /* Set if we should emit symbols for stubs. */
271 unsigned int emit_stub_syms
:1;
273 /* Set if we want stubs on calls out of overlay regions to
274 non-overlay regions. */
275 unsigned int non_overlay_stubs
: 1;
278 unsigned int stub_overflow
: 1;
280 /* Set if stack size analysis should be done. */
281 unsigned int stack_analysis
: 1;
283 /* Set if __stack_* syms will be emitted. */
284 unsigned int emit_stack_syms
: 1;
287 #define spu_hash_table(p) \
288 ((struct spu_link_hash_table *) ((p)->hash))
/* One entry per overlay call stub, keyed by the name built in
   spu_stub_name.  NOTE(review): members target_off, off and delta are
   referenced elsewhere in this file but their declarations are elided
   here (only the comments for them survive).  */
290 struct spu_stub_hash_entry
292 struct bfd_hash_entry root
;
294 /* Destination of this stub. */
295 asection
*target_section
;
298 /* Offset of entry in stub section. */
301 /* Offset from this stub to stub that loads the overlay index. */
305 /* Create an entry in a spu stub hash table. */
/* bfd_hash_table newfunc: allocate the derived entry if needed, let the
   base class initialize it, then clear the SPU-specific fields.  */
307 static struct bfd_hash_entry
*
308 stub_hash_newfunc (struct bfd_hash_entry
*entry
,
309 struct bfd_hash_table
*table
,
312 /* Allocate the structure if it has not already been allocated by a
316 entry
= bfd_hash_allocate (table
, sizeof (struct spu_stub_hash_entry
));
321 /* Call the allocation method of the superclass. */
322 entry
= bfd_hash_newfunc (entry
, table
, string
);
325 struct spu_stub_hash_entry
*sh
= (struct spu_stub_hash_entry
*) entry
;
327 sh
->target_section
= NULL
;
336 /* Create a spu ELF linker hash table. */
/* Allocate the derived table, init the base ELF hash table and the stub
   hash table, zero the remaining SPU-specific members (everything past
   the "stub" member, via offsetof), and return the root.  */
338 static struct bfd_link_hash_table
*
339 spu_elf_link_hash_table_create (bfd
*abfd
)
341 struct spu_link_hash_table
*htab
;
343 htab
= bfd_malloc (sizeof (*htab
));
347 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
348 _bfd_elf_link_hash_newfunc
,
349 sizeof (struct elf_link_hash_entry
)))
355 /* Init the stub hash table too. */
356 if (!bfd_hash_table_init (&htab
->stub_hash_table
, stub_hash_newfunc
,
357 sizeof (struct spu_stub_hash_entry
)))
360 memset (&htab
->stub
, 0,
361 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, stub
));
363 return &htab
->elf
.root
;
366 /* Free the derived linker hash table. */
/* Free the stub hash table first, then the generic table.  */
369 spu_elf_link_hash_table_free (struct bfd_link_hash_table
*hash
)
371 struct spu_link_hash_table
*ret
= (struct spu_link_hash_table
*) hash
;
373 bfd_hash_table_free (&ret
->stub_hash_table
);
374 _bfd_generic_link_hash_table_free (hash
);
377 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
378 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
379 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
/* For globals: index past sh_info into sym_hashes and follow
   indirect/warning links.  For locals: read (and cache) the local
   symtab; when *LOCSYMSP aliases symtab_hdr->contents, global syms are
   read too so later stack analysis can reuse them.
   NOTE(review): the out-parameter stores and several statements are
   elided in this extraction.  */
382 get_sym_h (struct elf_link_hash_entry
**hp
,
383 Elf_Internal_Sym
**symp
,
385 Elf_Internal_Sym
**locsymsp
,
386 unsigned long r_symndx
,
389 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
391 if (r_symndx
>= symtab_hdr
->sh_info
)
393 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
394 struct elf_link_hash_entry
*h
;
396 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
397 while (h
->root
.type
== bfd_link_hash_indirect
398 || h
->root
.type
== bfd_link_hash_warning
)
399 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
409 asection
*symsec
= NULL
;
410 if (h
->root
.type
== bfd_link_hash_defined
411 || h
->root
.type
== bfd_link_hash_defweak
)
412 symsec
= h
->root
.u
.def
.section
;
418 Elf_Internal_Sym
*sym
;
419 Elf_Internal_Sym
*locsyms
= *locsymsp
;
423 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
426 size_t symcount
= symtab_hdr
->sh_info
;
428 /* If we are reading symbols into the contents, then
429 read the global syms too. This is done to cache
430 syms for later stack analysis. */
431 if ((unsigned char **) locsymsp
== &symtab_hdr
->contents
)
432 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
433 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
440 sym
= locsyms
+ r_symndx
;
450 asection
*symsec
= NULL
;
451 if ((sym
->st_shndx
!= SHN_UNDEF
452 && sym
->st_shndx
< SHN_LORESERVE
)
453 || sym
->st_shndx
> SHN_HIRESERVE
)
454 symsec
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
462 /* Build a name for an entry in the stub hash table. We can't use a
463 local symbol name because ld -r might generate duplicate local symbols. */
/* Global syms: "<name>+<addend>".  Locals: "<secid>:<symndx>+<addend>".
   A trailing "+0" is stripped so zero-addend entries get the short
   form.  Caller frees the returned bfd_malloc'd string; NULL on
   allocation failure.  */
466 spu_stub_name (const asection
*sym_sec
,
467 const struct elf_link_hash_entry
*h
,
468 const Elf_Internal_Rela
*rel
)
475 len
= strlen (h
->root
.root
.string
) + 1 + 8 + 1;
476 stub_name
= bfd_malloc (len
);
477 if (stub_name
== NULL
)
480 sprintf (stub_name
, "%s+%x",
482 (int) rel
->r_addend
& 0xffffffff);
487 len
= 8 + 1 + 8 + 1 + 8 + 1;
488 stub_name
= bfd_malloc (len
);
489 if (stub_name
== NULL
)
492 sprintf (stub_name
, "%x:%x+%x",
493 sym_sec
->id
& 0xffffffff,
494 (int) ELF32_R_SYM (rel
->r_info
) & 0xffffffff,
495 (int) rel
->r_addend
& 0xffffffff);
496 len
= strlen (stub_name
);
499 if (stub_name
[len
- 2] == '+'
500 && stub_name
[len
- 1] == '0'
501 && stub_name
[len
] == 0)
502 stub_name
[len
- 2] = 0;
507 /* Create the note section if not already present. This is done early so
508 that the linker maps the sections to the right place in the output. */
/* Also stashes the stack_analysis / emit_stack_syms options in the hash
   table.  The note payload is: namesz, descsz, type=1, then the
   4-byte-padded SPU_PLUGIN_NAME and output filename.
   NOTE(review): error-return paths and some declarations are elided.  */
511 spu_elf_create_sections (bfd
*output_bfd
,
512 struct bfd_link_info
*info
,
517 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
519 /* Stash some options away where we can get at them later. */
520 htab
->stack_analysis
= stack_analysis
;
521 htab
->emit_stack_syms
= emit_stack_syms
;
523 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->next
)
524 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
529 /* Make SPU_PTNOTE_SPUNAME section. */
536 ibfd
= info
->input_bfds
;
537 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
538 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
540 || !bfd_set_section_alignment (ibfd
, s
, 4))
543 name_len
= strlen (bfd_get_filename (output_bfd
)) + 1;
544 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
545 size
+= (name_len
+ 3) & -4;
547 if (!bfd_set_section_size (ibfd
, s
, size
))
550 data
= bfd_zalloc (ibfd
, size
);
554 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
555 bfd_put_32 (ibfd
, name_len
, data
+ 4);
556 bfd_put_32 (ibfd
, 1, data
+ 8);
557 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
558 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
559 bfd_get_filename (output_bfd
), name_len
);
566 /* qsort predicate to sort sections by vma. */
/* Ties on vma are broken by section index, keeping the sort stable
   with respect to creation order.  */
569 sort_sections (const void *a
, const void *b
)
571 const asection
*const *s1
= a
;
572 const asection
*const *s2
= b
;
573 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
576 return delta
< 0 ? -1 : 1;
578 return (*s1
)->index
- (*s2
)->index
;
581 /* Identify overlays in the output bfd, and number them. */
/* Collect allocated sections, sort by vma, and treat any vma overlap as
   an overlay: assign 1-based ovl_index numbers, count overlay buffers,
   and record per-buffer [lowest-vma, highest-end-vma] section pairs in
   htab->ovl_region.  NOTE(review): several statements (num_buf
   increment, loop-body assignment of s, early returns) are elided in
   this extraction, so the buffer-indexing arithmetic here cannot be
   fully checked from this view.  */
584 spu_elf_find_overlays (bfd
*output_bfd
, struct bfd_link_info
*info
)
586 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
587 asection
**alloc_sec
;
588 unsigned int i
, n
, ovl_index
, num_buf
;
592 if (output_bfd
->section_count
< 2)
595 alloc_sec
= bfd_malloc (output_bfd
->section_count
* sizeof (*alloc_sec
));
596 if (alloc_sec
== NULL
)
599 /* Pick out all the alloced sections. */
600 for (n
= 0, s
= output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
601 if ((s
->flags
& SEC_ALLOC
) != 0
602 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
612 /* Sort them by vma. */
613 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
615 /* Look for overlapping vmas. Any with overlap must be overlays.
616 Count them. Also count the number of overlay regions and for
617 each region save a section from that region with the lowest vma
618 and another section with the highest end vma. */
619 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
620 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
623 if (s
->vma
< ovl_end
)
625 asection
*s0
= alloc_sec
[i
- 1];
627 if (spu_elf_section_data (s0
)->ovl_index
== 0)
629 spu_elf_section_data (s0
)->ovl_index
= ++ovl_index
;
630 alloc_sec
[num_buf
* 2] = s0
;
631 alloc_sec
[num_buf
* 2 + 1] = s0
;
634 spu_elf_section_data (s
)->ovl_index
= ++ovl_index
;
635 if (ovl_end
< s
->vma
+ s
->size
)
637 ovl_end
= s
->vma
+ s
->size
;
638 alloc_sec
[num_buf
* 2 - 1] = s
;
642 ovl_end
= s
->vma
+ s
->size
;
645 htab
->num_overlays
= ovl_index
;
646 htab
->num_buf
= num_buf
;
653 alloc_sec
= bfd_realloc (alloc_sec
, num_buf
* 2 * sizeof (*alloc_sec
));
654 if (alloc_sec
== NULL
)
657 htab
->ovl_region
= alloc_sec
;
661 /* One of these per stub. */
/* Stub sizes are in bytes: each stub is two 4-byte SPU instructions
   (see write_one_stub, which emits an ila/br pair per stub and an
   ila/br pair per group).  */
662 #define SIZEOF_STUB1 8
663 #define ILA_79 0x4200004f /* ila $79,function_address */
664 #define BR 0x32000000 /* br stub2 */
666 /* One of these per overlay. */
667 #define SIZEOF_STUB2 8
668 #define ILA_78 0x4200004e /* ila $78,overlay_number */
670 #define NOP 0x40200000
672 /* Return true for all relative and absolute branch instructions.
680 brhnz 00100011 0.. */
/* INSN points at the big-endian instruction bytes; the opcode pattern
   is tested directly on the first two bytes.  */
683 is_branch (const unsigned char *insn
)
685 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
688 /* Return true for branch hint instructions.
/* Tests the 6-bit major opcode in the first byte.  */
693 is_hint (const unsigned char *insn
)
695 return (insn
[0] & 0xfc) == 0x10;
698 /* Return TRUE if this reloc symbol should possibly go via an overlay stub. */
/* FALSE when there are no overlays, when the target section is not in
   the output, or (unless non_overlay_stubs is set) when the target is
   outside any overlay.  TRUE for setjmp and for cross-overlay
   references.  NOTE(review): return statements and the final
   !is_branch handling are elided in this extraction.  */
701 needs_ovl_stub (const char *sym_name
,
703 asection
*input_section
,
704 struct spu_link_hash_table
*htab
,
705 bfd_boolean is_branch
)
707 if (htab
->num_overlays
== 0)
711 || sym_sec
->output_section
== NULL
712 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
715 /* setjmp always goes via an overlay stub, because then the return
716 and hence the longjmp goes via __ovly_return. That magically
717 makes setjmp/longjmp between overlays work. */
718 if (strncmp (sym_name
, "setjmp", 6) == 0
719 && (sym_name
[6] == '\0' || sym_name
[6] == '@'))
722 /* Usually, symbols in non-overlay sections don't need stubs. */
723 if (spu_elf_section_data (sym_sec
->output_section
)->ovl_index
== 0
724 && !htab
->non_overlay_stubs
)
727 /* A reference from some other section to a symbol in an overlay
728 section needs a stub. */
729 if (spu_elf_section_data (sym_sec
->output_section
)->ovl_index
730 != spu_elf_section_data (input_section
->output_section
)->ovl_index
)
733 /* If this insn isn't a branch then we are possibly taking the
734 address of a function and passing it out somehow. */
/* Interior of struct stubarr (the struct header line is elided in this
   extraction): the stub hash table, a flat array of stub entries filled
   by populate_stubs, and — presumably — a count member used elsewhere;
   confirm against the full source.  */
739 struct bfd_hash_table
*stub_hash_table
;
740 struct spu_stub_hash_entry
**sh
;
745 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
/* INF is a struct stubarr.  Defined/defweak symbols whose names begin
   with "_SPUEAR_" get a stub entry keyed by name+0 addend; an existing
   (target_section already set) entry is left alone.  NOTE(review): the
   error path for a failed hash lookup and the count update are elided
   in this extraction.  */
749 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
751 /* Symbols starting with _SPUEAR_ need a stub because they may be
752 invoked by the PPU. */
753 if ((h
->root
.type
== bfd_link_hash_defined
754 || h
->root
.type
== bfd_link_hash_defweak
)
756 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
758 struct stubarr
*stubs
= inf
;
759 static Elf_Internal_Rela zero_rel
;
760 char *stub_name
= spu_stub_name (h
->root
.u
.def
.section
, h
, &zero_rel
);
761 struct spu_stub_hash_entry
*sh
;
763 if (stub_name
== NULL
)
769 sh
= (struct spu_stub_hash_entry
*)
770 bfd_hash_lookup (stubs
->stub_hash_table
, stub_name
, TRUE
, FALSE
);
777 /* If this entry isn't new, we already have a stub. */
778 if (sh
->target_section
!= NULL
)
784 sh
->target_section
= h
->root
.u
.def
.section
;
785 sh
->target_off
= h
->root
.u
.def
.value
;
792 /* Called via bfd_hash_traverse to set up pointers to all symbols
793 in the stub hash table. */
/* Fills stubs->sh back-to-front, decrementing stubs->count as it goes
   (so count reaches 0 when every entry has been recorded).  */
796 populate_stubs (struct bfd_hash_entry
*bh
, void *inf
)
798 struct stubarr
*stubs
= inf
;
800 stubs
->sh
[--stubs
->count
] = (struct spu_stub_hash_entry
*) bh
;
804 /* qsort predicate to sort stubs by overlay number. */
/* Primary key: overlay index of the target's output section.
   Secondary: target address.  Final tie-break: stub name, so that
   aliases at the same address sort deterministically.
   NOTE(review): the returns for the overlay-index comparison and the
   target_off term of *sa are elided in this extraction.  */
807 sort_stubs (const void *a
, const void *b
)
809 const struct spu_stub_hash_entry
*const *sa
= a
;
810 const struct spu_stub_hash_entry
*const *sb
= b
;
814 i
= spu_elf_section_data ((*sa
)->target_section
->output_section
)->ovl_index
;
815 i
-= spu_elf_section_data ((*sb
)->target_section
->output_section
)->ovl_index
;
819 d
= ((*sa
)->target_section
->output_section
->vma
820 + (*sa
)->target_section
->output_offset
822 - (*sb
)->target_section
->output_section
->vma
823 - (*sb
)->target_section
->output_offset
824 - (*sb
)->target_off
);
826 return d
< 0 ? -1 : 1;
828 /* Two functions at the same address. Aliases perhaps. */
829 i
= strcmp ((*sb
)->root
.string
, (*sa
)->root
.string
);
834 /* Allocate space for overlay call and return stubs. */
/* Pass over every reloc of every SPU input bfd: classify the insn
   (branch / hint / call), decide via needs_ovl_stub whether the target
   needs an overlay stub, and create stub hash entries.  Then create
   the .stub, .ovtab and .toe output sections, sort the stubs, lay them
   out (sharing a SIZEOF_STUB2 trailer per overlay group), and size
   .ovtab as num_overlays*16 + num_buf*4.  NOTE(review): many
   statements (declarations, "continue"s, returns, loop bodies) are
   elided in this extraction, so control flow cannot be fully verified
   from this view.  */
837 spu_elf_size_stubs (bfd
*output_bfd
,
838 struct bfd_link_info
*info
,
839 int non_overlay_stubs
,
845 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
847 struct stubarr stubs
;
851 htab
->non_overlay_stubs
= non_overlay_stubs
;
852 stubs
.stub_hash_table
= &htab
->stub_hash_table
;
855 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
857 extern const bfd_target bfd_elf32_spu_vec
;
858 Elf_Internal_Shdr
*symtab_hdr
;
860 Elf_Internal_Sym
*local_syms
= NULL
;
861 Elf_Internal_Sym
**psyms
;
863 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
866 /* We'll need the symbol table in a second. */
867 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
868 if (symtab_hdr
->sh_info
== 0)
871 /* Arrange to read and keep global syms for later stack analysis. */
874 psyms
= (Elf_Internal_Sym
**) &symtab_hdr
->contents
;
876 /* Walk over each section attached to the input bfd. */
877 for (section
= ibfd
->sections
; section
!= NULL
; section
= section
->next
)
879 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
881 /* If there aren't any relocs, then there's nothing more to do. */
882 if ((section
->flags
& SEC_RELOC
) == 0
883 || (section
->flags
& SEC_ALLOC
) == 0
884 || (section
->flags
& SEC_LOAD
) == 0
885 || section
->reloc_count
== 0)
888 /* If this section is a link-once section that will be
889 discarded, then don't create any stubs. */
890 if (section
->output_section
== NULL
891 || section
->output_section
->owner
!= output_bfd
)
894 /* Get the relocs. */
896 = _bfd_elf_link_read_relocs (ibfd
, section
, NULL
, NULL
,
898 if (internal_relocs
== NULL
)
899 goto error_ret_free_local
;
901 /* Now examine each relocation. */
902 irela
= internal_relocs
;
903 irelaend
= irela
+ section
->reloc_count
;
904 for (; irela
< irelaend
; irela
++)
906 enum elf_spu_reloc_type r_type
;
909 Elf_Internal_Sym
*sym
;
910 struct elf_link_hash_entry
*h
;
911 const char *sym_name
;
913 struct spu_stub_hash_entry
*sh
;
914 unsigned int sym_type
;
915 enum _insn_type
{ non_branch
, branch
, call
} insn_type
;
917 r_type
= ELF32_R_TYPE (irela
->r_info
);
918 r_indx
= ELF32_R_SYM (irela
->r_info
);
920 if (r_type
>= R_SPU_max
)
922 bfd_set_error (bfd_error_bad_value
);
923 goto error_ret_free_internal
;
926 /* Determine the reloc target section. */
927 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, ibfd
))
928 goto error_ret_free_internal
;
931 || sym_sec
->output_section
== NULL
932 || sym_sec
->output_section
->owner
!= output_bfd
)
935 /* Ensure no stubs for user supplied overlay manager syms. */
937 && (strcmp (h
->root
.root
.string
, "__ovly_load") == 0
938 || strcmp (h
->root
.root
.string
, "__ovly_return") == 0))
941 insn_type
= non_branch
;
942 if (r_type
== R_SPU_REL16
943 || r_type
== R_SPU_ADDR16
)
945 unsigned char insn
[4];
947 if (!bfd_get_section_contents (ibfd
, section
, insn
,
949 goto error_ret_free_internal
;
951 if (is_branch (insn
) || is_hint (insn
))
/* Bit pattern distinguishing "branch and set link" forms, i.e.
   calls, from plain branches — presumably brsl/brasl; confirm
   against the SPU ISA.  */
954 if ((insn
[0] & 0xfd) == 0x31)
959 /* We are only interested in function symbols. */
963 sym_name
= h
->root
.root
.string
;
967 sym_type
= ELF_ST_TYPE (sym
->st_info
);
968 sym_name
= bfd_elf_sym_name (sym_sec
->owner
,
973 if (sym_type
!= STT_FUNC
)
975 /* It's common for people to write assembly and forget
976 to give function symbols the right type. Handle
977 calls to such symbols, but warn so that (hopefully)
978 people will fix their code. We need the symbol
979 type to be correct to distinguish function pointer
980 initialisation from other pointer initialisation. */
981 if (insn_type
== call
)
982 (*_bfd_error_handler
) (_("warning: call to non-function"
983 " symbol %s defined in %B"),
984 sym_sec
->owner
, sym_name
);
989 if (!needs_ovl_stub (sym_name
, sym_sec
, section
, htab
,
990 insn_type
!= non_branch
))
993 stub_name
= spu_stub_name (sym_sec
, h
, irela
);
994 if (stub_name
== NULL
)
995 goto error_ret_free_internal
;
997 sh
= (struct spu_stub_hash_entry
*)
998 bfd_hash_lookup (&htab
->stub_hash_table
, stub_name
,
1003 error_ret_free_internal
:
1004 if (elf_section_data (section
)->relocs
!= internal_relocs
)
1005 free (internal_relocs
);
1006 error_ret_free_local
:
1007 if (local_syms
!= NULL
1008 && (symtab_hdr
->contents
1009 != (unsigned char *) local_syms
))
1014 /* If this entry isn't new, we already have a stub. */
1015 if (sh
->target_section
!= NULL
)
1021 sh
->target_section
= sym_sec
;
1023 sh
->target_off
= h
->root
.u
.def
.value
;
1025 sh
->target_off
= sym
->st_value
;
1026 sh
->target_off
+= irela
->r_addend
;
1031 /* We're done with the internal relocs, free them. */
1032 if (elf_section_data (section
)->relocs
!= internal_relocs
)
1033 free (internal_relocs
);
1036 if (local_syms
!= NULL
1037 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1039 if (!info
->keep_memory
)
1042 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1046 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, &stubs
);
1051 if (stubs
.count
== 0)
1054 ibfd
= info
->input_bfds
;
1055 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1056 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1057 htab
->stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1059 if (htab
->stub
== NULL
1060 || !bfd_set_section_alignment (ibfd
, htab
->stub
, 2))
1063 flags
= (SEC_ALLOC
| SEC_LOAD
1064 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1065 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1066 *ovtab
= htab
->ovtab
;
/* NOTE(review): this NULL-check/alignment pair guards htab->ovtab but
   sets the alignment of htab->stub (already aligned to 2 just above).
   Looks like it should be bfd_set_section_alignment (ibfd, htab->ovtab, 4)
   — confirm against upstream binutils.  */
1067 if (htab
->ovtab
== NULL
1068 || !bfd_set_section_alignment (ibfd
, htab
->stub
, 4))
1071 *toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1073 || !bfd_set_section_alignment (ibfd
, *toe
, 4))
1077 /* Retrieve all the stubs and sort. */
1078 stubs
.sh
= bfd_malloc (stubs
.count
* sizeof (*stubs
.sh
));
1079 if (stubs
.sh
== NULL
)
1082 bfd_hash_traverse (&htab
->stub_hash_table
, populate_stubs
, &stubs
);
1083 BFD_ASSERT (stubs
.count
== 0);
1086 qsort (stubs
.sh
, stubs
.count
, sizeof (*stubs
.sh
), sort_stubs
);
1088 /* Now that the stubs are sorted, place them in the stub section.
1089 Stubs are grouped per overlay
1103 for (i
= 0; i
< stubs
.count
; i
++)
1105 if (spu_elf_section_data (stubs
.sh
[group
]->target_section
1106 ->output_section
)->ovl_index
1107 != spu_elf_section_data (stubs
.sh
[i
]->target_section
1108 ->output_section
)->ovl_index
)
1110 htab
->stub
->size
+= SIZEOF_STUB2
;
1111 for (; group
!= i
; group
++)
1112 stubs
.sh
[group
]->delta
1113 = stubs
.sh
[i
- 1]->off
- stubs
.sh
[group
]->off
;
1116 || ((stubs
.sh
[i
- 1]->target_section
->output_section
->vma
1117 + stubs
.sh
[i
- 1]->target_section
->output_offset
1118 + stubs
.sh
[i
- 1]->target_off
)
1119 != (stubs
.sh
[i
]->target_section
->output_section
->vma
1120 + stubs
.sh
[i
]->target_section
->output_offset
1121 + stubs
.sh
[i
]->target_off
)))
1123 stubs
.sh
[i
]->off
= htab
->stub
->size
;
1124 htab
->stub
->size
+= SIZEOF_STUB1
;
1127 stubs
.sh
[i
]->off
= stubs
.sh
[i
- 1]->off
;
1130 htab
->stub
->size
+= SIZEOF_STUB2
;
1131 for (; group
!= i
; group
++)
1132 stubs
.sh
[group
]->delta
= stubs
.sh
[i
- 1]->off
- stubs
.sh
[group
]->off
;
1134 /* htab->ovtab consists of two arrays.
1144 . } _ovly_buf_table[]; */
1146 htab
->ovtab
->alignment_power
= 4;
1147 htab
->ovtab
->size
= htab
->num_overlays
* 16 + htab
->num_buf
* 4;
1152 /* Functions to handle embedded spu_ovl.o object. */
/* bfd_openr_iovec "open" callback: the stream pointer is passed through
   unchanged (body elided in this extraction).  */
1155 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
/* bfd_openr_iovec "pread" callback: copy up to COUNT bytes from the
   in-memory [start, end) image described by the _ovl_stream, clamping
   at the image size.  */
1161 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1167 struct _ovl_stream
*os
;
1171 os
= (struct _ovl_stream
*) stream
;
1172 max
= (const char *) os
->end
- (const char *) os
->start
;
1174 if ((ufile_ptr
) offset
>= max
)
1178 if (count
> max
- offset
)
1179 count
= max
- offset
;
1181 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
/* Open the built-in overlay manager object via the iovec callbacks
   above; returns TRUE iff the bfd was created.  */
1186 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1188 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1195 return *ovl_bfd
!= NULL
;
1198 /* Fill in the ila and br for a stub. On the last stub for a group,
1199 write the stub that sets the overlay number too. */
/* Called via bfd_hash_traverse with the hash table as INF.  Emits
   "ila $79,target; br" for the stub, and for the last stub of a group
   (delta == 0) additionally "ila $78,ovl_index; br __ovly_load",
   flagging stub_overflow if the branch displacement won't fit.  When
   emit_stub_syms is set, defines a local "00000000.ovl_call.<name>"
   symbol covering the stub.  NOTE(review): some lines (including the
   context around the NOP store at off+4) are elided, so the exact
   last-stub insn layout cannot be confirmed from this view.  */
1202 write_one_stub (struct bfd_hash_entry
*bh
, void *inf
)
1204 struct spu_stub_hash_entry
*ent
= (struct spu_stub_hash_entry
*) bh
;
1205 struct spu_link_hash_table
*htab
= inf
;
1206 asection
*sec
= htab
->stub
;
1207 asection
*s
= ent
->target_section
;
1211 val
= ent
->target_off
+ s
->output_offset
+ s
->output_section
->vma
;
1212 bfd_put_32 (sec
->owner
, ILA_79
+ ((val
<< 7) & 0x01ffff80),
1213 sec
->contents
+ ent
->off
);
1214 val
= ent
->delta
+ 4;
1215 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
1216 sec
->contents
+ ent
->off
+ 4);
1218 /* If this is the last stub of this group, write stub2. */
1219 if (ent
->delta
== 0)
1221 bfd_put_32 (sec
->owner
, NOP
,
1222 sec
->contents
+ ent
->off
+ 4);
1224 ovl
= spu_elf_section_data (s
->output_section
)->ovl_index
;
1225 bfd_put_32 (sec
->owner
, ILA_78
+ ((ovl
<< 7) & 0x01ffff80),
1226 sec
->contents
+ ent
->off
+ 8);
1228 val
= (htab
->ovly_load
->root
.u
.def
.section
->output_section
->vma
1229 + htab
->ovly_load
->root
.u
.def
.section
->output_offset
1230 + htab
->ovly_load
->root
.u
.def
.value
1231 - (sec
->output_section
->vma
1232 + sec
->output_offset
1235 if (val
+ 0x20000 >= 0x40000)
1236 htab
->stub_overflow
= TRUE
;
1238 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
1239 sec
->contents
+ ent
->off
+ 12);
1242 if (htab
->emit_stub_syms
)
1244 struct elf_link_hash_entry
*h
;
1248 len1
= sizeof ("00000000.ovl_call.") - 1;
1249 len2
= strlen (ent
->root
.string
);
1250 name
= bfd_malloc (len1
+ len2
+ 1);
1253 memcpy (name
, "00000000.ovl_call.", len1
);
1254 memcpy (name
+ len1
, ent
->root
.string
, len2
+ 1);
1255 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1259 if (h
->root
.type
== bfd_link_hash_new
)
1261 h
->root
.type
= bfd_link_hash_defined
;
1262 h
->root
.u
.def
.section
= sec
;
1263 h
->root
.u
.def
.value
= ent
->off
;
1264 h
->size
= (ent
->delta
== 0
1265 ? SIZEOF_STUB1
+ SIZEOF_STUB2
: SIZEOF_STUB1
);
1269 h
->ref_regular_nonweak
= 1;
1270 h
->forced_local
= 1;
1278 /* Define an STT_OBJECT symbol. */
/* Define NAME in the .ovtab section as a local STT_OBJECT symbol.
   If the symbol is already defined by an input object, report the
   error ("%B is not allowed to define %s") and fail.  */
1280 static struct elf_link_hash_entry
*
1281 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1283 struct elf_link_hash_entry
*h
;
1285 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1289 if (h
->root
.type
!= bfd_link_hash_defined
1292 h
->root
.type
= bfd_link_hash_defined
;
1293 h
->root
.u
.def
.section
= htab
->ovtab
;
1294 h
->type
= STT_OBJECT
;
1297 h
->ref_regular_nonweak
= 1;
1302 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1303 h
->root
.u
.def
.section
->owner
,
1304 h
->root
.root
.string
);
1305 bfd_set_error (bfd_error_bad_value
);
1312 /* Fill in all stubs and the overlay tables. */
/* Requires __ovly_load to be defined outside any overlay.  Writes all
   stubs via write_one_stub, failing if any branch overflowed; then
   fills _ovly_table (16 bytes per overlay: vma, rounded size, file_off
   placeholder, buffer number found by binary search of ovl_region) and
   a zeroed _ovly_buf_table, and defines the _ovly_table*,
   _ovly_buf_table* and _EAR_ symbols.  NOTE(review): error returns,
   loop setup for the binary search, and several symbol-definition
   checks are elided in this extraction.  */
1315 spu_elf_build_stubs (struct bfd_link_info
*info
, int emit_syms
, asection
*toe
)
1317 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1318 struct elf_link_hash_entry
*h
;
1324 htab
->emit_stub_syms
= emit_syms
;
1325 htab
->stub
->contents
= bfd_zalloc (htab
->stub
->owner
, htab
->stub
->size
);
1326 if (htab
->stub
->contents
== NULL
)
1329 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load", FALSE
, FALSE
, FALSE
);
1330 htab
->ovly_load
= h
;
1331 BFD_ASSERT (h
!= NULL
1332 && (h
->root
.type
== bfd_link_hash_defined
1333 || h
->root
.type
== bfd_link_hash_defweak
)
1336 s
= h
->root
.u
.def
.section
->output_section
;
1337 if (spu_elf_section_data (s
)->ovl_index
)
1339 (*_bfd_error_handler
) (_("%s in overlay section"),
1340 h
->root
.u
.def
.section
->owner
);
1341 bfd_set_error (bfd_error_bad_value
);
1345 /* Write out all the stubs. */
1346 bfd_hash_traverse (&htab
->stub_hash_table
, write_one_stub
, htab
);
1348 if (htab
->stub_overflow
)
1350 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1351 bfd_set_error (bfd_error_bad_value
);
1355 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1356 if (htab
->ovtab
->contents
== NULL
)
1359 /* Write out _ovly_table. */
1360 p
= htab
->ovtab
->contents
;
1361 obfd
= htab
->ovtab
->output_section
->owner
;
1362 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1364 unsigned int ovl_index
= spu_elf_section_data (s
)->ovl_index
;
1368 unsigned int lo
, hi
, mid
;
1369 unsigned long off
= (ovl_index
- 1) * 16;
1370 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1371 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16, p
+ off
+ 4);
1372 /* file_off written later in spu_elf_modify_program_headers. */
/* Binary search ovl_region for the buffer containing this overlay;
   each buffer is described by a [low-vma section, high-end section]
   pair.  */
1378 mid
= (lo
+ hi
) >> 1;
1379 if (htab
->ovl_region
[2 * mid
+ 1]->vma
1380 + htab
->ovl_region
[2 * mid
+ 1]->size
<= s
->vma
)
1382 else if (htab
->ovl_region
[2 * mid
]->vma
> s
->vma
)
1386 bfd_put_32 (htab
->ovtab
->owner
, mid
+ 1, p
+ off
+ 12);
1390 BFD_ASSERT (lo
< hi
);
1394 /* Write out _ovly_buf_table. */
1395 p
= htab
->ovtab
->contents
+ htab
->num_overlays
* 16;
1396 for (i
= 0; i
< htab
->num_buf
; i
++)
1398 bfd_put_32 (htab
->ovtab
->owner
, 0, p
);
1402 h
= define_ovtab_symbol (htab
, "_ovly_table");
1405 h
->root
.u
.def
.value
= 0;
1406 h
->size
= htab
->num_overlays
* 16;
1408 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
1411 h
->root
.u
.def
.value
= htab
->num_overlays
* 16;
1414 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
1417 h
->root
.u
.def
.value
= htab
->num_overlays
* 16;
1418 h
->size
= htab
->num_buf
* 4;
1420 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
1423 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + htab
->num_buf
* 4;
1426 h
= define_ovtab_symbol (htab
, "_EAR_");
1429 h
->root
.u
.def
.section
= toe
;
1430 h
->root
.u
.def
.value
= 0;
1436 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1437 Search for stack adjusting insns, and return the sp delta. */
1440 find_function_stack_adjust (asection
*sec
, bfd_vma offset
)
1445 memset (reg
, 0, sizeof (reg
));
1446 for (unrecog
= 0; offset
+ 4 <= sec
->size
&& unrecog
< 32; offset
+= 4)
1448 unsigned char buf
[4];
1452 /* Assume no relocs on stack adjusing insns. */
1453 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
1456 if (buf
[0] == 0x24 /* stqd */)
1460 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
1461 /* Partly decoded immediate field. */
1462 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
1464 if (buf
[0] == 0x1c /* ai */)
1467 imm
= (imm
^ 0x200) - 0x200;
1468 reg
[rt
] = reg
[ra
] + imm
;
1470 if (rt
== 1 /* sp */)
1477 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
1479 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
1481 reg
[rt
] = reg
[ra
] + reg
[rb
];
1485 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1487 if (buf
[0] >= 0x42 /* ila */)
1488 imm
|= (buf
[0] & 1) << 17;
1493 if (buf
[0] == 0x40 /* il */)
1495 if ((buf
[1] & 0x80) == 0)
1497 imm
= (imm
^ 0x8000) - 0x8000;
1499 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
1505 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
1507 reg
[rt
] |= imm
& 0xffff;
1510 else if (buf
[0] == 0x04 /* ori */)
1513 imm
= (imm
^ 0x200) - 0x200;
1514 reg
[rt
] = reg
[ra
] | imm
;
1517 else if ((buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
1518 || (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */))
1520 /* Used in pic reg load. Say rt is trashed. */
1524 else if (is_branch (buf
))
1525 /* If we hit a branch then we must be out of the prologue. */
1534 /* qsort predicate to sort symbols by section and value. */
1536 static Elf_Internal_Sym
*sort_syms_syms
;
1537 static asection
**sort_syms_psecs
;
1540 sort_syms (const void *a
, const void *b
)
1542 Elf_Internal_Sym
*const *s1
= a
;
1543 Elf_Internal_Sym
*const *s2
= b
;
1544 asection
*sec1
,*sec2
;
1545 bfd_signed_vma delta
;
1547 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
1548 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
1551 return sec1
->index
- sec2
->index
;
1553 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
1555 return delta
< 0 ? -1 : 1;
1557 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
1559 return delta
< 0 ? -1 : 1;
1561 return *s1
< *s2
? -1 : 1;
1566 struct function_info
*fun
;
1567 struct call_info
*next
;
1571 struct function_info
1573 /* List of functions called. Also branches to hot/cold part of
1575 struct call_info
*call_list
;
1576 /* For hot/cold part of function, point to owner. */
1577 struct function_info
*start
;
1578 /* Symbol at start of function. */
1580 Elf_Internal_Sym
*sym
;
1581 struct elf_link_hash_entry
*h
;
1583 /* Function section. */
1585 /* Address range of (this part of) function. */
1589 /* Set if global symbol. */
1590 unsigned int global
: 1;
1591 /* Set if known to be start of function (as distinct from a hunk
1592 in hot/cold section. */
1593 unsigned int is_func
: 1;
1594 /* Flags used during call tree traversal. */
1595 unsigned int visit1
: 1;
1596 unsigned int non_root
: 1;
1597 unsigned int visit2
: 1;
1598 unsigned int marking
: 1;
1599 unsigned int visit3
: 1;
1602 struct spu_elf_stack_info
1606 /* Variable size array describing functions, one per contiguous
1607 address range belonging to a function. */
1608 struct function_info fun
[1];
1611 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1612 entries for section SEC. */
1614 static struct spu_elf_stack_info
*
1615 alloc_stack_info (asection
*sec
, int max_fun
)
1617 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1620 amt
= sizeof (struct spu_elf_stack_info
);
1621 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
1622 sec_data
->stack_info
= bfd_zmalloc (amt
);
1623 if (sec_data
->stack_info
!= NULL
)
1624 sec_data
->stack_info
->max_fun
= max_fun
;
1625 return sec_data
->stack_info
;
1628 /* Add a new struct function_info describing a (part of a) function
1629 starting at SYM_H. Keep the array sorted by address. */
1631 static struct function_info
*
1632 maybe_insert_function (asection
*sec
,
1635 bfd_boolean is_func
)
1637 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1638 struct spu_elf_stack_info
*sinfo
= sec_data
->stack_info
;
1644 sinfo
= alloc_stack_info (sec
, 20);
1651 Elf_Internal_Sym
*sym
= sym_h
;
1652 off
= sym
->st_value
;
1653 size
= sym
->st_size
;
1657 struct elf_link_hash_entry
*h
= sym_h
;
1658 off
= h
->root
.u
.def
.value
;
1662 for (i
= sinfo
->num_fun
; --i
>= 0; )
1663 if (sinfo
->fun
[i
].lo
<= off
)
1668 /* Don't add another entry for an alias, but do update some
1670 if (sinfo
->fun
[i
].lo
== off
)
1672 /* Prefer globals over local syms. */
1673 if (global
&& !sinfo
->fun
[i
].global
)
1675 sinfo
->fun
[i
].global
= TRUE
;
1676 sinfo
->fun
[i
].u
.h
= sym_h
;
1679 sinfo
->fun
[i
].is_func
= TRUE
;
1680 return &sinfo
->fun
[i
];
1682 /* Ignore a zero-size symbol inside an existing function. */
1683 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
1684 return &sinfo
->fun
[i
];
1687 if (++i
< sinfo
->num_fun
)
1688 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
1689 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
1690 else if (i
>= sinfo
->max_fun
)
1692 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
1693 bfd_size_type old
= amt
;
1695 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1696 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
1697 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1698 sinfo
= bfd_realloc (sinfo
, amt
);
1701 memset ((char *) sinfo
+ old
, 0, amt
- old
);
1702 sec_data
->stack_info
= sinfo
;
1704 sinfo
->fun
[i
].is_func
= is_func
;
1705 sinfo
->fun
[i
].global
= global
;
1706 sinfo
->fun
[i
].sec
= sec
;
1708 sinfo
->fun
[i
].u
.h
= sym_h
;
1710 sinfo
->fun
[i
].u
.sym
= sym_h
;
1711 sinfo
->fun
[i
].lo
= off
;
1712 sinfo
->fun
[i
].hi
= off
+ size
;
1713 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
);
1714 sinfo
->num_fun
+= 1;
1715 return &sinfo
->fun
[i
];
1718 /* Return the name of FUN. */
1721 func_name (struct function_info
*fun
)
1725 Elf_Internal_Shdr
*symtab_hdr
;
1727 while (fun
->start
!= NULL
)
1731 return fun
->u
.h
->root
.root
.string
;
1734 if (fun
->u
.sym
->st_name
== 0)
1736 size_t len
= strlen (sec
->name
);
1737 char *name
= bfd_malloc (len
+ 10);
1740 sprintf (name
, "%s+%lx", sec
->name
,
1741 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
1745 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1746 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
1749 /* Read the instruction at OFF in SEC. Return true iff the instruction
1750 is a nop, lnop, or stop 0 (all zero insn). */
1753 is_nop (asection
*sec
, bfd_vma off
)
1755 unsigned char insn
[4];
1757 if (off
+ 4 > sec
->size
1758 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
1760 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
1762 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
1767 /* Extend the range of FUN to cover nop padding up to LIMIT.
1768 Return TRUE iff some instruction other than a NOP was found. */
1771 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
1773 bfd_vma off
= (fun
->hi
+ 3) & -4;
1775 while (off
< limit
&& is_nop (fun
->sec
, off
))
1786 /* Check and fix overlapping function ranges. Return TRUE iff there
1787 are gaps in the current info we have about functions in SEC. */
1790 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
1792 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1793 struct spu_elf_stack_info
*sinfo
= sec_data
->stack_info
;
1795 bfd_boolean gaps
= FALSE
;
1800 for (i
= 1; i
< sinfo
->num_fun
; i
++)
1801 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
1803 /* Fix overlapping symbols. */
1804 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
1805 const char *f2
= func_name (&sinfo
->fun
[i
]);
1807 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
1808 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
1810 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
1813 if (sinfo
->num_fun
== 0)
1817 if (sinfo
->fun
[0].lo
!= 0)
1819 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
1821 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
1823 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
1824 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
1826 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
1832 /* Search current function info for a function that contains address
1833 OFFSET in section SEC. */
1835 static struct function_info
*
1836 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
1838 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1839 struct spu_elf_stack_info
*sinfo
= sec_data
->stack_info
;
1843 hi
= sinfo
->num_fun
;
1846 mid
= (lo
+ hi
) / 2;
1847 if (offset
< sinfo
->fun
[mid
].lo
)
1849 else if (offset
>= sinfo
->fun
[mid
].hi
)
1852 return &sinfo
->fun
[mid
];
1854 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
1859 /* Add CALLEE to CALLER call list if not already present. */
1862 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
1864 struct call_info
*p
;
1865 for (p
= caller
->call_list
; p
!= NULL
; p
= p
->next
)
1866 if (p
->fun
== callee
->fun
)
1868 /* Tail calls use less stack than normal calls. Retain entry
1869 for normal call over one for tail call. */
1870 if (p
->is_tail
> callee
->is_tail
)
1871 p
->is_tail
= callee
->is_tail
;
1874 callee
->next
= caller
->call_list
;
1875 caller
->call_list
= callee
;
1879 /* Rummage through the relocs for SEC, looking for function calls.
1880 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
1881 mark destination symbols on calls as being functions. Also
1882 look at branches, which may be tail calls or go to hot/cold
1883 section part of same function. */
1886 mark_functions_via_relocs (asection
*sec
,
1887 struct bfd_link_info
*info
,
1890 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1891 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
1892 Elf_Internal_Sym
*syms
, **psyms
;
1893 static bfd_boolean warned
;
1895 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
1897 if (internal_relocs
== NULL
)
1900 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
1901 psyms
= (Elf_Internal_Sym
**) &symtab_hdr
->contents
;
1903 irela
= internal_relocs
;
1904 irelaend
= irela
+ sec
->reloc_count
;
1905 for (; irela
< irelaend
; irela
++)
1907 enum elf_spu_reloc_type r_type
;
1908 unsigned int r_indx
;
1910 Elf_Internal_Sym
*sym
;
1911 struct elf_link_hash_entry
*h
;
1913 unsigned char insn
[4];
1914 bfd_boolean is_call
;
1915 struct function_info
*caller
;
1916 struct call_info
*callee
;
1918 r_type
= ELF32_R_TYPE (irela
->r_info
);
1919 if (r_type
!= R_SPU_REL16
1920 && r_type
!= R_SPU_ADDR16
)
1923 r_indx
= ELF32_R_SYM (irela
->r_info
);
1924 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
1928 || sym_sec
->output_section
== NULL
1929 || sym_sec
->output_section
->owner
!= sec
->output_section
->owner
)
1932 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
1933 irela
->r_offset
, 4))
1935 if (!is_branch (insn
))
1938 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
1939 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
1943 if (!call_tree
|| !warned
)
1944 info
->callbacks
->einfo (_("%B(%A+0x%v): call to non-code section"
1945 " %B(%A), stack analysis incomplete\n"),
1946 sec
->owner
, sec
, irela
->r_offset
,
1947 sym_sec
->owner
, sym_sec
);
1951 is_call
= (insn
[0] & 0xfd) == 0x31;
1954 val
= h
->root
.u
.def
.value
;
1956 val
= sym
->st_value
;
1957 val
+= irela
->r_addend
;
1961 struct function_info
*fun
;
1963 if (irela
->r_addend
!= 0)
1965 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
1968 fake
->st_value
= val
;
1970 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
1974 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
1976 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
1979 if (irela
->r_addend
!= 0
1980 && fun
->u
.sym
!= sym
)
1985 caller
= find_function (sec
, irela
->r_offset
, info
);
1988 callee
= bfd_malloc (sizeof *callee
);
1992 callee
->fun
= find_function (sym_sec
, val
, info
);
1993 if (callee
->fun
== NULL
)
1995 callee
->is_tail
= !is_call
;
1996 if (!insert_callee (caller
, callee
))
1999 && !callee
->fun
->is_func
2000 && callee
->fun
->stack
== 0)
2002 /* This is either a tail call or a branch from one part of
2003 the function to another, ie. hot/cold section. If the
2004 destination has been called by some other function then
2005 it is a separate function. We also assume that functions
2006 are not split across input files. */
2007 if (callee
->fun
->start
!= NULL
2008 || sec
->owner
!= sym_sec
->owner
)
2010 callee
->fun
->start
= NULL
;
2011 callee
->fun
->is_func
= TRUE
;
2014 callee
->fun
->start
= caller
;
2021 /* Handle something like .init or .fini, which has a piece of a function.
2022 These sections are pasted together to form a single function. */
2025 pasted_function (asection
*sec
, struct bfd_link_info
*info
)
2027 struct bfd_link_order
*l
;
2028 struct _spu_elf_section_data
*sec_data
;
2029 struct spu_elf_stack_info
*sinfo
;
2030 Elf_Internal_Sym
*fake
;
2031 struct function_info
*fun
, *fun_start
;
2033 fake
= bfd_zmalloc (sizeof (*fake
));
2037 fake
->st_size
= sec
->size
;
2039 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2040 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2044 /* Find a function immediately preceding this section. */
2046 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2048 if (l
->u
.indirect
.section
== sec
)
2050 if (fun_start
!= NULL
)
2052 if (fun_start
->start
)
2053 fun_start
= fun_start
->start
;
2054 fun
->start
= fun_start
;
2058 if (l
->type
== bfd_indirect_link_order
2059 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2060 && (sinfo
= sec_data
->stack_info
) != NULL
2061 && sinfo
->num_fun
!= 0)
2062 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2065 info
->callbacks
->einfo (_("%A link_order not found\n"), sec
);
2069 /* We're only interested in code sections. */
2072 interesting_section (asection
*s
, bfd
*obfd
, struct spu_link_hash_table
*htab
)
2074 return (s
!= htab
->stub
2075 && s
->output_section
!= NULL
2076 && s
->output_section
->owner
== obfd
2077 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2078 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2082 /* Map address ranges in code sections to functions. */
2085 discover_functions (bfd
*output_bfd
, struct bfd_link_info
*info
)
2087 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2090 Elf_Internal_Sym
***psym_arr
;
2091 asection
***sec_arr
;
2092 bfd_boolean gaps
= FALSE
;
2095 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2098 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2099 if (psym_arr
== NULL
)
2101 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2102 if (sec_arr
== NULL
)
2106 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2108 ibfd
= ibfd
->link_next
, bfd_idx
++)
2110 extern const bfd_target bfd_elf32_spu_vec
;
2111 Elf_Internal_Shdr
*symtab_hdr
;
2114 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2115 asection
**psecs
, **p
;
2117 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2120 /* Read all the symbols. */
2121 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2122 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2126 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2129 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2131 symtab_hdr
->contents
= (void *) syms
;
2136 /* Select defined function symbols that are going to be output. */
2137 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2140 psym_arr
[bfd_idx
] = psyms
;
2141 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2144 sec_arr
[bfd_idx
] = psecs
;
2145 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2146 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2147 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2151 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2152 if (s
!= NULL
&& interesting_section (s
, output_bfd
, htab
))
2155 symcount
= psy
- psyms
;
2158 /* Sort them by section and offset within section. */
2159 sort_syms_syms
= syms
;
2160 sort_syms_psecs
= psecs
;
2161 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2163 /* Now inspect the function symbols. */
2164 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2166 asection
*s
= psecs
[*psy
- syms
];
2167 Elf_Internal_Sym
**psy2
;
2169 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2170 if (psecs
[*psy2
- syms
] != s
)
2173 if (!alloc_stack_info (s
, psy2
- psy
))
2178 /* First install info about properly typed and sized functions.
2179 In an ideal world this will cover all code sections, except
2180 when partitioning functions into hot and cold sections,
2181 and the horrible pasted together .init and .fini functions. */
2182 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2185 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2187 asection
*s
= psecs
[sy
- syms
];
2188 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2193 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2194 if (interesting_section (sec
, output_bfd
, htab
))
2195 gaps
|= check_function_ranges (sec
, info
);
2200 /* See if we can discover more function symbols by looking at
2202 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2204 ibfd
= ibfd
->link_next
, bfd_idx
++)
2208 if (psym_arr
[bfd_idx
] == NULL
)
2211 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2212 if (interesting_section (sec
, output_bfd
, htab
)
2213 && sec
->reloc_count
!= 0)
2215 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2220 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2222 ibfd
= ibfd
->link_next
, bfd_idx
++)
2224 Elf_Internal_Shdr
*symtab_hdr
;
2226 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2229 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2232 psecs
= sec_arr
[bfd_idx
];
2234 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2235 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2238 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2239 if (interesting_section (sec
, output_bfd
, htab
))
2240 gaps
|= check_function_ranges (sec
, info
);
2244 /* Finally, install all globals. */
2245 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2249 s
= psecs
[sy
- syms
];
2251 /* Global syms might be improperly typed functions. */
2252 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2253 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2255 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
2260 /* Some of the symbols we've installed as marking the
2261 beginning of functions may have a size of zero. Extend
2262 the range of such functions to the beginning of the
2263 next symbol of interest. */
2264 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2265 if (interesting_section (sec
, output_bfd
, htab
))
2267 struct _spu_elf_section_data
*sec_data
;
2268 struct spu_elf_stack_info
*sinfo
;
2270 sec_data
= spu_elf_section_data (sec
);
2271 sinfo
= sec_data
->stack_info
;
2275 bfd_vma hi
= sec
->size
;
2277 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
2279 sinfo
->fun
[fun_idx
].hi
= hi
;
2280 hi
= sinfo
->fun
[fun_idx
].lo
;
2283 /* No symbols in this section. Must be .init or .fini
2284 or something similar. */
2285 else if (!pasted_function (sec
, info
))
2291 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2293 ibfd
= ibfd
->link_next
, bfd_idx
++)
2295 if (psym_arr
[bfd_idx
] == NULL
)
2298 free (psym_arr
[bfd_idx
]);
2299 free (sec_arr
[bfd_idx
]);
2308 /* Mark nodes in the call graph that are called by some other node. */
2311 mark_non_root (struct function_info
*fun
)
2313 struct call_info
*call
;
2316 for (call
= fun
->call_list
; call
; call
= call
->next
)
2318 call
->fun
->non_root
= TRUE
;
2319 if (!call
->fun
->visit1
)
2320 mark_non_root (call
->fun
);
2324 /* Remove cycles from the call graph. */
2327 call_graph_traverse (struct function_info
*fun
, struct bfd_link_info
*info
)
2329 struct call_info
**callp
, *call
;
2332 fun
->marking
= TRUE
;
2334 callp
= &fun
->call_list
;
2335 while ((call
= *callp
) != NULL
)
2337 if (!call
->fun
->visit2
)
2338 call_graph_traverse (call
->fun
, info
);
2339 else if (call
->fun
->marking
)
2341 const char *f1
= func_name (fun
);
2342 const char *f2
= func_name (call
->fun
);
2344 info
->callbacks
->info (_("Stack analysis will ignore the call "
2347 *callp
= call
->next
;
2350 callp
= &call
->next
;
2352 fun
->marking
= FALSE
;
2355 /* Populate call_list for each function. */
2358 build_call_tree (bfd
*output_bfd
, struct bfd_link_info
*info
)
2360 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2363 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2365 extern const bfd_target bfd_elf32_spu_vec
;
2368 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2371 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2373 if (!interesting_section (sec
, output_bfd
, htab
)
2374 || sec
->reloc_count
== 0)
2377 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
2381 /* Transfer call info from hot/cold section part of function
2383 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2385 struct _spu_elf_section_data
*sec_data
;
2386 struct spu_elf_stack_info
*sinfo
;
2388 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2389 && (sinfo
= sec_data
->stack_info
) != NULL
)
2392 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2394 if (sinfo
->fun
[i
].start
!= NULL
)
2396 struct call_info
*call
= sinfo
->fun
[i
].call_list
;
2398 while (call
!= NULL
)
2400 struct call_info
*call_next
= call
->next
;
2401 if (!insert_callee (sinfo
->fun
[i
].start
, call
))
2405 sinfo
->fun
[i
].call_list
= NULL
;
2406 sinfo
->fun
[i
].non_root
= TRUE
;
2413 /* Find the call graph root(s). */
2414 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2416 extern const bfd_target bfd_elf32_spu_vec
;
2419 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2422 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2424 struct _spu_elf_section_data
*sec_data
;
2425 struct spu_elf_stack_info
*sinfo
;
2427 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2428 && (sinfo
= sec_data
->stack_info
) != NULL
)
2431 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2432 if (!sinfo
->fun
[i
].visit1
)
2433 mark_non_root (&sinfo
->fun
[i
]);
2438 /* Remove cycles from the call graph. We start from the root node(s)
2439 so that we break cycles in a reasonable place. */
2440 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2442 extern const bfd_target bfd_elf32_spu_vec
;
2445 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2448 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2450 struct _spu_elf_section_data
*sec_data
;
2451 struct spu_elf_stack_info
*sinfo
;
2453 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2454 && (sinfo
= sec_data
->stack_info
) != NULL
)
2457 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2458 if (!sinfo
->fun
[i
].non_root
)
2459 call_graph_traverse (&sinfo
->fun
[i
], info
);
2467 /* Descend the call graph for FUN, accumulating total stack required. */
2470 sum_stack (struct function_info
*fun
,
2471 struct bfd_link_info
*info
,
2472 int emit_stack_syms
)
2474 struct call_info
*call
;
2475 struct function_info
*max
= NULL
;
2476 bfd_vma max_stack
= fun
->stack
;
2483 for (call
= fun
->call_list
; call
; call
= call
->next
)
2485 stack
= sum_stack (call
->fun
, info
, emit_stack_syms
);
2486 /* Include caller stack for normal calls, don't do so for
2487 tail calls. fun->stack here is local stack usage for
2490 stack
+= fun
->stack
;
2491 if (max_stack
< stack
)
2498 f1
= func_name (fun
);
2499 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"), f1
, fun
->stack
, max_stack
);
2503 info
->callbacks
->minfo (_(" calls:\n"));
2504 for (call
= fun
->call_list
; call
; call
= call
->next
)
2506 const char *f2
= func_name (call
->fun
);
2507 const char *ann1
= call
->fun
== max
? "*" : " ";
2508 const char *ann2
= call
->is_tail
? "t" : " ";
2510 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
2514 /* Now fun->stack holds cumulative stack. */
2515 fun
->stack
= max_stack
;
2518 if (emit_stack_syms
)
2520 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2521 char *name
= bfd_malloc (18 + strlen (f1
));
2522 struct elf_link_hash_entry
*h
;
2526 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
2527 sprintf (name
, "__stack_%s", f1
);
2529 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
2531 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
2534 && (h
->root
.type
== bfd_link_hash_new
2535 || h
->root
.type
== bfd_link_hash_undefined
2536 || h
->root
.type
== bfd_link_hash_undefweak
))
2538 h
->root
.type
= bfd_link_hash_defined
;
2539 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2540 h
->root
.u
.def
.value
= max_stack
;
2545 h
->ref_regular_nonweak
= 1;
2546 h
->forced_local
= 1;
2555 /* Provide an estimate of total stack required. */
2558 spu_elf_stack_analysis (bfd
*output_bfd
,
2559 struct bfd_link_info
*info
,
2560 int emit_stack_syms
)
2563 bfd_vma max_stack
= 0;
2565 if (!discover_functions (output_bfd
, info
))
2568 if (!build_call_tree (output_bfd
, info
))
2571 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
2572 info
->callbacks
->minfo (_("\nStack size for functions. "
2573 "Annotations: '*' max stack, 't' tail call\n"));
2574 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2576 extern const bfd_target bfd_elf32_spu_vec
;
2579 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2582 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2584 struct _spu_elf_section_data
*sec_data
;
2585 struct spu_elf_stack_info
*sinfo
;
2587 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2588 && (sinfo
= sec_data
->stack_info
) != NULL
)
2591 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2593 if (!sinfo
->fun
[i
].non_root
)
2598 stack
= sum_stack (&sinfo
->fun
[i
], info
,
2600 f1
= func_name (&sinfo
->fun
[i
]);
2601 info
->callbacks
->info (_(" %s: 0x%v\n"),
2603 if (max_stack
< stack
)
2611 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"), max_stack
);
2615 /* Perform a final link. */
2618 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
2620 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2622 if (htab
->stack_analysis
2623 && !spu_elf_stack_analysis (output_bfd
, info
, htab
->emit_stack_syms
))
2624 info
->callbacks
->einfo ("%X%P: stack analysis error: %E\n");
2626 return bfd_elf_final_link (output_bfd
, info
);
2629 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
2632 spu_elf_relocate_section (bfd
*output_bfd
,
2633 struct bfd_link_info
*info
,
2635 asection
*input_section
,
2637 Elf_Internal_Rela
*relocs
,
2638 Elf_Internal_Sym
*local_syms
,
2639 asection
**local_sections
)
2641 Elf_Internal_Shdr
*symtab_hdr
;
2642 struct elf_link_hash_entry
**sym_hashes
;
2643 Elf_Internal_Rela
*rel
, *relend
;
2644 struct spu_link_hash_table
*htab
;
2645 bfd_boolean ret
= TRUE
;
2647 htab
= spu_hash_table (info
);
2648 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
2649 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
2652 relend
= relocs
+ input_section
->reloc_count
;
2653 for (; rel
< relend
; rel
++)
2656 reloc_howto_type
*howto
;
2657 unsigned long r_symndx
;
2658 Elf_Internal_Sym
*sym
;
2660 struct elf_link_hash_entry
*h
;
2661 const char *sym_name
;
2664 bfd_reloc_status_type r
;
2665 bfd_boolean unresolved_reloc
;
2669 r_symndx
= ELF32_R_SYM (rel
->r_info
);
2670 r_type
= ELF32_R_TYPE (rel
->r_info
);
2671 howto
= elf_howto_table
+ r_type
;
2672 unresolved_reloc
= FALSE
;
2678 if (r_symndx
< symtab_hdr
->sh_info
)
2680 sym
= local_syms
+ r_symndx
;
2681 sec
= local_sections
[r_symndx
];
2682 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
2683 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
2687 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
2688 r_symndx
, symtab_hdr
, sym_hashes
,
2690 unresolved_reloc
, warned
);
2691 sym_name
= h
->root
.root
.string
;
2694 if (sec
!= NULL
&& elf_discarded_section (sec
))
2696 /* For relocs against symbols from removed linkonce sections,
2697 or sections discarded by a linker script, we just want the
2698 section contents zeroed. Avoid any special processing. */
2699 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
2705 if (info
->relocatable
)
2708 if (unresolved_reloc
)
2710 (*_bfd_error_handler
)
2711 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
2713 bfd_get_section_name (input_bfd
, input_section
),
2714 (long) rel
->r_offset
,
2720 /* If this symbol is in an overlay area, we may need to relocate
2721 to the overlay stub. */
2722 addend
= rel
->r_addend
;
2723 branch
= (is_branch (contents
+ rel
->r_offset
)
2724 || is_hint (contents
+ rel
->r_offset
));
2725 if (needs_ovl_stub (sym_name
, sec
, input_section
, htab
, branch
))
2728 struct spu_stub_hash_entry
*sh
;
2730 stub_name
= spu_stub_name (sec
, h
, rel
);
2731 if (stub_name
== NULL
)
2734 sh
= (struct spu_stub_hash_entry
*)
2735 bfd_hash_lookup (&htab
->stub_hash_table
, stub_name
, FALSE
, FALSE
);
2738 relocation
= (htab
->stub
->output_section
->vma
2739 + htab
->stub
->output_offset
2746 r
= _bfd_final_link_relocate (howto
,
2750 rel
->r_offset
, relocation
, addend
);
2752 if (r
!= bfd_reloc_ok
)
2754 const char *msg
= (const char *) 0;
2758 case bfd_reloc_overflow
:
2759 if (!((*info
->callbacks
->reloc_overflow
)
2760 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
2761 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
2765 case bfd_reloc_undefined
:
2766 if (!((*info
->callbacks
->undefined_symbol
)
2767 (info
, sym_name
, input_bfd
, input_section
,
2768 rel
->r_offset
, TRUE
)))
2772 case bfd_reloc_outofrange
:
2773 msg
= _("internal error: out of range error");
2776 case bfd_reloc_notsupported
:
2777 msg
= _("internal error: unsupported relocation error");
2780 case bfd_reloc_dangerous
:
2781 msg
= _("internal error: dangerous error");
2785 msg
= _("internal error: unknown error");
2789 if (!((*info
->callbacks
->warning
)
2790 (info
, msg
, sym_name
, input_bfd
, input_section
,
2801 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
2804 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
2805 const char *sym_name ATTRIBUTE_UNUSED
,
2806 Elf_Internal_Sym
*sym
,
2807 asection
*sym_sec ATTRIBUTE_UNUSED
,
2808 struct elf_link_hash_entry
*h
)
2810 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2812 if (!info
->relocatable
2813 && htab
->num_overlays
!= 0
2815 && (h
->root
.type
== bfd_link_hash_defined
2816 || h
->root
.type
== bfd_link_hash_defweak
)
2818 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
2820 static Elf_Internal_Rela zero_rel
;
2821 char *stub_name
= spu_stub_name (h
->root
.u
.def
.section
, h
, &zero_rel
);
2822 struct spu_stub_hash_entry
*sh
;
2824 if (stub_name
== NULL
)
2826 sh
= (struct spu_stub_hash_entry
*)
2827 bfd_hash_lookup (&htab
->stub_hash_table
, stub_name
, FALSE
, FALSE
);
2832 = _bfd_elf_section_from_bfd_section (htab
->stub
->output_section
->owner
,
2833 htab
->stub
->output_section
);
2834 sym
->st_value
= (htab
->stub
->output_section
->vma
2835 + htab
->stub
->output_offset
2842 static int spu_plugin
= 0;
2845 spu_elf_plugin (int val
)
2850 /* Set ELF header e_type for plugins. */
2853 spu_elf_post_process_headers (bfd
*abfd
,
2854 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
2858 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
2860 i_ehdrp
->e_type
= ET_DYN
;
2864 /* We may add an extra PT_LOAD segment for .toe. We also need extra
2865 segments for overlays. */
2868 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
2870 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2871 int extra
= htab
->num_overlays
;
2877 sec
= bfd_get_section_by_name (abfd
, ".toe");
2878 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
2884 /* Remove .toe section from other PT_LOAD segments and put it in
2885 a segment of its own. Put overlays in separate segments too. */
2888 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
2891 struct elf_segment_map
*m
;
2897 toe
= bfd_get_section_by_name (abfd
, ".toe");
2898 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
2899 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
2900 for (i
= 0; i
< m
->count
; i
++)
2901 if ((s
= m
->sections
[i
]) == toe
2902 || spu_elf_section_data (s
)->ovl_index
!= 0)
2904 struct elf_segment_map
*m2
;
2907 if (i
+ 1 < m
->count
)
2909 amt
= sizeof (struct elf_segment_map
);
2910 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
2911 m2
= bfd_zalloc (abfd
, amt
);
2914 m2
->count
= m
->count
- (i
+ 1);
2915 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
2916 m2
->count
* sizeof (m
->sections
[0]));
2917 m2
->p_type
= PT_LOAD
;
2925 amt
= sizeof (struct elf_segment_map
);
2926 m2
= bfd_zalloc (abfd
, amt
);
2929 m2
->p_type
= PT_LOAD
;
2931 m2
->sections
[0] = s
;
2941 /* Check that all loadable section VMAs lie in the range
2942 LO .. HI inclusive. */
2945 spu_elf_check_vma (bfd
*abfd
, bfd_vma lo
, bfd_vma hi
)
2947 struct elf_segment_map
*m
;
2950 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
2951 if (m
->p_type
== PT_LOAD
)
2952 for (i
= 0; i
< m
->count
; i
++)
2953 if (m
->sections
[i
]->size
!= 0
2954 && (m
->sections
[i
]->vma
< lo
2955 || m
->sections
[i
]->vma
> hi
2956 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
2957 return m
->sections
[i
];
2962 /* Tweak phdrs before writing them out. */
2965 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
2967 const struct elf_backend_data
*bed
;
2968 struct elf_obj_tdata
*tdata
;
2969 Elf_Internal_Phdr
*phdr
, *last
;
2970 struct spu_link_hash_table
*htab
;
2977 bed
= get_elf_backend_data (abfd
);
2978 tdata
= elf_tdata (abfd
);
2980 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
2981 htab
= spu_hash_table (info
);
2982 if (htab
->num_overlays
!= 0)
2984 struct elf_segment_map
*m
;
2987 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
2989 && (o
= spu_elf_section_data (m
->sections
[0])->ovl_index
) != 0)
2991 /* Mark this as an overlay header. */
2992 phdr
[i
].p_flags
|= PF_OVERLAY
;
2994 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0)
2996 bfd_byte
*p
= htab
->ovtab
->contents
;
2997 unsigned int off
= (o
- 1) * 16 + 8;
2999 /* Write file_off into _ovly_table. */
3000 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
3005 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
3006 of 16. This should always be possible when using the standard
3007 linker scripts, but don't create overlapping segments if
3008 someone is playing games with linker scripts. */
3010 for (i
= count
; i
-- != 0; )
3011 if (phdr
[i
].p_type
== PT_LOAD
)
3015 adjust
= -phdr
[i
].p_filesz
& 15;
3018 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
3021 adjust
= -phdr
[i
].p_memsz
& 15;
3024 && phdr
[i
].p_filesz
!= 0
3025 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
3026 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
3029 if (phdr
[i
].p_filesz
!= 0)
3033 if (i
== (unsigned int) -1)
3034 for (i
= count
; i
-- != 0; )
3035 if (phdr
[i
].p_type
== PT_LOAD
)
3039 adjust
= -phdr
[i
].p_filesz
& 15;
3040 phdr
[i
].p_filesz
+= adjust
;
3042 adjust
= -phdr
[i
].p_memsz
& 15;
3043 phdr
[i
].p_memsz
+= adjust
;
3049 #define TARGET_BIG_SYM bfd_elf32_spu_vec
3050 #define TARGET_BIG_NAME "elf32-spu"
3051 #define ELF_ARCH bfd_arch_spu
3052 #define ELF_MACHINE_CODE EM_SPU
3053 /* This matches the alignment need for DMA. */
3054 #define ELF_MAXPAGESIZE 0x80
3055 #define elf_backend_rela_normal 1
3056 #define elf_backend_can_gc_sections 1
3058 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
3059 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
3060 #define elf_info_to_howto spu_elf_info_to_howto
3061 #define elf_backend_relocate_section spu_elf_relocate_section
3062 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
3063 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
3064 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
3065 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
3066 #define bfd_elf32_bfd_link_hash_table_free spu_elf_link_hash_table_free
3068 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
3069 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
3070 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
3071 #define elf_backend_post_process_headers spu_elf_post_process_headers
3072 #define elf_backend_special_sections spu_elf_special_sections
3073 #define bfd_elf32_bfd_final_link spu_elf_final_link
3075 #include "elf32-target.h"