/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
#include "sysdep.h"
#include "libiberty.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
93 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
94 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
95 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
99 static enum elf_spu_reloc_type
100 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
106 case BFD_RELOC_SPU_IMM10W
:
108 case BFD_RELOC_SPU_IMM16W
:
110 case BFD_RELOC_SPU_LO16
:
111 return R_SPU_ADDR16_LO
;
112 case BFD_RELOC_SPU_HI16
:
113 return R_SPU_ADDR16_HI
;
114 case BFD_RELOC_SPU_IMM18
:
116 case BFD_RELOC_SPU_PCREL16
:
118 case BFD_RELOC_SPU_IMM7
:
120 case BFD_RELOC_SPU_IMM8
:
122 case BFD_RELOC_SPU_PCREL9a
:
124 case BFD_RELOC_SPU_PCREL9b
:
126 case BFD_RELOC_SPU_IMM10
:
127 return R_SPU_ADDR10I
;
128 case BFD_RELOC_SPU_IMM16
:
129 return R_SPU_ADDR16I
;
132 case BFD_RELOC_32_PCREL
:
134 case BFD_RELOC_SPU_PPU32
:
136 case BFD_RELOC_SPU_PPU64
:
142 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
144 Elf_Internal_Rela
*dst
)
146 enum elf_spu_reloc_type r_type
;
148 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
149 BFD_ASSERT (r_type
< R_SPU_max
);
150 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
153 static reloc_howto_type
*
154 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
155 bfd_reloc_code_real_type code
)
157 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
159 if (r_type
== R_SPU_NONE
)
162 return elf_howto_table
+ r_type
;
165 static reloc_howto_type
*
166 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
171 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
172 if (elf_howto_table
[i
].name
!= NULL
173 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
174 return &elf_howto_table
[i
];
179 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
181 static bfd_reloc_status_type
182 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
183 void *data
, asection
*input_section
,
184 bfd
*output_bfd
, char **error_message
)
186 bfd_size_type octets
;
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
193 if (output_bfd
!= NULL
)
194 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
195 input_section
, output_bfd
, error_message
);
197 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
198 return bfd_reloc_outofrange
;
199 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
201 /* Get symbol value. */
203 if (!bfd_is_com_section (symbol
->section
))
205 if (symbol
->section
->output_section
)
206 val
+= symbol
->section
->output_section
->vma
;
208 val
+= reloc_entry
->addend
;
210 /* Make it pc-relative. */
211 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
214 if (val
+ 256 >= 512)
215 return bfd_reloc_overflow
;
217 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
222 insn
&= ~reloc_entry
->howto
->dst_mask
;
223 insn
|= val
& reloc_entry
->howto
->dst_mask
;
224 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
229 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
231 if (!sec
->used_by_bfd
)
233 struct _spu_elf_section_data
*sdata
;
235 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
238 sec
->used_by_bfd
= sdata
;
241 return _bfd_elf_new_section_hook (abfd
, sec
);
244 /* Set up overlay info for executables. */
247 spu_elf_object_p (bfd
*abfd
)
249 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
251 unsigned int i
, num_ovl
, num_buf
;
252 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
253 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
254 Elf_Internal_Phdr
*last_phdr
= NULL
;
256 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
257 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
262 if (last_phdr
== NULL
263 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
266 for (j
= 1; j
< elf_numsections (abfd
); j
++)
268 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
270 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr
, phdr
))
272 asection
*sec
= shdr
->bfd_section
;
273 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
274 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
282 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
286 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
288 if (sym
->name
!= NULL
289 && sym
->section
!= bfd_abs_section_ptr
290 && strncmp (sym
->name
, "_EAR_", 5) == 0)
291 sym
->flags
|= BSF_KEEP
;
294 /* SPU ELF linker hash table. */
296 struct spu_link_hash_table
298 struct elf_link_hash_table elf
;
300 struct spu_elf_params
*params
;
302 /* Shortcuts to overlay sections. */
308 /* Count of stubs in each overlay section. */
309 unsigned int *stub_count
;
311 /* The stub section for each overlay section. */
314 struct elf_link_hash_entry
*ovly_entry
[2];
316 /* Number of overlay buffers. */
317 unsigned int num_buf
;
319 /* Total number of overlays. */
320 unsigned int num_overlays
;
322 /* For soft icache. */
323 unsigned int line_size_log2
;
324 unsigned int num_lines_log2
;
325 unsigned int fromelem_size_log2
;
327 /* How much memory we have. */
328 unsigned int local_store
;
329 /* Local store --auto-overlay should reserve for non-overlay
330 functions and data. */
331 unsigned int overlay_fixed
;
332 /* Local store --auto-overlay should reserve for stack and heap. */
333 unsigned int reserved
;
334 /* If reserved is not specified, stack analysis will calculate a value
335 for the stack. This parameter adjusts that value to allow for
336 negative sp access (the ABI says 2000 bytes below sp are valid,
337 and the overlay manager uses some of this area). */
338 int extra_stack_space
;
339 /* Count of overlay stubs needed in non-overlay area. */
340 unsigned int non_ovly_stub
;
343 unsigned int stub_err
: 1;
346 /* Hijack the generic got fields for overlay stub accounting. */
350 struct got_entry
*next
;
359 #define spu_hash_table(p) \
360 ((struct spu_link_hash_table *) ((p)->hash))
/* One entry per call edge in the call graph built for stack/overlay
   analysis.  */
struct call_info
{
  /* The function called.  */
  struct function_info *fun;
  /* Next call edge from the same caller.  */
  struct call_info *next;
  /* Number of times this edge is taken.
     NOTE(review): this field line was dropped in extraction;
     reconstructed from upstream binutils — confirm.  */
  unsigned int count;
  /* Maximum call depth seen through this edge.  */
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int priority : 13;
};
375 /* List of functions called. Also branches to hot/cold part of
377 struct call_info
*call_list
;
378 /* For hot/cold part of function, point to owner. */
379 struct function_info
*start
;
380 /* Symbol at start of function. */
382 Elf_Internal_Sym
*sym
;
383 struct elf_link_hash_entry
*h
;
385 /* Function section. */
388 /* Where last called from, and number of sections called from. */
389 asection
*last_caller
;
390 unsigned int call_count
;
391 /* Address range of (this part of) function. */
393 /* Offset where we found a store of lr, or -1 if none found. */
395 /* Offset where we found the stack adjustment insn. */
399 /* Distance from root of call tree. Tail and hot/cold branches
400 count as one deeper. We aren't counting stack frames here. */
402 /* Set if global symbol. */
403 unsigned int global
: 1;
404 /* Set if known to be start of function (as distinct from a hunk
405 in hot/cold section. */
406 unsigned int is_func
: 1;
407 /* Set if not a root node. */
408 unsigned int non_root
: 1;
409 /* Flags used during call tree traversal. It's cheaper to replicate
410 the visit flags than have one which needs clearing after a traversal. */
411 unsigned int visit1
: 1;
412 unsigned int visit2
: 1;
413 unsigned int marking
: 1;
414 unsigned int visit3
: 1;
415 unsigned int visit4
: 1;
416 unsigned int visit5
: 1;
417 unsigned int visit6
: 1;
418 unsigned int visit7
: 1;
421 struct spu_elf_stack_info
425 /* Variable size array describing functions, one per contiguous
426 address range belonging to a function. */
427 struct function_info fun
[1];
430 static struct function_info
*find_function (asection
*, bfd_vma
,
431 struct bfd_link_info
*);
433 /* Create a spu ELF linker hash table. */
435 static struct bfd_link_hash_table
*
436 spu_elf_link_hash_table_create (bfd
*abfd
)
438 struct spu_link_hash_table
*htab
;
440 htab
= bfd_malloc (sizeof (*htab
));
444 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
445 _bfd_elf_link_hash_newfunc
,
446 sizeof (struct elf_link_hash_entry
)))
452 memset (&htab
->ovtab
, 0,
453 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
455 htab
->elf
.init_got_refcount
.refcount
= 0;
456 htab
->elf
.init_got_refcount
.glist
= NULL
;
457 htab
->elf
.init_got_offset
.offset
= 0;
458 htab
->elf
.init_got_offset
.glist
= NULL
;
459 return &htab
->elf
.root
;
463 spu_elf_setup (struct bfd_link_info
*info
, struct spu_elf_params
*params
)
465 bfd_vma max_branch_log2
;
467 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
468 htab
->params
= params
;
469 htab
->line_size_log2
= bfd_log2 (htab
->params
->line_size
);
470 htab
->num_lines_log2
= bfd_log2 (htab
->params
->num_lines
);
472 /* For the software i-cache, we provide a "from" list whose size
473 is a power-of-two number of quadwords, big enough to hold one
474 byte per outgoing branch. Compute this number here. */
475 max_branch_log2
= bfd_log2 (htab
->params
->max_branch
);
476 htab
->fromelem_size_log2
= max_branch_log2
> 4 ? max_branch_log2
- 4 : 0;
479 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
480 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
481 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
484 get_sym_h (struct elf_link_hash_entry
**hp
,
485 Elf_Internal_Sym
**symp
,
487 Elf_Internal_Sym
**locsymsp
,
488 unsigned long r_symndx
,
491 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
493 if (r_symndx
>= symtab_hdr
->sh_info
)
495 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
496 struct elf_link_hash_entry
*h
;
498 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
499 while (h
->root
.type
== bfd_link_hash_indirect
500 || h
->root
.type
== bfd_link_hash_warning
)
501 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
511 asection
*symsec
= NULL
;
512 if (h
->root
.type
== bfd_link_hash_defined
513 || h
->root
.type
== bfd_link_hash_defweak
)
514 symsec
= h
->root
.u
.def
.section
;
520 Elf_Internal_Sym
*sym
;
521 Elf_Internal_Sym
*locsyms
= *locsymsp
;
525 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
527 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
529 0, NULL
, NULL
, NULL
);
534 sym
= locsyms
+ r_symndx
;
543 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
549 /* Create the note section if not already present. This is done early so
550 that the linker maps the sections to the right place in the output. */
553 spu_elf_create_sections (struct bfd_link_info
*info
)
557 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
558 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
563 /* Make SPU_PTNOTE_SPUNAME section. */
570 ibfd
= info
->input_bfds
;
571 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
572 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
574 || !bfd_set_section_alignment (ibfd
, s
, 4))
577 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
578 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
579 size
+= (name_len
+ 3) & -4;
581 if (!bfd_set_section_size (ibfd
, s
, size
))
584 data
= bfd_zalloc (ibfd
, size
);
588 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
589 bfd_put_32 (ibfd
, name_len
, data
+ 4);
590 bfd_put_32 (ibfd
, 1, data
+ 8);
591 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
592 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
593 bfd_get_filename (info
->output_bfd
), name_len
);
600 /* qsort predicate to sort sections by vma. */
603 sort_sections (const void *a
, const void *b
)
605 const asection
*const *s1
= a
;
606 const asection
*const *s2
= b
;
607 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
610 return delta
< 0 ? -1 : 1;
612 return (*s1
)->index
- (*s2
)->index
;
615 /* Identify overlays in the output bfd, and number them.
616 Returns 0 on error, 1 if no overlays, 2 if overlays. */
619 spu_elf_find_overlays (struct bfd_link_info
*info
)
621 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
622 asection
**alloc_sec
;
623 unsigned int i
, n
, ovl_index
, num_buf
;
626 static const char *const entry_names
[2][2] = {
627 { "__ovly_load", "__icache_br_handler" },
628 { "__ovly_return", "__icache_call_handler" }
631 if (info
->output_bfd
->section_count
< 2)
635 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
636 if (alloc_sec
== NULL
)
639 /* Pick out all the alloced sections. */
640 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
641 if ((s
->flags
& SEC_ALLOC
) != 0
642 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
652 /* Sort them by vma. */
653 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
655 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
656 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
658 /* Look for an overlapping vma to find the first overlay section. */
659 bfd_vma vma_start
= 0;
660 bfd_vma lma_start
= 0;
662 for (i
= 1; i
< n
; i
++)
665 if (s
->vma
< ovl_end
)
667 asection
*s0
= alloc_sec
[i
- 1];
669 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
675 << (htab
->num_lines_log2
+ htab
->line_size_log2
)));
680 ovl_end
= s
->vma
+ s
->size
;
683 /* Now find any sections within the cache area. */
684 for (ovl_index
= 0, num_buf
= 0; i
< n
; i
++)
687 if (s
->vma
>= ovl_end
)
690 /* A section in an overlay area called .ovl.init is not
691 an overlay, in the sense that it might be loaded in
692 by the overlay manager, but rather the initial
693 section contents for the overlay buffer. */
694 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
696 num_buf
= ((s
->vma
- vma_start
) >> htab
->line_size_log2
) + 1;
697 if (((s
->vma
- vma_start
) & (htab
->params
->line_size
- 1))
698 || ((s
->lma
- lma_start
) & (htab
->params
->line_size
- 1)))
700 info
->callbacks
->einfo (_("%X%P: overlay section %A "
701 "does not start on a cache line.\n"),
703 bfd_set_error (bfd_error_bad_value
);
706 else if (s
->size
> htab
->params
->line_size
)
708 info
->callbacks
->einfo (_("%X%P: overlay section %A "
709 "is larger than a cache line.\n"),
711 bfd_set_error (bfd_error_bad_value
);
715 alloc_sec
[ovl_index
++] = s
;
716 spu_elf_section_data (s
)->u
.o
.ovl_index
717 = ((s
->lma
- lma_start
) >> htab
->line_size_log2
) + 1;
718 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
722 /* Ensure there are no more overlay sections. */
726 if (s
->vma
< ovl_end
)
728 info
->callbacks
->einfo (_("%X%P: overlay section %A "
729 "is not in cache area.\n"),
731 bfd_set_error (bfd_error_bad_value
);
735 ovl_end
= s
->vma
+ s
->size
;
740 /* Look for overlapping vmas. Any with overlap must be overlays.
741 Count them. Also count the number of overlay regions. */
742 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
745 if (s
->vma
< ovl_end
)
747 asection
*s0
= alloc_sec
[i
- 1];
749 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
752 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
754 alloc_sec
[ovl_index
] = s0
;
755 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
756 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= num_buf
;
759 ovl_end
= s
->vma
+ s
->size
;
761 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
763 alloc_sec
[ovl_index
] = s
;
764 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
765 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
766 if (s0
->vma
!= s
->vma
)
768 info
->callbacks
->einfo (_("%X%P: overlay sections %A "
769 "and %A do not start at the "
772 bfd_set_error (bfd_error_bad_value
);
775 if (ovl_end
< s
->vma
+ s
->size
)
776 ovl_end
= s
->vma
+ s
->size
;
780 ovl_end
= s
->vma
+ s
->size
;
784 htab
->num_overlays
= ovl_index
;
785 htab
->num_buf
= num_buf
;
786 htab
->ovl_sec
= alloc_sec
;
791 for (i
= 0; i
< 2; i
++)
794 struct elf_link_hash_entry
*h
;
796 name
= entry_names
[i
][htab
->params
->ovly_flavour
];
797 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
801 if (h
->root
.type
== bfd_link_hash_new
)
803 h
->root
.type
= bfd_link_hash_undefined
;
805 h
->ref_regular_nonweak
= 1;
808 htab
->ovly_entry
[i
] = h
;
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

/* SPU instruction opcodes used when emitting overlay stubs.  */
#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
825 /* Return true for all relative and absolute branch instructions.
833 brhnz 00100011 0.. */
836 is_branch (const unsigned char *insn
)
838 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
841 /* Return true for all indirect branch instructions.
849 bihnz 00100101 011 */
852 is_indirect_branch (const unsigned char *insn
)
854 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
857 /* Return true for branch hint instructions.
862 is_hint (const unsigned char *insn
)
864 return (insn
[0] & 0xfc) == 0x10;
867 /* True if INPUT_SECTION might need overlay stubs. */
870 maybe_needs_stubs (asection
*input_section
)
872 /* No stubs for debug sections and suchlike. */
873 if ((input_section
->flags
& SEC_ALLOC
) == 0)
876 /* No stubs for link-once sections that will be discarded. */
877 if (input_section
->output_section
== bfd_abs_section_ptr
)
880 /* Don't create stubs for .eh_frame references. */
881 if (strcmp (input_section
->name
, ".eh_frame") == 0)
903 /* Return non-zero if this reloc symbol should go via an overlay stub.
904 Return 2 if the stub must be in non-overlay area. */
906 static enum _stub_type
907 needs_ovl_stub (struct elf_link_hash_entry
*h
,
908 Elf_Internal_Sym
*sym
,
910 asection
*input_section
,
911 Elf_Internal_Rela
*irela
,
913 struct bfd_link_info
*info
)
915 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
916 enum elf_spu_reloc_type r_type
;
917 unsigned int sym_type
;
918 bfd_boolean branch
, hint
, call
;
919 enum _stub_type ret
= no_stub
;
923 || sym_sec
->output_section
== bfd_abs_section_ptr
924 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
929 /* Ensure no stubs for user supplied overlay manager syms. */
930 if (h
== htab
->ovly_entry
[0] || h
== htab
->ovly_entry
[1])
933 /* setjmp always goes via an overlay stub, because then the return
934 and hence the longjmp goes via __ovly_return. That magically
935 makes setjmp/longjmp between overlays work. */
936 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
937 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
944 sym_type
= ELF_ST_TYPE (sym
->st_info
);
946 r_type
= ELF32_R_TYPE (irela
->r_info
);
950 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
952 if (contents
== NULL
)
955 if (!bfd_get_section_contents (input_section
->owner
,
962 contents
+= irela
->r_offset
;
964 branch
= is_branch (contents
);
965 hint
= is_hint (contents
);
968 call
= (contents
[0] & 0xfd) == 0x31;
970 && sym_type
!= STT_FUNC
973 /* It's common for people to write assembly and forget
974 to give function symbols the right type. Handle
975 calls to such symbols, but warn so that (hopefully)
976 people will fix their code. We need the symbol
977 type to be correct to distinguish function pointer
978 initialisation from other pointer initialisations. */
979 const char *sym_name
;
982 sym_name
= h
->root
.root
.string
;
985 Elf_Internal_Shdr
*symtab_hdr
;
986 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
987 sym_name
= bfd_elf_sym_name (input_section
->owner
,
992 (*_bfd_error_handler
) (_("warning: call to non-function"
993 " symbol %s defined in %B"),
994 sym_sec
->owner
, sym_name
);
1000 if ((!branch
&& htab
->params
->ovly_flavour
== ovly_soft_icache
)
1001 || (sym_type
!= STT_FUNC
1002 && !(branch
|| hint
)
1003 && (sym_sec
->flags
& SEC_CODE
) == 0))
1006 /* Usually, symbols in non-overlay sections don't need stubs. */
1007 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
1008 && !htab
->params
->non_overlay_stubs
)
1011 /* A reference from some other section to a symbol in an overlay
1012 section needs a stub. */
1013 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
1014 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
1016 if (call
|| sym_type
== STT_FUNC
)
1017 ret
= call_ovl_stub
;
1020 ret
= br000_ovl_stub
;
1024 unsigned int lrlive
= (contents
[1] & 0x70) >> 4;
1030 /* If this insn isn't a branch then we are possibly taking the
1031 address of a function and passing it out somehow. Soft-icache code
1032 always generates inline code to do indirect branches. */
1033 if (!(branch
|| hint
)
1034 && sym_type
== STT_FUNC
1035 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
1042 count_stub (struct spu_link_hash_table
*htab
,
1045 enum _stub_type stub_type
,
1046 struct elf_link_hash_entry
*h
,
1047 const Elf_Internal_Rela
*irela
)
1049 unsigned int ovl
= 0;
1050 struct got_entry
*g
, **head
;
1053 /* If this instruction is a branch or call, we need a stub
1054 for it. One stub per function per overlay.
1055 If it isn't a branch, then we are taking the address of
1056 this function so need a stub in the non-overlay area
1057 for it. One stub per function. */
1058 if (stub_type
!= nonovl_stub
)
1059 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1062 head
= &h
->got
.glist
;
1065 if (elf_local_got_ents (ibfd
) == NULL
)
1067 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
1068 * sizeof (*elf_local_got_ents (ibfd
)));
1069 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
1070 if (elf_local_got_ents (ibfd
) == NULL
)
1073 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1076 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1078 htab
->stub_count
[ovl
] += 1;
1084 addend
= irela
->r_addend
;
1088 struct got_entry
*gnext
;
1090 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1091 if (g
->addend
== addend
&& g
->ovl
== 0)
1096 /* Need a new non-overlay area stub. Zap other stubs. */
1097 for (g
= *head
; g
!= NULL
; g
= gnext
)
1100 if (g
->addend
== addend
)
1102 htab
->stub_count
[g
->ovl
] -= 1;
1110 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1111 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1117 g
= bfd_malloc (sizeof *g
);
1122 g
->stub_addr
= (bfd_vma
) -1;
1126 htab
->stub_count
[ovl
] += 1;
1132 /* Support two sizes of overlay stubs, a slower more compact stub of two
1133 intructions, and a faster stub of four instructions.
1134 Soft-icache stubs are four or eight words. */
1137 ovl_stub_size (struct spu_elf_params
*params
)
1139 return 16 << params
->ovly_flavour
>> params
->compact_stub
;
1143 ovl_stub_size_log2 (struct spu_elf_params
*params
)
1145 return 4 + params
->ovly_flavour
- params
->compact_stub
;
/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load

   Software icache stubs are (reconstructed — some lines of this
   comment were lost in extraction):

   .word target_index
   .word target_ia;
   .word lrlive_branchlocalstoreaddr;
   brasl $75,__icache_br_handler
   .quad xor_pattern  */
1173 build_stub (struct bfd_link_info
*info
,
1176 enum _stub_type stub_type
,
1177 struct elf_link_hash_entry
*h
,
1178 const Elf_Internal_Rela
*irela
,
1182 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1183 unsigned int ovl
, dest_ovl
, set_id
;
1184 struct got_entry
*g
, **head
;
1186 bfd_vma addend
, from
, to
, br_dest
, patt
;
1187 unsigned int lrlive
;
1190 if (stub_type
!= nonovl_stub
)
1191 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1194 head
= &h
->got
.glist
;
1196 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1200 addend
= irela
->r_addend
;
1202 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1204 g
= bfd_malloc (sizeof *g
);
1210 g
->br_addr
= (irela
->r_offset
1211 + isec
->output_offset
1212 + isec
->output_section
->vma
);
1218 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1219 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1224 if (g
->ovl
== 0 && ovl
!= 0)
1227 if (g
->stub_addr
!= (bfd_vma
) -1)
1231 sec
= htab
->stub_sec
[ovl
];
1232 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
1233 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
1234 g
->stub_addr
= from
;
1235 to
= (htab
->ovly_entry
[0]->root
.u
.def
.value
1236 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_offset
1237 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_section
->vma
);
1239 if (((dest
| to
| from
) & 3) != 0)
1244 dest_ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
1246 if (htab
->params
->ovly_flavour
== ovly_normal
1247 && !htab
->params
->compact_stub
)
1249 bfd_put_32 (sec
->owner
, ILA
+ ((dest_ovl
<< 7) & 0x01ffff80) + 78,
1250 sec
->contents
+ sec
->size
);
1251 bfd_put_32 (sec
->owner
, LNOP
,
1252 sec
->contents
+ sec
->size
+ 4);
1253 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
1254 sec
->contents
+ sec
->size
+ 8);
1256 bfd_put_32 (sec
->owner
, BR
+ (((to
- (from
+ 12)) << 5) & 0x007fff80),
1257 sec
->contents
+ sec
->size
+ 12);
1259 bfd_put_32 (sec
->owner
, BRA
+ ((to
<< 5) & 0x007fff80),
1260 sec
->contents
+ sec
->size
+ 12);
1262 else if (htab
->params
->ovly_flavour
== ovly_normal
1263 && htab
->params
->compact_stub
)
1266 bfd_put_32 (sec
->owner
, BRSL
+ (((to
- from
) << 5) & 0x007fff80) + 75,
1267 sec
->contents
+ sec
->size
);
1269 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1270 sec
->contents
+ sec
->size
);
1271 bfd_put_32 (sec
->owner
, (dest
& 0x3ffff) | (dest_ovl
<< 18),
1272 sec
->contents
+ sec
->size
+ 4);
1274 else if (htab
->params
->ovly_flavour
== ovly_soft_icache
1275 && htab
->params
->compact_stub
)
1278 if (stub_type
== nonovl_stub
)
1280 else if (stub_type
== call_ovl_stub
)
1281 /* A brsl makes lr live and *(*sp+16) is live.
1282 Tail calls have the same liveness. */
1284 else if (!htab
->params
->lrlive_analysis
)
1285 /* Assume stack frame and lr save. */
1287 else if (irela
!= NULL
)
1289 /* Analyse branch instructions. */
1290 struct function_info
*caller
;
1293 caller
= find_function (isec
, irela
->r_offset
, info
);
1294 if (caller
->start
== NULL
)
1295 off
= irela
->r_offset
;
1298 struct function_info
*found
= NULL
;
1300 /* Find the earliest piece of this function that
1301 has frame adjusting instructions. We might
1302 see dynamic frame adjustment (eg. for alloca)
1303 in some later piece, but functions using
1304 alloca always set up a frame earlier. Frame
1305 setup instructions are always in one piece. */
1306 if (caller
->lr_store
!= (bfd_vma
) -1
1307 || caller
->sp_adjust
!= (bfd_vma
) -1)
1309 while (caller
->start
!= NULL
)
1311 caller
= caller
->start
;
1312 if (caller
->lr_store
!= (bfd_vma
) -1
1313 || caller
->sp_adjust
!= (bfd_vma
) -1)
1321 if (off
> caller
->sp_adjust
)
1323 if (off
> caller
->lr_store
)
1324 /* Only *(*sp+16) is live. */
1327 /* If no lr save, then we must be in a
1328 leaf function with a frame.
1329 lr is still live. */
1332 else if (off
> caller
->lr_store
)
1334 /* Between lr save and stack adjust. */
1336 /* This should never happen since prologues won't
1341 /* On entry to function. */
1344 if (stub_type
!= br000_ovl_stub
1345 && lrlive
!= stub_type
- br000_ovl_stub
)
1346 info
->callbacks
->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1347 "from analysis (%u)\n"),
1348 isec
, irela
->r_offset
, lrlive
,
1349 stub_type
- br000_ovl_stub
);
1352 /* If given lrlive info via .brinfo, use it. */
1353 if (stub_type
> br000_ovl_stub
)
1354 lrlive
= stub_type
- br000_ovl_stub
;
1357 to
= (htab
->ovly_entry
[1]->root
.u
.def
.value
1358 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_offset
1359 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_section
->vma
);
1361 /* The branch that uses this stub goes to stub_addr + 4. We'll
1362 set up an xor pattern that can be used by the icache manager
1363 to modify this branch to go directly to its destination. */
1365 br_dest
= g
->stub_addr
;
1368 /* Except in the case of _SPUEAR_ stubs, the branch in
1369 question is the one in the stub itself. */
1370 BFD_ASSERT (stub_type
== nonovl_stub
);
1371 g
->br_addr
= g
->stub_addr
;
1375 set_id
= ((dest_ovl
- 1) >> htab
->num_lines_log2
) + 1;
1376 bfd_put_32 (sec
->owner
, (set_id
<< 18) | (dest
& 0x3ffff),
1377 sec
->contents
+ sec
->size
);
1378 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1379 sec
->contents
+ sec
->size
+ 4);
1380 bfd_put_32 (sec
->owner
, (lrlive
<< 29) | (g
->br_addr
& 0x3ffff),
1381 sec
->contents
+ sec
->size
+ 8);
1382 patt
= dest
^ br_dest
;
1383 if (irela
!= NULL
&& ELF32_R_TYPE (irela
->r_info
) == R_SPU_REL16
)
1384 patt
= (dest
- g
->br_addr
) ^ (br_dest
- g
->br_addr
);
1385 bfd_put_32 (sec
->owner
, (patt
<< 5) & 0x007fff80,
1386 sec
->contents
+ sec
->size
+ 12);
1389 /* Extra space for linked list entries. */
1395 sec
->size
+= ovl_stub_size (htab
->params
);
1397 if (htab
->params
->emit_stub_syms
)
1403 len
= 8 + sizeof (".ovl_call.") - 1;
1405 len
+= strlen (h
->root
.root
.string
);
1410 add
= (int) irela
->r_addend
& 0xffffffff;
1413 name
= bfd_malloc (len
);
1417 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1419 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1421 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1422 dest_sec
->id
& 0xffffffff,
1423 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1425 sprintf (name
+ len
- 9, "+%x", add
);
1427 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1431 if (h
->root
.type
== bfd_link_hash_new
)
1433 h
->root
.type
= bfd_link_hash_defined
;
1434 h
->root
.u
.def
.section
= sec
;
1435 h
->size
= ovl_stub_size (htab
->params
);
1436 h
->root
.u
.def
.value
= sec
->size
- h
->size
;
1440 h
->ref_regular_nonweak
= 1;
1441 h
->forced_local
= 1;
1449 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1453 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1455 /* Symbols starting with _SPUEAR_ need a stub because they may be
1456 invoked by the PPU. */
1457 struct bfd_link_info
*info
= inf
;
1458 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1461 if ((h
->root
.type
== bfd_link_hash_defined
1462 || h
->root
.type
== bfd_link_hash_defweak
)
1464 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1465 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1466 && sym_sec
->output_section
!= bfd_abs_section_ptr
1467 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1468 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1469 || htab
->params
->non_overlay_stubs
))
1471 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1478 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1480 /* Symbols starting with _SPUEAR_ need a stub because they may be
1481 invoked by the PPU. */
1482 struct bfd_link_info
*info
= inf
;
1483 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1486 if ((h
->root
.type
== bfd_link_hash_defined
1487 || h
->root
.type
== bfd_link_hash_defweak
)
1489 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1490 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1491 && sym_sec
->output_section
!= bfd_abs_section_ptr
1492 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1493 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1494 || htab
->params
->non_overlay_stubs
))
1496 return build_stub (info
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1497 h
->root
.u
.def
.value
, sym_sec
);
1503 /* Size or build stubs. */
1506 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1508 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1511 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1513 extern const bfd_target bfd_elf32_spu_vec
;
1514 Elf_Internal_Shdr
*symtab_hdr
;
1516 Elf_Internal_Sym
*local_syms
= NULL
;
1518 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1521 /* We'll need the symbol table in a second. */
1522 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1523 if (symtab_hdr
->sh_info
== 0)
1526 /* Walk over each section attached to the input bfd. */
1527 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1529 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1531 /* If there aren't any relocs, then there's nothing more to do. */
1532 if ((isec
->flags
& SEC_RELOC
) == 0
1533 || isec
->reloc_count
== 0)
1536 if (!maybe_needs_stubs (isec
))
1539 /* Get the relocs. */
1540 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1542 if (internal_relocs
== NULL
)
1543 goto error_ret_free_local
;
1545 /* Now examine each relocation. */
1546 irela
= internal_relocs
;
1547 irelaend
= irela
+ isec
->reloc_count
;
1548 for (; irela
< irelaend
; irela
++)
1550 enum elf_spu_reloc_type r_type
;
1551 unsigned int r_indx
;
1553 Elf_Internal_Sym
*sym
;
1554 struct elf_link_hash_entry
*h
;
1555 enum _stub_type stub_type
;
1557 r_type
= ELF32_R_TYPE (irela
->r_info
);
1558 r_indx
= ELF32_R_SYM (irela
->r_info
);
1560 if (r_type
>= R_SPU_max
)
1562 bfd_set_error (bfd_error_bad_value
);
1563 error_ret_free_internal
:
1564 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1565 free (internal_relocs
);
1566 error_ret_free_local
:
1567 if (local_syms
!= NULL
1568 && (symtab_hdr
->contents
1569 != (unsigned char *) local_syms
))
1574 /* Determine the reloc target section. */
1575 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1576 goto error_ret_free_internal
;
1578 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1580 if (stub_type
== no_stub
)
1582 else if (stub_type
== stub_error
)
1583 goto error_ret_free_internal
;
1585 if (htab
->stub_count
== NULL
)
1588 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1589 htab
->stub_count
= bfd_zmalloc (amt
);
1590 if (htab
->stub_count
== NULL
)
1591 goto error_ret_free_internal
;
1596 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1597 goto error_ret_free_internal
;
1604 dest
= h
->root
.u
.def
.value
;
1606 dest
= sym
->st_value
;
1607 dest
+= irela
->r_addend
;
1608 if (!build_stub (info
, ibfd
, isec
, stub_type
, h
, irela
,
1610 goto error_ret_free_internal
;
1614 /* We're done with the internal relocs, free them. */
1615 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1616 free (internal_relocs
);
1619 if (local_syms
!= NULL
1620 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1622 if (!info
->keep_memory
)
1625 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1632 /* Allocate space for overlay call and return stubs.
1633 Return 0 on error, 1 if no stubs, 2 otherwise. */
1636 spu_elf_size_stubs (struct bfd_link_info
*info
)
1638 struct spu_link_hash_table
*htab
;
1645 if (!process_stubs (info
, FALSE
))
1648 htab
= spu_hash_table (info
);
1649 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1653 if (htab
->stub_count
== NULL
)
1656 ibfd
= info
->input_bfds
;
1657 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1658 htab
->stub_sec
= bfd_zmalloc (amt
);
1659 if (htab
->stub_sec
== NULL
)
1662 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1663 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1664 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1665 htab
->stub_sec
[0] = stub
;
1667 || !bfd_set_section_alignment (ibfd
, stub
,
1668 ovl_stub_size_log2 (htab
->params
)))
1670 stub
->size
= htab
->stub_count
[0] * ovl_stub_size (htab
->params
);
1671 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1672 /* Extra space for linked list entries. */
1673 stub
->size
+= htab
->stub_count
[0] * 16;
1675 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1677 asection
*osec
= htab
->ovl_sec
[i
];
1678 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1679 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1680 htab
->stub_sec
[ovl
] = stub
;
1682 || !bfd_set_section_alignment (ibfd
, stub
,
1683 ovl_stub_size_log2 (htab
->params
)))
1685 stub
->size
= htab
->stub_count
[ovl
] * ovl_stub_size (htab
->params
);
1688 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1690 /* Space for icache manager tables.
1691 a) Tag array, one quadword per cache line.
1692 b) Rewrite "to" list, one quadword per cache line.
1693 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1694 a power-of-two number of full quadwords) per cache line. */
1697 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1698 if (htab
->ovtab
== NULL
1699 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1702 htab
->ovtab
->size
= (16 + 16 + (16 << htab
->fromelem_size_log2
))
1703 << htab
->num_lines_log2
;
1705 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1706 htab
->init
= bfd_make_section_anyway_with_flags (ibfd
, ".ovini", flags
);
1707 if (htab
->init
== NULL
1708 || !bfd_set_section_alignment (ibfd
, htab
->init
, 4))
1711 htab
->init
->size
= 16;
1715 /* htab->ovtab consists of two arrays.
1725 . } _ovly_buf_table[];
1728 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1729 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1730 if (htab
->ovtab
== NULL
1731 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1734 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1737 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1738 if (htab
->toe
== NULL
1739 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1741 htab
->toe
->size
= 16;
1746 /* Called from ld to place overlay manager data sections. This is done
1747 after the overlay manager itself is loaded, mainly so that the
1748 linker's htab->init section is placed after any other .ovl.init
1752 spu_elf_place_overlay_data (struct bfd_link_info
*info
)
1754 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1758 if (htab
->stub_count
== NULL
)
1761 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[0], NULL
, ".text");
1763 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1765 asection
*osec
= htab
->ovl_sec
[i
];
1766 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1767 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[ovl
], osec
, NULL
);
1770 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1771 (*htab
->params
->place_spu_section
) (htab
->init
, NULL
, ".ovl.init");
1774 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1776 (*htab
->params
->place_spu_section
) (htab
->ovtab
, NULL
, ovout
);
1778 (*htab
->params
->place_spu_section
) (htab
->toe
, NULL
, ".toe");
1781 /* Functions to handle embedded spu_ovl.o object. */
1784 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1790 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1796 struct _ovl_stream
*os
;
1800 os
= (struct _ovl_stream
*) stream
;
1801 max
= (const char *) os
->end
- (const char *) os
->start
;
1803 if ((ufile_ptr
) offset
>= max
)
1807 if (count
> max
- offset
)
1808 count
= max
- offset
;
1810 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1815 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1817 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1824 return *ovl_bfd
!= NULL
;
1828 overlay_index (asection
*sec
)
1831 || sec
->output_section
== bfd_abs_section_ptr
)
1833 return spu_elf_section_data (sec
->output_section
)->u
.o
.ovl_index
;
1836 /* Define an STT_OBJECT symbol. */
1838 static struct elf_link_hash_entry
*
1839 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1841 struct elf_link_hash_entry
*h
;
1843 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1847 if (h
->root
.type
!= bfd_link_hash_defined
1850 h
->root
.type
= bfd_link_hash_defined
;
1851 h
->root
.u
.def
.section
= htab
->ovtab
;
1852 h
->type
= STT_OBJECT
;
1855 h
->ref_regular_nonweak
= 1;
1858 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1860 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1861 h
->root
.u
.def
.section
->owner
,
1862 h
->root
.root
.string
);
1863 bfd_set_error (bfd_error_bad_value
);
1868 (*_bfd_error_handler
) (_("you are not allowed to define %s in a script"),
1869 h
->root
.root
.string
);
1870 bfd_set_error (bfd_error_bad_value
);
1877 /* Fill in all stubs and the overlay tables. */
1880 spu_elf_build_stubs (struct bfd_link_info
*info
)
1882 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1883 struct elf_link_hash_entry
*h
;
1889 if (htab
->stub_count
== NULL
)
1892 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1893 if (htab
->stub_sec
[i
]->size
!= 0)
1895 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1896 htab
->stub_sec
[i
]->size
);
1897 if (htab
->stub_sec
[i
]->contents
== NULL
)
1899 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1900 htab
->stub_sec
[i
]->size
= 0;
1903 for (i
= 0; i
< 2; i
++)
1905 h
= htab
->ovly_entry
[i
];
1906 BFD_ASSERT (h
!= NULL
);
1908 if ((h
->root
.type
== bfd_link_hash_defined
1909 || h
->root
.type
== bfd_link_hash_defweak
)
1912 s
= h
->root
.u
.def
.section
->output_section
;
1913 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1915 (*_bfd_error_handler
) (_("%s in overlay section"),
1916 h
->root
.root
.string
);
1917 bfd_set_error (bfd_error_bad_value
);
1925 /* Fill in all the stubs. */
1926 process_stubs (info
, TRUE
);
1927 if (!htab
->stub_err
)
1928 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1932 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1933 bfd_set_error (bfd_error_bad_value
);
1937 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1939 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1941 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1942 bfd_set_error (bfd_error_bad_value
);
1945 htab
->stub_sec
[i
]->rawsize
= 0;
1948 if (htab
->ovtab
== NULL
|| htab
->ovtab
->size
== 0)
1951 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1952 if (htab
->ovtab
->contents
== NULL
)
1955 p
= htab
->ovtab
->contents
;
1956 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1960 h
= define_ovtab_symbol (htab
, "__icache_tag_array");
1963 h
->root
.u
.def
.value
= 0;
1964 h
->size
= 16 << htab
->num_lines_log2
;
1967 h
= define_ovtab_symbol (htab
, "__icache_tag_array_size");
1970 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
1971 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1973 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to");
1976 h
->root
.u
.def
.value
= off
;
1977 h
->size
= 16 << htab
->num_lines_log2
;
1980 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to_size");
1983 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
1984 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1986 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from");
1989 h
->root
.u
.def
.value
= off
;
1990 h
->size
= 16 << (htab
->fromelem_size_log2
+ htab
->num_lines_log2
);
1993 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from_size");
1996 h
->root
.u
.def
.value
= 16 << (htab
->fromelem_size_log2
1997 + htab
->num_lines_log2
);
1998 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2000 h
= define_ovtab_symbol (htab
, "__icache_log2_fromelemsize");
2003 h
->root
.u
.def
.value
= htab
->fromelem_size_log2
;
2004 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2006 h
= define_ovtab_symbol (htab
, "__icache_base");
2009 h
->root
.u
.def
.value
= htab
->ovl_sec
[0]->vma
;
2010 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2011 h
->size
= htab
->num_buf
<< htab
->line_size_log2
;
2013 h
= define_ovtab_symbol (htab
, "__icache_linesize");
2016 h
->root
.u
.def
.value
= 1 << htab
->line_size_log2
;
2017 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2019 h
= define_ovtab_symbol (htab
, "__icache_log2_linesize");
2022 h
->root
.u
.def
.value
= htab
->line_size_log2
;
2023 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2025 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_linesize");
2028 h
->root
.u
.def
.value
= -htab
->line_size_log2
;
2029 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2031 h
= define_ovtab_symbol (htab
, "__icache_cachesize");
2034 h
->root
.u
.def
.value
= 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
);
2035 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2037 h
= define_ovtab_symbol (htab
, "__icache_log2_cachesize");
2040 h
->root
.u
.def
.value
= htab
->num_lines_log2
+ htab
->line_size_log2
;
2041 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2043 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_cachesize");
2046 h
->root
.u
.def
.value
= -(htab
->num_lines_log2
+ htab
->line_size_log2
);
2047 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2049 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
2051 htab
->init
->contents
= bfd_zalloc (htab
->init
->owner
,
2053 if (htab
->init
->contents
== NULL
)
2056 h
= define_ovtab_symbol (htab
, "__icache_fileoff");
2059 h
->root
.u
.def
.value
= 0;
2060 h
->root
.u
.def
.section
= htab
->init
;
2066 /* Write out _ovly_table. */
2067 /* set low bit of .size to mark non-overlay area as present. */
2069 obfd
= htab
->ovtab
->output_section
->owner
;
2070 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
2072 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
2076 unsigned long off
= ovl_index
* 16;
2077 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
2079 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
2080 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16,
2082 /* file_off written later in spu_elf_modify_program_headers. */
2083 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
2087 h
= define_ovtab_symbol (htab
, "_ovly_table");
2090 h
->root
.u
.def
.value
= 16;
2091 h
->size
= htab
->num_overlays
* 16;
2093 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
2096 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2099 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
2102 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2103 h
->size
= htab
->num_buf
* 4;
2105 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
2108 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
2112 h
= define_ovtab_symbol (htab
, "_EAR_");
2115 h
->root
.u
.def
.section
= htab
->toe
;
2116 h
->root
.u
.def
.value
= 0;
2122 /* Check that all loadable section VMAs lie in the range
2123 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2126 spu_elf_check_vma (struct bfd_link_info
*info
)
2128 struct elf_segment_map
*m
;
2130 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2131 bfd
*abfd
= info
->output_bfd
;
2132 bfd_vma hi
= htab
->params
->local_store_hi
;
2133 bfd_vma lo
= htab
->params
->local_store_lo
;
2135 htab
->local_store
= hi
+ 1 - lo
;
2137 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
2138 if (m
->p_type
== PT_LOAD
)
2139 for (i
= 0; i
< m
->count
; i
++)
2140 if (m
->sections
[i
]->size
!= 0
2141 && (m
->sections
[i
]->vma
< lo
2142 || m
->sections
[i
]->vma
> hi
2143 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
2144 return m
->sections
[i
];
2149 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2150 Search for stack adjusting insns, and return the sp delta.
2151 If a store of lr is found save the instruction offset to *LR_STORE.
2152 If a stack adjusting instruction is found, save that offset to
2156 find_function_stack_adjust (asection
*sec
,
2163 memset (reg
, 0, sizeof (reg
));
2164 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
2166 unsigned char buf
[4];
2170 /* Assume no relocs on stack adjusing insns. */
2171 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
2175 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
2177 if (buf
[0] == 0x24 /* stqd */)
2179 if (rt
== 0 /* lr */ && ra
== 1 /* sp */)
2184 /* Partly decoded immediate field. */
2185 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
2187 if (buf
[0] == 0x1c /* ai */)
2190 imm
= (imm
^ 0x200) - 0x200;
2191 reg
[rt
] = reg
[ra
] + imm
;
2193 if (rt
== 1 /* sp */)
2197 *sp_adjust
= offset
;
2201 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
2203 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2205 reg
[rt
] = reg
[ra
] + reg
[rb
];
2210 *sp_adjust
= offset
;
2214 else if (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */)
2216 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2218 reg
[rt
] = reg
[rb
] - reg
[ra
];
2223 *sp_adjust
= offset
;
2227 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2229 if (buf
[0] >= 0x42 /* ila */)
2230 imm
|= (buf
[0] & 1) << 17;
2235 if (buf
[0] == 0x40 /* il */)
2237 if ((buf
[1] & 0x80) == 0)
2239 imm
= (imm
^ 0x8000) - 0x8000;
2241 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
2247 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
2249 reg
[rt
] |= imm
& 0xffff;
2252 else if (buf
[0] == 0x04 /* ori */)
2255 imm
= (imm
^ 0x200) - 0x200;
2256 reg
[rt
] = reg
[ra
] | imm
;
2259 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
2261 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
2262 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
2263 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
2264 | ((imm
& 0x1000) ? 0x000000ff : 0));
2267 else if (buf
[0] == 0x16 /* andbi */)
2273 reg
[rt
] = reg
[ra
] & imm
;
2276 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
2278 /* Used in pic reg load. Say rt is trashed. Won't be used
2279 in stack adjust, but we need to continue past this branch. */
2283 else if (is_branch (buf
) || is_indirect_branch (buf
))
2284 /* If we hit a branch then we must be out of the prologue. */
2291 /* qsort predicate to sort symbols by section and value. */
2293 static Elf_Internal_Sym
*sort_syms_syms
;
2294 static asection
**sort_syms_psecs
;
2297 sort_syms (const void *a
, const void *b
)
2299 Elf_Internal_Sym
*const *s1
= a
;
2300 Elf_Internal_Sym
*const *s2
= b
;
2301 asection
*sec1
,*sec2
;
2302 bfd_signed_vma delta
;
2304 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
2305 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
2308 return sec1
->index
- sec2
->index
;
2310 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
2312 return delta
< 0 ? -1 : 1;
2314 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
2316 return delta
< 0 ? -1 : 1;
2318 return *s1
< *s2
? -1 : 1;
2321 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2322 entries for section SEC. */
2324 static struct spu_elf_stack_info
*
2325 alloc_stack_info (asection
*sec
, int max_fun
)
2327 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2330 amt
= sizeof (struct spu_elf_stack_info
);
2331 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
2332 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
2333 if (sec_data
->u
.i
.stack_info
!= NULL
)
2334 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
2335 return sec_data
->u
.i
.stack_info
;
2338 /* Add a new struct function_info describing a (part of a) function
2339 starting at SYM_H. Keep the array sorted by address. */
2341 static struct function_info
*
2342 maybe_insert_function (asection
*sec
,
2345 bfd_boolean is_func
)
2347 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2348 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2354 sinfo
= alloc_stack_info (sec
, 20);
2361 Elf_Internal_Sym
*sym
= sym_h
;
2362 off
= sym
->st_value
;
2363 size
= sym
->st_size
;
2367 struct elf_link_hash_entry
*h
= sym_h
;
2368 off
= h
->root
.u
.def
.value
;
2372 for (i
= sinfo
->num_fun
; --i
>= 0; )
2373 if (sinfo
->fun
[i
].lo
<= off
)
2378 /* Don't add another entry for an alias, but do update some
2380 if (sinfo
->fun
[i
].lo
== off
)
2382 /* Prefer globals over local syms. */
2383 if (global
&& !sinfo
->fun
[i
].global
)
2385 sinfo
->fun
[i
].global
= TRUE
;
2386 sinfo
->fun
[i
].u
.h
= sym_h
;
2389 sinfo
->fun
[i
].is_func
= TRUE
;
2390 return &sinfo
->fun
[i
];
2392 /* Ignore a zero-size symbol inside an existing function. */
2393 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
2394 return &sinfo
->fun
[i
];
2397 if (sinfo
->num_fun
>= sinfo
->max_fun
)
2399 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
2400 bfd_size_type old
= amt
;
2402 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2403 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
2404 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2405 sinfo
= bfd_realloc (sinfo
, amt
);
2408 memset ((char *) sinfo
+ old
, 0, amt
- old
);
2409 sec_data
->u
.i
.stack_info
= sinfo
;
2412 if (++i
< sinfo
->num_fun
)
2413 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
2414 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
2415 sinfo
->fun
[i
].is_func
= is_func
;
2416 sinfo
->fun
[i
].global
= global
;
2417 sinfo
->fun
[i
].sec
= sec
;
2419 sinfo
->fun
[i
].u
.h
= sym_h
;
2421 sinfo
->fun
[i
].u
.sym
= sym_h
;
2422 sinfo
->fun
[i
].lo
= off
;
2423 sinfo
->fun
[i
].hi
= off
+ size
;
2424 sinfo
->fun
[i
].lr_store
= -1;
2425 sinfo
->fun
[i
].sp_adjust
= -1;
2426 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
,
2427 &sinfo
->fun
[i
].lr_store
,
2428 &sinfo
->fun
[i
].sp_adjust
);
2429 sinfo
->num_fun
+= 1;
2430 return &sinfo
->fun
[i
];
2433 /* Return the name of FUN. */
2436 func_name (struct function_info
*fun
)
2440 Elf_Internal_Shdr
*symtab_hdr
;
2442 while (fun
->start
!= NULL
)
2446 return fun
->u
.h
->root
.root
.string
;
2449 if (fun
->u
.sym
->st_name
== 0)
2451 size_t len
= strlen (sec
->name
);
2452 char *name
= bfd_malloc (len
+ 10);
2455 sprintf (name
, "%s+%lx", sec
->name
,
2456 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
2460 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2461 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
2464 /* Read the instruction at OFF in SEC. Return true iff the instruction
2465 is a nop, lnop, or stop 0 (all zero insn). */
2468 is_nop (asection
*sec
, bfd_vma off
)
2470 unsigned char insn
[4];
2472 if (off
+ 4 > sec
->size
2473 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
2475 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
2477 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
2482 /* Extend the range of FUN to cover nop padding up to LIMIT.
2483 Return TRUE iff some instruction other than a NOP was found. */
2486 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
2488 bfd_vma off
= (fun
->hi
+ 3) & -4;
2490 while (off
< limit
&& is_nop (fun
->sec
, off
))
2501 /* Check and fix overlapping function ranges. Return TRUE iff there
2502 are gaps in the current info we have about functions in SEC. */
2505 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
2507 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2508 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2510 bfd_boolean gaps
= FALSE
;
2515 for (i
= 1; i
< sinfo
->num_fun
; i
++)
2516 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
2518 /* Fix overlapping symbols. */
2519 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
2520 const char *f2
= func_name (&sinfo
->fun
[i
]);
2522 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
2523 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
2525 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
2528 if (sinfo
->num_fun
== 0)
2532 if (sinfo
->fun
[0].lo
!= 0)
2534 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
2536 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
2538 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
2539 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
2541 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2547 /* Search current function info for a function that contains address
2548 OFFSET in section SEC. */
2550 static struct function_info
*
2551 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2553 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2554 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2558 hi
= sinfo
->num_fun
;
2561 mid
= (lo
+ hi
) / 2;
2562 if (offset
< sinfo
->fun
[mid
].lo
)
2564 else if (offset
>= sinfo
->fun
[mid
].hi
)
2567 return &sinfo
->fun
[mid
];
2569 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2571 bfd_set_error (bfd_error_bad_value
);
2575 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2576 if CALLEE was new. If this function return FALSE, CALLEE should
2580 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2582 struct call_info
**pp
, *p
;
2584 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2585 if (p
->fun
== callee
->fun
)
2587 /* Tail calls use less stack than normal calls. Retain entry
2588 for normal call over one for tail call. */
2589 p
->is_tail
&= callee
->is_tail
;
2592 p
->fun
->start
= NULL
;
2593 p
->fun
->is_func
= TRUE
;
2596 /* Reorder list so most recent call is first. */
2598 p
->next
= caller
->call_list
;
2599 caller
->call_list
= p
;
2602 callee
->next
= caller
->call_list
;
2604 caller
->call_list
= callee
;
2608 /* Copy CALL and insert the copy into CALLER. */
2611 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2613 struct call_info
*callee
;
2614 callee
= bfd_malloc (sizeof (*callee
));
2618 if (!insert_callee (caller
, callee
))
2623 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2624 overlay stub sections. */
2627 interesting_section (asection
*s
)
2629 return (s
->output_section
!= bfd_abs_section_ptr
2630 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2631 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2635 /* Rummage through the relocs for SEC, looking for function calls.
2636 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2637 mark destination symbols on calls as being functions. Also
2638 look at branches, which may be tail calls or go to hot/cold
2639 section part of same function. */
2642 mark_functions_via_relocs (asection
*sec
,
2643 struct bfd_link_info
*info
,
2646 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2647 Elf_Internal_Shdr
*symtab_hdr
;
2649 unsigned int priority
= 0;
2650 static bfd_boolean warned
;
2652 if (!interesting_section (sec
)
2653 || sec
->reloc_count
== 0)
2656 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2658 if (internal_relocs
== NULL
)
2661 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2662 psyms
= &symtab_hdr
->contents
;
2663 irela
= internal_relocs
;
2664 irelaend
= irela
+ sec
->reloc_count
;
2665 for (; irela
< irelaend
; irela
++)
2667 enum elf_spu_reloc_type r_type
;
2668 unsigned int r_indx
;
2670 Elf_Internal_Sym
*sym
;
2671 struct elf_link_hash_entry
*h
;
2673 bfd_boolean reject
, is_call
;
2674 struct function_info
*caller
;
2675 struct call_info
*callee
;
2678 r_type
= ELF32_R_TYPE (irela
->r_info
);
2679 if (r_type
!= R_SPU_REL16
2680 && r_type
!= R_SPU_ADDR16
)
2683 if (!(call_tree
&& spu_hash_table (info
)->params
->auto_overlay
))
2687 r_indx
= ELF32_R_SYM (irela
->r_info
);
2688 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2692 || sym_sec
->output_section
== bfd_abs_section_ptr
)
2698 unsigned char insn
[4];
2700 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2701 irela
->r_offset
, 4))
2703 if (is_branch (insn
))
2705 is_call
= (insn
[0] & 0xfd) == 0x31;
2706 priority
= insn
[1] & 0x0f;
2708 priority
|= insn
[2];
2710 priority
|= insn
[3];
2712 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2713 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2716 info
->callbacks
->einfo
2717 (_("%B(%A+0x%v): call to non-code section"
2718 " %B(%A), analysis incomplete\n"),
2719 sec
->owner
, sec
, irela
->r_offset
,
2720 sym_sec
->owner
, sym_sec
);
2728 if (!(call_tree
&& spu_hash_table (info
)->params
->auto_overlay
)
2736 /* For --auto-overlay, count possible stubs we need for
2737 function pointer references. */
2738 unsigned int sym_type
;
2742 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2743 if (sym_type
== STT_FUNC
)
2744 spu_hash_table (info
)->non_ovly_stub
+= 1;
2749 val
= h
->root
.u
.def
.value
;
2751 val
= sym
->st_value
;
2752 val
+= irela
->r_addend
;
2756 struct function_info
*fun
;
2758 if (irela
->r_addend
!= 0)
2760 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2763 fake
->st_value
= val
;
2765 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2769 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2771 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2774 if (irela
->r_addend
!= 0
2775 && fun
->u
.sym
!= sym
)
2780 caller
= find_function (sec
, irela
->r_offset
, info
);
2783 callee
= bfd_malloc (sizeof *callee
);
2787 callee
->fun
= find_function (sym_sec
, val
, info
);
2788 if (callee
->fun
== NULL
)
2790 callee
->is_tail
= !is_call
;
2791 callee
->is_pasted
= FALSE
;
2792 callee
->priority
= priority
;
2794 if (callee
->fun
->last_caller
!= sec
)
2796 callee
->fun
->last_caller
= sec
;
2797 callee
->fun
->call_count
+= 1;
2799 if (!insert_callee (caller
, callee
))
2802 && !callee
->fun
->is_func
2803 && callee
->fun
->stack
== 0)
2805 /* This is either a tail call or a branch from one part of
2806 the function to another, ie. hot/cold section. If the
2807 destination has been called by some other function then
2808 it is a separate function. We also assume that functions
2809 are not split across input files. */
2810 if (sec
->owner
!= sym_sec
->owner
)
2812 callee
->fun
->start
= NULL
;
2813 callee
->fun
->is_func
= TRUE
;
2815 else if (callee
->fun
->start
== NULL
)
2817 struct function_info
*caller_start
= caller
;
2818 while (caller_start
->start
)
2819 caller_start
= caller_start
->start
;
2821 if (caller_start
!= callee
->fun
)
2822 callee
->fun
->start
= caller_start
;
2826 struct function_info
*callee_start
;
2827 struct function_info
*caller_start
;
2828 callee_start
= callee
->fun
;
2829 while (callee_start
->start
)
2830 callee_start
= callee_start
->start
;
2831 caller_start
= caller
;
2832 while (caller_start
->start
)
2833 caller_start
= caller_start
->start
;
2834 if (caller_start
!= callee_start
)
2836 callee
->fun
->start
= NULL
;
2837 callee
->fun
->is_func
= TRUE
;
2846 /* Handle something like .init or .fini, which has a piece of a function.
2847 These sections are pasted together to form a single function. */
2850 pasted_function (asection
*sec
)
2852 struct bfd_link_order
*l
;
2853 struct _spu_elf_section_data
*sec_data
;
2854 struct spu_elf_stack_info
*sinfo
;
2855 Elf_Internal_Sym
*fake
;
2856 struct function_info
*fun
, *fun_start
;
2858 fake
= bfd_zmalloc (sizeof (*fake
));
2862 fake
->st_size
= sec
->size
;
2864 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2865 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2869 /* Find a function immediately preceding this section. */
2871 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2873 if (l
->u
.indirect
.section
== sec
)
2875 if (fun_start
!= NULL
)
2877 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2881 fun
->start
= fun_start
;
2883 callee
->is_tail
= TRUE
;
2884 callee
->is_pasted
= TRUE
;
2886 if (!insert_callee (fun_start
, callee
))
2892 if (l
->type
== bfd_indirect_link_order
2893 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2894 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2895 && sinfo
->num_fun
!= 0)
2896 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2899 /* Don't return an error if we did not find a function preceding this
2900 section. The section may have incorrect flags. */
2904 /* Map address ranges in code sections to functions. */
2907 discover_functions (struct bfd_link_info
*info
)
2911 Elf_Internal_Sym
***psym_arr
;
2912 asection
***sec_arr
;
2913 bfd_boolean gaps
= FALSE
;
2916 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2919 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2920 if (psym_arr
== NULL
)
2922 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2923 if (sec_arr
== NULL
)
2926 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2928 ibfd
= ibfd
->link_next
, bfd_idx
++)
2930 extern const bfd_target bfd_elf32_spu_vec
;
2931 Elf_Internal_Shdr
*symtab_hdr
;
2934 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2935 asection
**psecs
, **p
;
2937 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2940 /* Read all the symbols. */
2941 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2942 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2946 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2947 if (interesting_section (sec
))
2955 if (symtab_hdr
->contents
!= NULL
)
2957 /* Don't use cached symbols since the generic ELF linker
2958 code only reads local symbols, and we need globals too. */
2959 free (symtab_hdr
->contents
);
2960 symtab_hdr
->contents
= NULL
;
2962 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2964 symtab_hdr
->contents
= (void *) syms
;
2968 /* Select defined function symbols that are going to be output. */
2969 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2972 psym_arr
[bfd_idx
] = psyms
;
2973 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2976 sec_arr
[bfd_idx
] = psecs
;
2977 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2978 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2979 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2983 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2984 if (s
!= NULL
&& interesting_section (s
))
2987 symcount
= psy
- psyms
;
2990 /* Sort them by section and offset within section. */
2991 sort_syms_syms
= syms
;
2992 sort_syms_psecs
= psecs
;
2993 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2995 /* Now inspect the function symbols. */
2996 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2998 asection
*s
= psecs
[*psy
- syms
];
2999 Elf_Internal_Sym
**psy2
;
3001 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
3002 if (psecs
[*psy2
- syms
] != s
)
3005 if (!alloc_stack_info (s
, psy2
- psy
))
3010 /* First install info about properly typed and sized functions.
3011 In an ideal world this will cover all code sections, except
3012 when partitioning functions into hot and cold sections,
3013 and the horrible pasted together .init and .fini functions. */
3014 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
3017 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3019 asection
*s
= psecs
[sy
- syms
];
3020 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
3025 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3026 if (interesting_section (sec
))
3027 gaps
|= check_function_ranges (sec
, info
);
3032 /* See if we can discover more function symbols by looking at
3034 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3036 ibfd
= ibfd
->link_next
, bfd_idx
++)
3040 if (psym_arr
[bfd_idx
] == NULL
)
3043 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3044 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
3048 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3050 ibfd
= ibfd
->link_next
, bfd_idx
++)
3052 Elf_Internal_Shdr
*symtab_hdr
;
3054 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
3057 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
3060 psecs
= sec_arr
[bfd_idx
];
3062 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
3063 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
3066 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3067 if (interesting_section (sec
))
3068 gaps
|= check_function_ranges (sec
, info
);
3072 /* Finally, install all globals. */
3073 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
3077 s
= psecs
[sy
- syms
];
3079 /* Global syms might be improperly typed functions. */
3080 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
3081 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
3083 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
3089 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3091 extern const bfd_target bfd_elf32_spu_vec
;
3094 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3097 /* Some of the symbols we've installed as marking the
3098 beginning of functions may have a size of zero. Extend
3099 the range of such functions to the beginning of the
3100 next symbol of interest. */
3101 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3102 if (interesting_section (sec
))
3104 struct _spu_elf_section_data
*sec_data
;
3105 struct spu_elf_stack_info
*sinfo
;
3107 sec_data
= spu_elf_section_data (sec
);
3108 sinfo
= sec_data
->u
.i
.stack_info
;
3109 if (sinfo
!= NULL
&& sinfo
->num_fun
!= 0)
3112 bfd_vma hi
= sec
->size
;
3114 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
3116 sinfo
->fun
[fun_idx
].hi
= hi
;
3117 hi
= sinfo
->fun
[fun_idx
].lo
;
3120 sinfo
->fun
[0].lo
= 0;
3122 /* No symbols in this section. Must be .init or .fini
3123 or something similar. */
3124 else if (!pasted_function (sec
))
3130 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3132 ibfd
= ibfd
->link_next
, bfd_idx
++)
3134 if (psym_arr
[bfd_idx
] == NULL
)
3137 free (psym_arr
[bfd_idx
]);
3138 free (sec_arr
[bfd_idx
]);
3147 /* Iterate over all function_info we have collected, calling DOIT on
3148 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3152 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
3153 struct bfd_link_info
*,
3155 struct bfd_link_info
*info
,
3161 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3163 extern const bfd_target bfd_elf32_spu_vec
;
3166 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3169 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3171 struct _spu_elf_section_data
*sec_data
;
3172 struct spu_elf_stack_info
*sinfo
;
3174 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3175 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3178 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3179 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
3180 if (!doit (&sinfo
->fun
[i
], info
, param
))
3188 /* Transfer call info attached to struct function_info entries for
3189 all of a given function's sections to the first entry. */
3192 transfer_calls (struct function_info
*fun
,
3193 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3194 void *param ATTRIBUTE_UNUSED
)
3196 struct function_info
*start
= fun
->start
;
3200 struct call_info
*call
, *call_next
;
3202 while (start
->start
!= NULL
)
3203 start
= start
->start
;
3204 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
3206 call_next
= call
->next
;
3207 if (!insert_callee (start
, call
))
3210 fun
->call_list
= NULL
;
3215 /* Mark nodes in the call graph that are called by some other node. */
3218 mark_non_root (struct function_info
*fun
,
3219 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3220 void *param ATTRIBUTE_UNUSED
)
3222 struct call_info
*call
;
3227 for (call
= fun
->call_list
; call
; call
= call
->next
)
3229 call
->fun
->non_root
= TRUE
;
3230 mark_non_root (call
->fun
, 0, 0);
3235 /* Remove cycles from the call graph. Set depth of nodes. */
3238 remove_cycles (struct function_info
*fun
,
3239 struct bfd_link_info
*info
,
3242 struct call_info
**callp
, *call
;
3243 unsigned int depth
= *(unsigned int *) param
;
3244 unsigned int max_depth
= depth
;
3248 fun
->marking
= TRUE
;
3250 callp
= &fun
->call_list
;
3251 while ((call
= *callp
) != NULL
)
3253 call
->max_depth
= depth
+ !call
->is_pasted
;
3254 if (!call
->fun
->visit2
)
3256 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
3258 if (max_depth
< call
->max_depth
)
3259 max_depth
= call
->max_depth
;
3261 else if (call
->fun
->marking
)
3263 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3265 if (!htab
->params
->auto_overlay
3266 && htab
->params
->stack_analysis
)
3268 const char *f1
= func_name (fun
);
3269 const char *f2
= func_name (call
->fun
);
3271 info
->callbacks
->info (_("Stack analysis will ignore the call "
3275 *callp
= call
->next
;
3279 callp
= &call
->next
;
3281 fun
->marking
= FALSE
;
3282 *(unsigned int *) param
= max_depth
;
3286 /* Check that we actually visited all nodes in remove_cycles. If we
3287 didn't, then there is some cycle in the call graph not attached to
3288 any root node. Arbitrarily choose a node in the cycle as a new
3289 root and break the cycle. */
3292 mark_detached_root (struct function_info
*fun
,
3293 struct bfd_link_info
*info
,
3298 fun
->non_root
= FALSE
;
3299 *(unsigned int *) param
= 0;
3300 return remove_cycles (fun
, info
, param
);
3303 /* Populate call_list for each function. */
3306 build_call_tree (struct bfd_link_info
*info
)
3311 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3313 extern const bfd_target bfd_elf32_spu_vec
;
3316 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3319 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3320 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
3324 /* Transfer call info from hot/cold section part of function
3326 if (!spu_hash_table (info
)->params
->auto_overlay
3327 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
3330 /* Find the call graph root(s). */
3331 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
3334 /* Remove cycles from the call graph. We start from the root node(s)
3335 so that we break cycles in a reasonable place. */
3337 if (!for_each_node (remove_cycles
, info
, &depth
, TRUE
))
3340 return for_each_node (mark_detached_root
, info
, &depth
, FALSE
);
3343 /* qsort predicate to sort calls by priority, max_depth then count. */
3346 sort_calls (const void *a
, const void *b
)
3348 struct call_info
*const *c1
= a
;
3349 struct call_info
*const *c2
= b
;
3352 delta
= (*c2
)->priority
- (*c1
)->priority
;
3356 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
3360 delta
= (*c2
)->count
- (*c1
)->count
;
3364 return (char *) c1
- (char *) c2
;
3368 unsigned int max_overlay_size
;
3371 /* Set linker_mark and gc_mark on any sections that we will put in
3372 overlays. These flags are used by the generic ELF linker, but we
3373 won't be continuing on to bfd_elf_final_link so it is OK to use
3374 them. linker_mark is clear before we get here. Set segment_mark
3375 on sections that are part of a pasted function (excluding the last
3378 Set up function rodata section if --overlay-rodata. We don't
3379 currently include merged string constant rodata sections since
3381 Sort the call graph so that the deepest nodes will be visited
3385 mark_overlay_section (struct function_info
*fun
,
3386 struct bfd_link_info
*info
,
3389 struct call_info
*call
;
3391 struct _mos_param
*mos_param
= param
;
3392 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3398 if (!fun
->sec
->linker_mark
3399 && (htab
->params
->ovly_flavour
!= ovly_soft_icache
3400 || htab
->params
->non_ia_text
3401 || strncmp (fun
->sec
->name
, ".text.ia.", 9) == 0
3402 || strcmp (fun
->sec
->name
, ".init") == 0
3403 || strcmp (fun
->sec
->name
, ".fini") == 0))
3407 fun
->sec
->linker_mark
= 1;
3408 fun
->sec
->gc_mark
= 1;
3409 fun
->sec
->segment_mark
= 0;
3410 /* Ensure SEC_CODE is set on this text section (it ought to
3411 be!), and SEC_CODE is clear on rodata sections. We use
3412 this flag to differentiate the two overlay section types. */
3413 fun
->sec
->flags
|= SEC_CODE
;
3415 size
= fun
->sec
->size
;
3416 if (htab
->params
->auto_overlay
& OVERLAY_RODATA
)
3420 /* Find the rodata section corresponding to this function's
3422 if (strcmp (fun
->sec
->name
, ".text") == 0)
3424 name
= bfd_malloc (sizeof (".rodata"));
3427 memcpy (name
, ".rodata", sizeof (".rodata"));
3429 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
3431 size_t len
= strlen (fun
->sec
->name
);
3432 name
= bfd_malloc (len
+ 3);
3435 memcpy (name
, ".rodata", sizeof (".rodata"));
3436 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
3438 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
3440 size_t len
= strlen (fun
->sec
->name
) + 1;
3441 name
= bfd_malloc (len
);
3444 memcpy (name
, fun
->sec
->name
, len
);
3450 asection
*rodata
= NULL
;
3451 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
3452 if (group_sec
== NULL
)
3453 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
3455 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
3457 if (strcmp (group_sec
->name
, name
) == 0)
3462 group_sec
= elf_section_data (group_sec
)->next_in_group
;
3464 fun
->rodata
= rodata
;
3467 size
+= fun
->rodata
->size
;
3468 if (htab
->params
->line_size
!= 0
3469 && size
> htab
->params
->line_size
)
3471 size
-= fun
->rodata
->size
;
3476 fun
->rodata
->linker_mark
= 1;
3477 fun
->rodata
->gc_mark
= 1;
3478 fun
->rodata
->flags
&= ~SEC_CODE
;
3484 if (mos_param
->max_overlay_size
< size
)
3485 mos_param
->max_overlay_size
= size
;
3488 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3493 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
3497 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3498 calls
[count
++] = call
;
3500 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
3502 fun
->call_list
= NULL
;
3506 calls
[count
]->next
= fun
->call_list
;
3507 fun
->call_list
= calls
[count
];
3512 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3514 if (call
->is_pasted
)
3516 /* There can only be one is_pasted call per function_info. */
3517 BFD_ASSERT (!fun
->sec
->segment_mark
);
3518 fun
->sec
->segment_mark
= 1;
3520 if (!mark_overlay_section (call
->fun
, info
, param
))
3524 /* Don't put entry code into an overlay. The overlay manager needs
3525 a stack! Also, don't mark .ovl.init as an overlay. */
3526 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
3527 == info
->output_bfd
->start_address
3528 || strncmp (fun
->sec
->output_section
->name
, ".ovl.init", 9) == 0)
3530 fun
->sec
->linker_mark
= 0;
3531 if (fun
->rodata
!= NULL
)
3532 fun
->rodata
->linker_mark
= 0;
3537 /* If non-zero then unmark functions called from those within sections
3538 that we need to unmark. Unfortunately this isn't reliable since the
3539 call graph cannot know the destination of function pointer calls. */
3540 #define RECURSE_UNMARK 0
3543 asection
*exclude_input_section
;
3544 asection
*exclude_output_section
;
3545 unsigned long clearing
;
3548 /* Undo some of mark_overlay_section's work. */
3551 unmark_overlay_section (struct function_info
*fun
,
3552 struct bfd_link_info
*info
,
3555 struct call_info
*call
;
3556 struct _uos_param
*uos_param
= param
;
3557 unsigned int excluded
= 0;
3565 if (fun
->sec
== uos_param
->exclude_input_section
3566 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3570 uos_param
->clearing
+= excluded
;
3572 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3574 fun
->sec
->linker_mark
= 0;
3576 fun
->rodata
->linker_mark
= 0;
3579 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3580 if (!unmark_overlay_section (call
->fun
, info
, param
))
3584 uos_param
->clearing
-= excluded
;
3589 unsigned int lib_size
;
3590 asection
**lib_sections
;
3593 /* Add sections we have marked as belonging to overlays to an array
3594 for consideration as non-overlay sections. The array consist of
3595 pairs of sections, (text,rodata), for functions in the call graph. */
3598 collect_lib_sections (struct function_info
*fun
,
3599 struct bfd_link_info
*info
,
3602 struct _cl_param
*lib_param
= param
;
3603 struct call_info
*call
;
3610 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3613 size
= fun
->sec
->size
;
3615 size
+= fun
->rodata
->size
;
3617 if (size
<= lib_param
->lib_size
)
3619 *lib_param
->lib_sections
++ = fun
->sec
;
3620 fun
->sec
->gc_mark
= 0;
3621 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3623 *lib_param
->lib_sections
++ = fun
->rodata
;
3624 fun
->rodata
->gc_mark
= 0;
3627 *lib_param
->lib_sections
++ = NULL
;
3630 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3631 collect_lib_sections (call
->fun
, info
, param
);
3636 /* qsort predicate to sort sections by call count. */
3639 sort_lib (const void *a
, const void *b
)
3641 asection
*const *s1
= a
;
3642 asection
*const *s2
= b
;
3643 struct _spu_elf_section_data
*sec_data
;
3644 struct spu_elf_stack_info
*sinfo
;
3648 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3649 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3652 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3653 delta
-= sinfo
->fun
[i
].call_count
;
3656 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3657 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3660 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3661 delta
+= sinfo
->fun
[i
].call_count
;
3670 /* Remove some sections from those marked to be in overlays. Choose
3671 those that are called from many places, likely library functions. */
3674 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3677 asection
**lib_sections
;
3678 unsigned int i
, lib_count
;
3679 struct _cl_param collect_lib_param
;
3680 struct function_info dummy_caller
;
3681 struct spu_link_hash_table
*htab
;
3683 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3685 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3687 extern const bfd_target bfd_elf32_spu_vec
;
3690 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3693 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3694 if (sec
->linker_mark
3695 && sec
->size
< lib_size
3696 && (sec
->flags
& SEC_CODE
) != 0)
3699 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3700 if (lib_sections
== NULL
)
3701 return (unsigned int) -1;
3702 collect_lib_param
.lib_size
= lib_size
;
3703 collect_lib_param
.lib_sections
= lib_sections
;
3704 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3706 return (unsigned int) -1;
3707 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3709 /* Sort sections so that those with the most calls are first. */
3711 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3713 htab
= spu_hash_table (info
);
3714 for (i
= 0; i
< lib_count
; i
++)
3716 unsigned int tmp
, stub_size
;
3718 struct _spu_elf_section_data
*sec_data
;
3719 struct spu_elf_stack_info
*sinfo
;
3721 sec
= lib_sections
[2 * i
];
3722 /* If this section is OK, its size must be less than lib_size. */
3724 /* If it has a rodata section, then add that too. */
3725 if (lib_sections
[2 * i
+ 1])
3726 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3727 /* Add any new overlay call stubs needed by the section. */
3730 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3731 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3734 struct call_info
*call
;
3736 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3737 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3738 if (call
->fun
->sec
->linker_mark
)
3740 struct call_info
*p
;
3741 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3742 if (p
->fun
== call
->fun
)
3745 stub_size
+= ovl_stub_size (htab
->params
);
3748 if (tmp
+ stub_size
< lib_size
)
3750 struct call_info
**pp
, *p
;
3752 /* This section fits. Mark it as non-overlay. */
3753 lib_sections
[2 * i
]->linker_mark
= 0;
3754 if (lib_sections
[2 * i
+ 1])
3755 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3756 lib_size
-= tmp
+ stub_size
;
3757 /* Call stubs to the section we just added are no longer
3759 pp
= &dummy_caller
.call_list
;
3760 while ((p
= *pp
) != NULL
)
3761 if (!p
->fun
->sec
->linker_mark
)
3763 lib_size
+= ovl_stub_size (htab
->params
);
3769 /* Add new call stubs to dummy_caller. */
3770 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3771 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3774 struct call_info
*call
;
3776 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3777 for (call
= sinfo
->fun
[k
].call_list
;
3780 if (call
->fun
->sec
->linker_mark
)
3782 struct call_info
*callee
;
3783 callee
= bfd_malloc (sizeof (*callee
));
3785 return (unsigned int) -1;
3787 if (!insert_callee (&dummy_caller
, callee
))
3793 while (dummy_caller
.call_list
!= NULL
)
3795 struct call_info
*call
= dummy_caller
.call_list
;
3796 dummy_caller
.call_list
= call
->next
;
3799 for (i
= 0; i
< 2 * lib_count
; i
++)
3800 if (lib_sections
[i
])
3801 lib_sections
[i
]->gc_mark
= 1;
3802 free (lib_sections
);
3806 /* Build an array of overlay sections. The deepest node's section is
3807 added first, then its parent node's section, then everything called
3808 from the parent section. The idea being to group sections to
3809 minimise calls between different overlays. */
3812 collect_overlays (struct function_info
*fun
,
3813 struct bfd_link_info
*info
,
3816 struct call_info
*call
;
3817 bfd_boolean added_fun
;
3818 asection
***ovly_sections
= param
;
3824 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3825 if (!call
->is_pasted
)
3827 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3833 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3835 fun
->sec
->gc_mark
= 0;
3836 *(*ovly_sections
)++ = fun
->sec
;
3837 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3839 fun
->rodata
->gc_mark
= 0;
3840 *(*ovly_sections
)++ = fun
->rodata
;
3843 *(*ovly_sections
)++ = NULL
;
3846 /* Pasted sections must stay with the first section. We don't
3847 put pasted sections in the array, just the first section.
3848 Mark subsequent sections as already considered. */
3849 if (fun
->sec
->segment_mark
)
3851 struct function_info
*call_fun
= fun
;
3854 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3855 if (call
->is_pasted
)
3857 call_fun
= call
->fun
;
3858 call_fun
->sec
->gc_mark
= 0;
3859 if (call_fun
->rodata
)
3860 call_fun
->rodata
->gc_mark
= 0;
3866 while (call_fun
->sec
->segment_mark
);
3870 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3871 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3876 struct _spu_elf_section_data
*sec_data
;
3877 struct spu_elf_stack_info
*sinfo
;
3879 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3880 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3883 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3884 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3892 struct _sum_stack_param
{
3894 size_t overall_stack
;
3895 bfd_boolean emit_stack_syms
;
3898 /* Descend the call graph for FUN, accumulating total stack required. */
3901 sum_stack (struct function_info
*fun
,
3902 struct bfd_link_info
*info
,
3905 struct call_info
*call
;
3906 struct function_info
*max
;
3907 size_t stack
, cum_stack
;
3909 bfd_boolean has_call
;
3910 struct _sum_stack_param
*sum_stack_param
= param
;
3911 struct spu_link_hash_table
*htab
;
3913 cum_stack
= fun
->stack
;
3914 sum_stack_param
->cum_stack
= cum_stack
;
3920 for (call
= fun
->call_list
; call
; call
= call
->next
)
3922 if (!call
->is_pasted
)
3924 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3926 stack
= sum_stack_param
->cum_stack
;
3927 /* Include caller stack for normal calls, don't do so for
3928 tail calls. fun->stack here is local stack usage for
3930 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3931 stack
+= fun
->stack
;
3932 if (cum_stack
< stack
)
3939 sum_stack_param
->cum_stack
= cum_stack
;
3941 /* Now fun->stack holds cumulative stack. */
3942 fun
->stack
= cum_stack
;
3946 && sum_stack_param
->overall_stack
< cum_stack
)
3947 sum_stack_param
->overall_stack
= cum_stack
;
3949 htab
= spu_hash_table (info
);
3950 if (htab
->params
->auto_overlay
)
3953 f1
= func_name (fun
);
3954 if (htab
->params
->stack_analysis
)
3957 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3958 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3959 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3963 info
->callbacks
->minfo (_(" calls:\n"));
3964 for (call
= fun
->call_list
; call
; call
= call
->next
)
3965 if (!call
->is_pasted
)
3967 const char *f2
= func_name (call
->fun
);
3968 const char *ann1
= call
->fun
== max
? "*" : " ";
3969 const char *ann2
= call
->is_tail
? "t" : " ";
3971 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
3976 if (sum_stack_param
->emit_stack_syms
)
3978 char *name
= bfd_malloc (18 + strlen (f1
));
3979 struct elf_link_hash_entry
*h
;
3984 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
3985 sprintf (name
, "__stack_%s", f1
);
3987 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
3989 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
3992 && (h
->root
.type
== bfd_link_hash_new
3993 || h
->root
.type
== bfd_link_hash_undefined
3994 || h
->root
.type
== bfd_link_hash_undefweak
))
3996 h
->root
.type
= bfd_link_hash_defined
;
3997 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
3998 h
->root
.u
.def
.value
= cum_stack
;
4003 h
->ref_regular_nonweak
= 1;
4004 h
->forced_local
= 1;
4012 /* SEC is part of a pasted function. Return the call_info for the
4013 next section of this function. */
4015 static struct call_info
*
4016 find_pasted_call (asection
*sec
)
4018 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
4019 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
4020 struct call_info
*call
;
4023 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4024 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
4025 if (call
->is_pasted
)
4031 /* qsort predicate to sort bfds by file name. */
4034 sort_bfds (const void *a
, const void *b
)
4036 bfd
*const *abfd1
= a
;
4037 bfd
*const *abfd2
= b
;
4039 return strcmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
4043 print_one_overlay_section (FILE *script
,
4046 unsigned int ovlynum
,
4047 unsigned int *ovly_map
,
4048 asection
**ovly_sections
,
4049 struct bfd_link_info
*info
)
4053 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4055 asection
*sec
= ovly_sections
[2 * j
];
4057 if (fprintf (script
, " %s%c%s (%s)\n",
4058 (sec
->owner
->my_archive
!= NULL
4059 ? sec
->owner
->my_archive
->filename
: ""),
4060 info
->path_separator
,
4061 sec
->owner
->filename
,
4064 if (sec
->segment_mark
)
4066 struct call_info
*call
= find_pasted_call (sec
);
4067 while (call
!= NULL
)
4069 struct function_info
*call_fun
= call
->fun
;
4070 sec
= call_fun
->sec
;
4071 if (fprintf (script
, " %s%c%s (%s)\n",
4072 (sec
->owner
->my_archive
!= NULL
4073 ? sec
->owner
->my_archive
->filename
: ""),
4074 info
->path_separator
,
4075 sec
->owner
->filename
,
4078 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4079 if (call
->is_pasted
)
4085 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4087 asection
*sec
= ovly_sections
[2 * j
+ 1];
4089 && fprintf (script
, " %s%c%s (%s)\n",
4090 (sec
->owner
->my_archive
!= NULL
4091 ? sec
->owner
->my_archive
->filename
: ""),
4092 info
->path_separator
,
4093 sec
->owner
->filename
,
4097 sec
= ovly_sections
[2 * j
];
4098 if (sec
->segment_mark
)
4100 struct call_info
*call
= find_pasted_call (sec
);
4101 while (call
!= NULL
)
4103 struct function_info
*call_fun
= call
->fun
;
4104 sec
= call_fun
->rodata
;
4106 && fprintf (script
, " %s%c%s (%s)\n",
4107 (sec
->owner
->my_archive
!= NULL
4108 ? sec
->owner
->my_archive
->filename
: ""),
4109 info
->path_separator
,
4110 sec
->owner
->filename
,
4113 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4114 if (call
->is_pasted
)
4123 /* Handle --auto-overlay. */
4126 spu_elf_auto_overlay (struct bfd_link_info
*info
)
4130 struct elf_segment_map
*m
;
4131 unsigned int fixed_size
, lo
, hi
;
4132 struct spu_link_hash_table
*htab
;
4133 unsigned int base
, i
, count
, bfd_count
;
4134 unsigned int region
, ovlynum
;
4135 asection
**ovly_sections
, **ovly_p
;
4136 unsigned int *ovly_map
;
4138 unsigned int total_overlay_size
, overlay_size
;
4139 const char *ovly_mgr_entry
;
4140 struct elf_link_hash_entry
*h
;
4141 struct _mos_param mos_param
;
4142 struct _uos_param uos_param
;
4143 struct function_info dummy_caller
;
4145 /* Find the extents of our loadable image. */
4146 lo
= (unsigned int) -1;
4148 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4149 if (m
->p_type
== PT_LOAD
)
4150 for (i
= 0; i
< m
->count
; i
++)
4151 if (m
->sections
[i
]->size
!= 0)
4153 if (m
->sections
[i
]->vma
< lo
)
4154 lo
= m
->sections
[i
]->vma
;
4155 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
4156 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
4158 fixed_size
= hi
+ 1 - lo
;
4160 if (!discover_functions (info
))
4163 if (!build_call_tree (info
))
4166 htab
= spu_hash_table (info
);
4167 if (htab
->reserved
== 0)
4169 struct _sum_stack_param sum_stack_param
;
4171 sum_stack_param
.emit_stack_syms
= 0;
4172 sum_stack_param
.overall_stack
= 0;
4173 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4175 htab
->reserved
= sum_stack_param
.overall_stack
+ htab
->extra_stack_space
;
4178 /* No need for overlays if everything already fits. */
4179 if (fixed_size
+ htab
->reserved
<= htab
->local_store
4180 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
4182 htab
->params
->auto_overlay
= 0;
4186 uos_param
.exclude_input_section
= 0;
4187 uos_param
.exclude_output_section
4188 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
4190 ovly_mgr_entry
= "__ovly_load";
4191 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4192 ovly_mgr_entry
= "__icache_br_handler";
4193 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
4194 FALSE
, FALSE
, FALSE
);
4196 && (h
->root
.type
== bfd_link_hash_defined
4197 || h
->root
.type
== bfd_link_hash_defweak
)
4200 /* We have a user supplied overlay manager. */
4201 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
4205 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4206 builtin version to .text, and will adjust .text size. */
4207 fixed_size
+= (*htab
->params
->spu_elf_load_ovl_mgr
) ();
4210 /* Mark overlay sections, and find max overlay section size. */
4211 mos_param
.max_overlay_size
= 0;
4212 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
4215 /* We can't put the overlay manager or interrupt routines in
4217 uos_param
.clearing
= 0;
4218 if ((uos_param
.exclude_input_section
4219 || uos_param
.exclude_output_section
)
4220 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
4224 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4226 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
4227 if (bfd_arr
== NULL
)
4230 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4233 total_overlay_size
= 0;
4234 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4236 extern const bfd_target bfd_elf32_spu_vec
;
4238 unsigned int old_count
;
4240 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
4244 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4245 if (sec
->linker_mark
)
4247 if ((sec
->flags
& SEC_CODE
) != 0)
4249 fixed_size
-= sec
->size
;
4250 total_overlay_size
+= sec
->size
;
4252 else if ((sec
->flags
& (SEC_ALLOC
| SEC_LOAD
)) == (SEC_ALLOC
| SEC_LOAD
)
4253 && sec
->output_section
->owner
== info
->output_bfd
4254 && strncmp (sec
->output_section
->name
, ".ovl.init", 9) == 0)
4255 fixed_size
-= sec
->size
;
4256 if (count
!= old_count
)
4257 bfd_arr
[bfd_count
++] = ibfd
;
4260 /* Since the overlay link script selects sections by file name and
4261 section name, ensure that file names are unique. */
4264 bfd_boolean ok
= TRUE
;
4266 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
4267 for (i
= 1; i
< bfd_count
; ++i
)
4268 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
4270 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
4272 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
4273 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
4274 bfd_arr
[i
]->filename
,
4275 bfd_arr
[i
]->my_archive
->filename
);
4277 info
->callbacks
->einfo (_("%s duplicated\n"),
4278 bfd_arr
[i
]->filename
);
4284 info
->callbacks
->einfo (_("sorry, no support for duplicate "
4285 "object files in auto-overlay script\n"));
4286 bfd_set_error (bfd_error_bad_value
);
4292 fixed_size
+= htab
->reserved
;
4293 fixed_size
+= htab
->non_ovly_stub
* ovl_stub_size (htab
->params
);
4294 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
4296 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4298 /* Stubs in the non-icache area are bigger. */
4299 fixed_size
+= htab
->non_ovly_stub
* 16;
4300 /* Space for icache manager tables.
4301 a) Tag array, one quadword per cache line.
4302 - word 0: ia address of present line, init to zero. */
4303 fixed_size
+= 16 << htab
->num_lines_log2
;
4304 /* b) Rewrite "to" list, one quadword per cache line. */
4305 fixed_size
+= 16 << htab
->num_lines_log2
;
4306 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4307 to a power-of-two number of full quadwords) per cache line. */
4308 fixed_size
+= 16 << (htab
->fromelem_size_log2
4309 + htab
->num_lines_log2
);
4310 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4315 /* Guess number of overlays. Assuming overlay buffer is on
4316 average only half full should be conservative. */
4317 ovlynum
= (total_overlay_size
* 2 * htab
->params
->num_lines
4318 / (htab
->local_store
- fixed_size
));
4319 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4320 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
4324 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
4325 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4326 "size of 0x%v exceeds local store\n"),
4327 (bfd_vma
) fixed_size
,
4328 (bfd_vma
) mos_param
.max_overlay_size
);
4330 /* Now see if we should put some functions in the non-overlay area. */
4331 else if (fixed_size
< htab
->overlay_fixed
)
4333 unsigned int max_fixed
, lib_size
;
4335 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
4336 if (max_fixed
> htab
->overlay_fixed
)
4337 max_fixed
= htab
->overlay_fixed
;
4338 lib_size
= max_fixed
- fixed_size
;
4339 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
4340 if (lib_size
== (unsigned int) -1)
4342 fixed_size
= max_fixed
- lib_size
;
4345 /* Build an array of sections, suitably sorted to place into
4347 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
4348 if (ovly_sections
== NULL
)
4350 ovly_p
= ovly_sections
;
4351 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
4353 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
4354 ovly_map
= bfd_malloc (count
* sizeof (*ovly_map
));
4355 if (ovly_map
== NULL
)
4358 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
4359 overlay_size
= (htab
->local_store
- fixed_size
) / htab
->params
->num_lines
;
4360 if (htab
->params
->line_size
!= 0)
4361 overlay_size
= htab
->params
->line_size
;
4364 while (base
< count
)
4366 unsigned int size
= 0;
4368 for (i
= base
; i
< count
; i
++)
4372 unsigned int num_stubs
;
4373 struct call_info
*call
, *pasty
;
4374 struct _spu_elf_section_data
*sec_data
;
4375 struct spu_elf_stack_info
*sinfo
;
4378 /* See whether we can add this section to the current
4379 overlay without overflowing our overlay buffer. */
4380 sec
= ovly_sections
[2 * i
];
4381 tmp
= size
+ sec
->size
;
4382 if (ovly_sections
[2 * i
+ 1])
4383 tmp
+= ovly_sections
[2 * i
+ 1]->size
;
4384 if (tmp
> overlay_size
)
4386 if (sec
->segment_mark
)
4388 /* Pasted sections must stay together, so add their
4390 struct call_info
*pasty
= find_pasted_call (sec
);
4391 while (pasty
!= NULL
)
4393 struct function_info
*call_fun
= pasty
->fun
;
4394 tmp
+= call_fun
->sec
->size
;
4395 if (call_fun
->rodata
)
4396 tmp
+= call_fun
->rodata
->size
;
4397 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
4398 if (pasty
->is_pasted
)
4402 if (tmp
> overlay_size
)
4405 /* If we add this section, we might need new overlay call
4406 stubs. Add any overlay section calls to dummy_call. */
4408 sec_data
= spu_elf_section_data (sec
);
4409 sinfo
= sec_data
->u
.i
.stack_info
;
4410 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4411 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
4412 if (call
->is_pasted
)
4414 BFD_ASSERT (pasty
== NULL
);
4417 else if (call
->fun
->sec
->linker_mark
)
4419 if (!copy_callee (&dummy_caller
, call
))
4422 while (pasty
!= NULL
)
4424 struct function_info
*call_fun
= pasty
->fun
;
4426 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4427 if (call
->is_pasted
)
4429 BFD_ASSERT (pasty
== NULL
);
4432 else if (!copy_callee (&dummy_caller
, call
))
4436 /* Calculate call stub size. */
4438 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
4443 /* If the call is within this overlay, we won't need a
4445 for (k
= base
; k
< i
+ 1; k
++)
4446 if (call
->fun
->sec
== ovly_sections
[2 * k
])
4452 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4453 && num_stubs
> htab
->params
->max_branch
)
4455 if (tmp
+ num_stubs
* ovl_stub_size (htab
->params
)
4463 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
4464 ovly_sections
[2 * i
]->owner
,
4465 ovly_sections
[2 * i
],
4466 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
4467 bfd_set_error (bfd_error_bad_value
);
4471 while (dummy_caller
.call_list
!= NULL
)
4473 struct call_info
*call
= dummy_caller
.call_list
;
4474 dummy_caller
.call_list
= call
->next
;
4480 ovly_map
[base
++] = ovlynum
;
4483 script
= htab
->params
->spu_elf_open_overlay_script ();
4485 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4488 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4490 if (fprintf (script
,
4491 " .data.icache ALIGN (16) : { *(.ovtab) *(.data.icache) }\n"
4492 " . = ALIGN (%u);\n"
4493 " .ovl.init : { *(.ovl.init) }\n"
4494 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4495 htab
->params
->line_size
) <= 0)
4500 while (base
< count
)
4502 unsigned int indx
= ovlynum
- 1;
4503 unsigned int vma
, lma
;
4505 vma
= (indx
& (htab
->params
->num_lines
- 1)) << htab
->line_size_log2
;
4506 lma
= indx
<< htab
->line_size_log2
;
4508 if (fprintf (script
, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4509 ": AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16) + %u) {\n",
4510 ovlynum
, vma
, lma
) <= 0)
4513 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4514 ovly_map
, ovly_sections
, info
);
4515 if (base
== (unsigned) -1)
4518 if (fprintf (script
, " }\n") <= 0)
4524 if (fprintf (script
, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4525 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
)) <= 0)
4530 if (fprintf (script
,
4531 " . = ALIGN (16);\n"
4532 " .ovl.init : { *(.ovl.init) }\n"
4533 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4536 for (region
= 1; region
<= htab
->params
->num_lines
; region
++)
4540 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4548 /* We need to set lma since we are overlaying .ovl.init. */
4549 if (fprintf (script
,
4550 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4555 if (fprintf (script
, " OVERLAY :\n {\n") <= 0)
4559 while (base
< count
)
4561 if (fprintf (script
, " .ovly%u {\n", ovlynum
) <= 0)
4564 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4565 ovly_map
, ovly_sections
, info
);
4566 if (base
== (unsigned) -1)
4569 if (fprintf (script
, " }\n") <= 0)
4572 ovlynum
+= htab
->params
->num_lines
;
4573 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4577 if (fprintf (script
, " }\n") <= 0)
4584 free (ovly_sections
);
4586 if (fprintf (script
, "}\nINSERT BEFORE .text;\n") <= 0)
4588 if (fclose (script
) != 0)
4591 if (htab
->params
->auto_overlay
& AUTO_RELINK
)
4592 (*htab
->params
->spu_elf_relink
) ();
4597 bfd_set_error (bfd_error_system_call
);
4599 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
4603 /* Provide an estimate of total stack required. */
4606 spu_elf_stack_analysis (struct bfd_link_info
*info
)
4608 struct spu_link_hash_table
*htab
;
4609 struct _sum_stack_param sum_stack_param
;
4611 if (!discover_functions (info
))
4614 if (!build_call_tree (info
))
4617 htab
= spu_hash_table (info
);
4618 if (htab
->params
->stack_analysis
)
4620 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
4621 info
->callbacks
->minfo (_("\nStack size for functions. "
4622 "Annotations: '*' max stack, 't' tail call\n"));
4625 sum_stack_param
.emit_stack_syms
= htab
->params
->emit_stack_syms
;
4626 sum_stack_param
.overall_stack
= 0;
4627 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4630 if (htab
->params
->stack_analysis
)
4631 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
4632 (bfd_vma
) sum_stack_param
.overall_stack
);
4636 /* Perform a final link. */
4639 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
4641 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4643 if (htab
->params
->auto_overlay
)
4644 spu_elf_auto_overlay (info
);
4646 if ((htab
->params
->stack_analysis
4647 || (htab
->params
->ovly_flavour
== ovly_soft_icache
4648 && htab
->params
->lrlive_analysis
))
4649 && !spu_elf_stack_analysis (info
))
4650 info
->callbacks
->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4652 if (!spu_elf_build_stubs (info
))
4653 info
->callbacks
->einfo ("%F%P: can not build overlay stubs: %E\n");
4655 return bfd_elf_final_link (output_bfd
, info
);
4658 /* Called when not normally emitting relocs, ie. !info->relocatable
4659 and !info->emitrelocations. Returns a count of special relocs
4660 that need to be emitted. */
4663 spu_elf_count_relocs (struct bfd_link_info
*info
, asection
*sec
)
4665 Elf_Internal_Rela
*relocs
;
4666 unsigned int count
= 0;
4668 relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
4672 Elf_Internal_Rela
*rel
;
4673 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
4675 for (rel
= relocs
; rel
< relend
; rel
++)
4677 int r_type
= ELF32_R_TYPE (rel
->r_info
);
4678 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4682 if (elf_section_data (sec
)->relocs
!= relocs
)
4689 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4692 spu_elf_relocate_section (bfd
*output_bfd
,
4693 struct bfd_link_info
*info
,
4695 asection
*input_section
,
4697 Elf_Internal_Rela
*relocs
,
4698 Elf_Internal_Sym
*local_syms
,
4699 asection
**local_sections
)
4701 Elf_Internal_Shdr
*symtab_hdr
;
4702 struct elf_link_hash_entry
**sym_hashes
;
4703 Elf_Internal_Rela
*rel
, *relend
;
4704 struct spu_link_hash_table
*htab
;
4707 bfd_boolean emit_these_relocs
= FALSE
;
4708 bfd_boolean is_ea_sym
;
4710 unsigned int iovl
= 0;
4712 htab
= spu_hash_table (info
);
4713 stubs
= (htab
->stub_sec
!= NULL
4714 && maybe_needs_stubs (input_section
));
4715 iovl
= overlay_index (input_section
);
4716 ea
= bfd_get_section_by_name (output_bfd
, "._ea");
4717 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
4718 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
4721 relend
= relocs
+ input_section
->reloc_count
;
4722 for (; rel
< relend
; rel
++)
4725 reloc_howto_type
*howto
;
4726 unsigned int r_symndx
;
4727 Elf_Internal_Sym
*sym
;
4729 struct elf_link_hash_entry
*h
;
4730 const char *sym_name
;
4733 bfd_reloc_status_type r
;
4734 bfd_boolean unresolved_reloc
;
4736 enum _stub_type stub_type
;
4738 r_symndx
= ELF32_R_SYM (rel
->r_info
);
4739 r_type
= ELF32_R_TYPE (rel
->r_info
);
4740 howto
= elf_howto_table
+ r_type
;
4741 unresolved_reloc
= FALSE
;
4746 if (r_symndx
< symtab_hdr
->sh_info
)
4748 sym
= local_syms
+ r_symndx
;
4749 sec
= local_sections
[r_symndx
];
4750 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
4751 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4755 if (sym_hashes
== NULL
)
4758 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
4760 while (h
->root
.type
== bfd_link_hash_indirect
4761 || h
->root
.type
== bfd_link_hash_warning
)
4762 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
4765 if (h
->root
.type
== bfd_link_hash_defined
4766 || h
->root
.type
== bfd_link_hash_defweak
)
4768 sec
= h
->root
.u
.def
.section
;
4770 || sec
->output_section
== NULL
)
4771 /* Set a flag that will be cleared later if we find a
4772 relocation value for this symbol. output_section
4773 is typically NULL for symbols satisfied by a shared
4775 unresolved_reloc
= TRUE
;
4777 relocation
= (h
->root
.u
.def
.value
4778 + sec
->output_section
->vma
4779 + sec
->output_offset
);
4781 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4783 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4784 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4786 else if (!info
->relocatable
4787 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4790 err
= (info
->unresolved_syms_in_objects
== RM_GENERATE_ERROR
4791 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
);
4792 if (!info
->callbacks
->undefined_symbol (info
,
4793 h
->root
.root
.string
,
4796 rel
->r_offset
, err
))
4800 sym_name
= h
->root
.root
.string
;
4803 if (sec
!= NULL
&& elf_discarded_section (sec
))
4805 /* For relocs against symbols from removed linkonce sections,
4806 or sections discarded by a linker script, we just want the
4807 section contents zeroed. Avoid any special processing. */
4808 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
4814 if (info
->relocatable
)
4817 is_ea_sym
= (ea
!= NULL
4819 && sec
->output_section
== ea
);
4821 /* If this symbol is in an overlay area, we may need to relocate
4822 to the overlay stub. */
4823 addend
= rel
->r_addend
;
4826 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4827 contents
, info
)) != no_stub
)
4829 unsigned int ovl
= 0;
4830 struct got_entry
*g
, **head
;
4832 if (stub_type
!= nonovl_stub
)
4836 head
= &h
->got
.glist
;
4838 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4840 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4841 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4842 ? g
->br_addr
== (rel
->r_offset
4843 + input_section
->output_offset
4844 + input_section
->output_section
->vma
)
4845 : g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4850 relocation
= g
->stub_addr
;
4855 /* For soft icache, encode the overlay index into addresses. */
4856 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4857 && (r_type
== R_SPU_ADDR16_HI
4858 || r_type
== R_SPU_ADDR32
|| r_type
== R_SPU_REL32
)
4861 unsigned int ovl
= overlay_index (sec
);
4864 unsigned int set_id
= ((ovl
- 1) >> htab
->num_lines_log2
) + 1;
4865 relocation
+= set_id
<< 18;
4870 if (unresolved_reloc
)
4872 else if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4876 /* ._ea is a special section that isn't allocated in SPU
4877 memory, but rather occupies space in PPU memory as
4878 part of an embedded ELF image. If this reloc is
4879 against a symbol defined in ._ea, then transform the
4880 reloc into an equivalent one without a symbol
4881 relative to the start of the ELF image. */
4882 rel
->r_addend
+= (relocation
4884 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4885 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4887 emit_these_relocs
= TRUE
;
4891 unresolved_reloc
= TRUE
;
4893 if (unresolved_reloc
)
4895 (*_bfd_error_handler
)
4896 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4898 bfd_get_section_name (input_bfd
, input_section
),
4899 (long) rel
->r_offset
,
4905 r
= _bfd_final_link_relocate (howto
,
4909 rel
->r_offset
, relocation
, addend
);
4911 if (r
!= bfd_reloc_ok
)
4913 const char *msg
= (const char *) 0;
4917 case bfd_reloc_overflow
:
4918 if (!((*info
->callbacks
->reloc_overflow
)
4919 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
4920 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
4924 case bfd_reloc_undefined
:
4925 if (!((*info
->callbacks
->undefined_symbol
)
4926 (info
, sym_name
, input_bfd
, input_section
,
4927 rel
->r_offset
, TRUE
)))
4931 case bfd_reloc_outofrange
:
4932 msg
= _("internal error: out of range error");
4935 case bfd_reloc_notsupported
:
4936 msg
= _("internal error: unsupported relocation error");
4939 case bfd_reloc_dangerous
:
4940 msg
= _("internal error: dangerous error");
4944 msg
= _("internal error: unknown error");
4949 if (!((*info
->callbacks
->warning
)
4950 (info
, msg
, sym_name
, input_bfd
, input_section
,
4959 && emit_these_relocs
4960 && !info
->emitrelocations
)
4962 Elf_Internal_Rela
*wrel
;
4963 Elf_Internal_Shdr
*rel_hdr
;
4965 wrel
= rel
= relocs
;
4966 relend
= relocs
+ input_section
->reloc_count
;
4967 for (; rel
< relend
; rel
++)
4971 r_type
= ELF32_R_TYPE (rel
->r_info
);
4972 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4975 input_section
->reloc_count
= wrel
- relocs
;
4976 /* Backflips for _bfd_elf_link_output_relocs. */
4977 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
4978 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
4985 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4988 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
4989 const char *sym_name ATTRIBUTE_UNUSED
,
4990 Elf_Internal_Sym
*sym
,
4991 asection
*sym_sec ATTRIBUTE_UNUSED
,
4992 struct elf_link_hash_entry
*h
)
4994 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4996 if (!info
->relocatable
4997 && htab
->stub_sec
!= NULL
4999 && (h
->root
.type
== bfd_link_hash_defined
5000 || h
->root
.type
== bfd_link_hash_defweak
)
5002 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
5004 struct got_entry
*g
;
5006 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
5007 if (htab
->params
->ovly_flavour
== ovly_soft_icache
5008 ? g
->br_addr
== g
->stub_addr
5009 : g
->addend
== 0 && g
->ovl
== 0)
5011 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
5012 (htab
->stub_sec
[0]->output_section
->owner
,
5013 htab
->stub_sec
[0]->output_section
));
5014 sym
->st_value
= g
->stub_addr
;
5022 static int spu_plugin
= 0;
5025 spu_elf_plugin (int val
)
5030 /* Set ELF header e_type for plugins. */
5033 spu_elf_post_process_headers (bfd
*abfd
,
5034 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
5038 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
5040 i_ehdrp
->e_type
= ET_DYN
;
5044 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5045 segments for overlays. */
5048 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5055 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5056 extra
= htab
->num_overlays
;
5062 sec
= bfd_get_section_by_name (abfd
, ".toe");
5063 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
5069 /* Remove .toe section from other PT_LOAD segments and put it in
5070 a segment of its own. Put overlays in separate segments too. */
5073 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
5076 struct elf_segment_map
*m
, *m_overlay
;
5077 struct elf_segment_map
**p
, **p_overlay
;
5083 toe
= bfd_get_section_by_name (abfd
, ".toe");
5084 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
5085 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
5086 for (i
= 0; i
< m
->count
; i
++)
5087 if ((s
= m
->sections
[i
]) == toe
5088 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
5090 struct elf_segment_map
*m2
;
5093 if (i
+ 1 < m
->count
)
5095 amt
= sizeof (struct elf_segment_map
);
5096 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
5097 m2
= bfd_zalloc (abfd
, amt
);
5100 m2
->count
= m
->count
- (i
+ 1);
5101 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
5102 m2
->count
* sizeof (m
->sections
[0]));
5103 m2
->p_type
= PT_LOAD
;
5111 amt
= sizeof (struct elf_segment_map
);
5112 m2
= bfd_zalloc (abfd
, amt
);
5115 m2
->p_type
= PT_LOAD
;
5117 m2
->sections
[0] = s
;
5125 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5126 PT_LOAD segments. This can cause the .ovl.init section to be
5127 overwritten with the contents of some overlay segment. To work
5128 around this issue, we ensure that all PF_OVERLAY segments are
5129 sorted first amongst the program headers; this ensures that even
5130 with a broken loader, the .ovl.init section (which is not marked
5131 as PF_OVERLAY) will be placed into SPU local store on startup. */
5133 /* Move all overlay segments onto a separate list. */
5134 p
= &elf_tdata (abfd
)->segment_map
;
5135 p_overlay
= &m_overlay
;
5138 if ((*p
)->p_type
== PT_LOAD
&& (*p
)->count
== 1
5139 && spu_elf_section_data ((*p
)->sections
[0])->u
.o
.ovl_index
!= 0)
5141 struct elf_segment_map
*m
= *p
;
5144 p_overlay
= &m
->next
;
5151 /* Re-insert overlay segments at the head of the segment map. */
5152 *p_overlay
= elf_tdata (abfd
)->segment_map
;
5153 elf_tdata (abfd
)->segment_map
= m_overlay
;
5158 /* Tweak the section type of .note.spu_name. */
5161 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
5162 Elf_Internal_Shdr
*hdr
,
5165 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
5166 hdr
->sh_type
= SHT_NOTE
;
5170 /* Tweak phdrs before writing them out. */
5173 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5175 const struct elf_backend_data
*bed
;
5176 struct elf_obj_tdata
*tdata
;
5177 Elf_Internal_Phdr
*phdr
, *last
;
5178 struct spu_link_hash_table
*htab
;
5185 bed
= get_elf_backend_data (abfd
);
5186 tdata
= elf_tdata (abfd
);
5188 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
5189 htab
= spu_hash_table (info
);
5190 if (htab
->num_overlays
!= 0)
5192 struct elf_segment_map
*m
;
5195 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
5197 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
5199 /* Mark this as an overlay header. */
5200 phdr
[i
].p_flags
|= PF_OVERLAY
;
5202 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0
5203 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
5205 bfd_byte
*p
= htab
->ovtab
->contents
;
5206 unsigned int off
= o
* 16 + 8;
5208 /* Write file_off into _ovly_table. */
5209 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
5212 /* Soft-icache has its file offset put in .ovl.init. */
5213 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
5215 bfd_vma val
= elf_section_data (htab
->ovl_sec
[0])->this_hdr
.sh_offset
;
5217 bfd_put_32 (htab
->init
->owner
, val
, htab
->init
->contents
+ 4);
5221 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5222 of 16. This should always be possible when using the standard
5223 linker scripts, but don't create overlapping segments if
5224 someone is playing games with linker scripts. */
5226 for (i
= count
; i
-- != 0; )
5227 if (phdr
[i
].p_type
== PT_LOAD
)
5231 adjust
= -phdr
[i
].p_filesz
& 15;
5234 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
5237 adjust
= -phdr
[i
].p_memsz
& 15;
5240 && phdr
[i
].p_filesz
!= 0
5241 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
5242 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
5245 if (phdr
[i
].p_filesz
!= 0)
5249 if (i
== (unsigned int) -1)
5250 for (i
= count
; i
-- != 0; )
5251 if (phdr
[i
].p_type
== PT_LOAD
)
5255 adjust
= -phdr
[i
].p_filesz
& 15;
5256 phdr
[i
].p_filesz
+= adjust
;
5258 adjust
= -phdr
[i
].p_memsz
& 15;
5259 phdr
[i
].p_memsz
+= adjust
;
5265 #define TARGET_BIG_SYM bfd_elf32_spu_vec
5266 #define TARGET_BIG_NAME "elf32-spu"
5267 #define ELF_ARCH bfd_arch_spu
5268 #define ELF_MACHINE_CODE EM_SPU
5269 /* This matches the alignment need for DMA. */
5270 #define ELF_MAXPAGESIZE 0x80
5271 #define elf_backend_rela_normal 1
5272 #define elf_backend_can_gc_sections 1
5274 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5275 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5276 #define elf_info_to_howto spu_elf_info_to_howto
5277 #define elf_backend_count_relocs spu_elf_count_relocs
5278 #define elf_backend_relocate_section spu_elf_relocate_section
5279 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5280 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5281 #define elf_backend_object_p spu_elf_object_p
5282 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5283 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5285 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5286 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5287 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
5288 #define elf_backend_post_process_headers spu_elf_post_process_headers
5289 #define elf_backend_fake_sections spu_elf_fake_sections
5290 #define elf_backend_special_sections spu_elf_special_sections
5291 #define bfd_elf32_bfd_final_link spu_elf_final_link
5293 #include "elf32-target.h"