1 /* SPU specific support for 32-bit ELF
3 Copyright (C) 2006-2020 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* All users of this file have bfd_octets_per_byte (abfd, sec) == 1. */
31 #define OCTETS_PER_BYTE(ABFD, SEC) 1
33 /* We use RELA style relocs. Don't define USE_REL. */
35 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
39 /* Values of type 'enum elf_spu_reloc_type' are used to index this
40 array, so it must be declared in the order of that type. */
42 static reloc_howto_type elf_howto_table
[] = {
43 HOWTO (R_SPU_NONE
, 0, 3, 0, FALSE
, 0, complain_overflow_dont
,
44 bfd_elf_generic_reloc
, "SPU_NONE",
45 FALSE
, 0, 0x00000000, FALSE
),
46 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR10",
48 FALSE
, 0, 0x00ffc000, FALSE
),
49 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
56 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
57 FALSE
, 0, 0x007fff80, FALSE
),
58 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
59 bfd_elf_generic_reloc
, "SPU_ADDR18",
60 FALSE
, 0, 0x01ffff80, FALSE
),
61 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
62 bfd_elf_generic_reloc
, "SPU_ADDR32",
63 FALSE
, 0, 0xffffffff, FALSE
),
64 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
65 bfd_elf_generic_reloc
, "SPU_REL16",
66 FALSE
, 0, 0x007fff80, TRUE
),
67 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
68 bfd_elf_generic_reloc
, "SPU_ADDR7",
69 FALSE
, 0, 0x001fc000, FALSE
),
70 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9",
72 FALSE
, 0, 0x0180007f, TRUE
),
73 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
74 spu_elf_rel9
, "SPU_REL9I",
75 FALSE
, 0, 0x0000c07f, TRUE
),
76 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR10I",
78 FALSE
, 0, 0x00ffc000, FALSE
),
79 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
80 bfd_elf_generic_reloc
, "SPU_ADDR16I",
81 FALSE
, 0, 0x007fff80, FALSE
),
82 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
83 bfd_elf_generic_reloc
, "SPU_REL32",
84 FALSE
, 0, 0xffffffff, TRUE
),
85 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
86 bfd_elf_generic_reloc
, "SPU_ADDR16X",
87 FALSE
, 0, 0x007fff80, FALSE
),
88 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU32",
90 FALSE
, 0, 0xffffffff, FALSE
),
91 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
92 bfd_elf_generic_reloc
, "SPU_PPU64",
94 HOWTO (R_SPU_ADD_PIC
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
95 bfd_elf_generic_reloc
, "SPU_ADD_PIC",
96 FALSE
, 0, 0x00000000, FALSE
),
99 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
100 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
101 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
105 static enum elf_spu_reloc_type
106 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
111 return (enum elf_spu_reloc_type
) -1;
114 case BFD_RELOC_SPU_IMM10W
:
116 case BFD_RELOC_SPU_IMM16W
:
118 case BFD_RELOC_SPU_LO16
:
119 return R_SPU_ADDR16_LO
;
120 case BFD_RELOC_SPU_HI16
:
121 return R_SPU_ADDR16_HI
;
122 case BFD_RELOC_SPU_IMM18
:
124 case BFD_RELOC_SPU_PCREL16
:
126 case BFD_RELOC_SPU_IMM7
:
128 case BFD_RELOC_SPU_IMM8
:
130 case BFD_RELOC_SPU_PCREL9a
:
132 case BFD_RELOC_SPU_PCREL9b
:
134 case BFD_RELOC_SPU_IMM10
:
135 return R_SPU_ADDR10I
;
136 case BFD_RELOC_SPU_IMM16
:
137 return R_SPU_ADDR16I
;
140 case BFD_RELOC_32_PCREL
:
142 case BFD_RELOC_SPU_PPU32
:
144 case BFD_RELOC_SPU_PPU64
:
146 case BFD_RELOC_SPU_ADD_PIC
:
147 return R_SPU_ADD_PIC
;
152 spu_elf_info_to_howto (bfd
*abfd
,
154 Elf_Internal_Rela
*dst
)
156 enum elf_spu_reloc_type r_type
;
158 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
159 /* PR 17512: file: 90c2a92e. */
160 if (r_type
>= R_SPU_max
)
162 /* xgettext:c-format */
163 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
165 bfd_set_error (bfd_error_bad_value
);
168 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
172 static reloc_howto_type
*
173 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
174 bfd_reloc_code_real_type code
)
176 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
178 if (r_type
== (enum elf_spu_reloc_type
) -1)
181 return elf_howto_table
+ r_type
;
184 static reloc_howto_type
*
185 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
190 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
191 if (elf_howto_table
[i
].name
!= NULL
192 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
193 return &elf_howto_table
[i
];
198 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
200 static bfd_reloc_status_type
201 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
202 void *data
, asection
*input_section
,
203 bfd
*output_bfd
, char **error_message
)
205 bfd_size_type octets
;
209 /* If this is a relocatable link (output_bfd test tells us), just
210 call the generic function. Any adjustment will be done at final
212 if (output_bfd
!= NULL
)
213 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
214 input_section
, output_bfd
, error_message
);
216 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
217 return bfd_reloc_outofrange
;
218 octets
= reloc_entry
->address
* OCTETS_PER_BYTE (abfd
, input_section
);
220 /* Get symbol value. */
222 if (!bfd_is_com_section (symbol
->section
))
224 if (symbol
->section
->output_section
)
225 val
+= symbol
->section
->output_section
->vma
;
227 val
+= reloc_entry
->addend
;
229 /* Make it pc-relative. */
230 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
233 if (val
+ 256 >= 512)
234 return bfd_reloc_overflow
;
236 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
238 /* Move two high bits of value to REL9I and REL9 position.
239 The mask will take care of selecting the right field. */
240 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
241 insn
&= ~reloc_entry
->howto
->dst_mask
;
242 insn
|= val
& reloc_entry
->howto
->dst_mask
;
243 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
248 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
250 if (!sec
->used_by_bfd
)
252 struct _spu_elf_section_data
*sdata
;
254 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
257 sec
->used_by_bfd
= sdata
;
260 return _bfd_elf_new_section_hook (abfd
, sec
);
263 /* Set up overlay info for executables. */
266 spu_elf_object_p (bfd
*abfd
)
268 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
270 unsigned int i
, num_ovl
, num_buf
;
271 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
272 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
273 Elf_Internal_Phdr
*last_phdr
= NULL
;
275 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
276 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
281 if (last_phdr
== NULL
282 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
285 for (j
= 1; j
< elf_numsections (abfd
); j
++)
287 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
289 if (ELF_SECTION_SIZE (shdr
, phdr
) != 0
290 && ELF_SECTION_IN_SEGMENT (shdr
, phdr
))
292 asection
*sec
= shdr
->bfd_section
;
293 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
294 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
302 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
303 strip --strip-unneeded will not remove them. */
306 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
308 if (sym
->name
!= NULL
309 && sym
->section
!= bfd_abs_section_ptr
310 && strncmp (sym
->name
, "_EAR_", 5) == 0)
311 sym
->flags
|= BSF_KEEP
;
314 /* SPU ELF linker hash table. */
316 struct spu_link_hash_table
318 struct elf_link_hash_table elf
;
320 struct spu_elf_params
*params
;
322 /* Shortcuts to overlay sections. */
328 /* Count of stubs in each overlay section. */
329 unsigned int *stub_count
;
331 /* The stub section for each overlay section. */
334 struct elf_link_hash_entry
*ovly_entry
[2];
336 /* Number of overlay buffers. */
337 unsigned int num_buf
;
339 /* Total number of overlays. */
340 unsigned int num_overlays
;
342 /* For soft icache. */
343 unsigned int line_size_log2
;
344 unsigned int num_lines_log2
;
345 unsigned int fromelem_size_log2
;
347 /* How much memory we have. */
348 unsigned int local_store
;
350 /* Count of overlay stubs needed in non-overlay area. */
351 unsigned int non_ovly_stub
;
353 /* Pointer to the fixup section */
357 unsigned int stub_err
: 1;
360 /* Hijack the generic got fields for overlay stub accounting. */
364 struct got_entry
*next
;
373 #define spu_hash_table(p) \
374 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
375 == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
379 struct function_info
*fun
;
380 struct call_info
*next
;
382 unsigned int max_depth
;
383 unsigned int is_tail
: 1;
384 unsigned int is_pasted
: 1;
385 unsigned int broken_cycle
: 1;
386 unsigned int priority
: 13;
391 /* List of functions called. Also branches to hot/cold part of
393 struct call_info
*call_list
;
394 /* For hot/cold part of function, point to owner. */
395 struct function_info
*start
;
396 /* Symbol at start of function. */
398 Elf_Internal_Sym
*sym
;
399 struct elf_link_hash_entry
*h
;
401 /* Function section. */
404 /* Where last called from, and number of sections called from. */
405 asection
*last_caller
;
406 unsigned int call_count
;
407 /* Address range of (this part of) function. */
409 /* Offset where we found a store of lr, or -1 if none found. */
411 /* Offset where we found the stack adjustment insn. */
415 /* Distance from root of call tree. Tail and hot/cold branches
416 count as one deeper. We aren't counting stack frames here. */
418 /* Set if global symbol. */
419 unsigned int global
: 1;
420 /* Set if known to be start of function (as distinct from a hunk
421 in hot/cold section. */
422 unsigned int is_func
: 1;
423 /* Set if not a root node. */
424 unsigned int non_root
: 1;
425 /* Flags used during call tree traversal. It's cheaper to replicate
426 the visit flags than have one which needs clearing after a traversal. */
427 unsigned int visit1
: 1;
428 unsigned int visit2
: 1;
429 unsigned int marking
: 1;
430 unsigned int visit3
: 1;
431 unsigned int visit4
: 1;
432 unsigned int visit5
: 1;
433 unsigned int visit6
: 1;
434 unsigned int visit7
: 1;
437 struct spu_elf_stack_info
441 /* Variable size array describing functions, one per contiguous
442 address range belonging to a function. */
443 struct function_info fun
[1];
446 static struct function_info
*find_function (asection
*, bfd_vma
,
447 struct bfd_link_info
*);
449 /* Create a spu ELF linker hash table. */
451 static struct bfd_link_hash_table
*
452 spu_elf_link_hash_table_create (bfd
*abfd
)
454 struct spu_link_hash_table
*htab
;
456 htab
= bfd_zmalloc (sizeof (*htab
));
460 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
461 _bfd_elf_link_hash_newfunc
,
462 sizeof (struct elf_link_hash_entry
),
469 htab
->elf
.init_got_refcount
.refcount
= 0;
470 htab
->elf
.init_got_refcount
.glist
= NULL
;
471 htab
->elf
.init_got_offset
.offset
= 0;
472 htab
->elf
.init_got_offset
.glist
= NULL
;
473 return &htab
->elf
.root
;
477 spu_elf_setup (struct bfd_link_info
*info
, struct spu_elf_params
*params
)
479 bfd_vma max_branch_log2
;
481 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
482 htab
->params
= params
;
483 htab
->line_size_log2
= bfd_log2 (htab
->params
->line_size
);
484 htab
->num_lines_log2
= bfd_log2 (htab
->params
->num_lines
);
486 /* For the software i-cache, we provide a "from" list whose size
487 is a power-of-two number of quadwords, big enough to hold one
488 byte per outgoing branch. Compute this number here. */
489 max_branch_log2
= bfd_log2 (htab
->params
->max_branch
);
490 htab
->fromelem_size_log2
= max_branch_log2
> 4 ? max_branch_log2
- 4 : 0;
493 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
494 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
495 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
498 get_sym_h (struct elf_link_hash_entry
**hp
,
499 Elf_Internal_Sym
**symp
,
501 Elf_Internal_Sym
**locsymsp
,
502 unsigned long r_symndx
,
505 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
507 if (r_symndx
>= symtab_hdr
->sh_info
)
509 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
510 struct elf_link_hash_entry
*h
;
512 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
513 while (h
->root
.type
== bfd_link_hash_indirect
514 || h
->root
.type
== bfd_link_hash_warning
)
515 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
525 asection
*symsec
= NULL
;
526 if (h
->root
.type
== bfd_link_hash_defined
527 || h
->root
.type
== bfd_link_hash_defweak
)
528 symsec
= h
->root
.u
.def
.section
;
534 Elf_Internal_Sym
*sym
;
535 Elf_Internal_Sym
*locsyms
= *locsymsp
;
539 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
541 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
543 0, NULL
, NULL
, NULL
);
548 sym
= locsyms
+ r_symndx
;
557 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
563 /* Create the note section if not already present. This is done early so
564 that the linker maps the sections to the right place in the output. */
567 spu_elf_create_sections (struct bfd_link_info
*info
)
569 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
572 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
573 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
578 /* Make SPU_PTNOTE_SPUNAME section. */
585 ibfd
= info
->input_bfds
;
586 /* This should really be SEC_LINKER_CREATED, but then we'd need
587 to write out the section ourselves. */
588 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
589 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
591 || !bfd_set_section_alignment (s
, 4))
593 /* Because we didn't set SEC_LINKER_CREATED we need to set the
594 proper section type. */
595 elf_section_type (s
) = SHT_NOTE
;
597 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
598 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
599 size
+= (name_len
+ 3) & -4;
601 if (!bfd_set_section_size (s
, size
))
604 data
= bfd_zalloc (ibfd
, size
);
608 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
609 bfd_put_32 (ibfd
, name_len
, data
+ 4);
610 bfd_put_32 (ibfd
, 1, data
+ 8);
611 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
612 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
613 bfd_get_filename (info
->output_bfd
), name_len
);
617 if (htab
->params
->emit_fixups
)
622 if (htab
->elf
.dynobj
== NULL
)
623 htab
->elf
.dynobj
= ibfd
;
624 ibfd
= htab
->elf
.dynobj
;
625 flags
= (SEC_LOAD
| SEC_ALLOC
| SEC_READONLY
| SEC_HAS_CONTENTS
626 | SEC_IN_MEMORY
| SEC_LINKER_CREATED
);
627 s
= bfd_make_section_anyway_with_flags (ibfd
, ".fixup", flags
);
628 if (s
== NULL
|| !bfd_set_section_alignment (s
, 2))
636 /* qsort predicate to sort sections by vma. */
639 sort_sections (const void *a
, const void *b
)
641 const asection
*const *s1
= a
;
642 const asection
*const *s2
= b
;
643 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
646 return delta
< 0 ? -1 : 1;
648 return (*s1
)->index
- (*s2
)->index
;
651 /* Identify overlays in the output bfd, and number them.
652 Returns 0 on error, 1 if no overlays, 2 if overlays. */
655 spu_elf_find_overlays (struct bfd_link_info
*info
)
657 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
658 asection
**alloc_sec
;
659 unsigned int i
, n
, ovl_index
, num_buf
;
662 static const char *const entry_names
[2][2] = {
663 { "__ovly_load", "__icache_br_handler" },
664 { "__ovly_return", "__icache_call_handler" }
667 if (info
->output_bfd
->section_count
< 2)
671 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
672 if (alloc_sec
== NULL
)
675 /* Pick out all the alloced sections. */
676 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
677 if ((s
->flags
& SEC_ALLOC
) != 0
678 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
688 /* Sort them by vma. */
689 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
691 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
692 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
694 unsigned int prev_buf
= 0, set_id
= 0;
696 /* Look for an overlapping vma to find the first overlay section. */
697 bfd_vma vma_start
= 0;
699 for (i
= 1; i
< n
; i
++)
702 if (s
->vma
< ovl_end
)
704 asection
*s0
= alloc_sec
[i
- 1];
708 << (htab
->num_lines_log2
+ htab
->line_size_log2
)));
713 ovl_end
= s
->vma
+ s
->size
;
716 /* Now find any sections within the cache area. */
717 for (ovl_index
= 0, num_buf
= 0; i
< n
; i
++)
720 if (s
->vma
>= ovl_end
)
723 /* A section in an overlay area called .ovl.init is not
724 an overlay, in the sense that it might be loaded in
725 by the overlay manager, but rather the initial
726 section contents for the overlay buffer. */
727 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
729 num_buf
= ((s
->vma
- vma_start
) >> htab
->line_size_log2
) + 1;
730 set_id
= (num_buf
== prev_buf
)? set_id
+ 1 : 0;
733 if ((s
->vma
- vma_start
) & (htab
->params
->line_size
- 1))
735 info
->callbacks
->einfo (_("%X%P: overlay section %pA "
736 "does not start on a cache line\n"),
738 bfd_set_error (bfd_error_bad_value
);
741 else if (s
->size
> htab
->params
->line_size
)
743 info
->callbacks
->einfo (_("%X%P: overlay section %pA "
744 "is larger than a cache line\n"),
746 bfd_set_error (bfd_error_bad_value
);
750 alloc_sec
[ovl_index
++] = s
;
751 spu_elf_section_data (s
)->u
.o
.ovl_index
752 = (set_id
<< htab
->num_lines_log2
) + num_buf
;
753 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
757 /* Ensure there are no more overlay sections. */
761 if (s
->vma
< ovl_end
)
763 info
->callbacks
->einfo (_("%X%P: overlay section %pA "
764 "is not in cache area\n"),
766 bfd_set_error (bfd_error_bad_value
);
770 ovl_end
= s
->vma
+ s
->size
;
775 /* Look for overlapping vmas. Any with overlap must be overlays.
776 Count them. Also count the number of overlay regions. */
777 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
780 if (s
->vma
< ovl_end
)
782 asection
*s0
= alloc_sec
[i
- 1];
784 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
787 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
789 alloc_sec
[ovl_index
] = s0
;
790 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
791 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= num_buf
;
794 ovl_end
= s
->vma
+ s
->size
;
796 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
798 alloc_sec
[ovl_index
] = s
;
799 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
800 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
801 if (s0
->vma
!= s
->vma
)
803 /* xgettext:c-format */
804 info
->callbacks
->einfo (_("%X%P: overlay sections %pA "
805 "and %pA do not start at the "
808 bfd_set_error (bfd_error_bad_value
);
811 if (ovl_end
< s
->vma
+ s
->size
)
812 ovl_end
= s
->vma
+ s
->size
;
816 ovl_end
= s
->vma
+ s
->size
;
820 htab
->num_overlays
= ovl_index
;
821 htab
->num_buf
= num_buf
;
822 htab
->ovl_sec
= alloc_sec
;
827 for (i
= 0; i
< 2; i
++)
830 struct elf_link_hash_entry
*h
;
832 name
= entry_names
[i
][htab
->params
->ovly_flavour
];
833 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
837 if (h
->root
.type
== bfd_link_hash_new
)
839 h
->root
.type
= bfd_link_hash_undefined
;
841 h
->ref_regular_nonweak
= 1;
844 htab
->ovly_entry
[i
] = h
;
850 /* Non-zero to use bra in overlay stubs rather than br. */
853 #define BRA 0x30000000
854 #define BRASL 0x31000000
855 #define BR 0x32000000
856 #define BRSL 0x33000000
857 #define NOP 0x40200000
858 #define LNOP 0x00200000
859 #define ILA 0x42000000
861 /* Return true for all relative and absolute branch instructions.
869 brhnz 00100011 0.. */
872 is_branch (const unsigned char *insn
)
874 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
877 /* Return true for all indirect branch instructions.
885 bihnz 00100101 011 */
888 is_indirect_branch (const unsigned char *insn
)
890 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
893 /* Return true for branch hint instructions.
898 is_hint (const unsigned char *insn
)
900 return (insn
[0] & 0xfc) == 0x10;
903 /* True if INPUT_SECTION might need overlay stubs. */
906 maybe_needs_stubs (asection
*input_section
)
908 /* No stubs for debug sections and suchlike. */
909 if ((input_section
->flags
& SEC_ALLOC
) == 0)
912 /* No stubs for link-once sections that will be discarded. */
913 if (input_section
->output_section
== bfd_abs_section_ptr
)
916 /* Don't create stubs for .eh_frame references. */
917 if (strcmp (input_section
->name
, ".eh_frame") == 0)
939 /* Return non-zero if this reloc symbol should go via an overlay stub.
940 Return 2 if the stub must be in non-overlay area. */
942 static enum _stub_type
943 needs_ovl_stub (struct elf_link_hash_entry
*h
,
944 Elf_Internal_Sym
*sym
,
946 asection
*input_section
,
947 Elf_Internal_Rela
*irela
,
949 struct bfd_link_info
*info
)
951 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
952 enum elf_spu_reloc_type r_type
;
953 unsigned int sym_type
;
954 bfd_boolean branch
, hint
, call
;
955 enum _stub_type ret
= no_stub
;
959 || sym_sec
->output_section
== bfd_abs_section_ptr
960 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
965 /* Ensure no stubs for user supplied overlay manager syms. */
966 if (h
== htab
->ovly_entry
[0] || h
== htab
->ovly_entry
[1])
969 /* setjmp always goes via an overlay stub, because then the return
970 and hence the longjmp goes via __ovly_return. That magically
971 makes setjmp/longjmp between overlays work. */
972 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
973 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
980 sym_type
= ELF_ST_TYPE (sym
->st_info
);
982 r_type
= ELF32_R_TYPE (irela
->r_info
);
986 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
988 if (contents
== NULL
)
991 if (!bfd_get_section_contents (input_section
->owner
,
998 contents
+= irela
->r_offset
;
1000 branch
= is_branch (contents
);
1001 hint
= is_hint (contents
);
1004 call
= (contents
[0] & 0xfd) == 0x31;
1006 && sym_type
!= STT_FUNC
1007 && contents
!= insn
)
1009 /* It's common for people to write assembly and forget
1010 to give function symbols the right type. Handle
1011 calls to such symbols, but warn so that (hopefully)
1012 people will fix their code. We need the symbol
1013 type to be correct to distinguish function pointer
1014 initialisation from other pointer initialisations. */
1015 const char *sym_name
;
1018 sym_name
= h
->root
.root
.string
;
1021 Elf_Internal_Shdr
*symtab_hdr
;
1022 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
1023 sym_name
= bfd_elf_sym_name (input_section
->owner
,
1029 /* xgettext:c-format */
1030 (_("warning: call to non-function symbol %s defined in %pB"),
1031 sym_name
, sym_sec
->owner
);
1037 if ((!branch
&& htab
->params
->ovly_flavour
== ovly_soft_icache
)
1038 || (sym_type
!= STT_FUNC
1039 && !(branch
|| hint
)
1040 && (sym_sec
->flags
& SEC_CODE
) == 0))
1043 /* Usually, symbols in non-overlay sections don't need stubs. */
1044 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
1045 && !htab
->params
->non_overlay_stubs
)
1048 /* A reference from some other section to a symbol in an overlay
1049 section needs a stub. */
1050 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
1051 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
1053 unsigned int lrlive
= 0;
1055 lrlive
= (contents
[1] & 0x70) >> 4;
1057 if (!lrlive
&& (call
|| sym_type
== STT_FUNC
))
1058 ret
= call_ovl_stub
;
1060 ret
= br000_ovl_stub
+ lrlive
;
1063 /* If this insn isn't a branch then we are possibly taking the
1064 address of a function and passing it out somehow. Soft-icache code
1065 always generates inline code to do indirect branches. */
1066 if (!(branch
|| hint
)
1067 && sym_type
== STT_FUNC
1068 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
1075 count_stub (struct spu_link_hash_table
*htab
,
1078 enum _stub_type stub_type
,
1079 struct elf_link_hash_entry
*h
,
1080 const Elf_Internal_Rela
*irela
)
1082 unsigned int ovl
= 0;
1083 struct got_entry
*g
, **head
;
1086 /* If this instruction is a branch or call, we need a stub
1087 for it. One stub per function per overlay.
1088 If it isn't a branch, then we are taking the address of
1089 this function so need a stub in the non-overlay area
1090 for it. One stub per function. */
1091 if (stub_type
!= nonovl_stub
)
1092 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1095 head
= &h
->got
.glist
;
1098 if (elf_local_got_ents (ibfd
) == NULL
)
1100 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
1101 * sizeof (*elf_local_got_ents (ibfd
)));
1102 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
1103 if (elf_local_got_ents (ibfd
) == NULL
)
1106 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1109 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1111 htab
->stub_count
[ovl
] += 1;
1117 addend
= irela
->r_addend
;
1121 struct got_entry
*gnext
;
1123 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1124 if (g
->addend
== addend
&& g
->ovl
== 0)
1129 /* Need a new non-overlay area stub. Zap other stubs. */
1130 for (g
= *head
; g
!= NULL
; g
= gnext
)
1133 if (g
->addend
== addend
)
1135 htab
->stub_count
[g
->ovl
] -= 1;
1143 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1144 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1150 g
= bfd_malloc (sizeof *g
);
1155 g
->stub_addr
= (bfd_vma
) -1;
1159 htab
->stub_count
[ovl
] += 1;
1165 /* Support two sizes of overlay stubs, a slower more compact stub of two
1166 instructions, and a faster stub of four instructions.
1167 Soft-icache stubs are four or eight words. */
1170 ovl_stub_size (struct spu_elf_params
*params
)
1172 return 16 << params
->ovly_flavour
>> params
->compact_stub
;
1176 ovl_stub_size_log2 (struct spu_elf_params
*params
)
1178 return 4 + params
->ovly_flavour
- params
->compact_stub
;
1181 /* Two instruction overlay stubs look like:
1183 brsl $75,__ovly_load
1184 .word target_ovl_and_address
1186 ovl_and_address is a word with the overlay number in the top 14 bits
1187 and local store address in the bottom 18 bits.
1189 Four instruction overlay stubs look like:
1193 ila $79,target_address
1196 Software icache stubs are:
1200 .word lrlive_branchlocalstoreaddr;
1201 brasl $75,__icache_br_handler
1206 build_stub (struct bfd_link_info
*info
,
1209 enum _stub_type stub_type
,
1210 struct elf_link_hash_entry
*h
,
1211 const Elf_Internal_Rela
*irela
,
1215 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1216 unsigned int ovl
, dest_ovl
, set_id
;
1217 struct got_entry
*g
, **head
;
1219 bfd_vma addend
, from
, to
, br_dest
, patt
;
1220 unsigned int lrlive
;
1223 if (stub_type
!= nonovl_stub
)
1224 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1227 head
= &h
->got
.glist
;
1229 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1233 addend
= irela
->r_addend
;
1235 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1237 g
= bfd_malloc (sizeof *g
);
1243 g
->br_addr
= (irela
->r_offset
1244 + isec
->output_offset
1245 + isec
->output_section
->vma
);
1251 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1252 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1257 if (g
->ovl
== 0 && ovl
!= 0)
1260 if (g
->stub_addr
!= (bfd_vma
) -1)
1264 sec
= htab
->stub_sec
[ovl
];
1265 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
1266 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
1267 g
->stub_addr
= from
;
1268 to
= (htab
->ovly_entry
[0]->root
.u
.def
.value
1269 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_offset
1270 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_section
->vma
);
1272 if (((dest
| to
| from
) & 3) != 0)
1277 dest_ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
1279 if (htab
->params
->ovly_flavour
== ovly_normal
1280 && !htab
->params
->compact_stub
)
1282 bfd_put_32 (sec
->owner
, ILA
+ ((dest_ovl
<< 7) & 0x01ffff80) + 78,
1283 sec
->contents
+ sec
->size
);
1284 bfd_put_32 (sec
->owner
, LNOP
,
1285 sec
->contents
+ sec
->size
+ 4);
1286 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
1287 sec
->contents
+ sec
->size
+ 8);
1289 bfd_put_32 (sec
->owner
, BR
+ (((to
- (from
+ 12)) << 5) & 0x007fff80),
1290 sec
->contents
+ sec
->size
+ 12);
1292 bfd_put_32 (sec
->owner
, BRA
+ ((to
<< 5) & 0x007fff80),
1293 sec
->contents
+ sec
->size
+ 12);
1295 else if (htab
->params
->ovly_flavour
== ovly_normal
1296 && htab
->params
->compact_stub
)
1299 bfd_put_32 (sec
->owner
, BRSL
+ (((to
- from
) << 5) & 0x007fff80) + 75,
1300 sec
->contents
+ sec
->size
);
1302 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1303 sec
->contents
+ sec
->size
);
1304 bfd_put_32 (sec
->owner
, (dest
& 0x3ffff) | (dest_ovl
<< 18),
1305 sec
->contents
+ sec
->size
+ 4);
1307 else if (htab
->params
->ovly_flavour
== ovly_soft_icache
1308 && htab
->params
->compact_stub
)
1311 if (stub_type
== nonovl_stub
)
1313 else if (stub_type
== call_ovl_stub
)
1314 /* A brsl makes lr live and *(*sp+16) is live.
1315 Tail calls have the same liveness. */
1317 else if (!htab
->params
->lrlive_analysis
)
1318 /* Assume stack frame and lr save. */
1320 else if (irela
!= NULL
)
1322 /* Analyse branch instructions. */
1323 struct function_info
*caller
;
1326 caller
= find_function (isec
, irela
->r_offset
, info
);
1327 if (caller
->start
== NULL
)
1328 off
= irela
->r_offset
;
1331 struct function_info
*found
= NULL
;
1333 /* Find the earliest piece of this function that
1334 has frame adjusting instructions. We might
1335 see dynamic frame adjustment (eg. for alloca)
1336 in some later piece, but functions using
1337 alloca always set up a frame earlier. Frame
1338 setup instructions are always in one piece. */
1339 if (caller
->lr_store
!= (bfd_vma
) -1
1340 || caller
->sp_adjust
!= (bfd_vma
) -1)
1342 while (caller
->start
!= NULL
)
1344 caller
= caller
->start
;
1345 if (caller
->lr_store
!= (bfd_vma
) -1
1346 || caller
->sp_adjust
!= (bfd_vma
) -1)
1354 if (off
> caller
->sp_adjust
)
1356 if (off
> caller
->lr_store
)
1357 /* Only *(*sp+16) is live. */
1360 /* If no lr save, then we must be in a
1361 leaf function with a frame.
1362 lr is still live. */
1365 else if (off
> caller
->lr_store
)
1367 /* Between lr save and stack adjust. */
1369 /* This should never happen since prologues won't
1374 /* On entry to function. */
1377 if (stub_type
!= br000_ovl_stub
1378 && lrlive
!= stub_type
- br000_ovl_stub
)
1379 /* xgettext:c-format */
1380 info
->callbacks
->einfo (_("%pA:0x%v lrlive .brinfo (%u) differs "
1381 "from analysis (%u)\n"),
1382 isec
, irela
->r_offset
, lrlive
,
1383 stub_type
- br000_ovl_stub
);
1386 /* If given lrlive info via .brinfo, use it. */
1387 if (stub_type
> br000_ovl_stub
)
1388 lrlive
= stub_type
- br000_ovl_stub
;
1391 to
= (htab
->ovly_entry
[1]->root
.u
.def
.value
1392 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_offset
1393 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_section
->vma
);
1395 /* The branch that uses this stub goes to stub_addr + 4. We'll
1396 set up an xor pattern that can be used by the icache manager
1397 to modify this branch to go directly to its destination. */
1399 br_dest
= g
->stub_addr
;
1402 /* Except in the case of _SPUEAR_ stubs, the branch in
1403 question is the one in the stub itself. */
1404 BFD_ASSERT (stub_type
== nonovl_stub
);
1405 g
->br_addr
= g
->stub_addr
;
1409 set_id
= ((dest_ovl
- 1) >> htab
->num_lines_log2
) + 1;
1410 bfd_put_32 (sec
->owner
, (set_id
<< 18) | (dest
& 0x3ffff),
1411 sec
->contents
+ sec
->size
);
1412 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1413 sec
->contents
+ sec
->size
+ 4);
1414 bfd_put_32 (sec
->owner
, (lrlive
<< 29) | (g
->br_addr
& 0x3ffff),
1415 sec
->contents
+ sec
->size
+ 8);
1416 patt
= dest
^ br_dest
;
1417 if (irela
!= NULL
&& ELF32_R_TYPE (irela
->r_info
) == R_SPU_REL16
)
1418 patt
= (dest
- g
->br_addr
) ^ (br_dest
- g
->br_addr
);
1419 bfd_put_32 (sec
->owner
, (patt
<< 5) & 0x007fff80,
1420 sec
->contents
+ sec
->size
+ 12);
1423 /* Extra space for linked list entries. */
1429 sec
->size
+= ovl_stub_size (htab
->params
);
1431 if (htab
->params
->emit_stub_syms
)
1437 len
= 8 + sizeof (".ovl_call.") - 1;
1439 len
+= strlen (h
->root
.root
.string
);
1444 add
= (int) irela
->r_addend
& 0xffffffff;
1447 name
= bfd_malloc (len
+ 1);
1451 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1453 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1455 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1456 dest_sec
->id
& 0xffffffff,
1457 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1459 sprintf (name
+ len
- 9, "+%x", add
);
1461 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1465 if (h
->root
.type
== bfd_link_hash_new
)
1467 h
->root
.type
= bfd_link_hash_defined
;
1468 h
->root
.u
.def
.section
= sec
;
1469 h
->size
= ovl_stub_size (htab
->params
);
1470 h
->root
.u
.def
.value
= sec
->size
- h
->size
;
1474 h
->ref_regular_nonweak
= 1;
1475 h
->forced_local
= 1;
1483 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1487 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1489 /* Symbols starting with _SPUEAR_ need a stub because they may be
1490 invoked by the PPU. */
1491 struct bfd_link_info
*info
= inf
;
1492 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1495 if ((h
->root
.type
== bfd_link_hash_defined
1496 || h
->root
.type
== bfd_link_hash_defweak
)
1498 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1499 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1500 && sym_sec
->output_section
!= bfd_abs_section_ptr
1501 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1502 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1503 || htab
->params
->non_overlay_stubs
))
1505 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1512 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1514 /* Symbols starting with _SPUEAR_ need a stub because they may be
1515 invoked by the PPU. */
1516 struct bfd_link_info
*info
= inf
;
1517 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1520 if ((h
->root
.type
== bfd_link_hash_defined
1521 || h
->root
.type
== bfd_link_hash_defweak
)
1523 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1524 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1525 && sym_sec
->output_section
!= bfd_abs_section_ptr
1526 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1527 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1528 || htab
->params
->non_overlay_stubs
))
1530 return build_stub (info
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1531 h
->root
.u
.def
.value
, sym_sec
);
1537 /* Size or build stubs. */
1540 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1542 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1545 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
1547 extern const bfd_target spu_elf32_vec
;
1548 Elf_Internal_Shdr
*symtab_hdr
;
1550 Elf_Internal_Sym
*local_syms
= NULL
;
1552 if (ibfd
->xvec
!= &spu_elf32_vec
)
1555 /* We'll need the symbol table in a second. */
1556 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1557 if (symtab_hdr
->sh_info
== 0)
1560 /* Walk over each section attached to the input bfd. */
1561 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1563 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1565 /* If there aren't any relocs, then there's nothing more to do. */
1566 if ((isec
->flags
& SEC_RELOC
) == 0
1567 || isec
->reloc_count
== 0)
1570 if (!maybe_needs_stubs (isec
))
1573 /* Get the relocs. */
1574 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1576 if (internal_relocs
== NULL
)
1577 goto error_ret_free_local
;
1579 /* Now examine each relocation. */
1580 irela
= internal_relocs
;
1581 irelaend
= irela
+ isec
->reloc_count
;
1582 for (; irela
< irelaend
; irela
++)
1584 enum elf_spu_reloc_type r_type
;
1585 unsigned int r_indx
;
1587 Elf_Internal_Sym
*sym
;
1588 struct elf_link_hash_entry
*h
;
1589 enum _stub_type stub_type
;
1591 r_type
= ELF32_R_TYPE (irela
->r_info
);
1592 r_indx
= ELF32_R_SYM (irela
->r_info
);
1594 if (r_type
>= R_SPU_max
)
1596 bfd_set_error (bfd_error_bad_value
);
1597 error_ret_free_internal
:
1598 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1599 free (internal_relocs
);
1600 error_ret_free_local
:
1601 if (symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1606 /* Determine the reloc target section. */
1607 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1608 goto error_ret_free_internal
;
1610 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1612 if (stub_type
== no_stub
)
1614 else if (stub_type
== stub_error
)
1615 goto error_ret_free_internal
;
1617 if (htab
->stub_count
== NULL
)
1620 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1621 htab
->stub_count
= bfd_zmalloc (amt
);
1622 if (htab
->stub_count
== NULL
)
1623 goto error_ret_free_internal
;
1628 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1629 goto error_ret_free_internal
;
1636 dest
= h
->root
.u
.def
.value
;
1638 dest
= sym
->st_value
;
1639 dest
+= irela
->r_addend
;
1640 if (!build_stub (info
, ibfd
, isec
, stub_type
, h
, irela
,
1642 goto error_ret_free_internal
;
1646 /* We're done with the internal relocs, free them. */
1647 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1648 free (internal_relocs
);
1651 if (local_syms
!= NULL
1652 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1654 if (!info
->keep_memory
)
1657 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1664 /* Allocate space for overlay call and return stubs.
1665 Return 0 on error, 1 if no overlays, 2 otherwise. */
1668 spu_elf_size_stubs (struct bfd_link_info
*info
)
1670 struct spu_link_hash_table
*htab
;
1677 if (!process_stubs (info
, FALSE
))
1680 htab
= spu_hash_table (info
);
1681 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1685 ibfd
= info
->input_bfds
;
1686 if (htab
->stub_count
!= NULL
)
1688 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1689 htab
->stub_sec
= bfd_zmalloc (amt
);
1690 if (htab
->stub_sec
== NULL
)
1693 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1694 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1695 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1696 htab
->stub_sec
[0] = stub
;
1698 || !bfd_set_section_alignment (stub
,
1699 ovl_stub_size_log2 (htab
->params
)))
1701 stub
->size
= htab
->stub_count
[0] * ovl_stub_size (htab
->params
);
1702 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1703 /* Extra space for linked list entries. */
1704 stub
->size
+= htab
->stub_count
[0] * 16;
1706 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1708 asection
*osec
= htab
->ovl_sec
[i
];
1709 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1710 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1711 htab
->stub_sec
[ovl
] = stub
;
1713 || !bfd_set_section_alignment (stub
,
1714 ovl_stub_size_log2 (htab
->params
)))
1716 stub
->size
= htab
->stub_count
[ovl
] * ovl_stub_size (htab
->params
);
1720 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1722 /* Space for icache manager tables.
1723 a) Tag array, one quadword per cache line.
1724 b) Rewrite "to" list, one quadword per cache line.
1725 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1726 a power-of-two number of full quadwords) per cache line. */
1729 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1730 if (htab
->ovtab
== NULL
1731 || !bfd_set_section_alignment (htab
->ovtab
, 4))
1734 htab
->ovtab
->size
= (16 + 16 + (16 << htab
->fromelem_size_log2
))
1735 << htab
->num_lines_log2
;
1737 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1738 htab
->init
= bfd_make_section_anyway_with_flags (ibfd
, ".ovini", flags
);
1739 if (htab
->init
== NULL
1740 || !bfd_set_section_alignment (htab
->init
, 4))
1743 htab
->init
->size
= 16;
1745 else if (htab
->stub_count
== NULL
)
1749 /* htab->ovtab consists of two arrays.
1759 . } _ovly_buf_table[];
1762 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1763 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1764 if (htab
->ovtab
== NULL
1765 || !bfd_set_section_alignment (htab
->ovtab
, 4))
1768 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1771 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1772 if (htab
->toe
== NULL
1773 || !bfd_set_section_alignment (htab
->toe
, 4))
1775 htab
->toe
->size
= 16;
1780 /* Called from ld to place overlay manager data sections. This is done
1781 after the overlay manager itself is loaded, mainly so that the
1782 linker's htab->init section is placed after any other .ovl.init
1786 spu_elf_place_overlay_data (struct bfd_link_info
*info
)
1788 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1791 if (htab
->stub_sec
!= NULL
)
1793 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[0], NULL
, ".text");
1795 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1797 asection
*osec
= htab
->ovl_sec
[i
];
1798 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1799 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[ovl
], osec
, NULL
);
1803 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1804 (*htab
->params
->place_spu_section
) (htab
->init
, NULL
, ".ovl.init");
1806 if (htab
->ovtab
!= NULL
)
1808 const char *ovout
= ".data";
1809 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1811 (*htab
->params
->place_spu_section
) (htab
->ovtab
, NULL
, ovout
);
1814 if (htab
->toe
!= NULL
)
1815 (*htab
->params
->place_spu_section
) (htab
->toe
, NULL
, ".toe");
1818 /* Functions to handle embedded spu_ovl.o object. */
1821 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1827 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1833 struct _ovl_stream
*os
;
1837 os
= (struct _ovl_stream
*) stream
;
1838 max
= (const char *) os
->end
- (const char *) os
->start
;
1840 if ((ufile_ptr
) offset
>= max
)
1844 if (count
> max
- offset
)
1845 count
= max
- offset
;
1847 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1852 ovl_mgr_stat (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1856 struct _ovl_stream
*os
= (struct _ovl_stream
*) stream
;
1858 memset (sb
, 0, sizeof (*sb
));
1859 sb
->st_size
= (const char *) os
->end
- (const char *) os
->start
;
1864 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1866 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1873 return *ovl_bfd
!= NULL
;
1877 overlay_index (asection
*sec
)
1880 || sec
->output_section
== bfd_abs_section_ptr
)
1882 return spu_elf_section_data (sec
->output_section
)->u
.o
.ovl_index
;
1885 /* Define an STT_OBJECT symbol. */
1887 static struct elf_link_hash_entry
*
1888 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1890 struct elf_link_hash_entry
*h
;
1892 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1896 if (h
->root
.type
!= bfd_link_hash_defined
1899 h
->root
.type
= bfd_link_hash_defined
;
1900 h
->root
.u
.def
.section
= htab
->ovtab
;
1901 h
->type
= STT_OBJECT
;
1904 h
->ref_regular_nonweak
= 1;
1907 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1909 /* xgettext:c-format */
1910 _bfd_error_handler (_("%pB is not allowed to define %s"),
1911 h
->root
.u
.def
.section
->owner
,
1912 h
->root
.root
.string
);
1913 bfd_set_error (bfd_error_bad_value
);
1918 _bfd_error_handler (_("you are not allowed to define %s in a script"),
1919 h
->root
.root
.string
);
1920 bfd_set_error (bfd_error_bad_value
);
1927 /* Fill in all stubs and the overlay tables. */
1930 spu_elf_build_stubs (struct bfd_link_info
*info
)
1932 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1933 struct elf_link_hash_entry
*h
;
1939 if (htab
->num_overlays
!= 0)
1941 for (i
= 0; i
< 2; i
++)
1943 h
= htab
->ovly_entry
[i
];
1945 && (h
->root
.type
== bfd_link_hash_defined
1946 || h
->root
.type
== bfd_link_hash_defweak
)
1949 s
= h
->root
.u
.def
.section
->output_section
;
1950 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1952 _bfd_error_handler (_("%s in overlay section"),
1953 h
->root
.root
.string
);
1954 bfd_set_error (bfd_error_bad_value
);
1961 if (htab
->stub_sec
!= NULL
)
1963 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1964 if (htab
->stub_sec
[i
]->size
!= 0)
1966 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1967 htab
->stub_sec
[i
]->size
);
1968 if (htab
->stub_sec
[i
]->contents
== NULL
)
1970 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1971 htab
->stub_sec
[i
]->size
= 0;
1974 /* Fill in all the stubs. */
1975 process_stubs (info
, TRUE
);
1976 if (!htab
->stub_err
)
1977 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1981 _bfd_error_handler (_("overlay stub relocation overflow"));
1982 bfd_set_error (bfd_error_bad_value
);
1986 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1988 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1990 _bfd_error_handler (_("stubs don't match calculated size"));
1991 bfd_set_error (bfd_error_bad_value
);
1994 htab
->stub_sec
[i
]->rawsize
= 0;
1998 if (htab
->ovtab
== NULL
|| htab
->ovtab
->size
== 0)
2001 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
2002 if (htab
->ovtab
->contents
== NULL
)
2005 p
= htab
->ovtab
->contents
;
2006 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
2010 h
= define_ovtab_symbol (htab
, "__icache_tag_array");
2013 h
->root
.u
.def
.value
= 0;
2014 h
->size
= 16 << htab
->num_lines_log2
;
2017 h
= define_ovtab_symbol (htab
, "__icache_tag_array_size");
2020 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
2021 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2023 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to");
2026 h
->root
.u
.def
.value
= off
;
2027 h
->size
= 16 << htab
->num_lines_log2
;
2030 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to_size");
2033 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
2034 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2036 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from");
2039 h
->root
.u
.def
.value
= off
;
2040 h
->size
= 16 << (htab
->fromelem_size_log2
+ htab
->num_lines_log2
);
2043 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from_size");
2046 h
->root
.u
.def
.value
= 16 << (htab
->fromelem_size_log2
2047 + htab
->num_lines_log2
);
2048 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2050 h
= define_ovtab_symbol (htab
, "__icache_log2_fromelemsize");
2053 h
->root
.u
.def
.value
= htab
->fromelem_size_log2
;
2054 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2056 h
= define_ovtab_symbol (htab
, "__icache_base");
2059 h
->root
.u
.def
.value
= htab
->ovl_sec
[0]->vma
;
2060 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2061 h
->size
= htab
->num_buf
<< htab
->line_size_log2
;
2063 h
= define_ovtab_symbol (htab
, "__icache_linesize");
2066 h
->root
.u
.def
.value
= 1 << htab
->line_size_log2
;
2067 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2069 h
= define_ovtab_symbol (htab
, "__icache_log2_linesize");
2072 h
->root
.u
.def
.value
= htab
->line_size_log2
;
2073 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2075 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_linesize");
2078 h
->root
.u
.def
.value
= -htab
->line_size_log2
;
2079 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2081 h
= define_ovtab_symbol (htab
, "__icache_cachesize");
2084 h
->root
.u
.def
.value
= 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
);
2085 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2087 h
= define_ovtab_symbol (htab
, "__icache_log2_cachesize");
2090 h
->root
.u
.def
.value
= htab
->num_lines_log2
+ htab
->line_size_log2
;
2091 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2093 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_cachesize");
2096 h
->root
.u
.def
.value
= -(htab
->num_lines_log2
+ htab
->line_size_log2
);
2097 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2099 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
2101 htab
->init
->contents
= bfd_zalloc (htab
->init
->owner
,
2103 if (htab
->init
->contents
== NULL
)
2106 h
= define_ovtab_symbol (htab
, "__icache_fileoff");
2109 h
->root
.u
.def
.value
= 0;
2110 h
->root
.u
.def
.section
= htab
->init
;
2116 /* Write out _ovly_table. */
2117 /* set low bit of .size to mark non-overlay area as present. */
2119 obfd
= htab
->ovtab
->output_section
->owner
;
2120 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
2122 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
2126 unsigned long off
= ovl_index
* 16;
2127 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
2129 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
2130 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16,
2132 /* file_off written later in spu_elf_modify_headers. */
2133 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
2137 h
= define_ovtab_symbol (htab
, "_ovly_table");
2140 h
->root
.u
.def
.value
= 16;
2141 h
->size
= htab
->num_overlays
* 16;
2143 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
2146 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2149 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
2152 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2153 h
->size
= htab
->num_buf
* 4;
2155 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
2158 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
2162 h
= define_ovtab_symbol (htab
, "_EAR_");
2165 h
->root
.u
.def
.section
= htab
->toe
;
2166 h
->root
.u
.def
.value
= 0;
2172 /* Check that all loadable section VMAs lie in the range
2173 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2176 spu_elf_check_vma (struct bfd_link_info
*info
)
2178 struct elf_segment_map
*m
;
2180 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2181 bfd
*abfd
= info
->output_bfd
;
2182 bfd_vma hi
= htab
->params
->local_store_hi
;
2183 bfd_vma lo
= htab
->params
->local_store_lo
;
2185 htab
->local_store
= hi
+ 1 - lo
;
2187 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
2188 if (m
->p_type
== PT_LOAD
)
2189 for (i
= 0; i
< m
->count
; i
++)
2190 if (m
->sections
[i
]->size
!= 0
2191 && (m
->sections
[i
]->vma
< lo
2192 || m
->sections
[i
]->vma
> hi
2193 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
2194 return m
->sections
[i
];
2199 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2200 Search for stack adjusting insns, and return the sp delta.
2201 If a store of lr is found save the instruction offset to *LR_STORE.
2202 If a stack adjusting instruction is found, save that offset to
2206 find_function_stack_adjust (asection
*sec
,
2213 memset (reg
, 0, sizeof (reg
));
2214 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
2216 unsigned char buf
[4];
2220 /* Assume no relocs on stack adjusing insns. */
2221 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
2225 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
2227 if (buf
[0] == 0x24 /* stqd */)
2229 if (rt
== 0 /* lr */ && ra
== 1 /* sp */)
2234 /* Partly decoded immediate field. */
2235 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
2237 if (buf
[0] == 0x1c /* ai */)
2240 imm
= (imm
^ 0x200) - 0x200;
2241 reg
[rt
] = reg
[ra
] + imm
;
2243 if (rt
== 1 /* sp */)
2247 *sp_adjust
= offset
;
2251 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
2253 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2255 reg
[rt
] = reg
[ra
] + reg
[rb
];
2260 *sp_adjust
= offset
;
2264 else if (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */)
2266 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2268 reg
[rt
] = reg
[rb
] - reg
[ra
];
2273 *sp_adjust
= offset
;
2277 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2279 if (buf
[0] >= 0x42 /* ila */)
2280 imm
|= (buf
[0] & 1) << 17;
2285 if (buf
[0] == 0x40 /* il */)
2287 if ((buf
[1] & 0x80) == 0)
2289 imm
= (imm
^ 0x8000) - 0x8000;
2291 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
2297 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
2299 reg
[rt
] |= imm
& 0xffff;
2302 else if (buf
[0] == 0x04 /* ori */)
2305 imm
= (imm
^ 0x200) - 0x200;
2306 reg
[rt
] = reg
[ra
] | imm
;
2309 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
2311 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
2312 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
2313 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
2314 | ((imm
& 0x1000) ? 0x000000ff : 0));
2317 else if (buf
[0] == 0x16 /* andbi */)
2323 reg
[rt
] = reg
[ra
] & imm
;
2326 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
2328 /* Used in pic reg load. Say rt is trashed. Won't be used
2329 in stack adjust, but we need to continue past this branch. */
2333 else if (is_branch (buf
) || is_indirect_branch (buf
))
2334 /* If we hit a branch then we must be out of the prologue. */
2341 /* qsort predicate to sort symbols by section and value. */
2343 static Elf_Internal_Sym
*sort_syms_syms
;
2344 static asection
**sort_syms_psecs
;
2347 sort_syms (const void *a
, const void *b
)
2349 Elf_Internal_Sym
*const *s1
= a
;
2350 Elf_Internal_Sym
*const *s2
= b
;
2351 asection
*sec1
,*sec2
;
2352 bfd_signed_vma delta
;
2354 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
2355 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
2358 return sec1
->index
- sec2
->index
;
2360 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
2362 return delta
< 0 ? -1 : 1;
2364 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
2366 return delta
< 0 ? -1 : 1;
2368 return *s1
< *s2
? -1 : 1;
2371 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2372 entries for section SEC. */
2374 static struct spu_elf_stack_info
*
2375 alloc_stack_info (asection
*sec
, int max_fun
)
2377 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2380 amt
= sizeof (struct spu_elf_stack_info
);
2381 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
2382 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
2383 if (sec_data
->u
.i
.stack_info
!= NULL
)
2384 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
2385 return sec_data
->u
.i
.stack_info
;
2388 /* Add a new struct function_info describing a (part of a) function
2389 starting at SYM_H. Keep the array sorted by address. */
2391 static struct function_info
*
2392 maybe_insert_function (asection
*sec
,
2395 bfd_boolean is_func
)
2397 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2398 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2404 sinfo
= alloc_stack_info (sec
, 20);
2411 Elf_Internal_Sym
*sym
= sym_h
;
2412 off
= sym
->st_value
;
2413 size
= sym
->st_size
;
2417 struct elf_link_hash_entry
*h
= sym_h
;
2418 off
= h
->root
.u
.def
.value
;
2422 for (i
= sinfo
->num_fun
; --i
>= 0; )
2423 if (sinfo
->fun
[i
].lo
<= off
)
2428 /* Don't add another entry for an alias, but do update some
2430 if (sinfo
->fun
[i
].lo
== off
)
2432 /* Prefer globals over local syms. */
2433 if (global
&& !sinfo
->fun
[i
].global
)
2435 sinfo
->fun
[i
].global
= TRUE
;
2436 sinfo
->fun
[i
].u
.h
= sym_h
;
2439 sinfo
->fun
[i
].is_func
= TRUE
;
2440 return &sinfo
->fun
[i
];
2442 /* Ignore a zero-size symbol inside an existing function. */
2443 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
2444 return &sinfo
->fun
[i
];
2447 if (sinfo
->num_fun
>= sinfo
->max_fun
)
2449 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
2450 bfd_size_type old
= amt
;
2452 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2453 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
2454 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2455 sinfo
= bfd_realloc (sinfo
, amt
);
2458 memset ((char *) sinfo
+ old
, 0, amt
- old
);
2459 sec_data
->u
.i
.stack_info
= sinfo
;
2462 if (++i
< sinfo
->num_fun
)
2463 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
2464 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
2465 sinfo
->fun
[i
].is_func
= is_func
;
2466 sinfo
->fun
[i
].global
= global
;
2467 sinfo
->fun
[i
].sec
= sec
;
2469 sinfo
->fun
[i
].u
.h
= sym_h
;
2471 sinfo
->fun
[i
].u
.sym
= sym_h
;
2472 sinfo
->fun
[i
].lo
= off
;
2473 sinfo
->fun
[i
].hi
= off
+ size
;
2474 sinfo
->fun
[i
].lr_store
= -1;
2475 sinfo
->fun
[i
].sp_adjust
= -1;
2476 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
,
2477 &sinfo
->fun
[i
].lr_store
,
2478 &sinfo
->fun
[i
].sp_adjust
);
2479 sinfo
->num_fun
+= 1;
2480 return &sinfo
->fun
[i
];
2483 /* Return the name of FUN. */
2486 func_name (struct function_info
*fun
)
2490 Elf_Internal_Shdr
*symtab_hdr
;
2492 while (fun
->start
!= NULL
)
2496 return fun
->u
.h
->root
.root
.string
;
2499 if (fun
->u
.sym
->st_name
== 0)
2501 size_t len
= strlen (sec
->name
);
2502 char *name
= bfd_malloc (len
+ 10);
2505 sprintf (name
, "%s+%lx", sec
->name
,
2506 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
2510 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2511 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
2514 /* Read the instruction at OFF in SEC. Return true iff the instruction
2515 is a nop, lnop, or stop 0 (all zero insn). */
2518 is_nop (asection
*sec
, bfd_vma off
)
2520 unsigned char insn
[4];
2522 if (off
+ 4 > sec
->size
2523 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
2525 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
2527 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
2532 /* Extend the range of FUN to cover nop padding up to LIMIT.
2533 Return TRUE iff some instruction other than a NOP was found. */
2536 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
2538 bfd_vma off
= (fun
->hi
+ 3) & -4;
2540 while (off
< limit
&& is_nop (fun
->sec
, off
))
2551 /* Check and fix overlapping function ranges. Return TRUE iff there
2552 are gaps in the current info we have about functions in SEC. */
2555 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
2557 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2558 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2560 bfd_boolean gaps
= FALSE
;
2565 for (i
= 1; i
< sinfo
->num_fun
; i
++)
2566 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
2568 /* Fix overlapping symbols. */
2569 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
2570 const char *f2
= func_name (&sinfo
->fun
[i
]);
2572 /* xgettext:c-format */
2573 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
2574 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
2576 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
2579 if (sinfo
->num_fun
== 0)
2583 if (sinfo
->fun
[0].lo
!= 0)
2585 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
2587 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
2589 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
2590 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
2592 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2598 /* Search current function info for a function that contains address
2599 OFFSET in section SEC. */
2601 static struct function_info
*
2602 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2604 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2605 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2609 hi
= sinfo
->num_fun
;
2612 mid
= (lo
+ hi
) / 2;
2613 if (offset
< sinfo
->fun
[mid
].lo
)
2615 else if (offset
>= sinfo
->fun
[mid
].hi
)
2618 return &sinfo
->fun
[mid
];
2620 /* xgettext:c-format */
2621 info
->callbacks
->einfo (_("%pA:0x%v not found in function table\n"),
2623 bfd_set_error (bfd_error_bad_value
);
2627 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2628 if CALLEE was new. If this function return FALSE, CALLEE should
2632 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2634 struct call_info
**pp
, *p
;
2636 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2637 if (p
->fun
== callee
->fun
)
2639 /* Tail calls use less stack than normal calls. Retain entry
2640 for normal call over one for tail call. */
2641 p
->is_tail
&= callee
->is_tail
;
2644 p
->fun
->start
= NULL
;
2645 p
->fun
->is_func
= TRUE
;
2647 p
->count
+= callee
->count
;
2648 /* Reorder list so most recent call is first. */
2650 p
->next
= caller
->call_list
;
2651 caller
->call_list
= p
;
2654 callee
->next
= caller
->call_list
;
2655 caller
->call_list
= callee
;
2659 /* Copy CALL and insert the copy into CALLER. */
2662 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2664 struct call_info
*callee
;
2665 callee
= bfd_malloc (sizeof (*callee
));
2669 if (!insert_callee (caller
, callee
))
2674 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2675 overlay stub sections. */
2678 interesting_section (asection
*s
)
2680 return (s
->output_section
!= bfd_abs_section_ptr
2681 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2682 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2686 /* Rummage through the relocs for SEC, looking for function calls.
2687 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2688 mark destination symbols on calls as being functions. Also
2689 look at branches, which may be tail calls or go to hot/cold
2690 section part of same function. */
2693 mark_functions_via_relocs (asection
*sec
,
2694 struct bfd_link_info
*info
,
2697 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2698 Elf_Internal_Shdr
*symtab_hdr
;
2700 unsigned int priority
= 0;
2701 static bfd_boolean warned
;
2703 if (!interesting_section (sec
)
2704 || sec
->reloc_count
== 0)
2707 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2709 if (internal_relocs
== NULL
)
2712 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2713 psyms
= &symtab_hdr
->contents
;
2714 irela
= internal_relocs
;
2715 irelaend
= irela
+ sec
->reloc_count
;
2716 for (; irela
< irelaend
; irela
++)
2718 enum elf_spu_reloc_type r_type
;
2719 unsigned int r_indx
;
2721 Elf_Internal_Sym
*sym
;
2722 struct elf_link_hash_entry
*h
;
2724 bfd_boolean nonbranch
, is_call
;
2725 struct function_info
*caller
;
2726 struct call_info
*callee
;
2728 r_type
= ELF32_R_TYPE (irela
->r_info
);
2729 nonbranch
= r_type
!= R_SPU_REL16
&& r_type
!= R_SPU_ADDR16
;
2731 r_indx
= ELF32_R_SYM (irela
->r_info
);
2732 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2736 || sym_sec
->output_section
== bfd_abs_section_ptr
)
2742 unsigned char insn
[4];
2744 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2745 irela
->r_offset
, 4))
2747 if (is_branch (insn
))
2749 is_call
= (insn
[0] & 0xfd) == 0x31;
2750 priority
= insn
[1] & 0x0f;
2752 priority
|= insn
[2];
2754 priority
|= insn
[3];
2756 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2757 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2760 info
->callbacks
->einfo
2761 /* xgettext:c-format */
2762 (_("%pB(%pA+0x%v): call to non-code section"
2763 " %pB(%pA), analysis incomplete\n"),
2764 sec
->owner
, sec
, irela
->r_offset
,
2765 sym_sec
->owner
, sym_sec
);
2780 /* For --auto-overlay, count possible stubs we need for
2781 function pointer references. */
2782 unsigned int sym_type
;
2786 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2787 if (sym_type
== STT_FUNC
)
2789 if (call_tree
&& spu_hash_table (info
)->params
->auto_overlay
)
2790 spu_hash_table (info
)->non_ovly_stub
+= 1;
2791 /* If the symbol type is STT_FUNC then this must be a
2792 function pointer initialisation. */
2795 /* Ignore data references. */
2796 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2797 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2799 /* Otherwise we probably have a jump table reloc for
2800 a switch statement or some other reference to a
2805 val
= h
->root
.u
.def
.value
;
2807 val
= sym
->st_value
;
2808 val
+= irela
->r_addend
;
2812 struct function_info
*fun
;
2814 if (irela
->r_addend
!= 0)
2816 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2819 fake
->st_value
= val
;
2821 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2825 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2827 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2830 if (irela
->r_addend
!= 0
2831 && fun
->u
.sym
!= sym
)
2836 caller
= find_function (sec
, irela
->r_offset
, info
);
2839 callee
= bfd_malloc (sizeof *callee
);
2843 callee
->fun
= find_function (sym_sec
, val
, info
);
2844 if (callee
->fun
== NULL
)
2846 callee
->is_tail
= !is_call
;
2847 callee
->is_pasted
= FALSE
;
2848 callee
->broken_cycle
= FALSE
;
2849 callee
->priority
= priority
;
2850 callee
->count
= nonbranch
? 0 : 1;
2851 if (callee
->fun
->last_caller
!= sec
)
2853 callee
->fun
->last_caller
= sec
;
2854 callee
->fun
->call_count
+= 1;
2856 if (!insert_callee (caller
, callee
))
2859 && !callee
->fun
->is_func
2860 && callee
->fun
->stack
== 0)
2862 /* This is either a tail call or a branch from one part of
2863 the function to another, ie. hot/cold section. If the
2864 destination has been called by some other function then
2865 it is a separate function. We also assume that functions
2866 are not split across input files. */
2867 if (sec
->owner
!= sym_sec
->owner
)
2869 callee
->fun
->start
= NULL
;
2870 callee
->fun
->is_func
= TRUE
;
2872 else if (callee
->fun
->start
== NULL
)
2874 struct function_info
*caller_start
= caller
;
2875 while (caller_start
->start
)
2876 caller_start
= caller_start
->start
;
2878 if (caller_start
!= callee
->fun
)
2879 callee
->fun
->start
= caller_start
;
2883 struct function_info
*callee_start
;
2884 struct function_info
*caller_start
;
2885 callee_start
= callee
->fun
;
2886 while (callee_start
->start
)
2887 callee_start
= callee_start
->start
;
2888 caller_start
= caller
;
2889 while (caller_start
->start
)
2890 caller_start
= caller_start
->start
;
2891 if (caller_start
!= callee_start
)
2893 callee
->fun
->start
= NULL
;
2894 callee
->fun
->is_func
= TRUE
;
2903 /* Handle something like .init or .fini, which has a piece of a function.
2904 These sections are pasted together to form a single function. */
2907 pasted_function (asection
*sec
)
2909 struct bfd_link_order
*l
;
2910 struct _spu_elf_section_data
*sec_data
;
2911 struct spu_elf_stack_info
*sinfo
;
2912 Elf_Internal_Sym
*fake
;
2913 struct function_info
*fun
, *fun_start
;
2915 fake
= bfd_zmalloc (sizeof (*fake
));
2919 fake
->st_size
= sec
->size
;
2921 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2922 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2926 /* Find a function immediately preceding this section. */
2928 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2930 if (l
->u
.indirect
.section
== sec
)
2932 if (fun_start
!= NULL
)
2934 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2938 fun
->start
= fun_start
;
2940 callee
->is_tail
= TRUE
;
2941 callee
->is_pasted
= TRUE
;
2942 callee
->broken_cycle
= FALSE
;
2943 callee
->priority
= 0;
2945 if (!insert_callee (fun_start
, callee
))
2951 if (l
->type
== bfd_indirect_link_order
2952 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2953 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2954 && sinfo
->num_fun
!= 0)
2955 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2958 /* Don't return an error if we did not find a function preceding this
2959 section. The section may have incorrect flags. */
2963 /* Map address ranges in code sections to functions. */
2966 discover_functions (struct bfd_link_info
*info
)
2970 Elf_Internal_Sym
***psym_arr
;
2971 asection
***sec_arr
;
2972 bfd_boolean gaps
= FALSE
;
2975 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
2978 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2979 if (psym_arr
== NULL
)
2981 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2982 if (sec_arr
== NULL
)
2985 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2987 ibfd
= ibfd
->link
.next
, bfd_idx
++)
2989 extern const bfd_target spu_elf32_vec
;
2990 Elf_Internal_Shdr
*symtab_hdr
;
2993 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2994 asection
**psecs
, **p
;
2996 if (ibfd
->xvec
!= &spu_elf32_vec
)
2999 /* Read all the symbols. */
3000 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
3001 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
3005 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3006 if (interesting_section (sec
))
3014 /* Don't use cached symbols since the generic ELF linker
3015 code only reads local symbols, and we need globals too. */
3016 free (symtab_hdr
->contents
);
3017 symtab_hdr
->contents
= NULL
;
3018 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
3020 symtab_hdr
->contents
= (void *) syms
;
3024 /* Select defined function symbols that are going to be output. */
3025 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
3028 psym_arr
[bfd_idx
] = psyms
;
3029 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
3032 sec_arr
[bfd_idx
] = psecs
;
3033 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
3034 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
3035 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3039 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
3040 if (s
!= NULL
&& interesting_section (s
))
3043 symcount
= psy
- psyms
;
3046 /* Sort them by section and offset within section. */
3047 sort_syms_syms
= syms
;
3048 sort_syms_psecs
= psecs
;
3049 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
3051 /* Now inspect the function symbols. */
3052 for (psy
= psyms
; psy
< psyms
+ symcount
; )
3054 asection
*s
= psecs
[*psy
- syms
];
3055 Elf_Internal_Sym
**psy2
;
3057 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
3058 if (psecs
[*psy2
- syms
] != s
)
3061 if (!alloc_stack_info (s
, psy2
- psy
))
3066 /* First install info about properly typed and sized functions.
3067 In an ideal world this will cover all code sections, except
3068 when partitioning functions into hot and cold sections,
3069 and the horrible pasted together .init and .fini functions. */
3070 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
3073 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3075 asection
*s
= psecs
[sy
- syms
];
3076 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
3081 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3082 if (interesting_section (sec
))
3083 gaps
|= check_function_ranges (sec
, info
);
3088 /* See if we can discover more function symbols by looking at
3090 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3092 ibfd
= ibfd
->link
.next
, bfd_idx
++)
3096 if (psym_arr
[bfd_idx
] == NULL
)
3099 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3100 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
3104 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3106 ibfd
= ibfd
->link
.next
, bfd_idx
++)
3108 Elf_Internal_Shdr
*symtab_hdr
;
3110 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
3113 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
3116 psecs
= sec_arr
[bfd_idx
];
3118 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
3119 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
3122 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3123 if (interesting_section (sec
))
3124 gaps
|= check_function_ranges (sec
, info
);
3128 /* Finally, install all globals. */
3129 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
3133 s
= psecs
[sy
- syms
];
3135 /* Global syms might be improperly typed functions. */
3136 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
3137 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
3139 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
3145 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3147 extern const bfd_target spu_elf32_vec
;
3150 if (ibfd
->xvec
!= &spu_elf32_vec
)
3153 /* Some of the symbols we've installed as marking the
3154 beginning of functions may have a size of zero. Extend
3155 the range of such functions to the beginning of the
3156 next symbol of interest. */
3157 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3158 if (interesting_section (sec
))
3160 struct _spu_elf_section_data
*sec_data
;
3161 struct spu_elf_stack_info
*sinfo
;
3163 sec_data
= spu_elf_section_data (sec
);
3164 sinfo
= sec_data
->u
.i
.stack_info
;
3165 if (sinfo
!= NULL
&& sinfo
->num_fun
!= 0)
3168 bfd_vma hi
= sec
->size
;
3170 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
3172 sinfo
->fun
[fun_idx
].hi
= hi
;
3173 hi
= sinfo
->fun
[fun_idx
].lo
;
3176 sinfo
->fun
[0].lo
= 0;
3178 /* No symbols in this section. Must be .init or .fini
3179 or something similar. */
3180 else if (!pasted_function (sec
))
3186 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3188 ibfd
= ibfd
->link
.next
, bfd_idx
++)
3190 if (psym_arr
[bfd_idx
] == NULL
)
3193 free (psym_arr
[bfd_idx
]);
3194 free (sec_arr
[bfd_idx
]);
3203 /* Iterate over all function_info we have collected, calling DOIT on
3204 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3208 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
3209 struct bfd_link_info
*,
3211 struct bfd_link_info
*info
,
3217 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3219 extern const bfd_target spu_elf32_vec
;
3222 if (ibfd
->xvec
!= &spu_elf32_vec
)
3225 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3227 struct _spu_elf_section_data
*sec_data
;
3228 struct spu_elf_stack_info
*sinfo
;
3230 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3231 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3234 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3235 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
3236 if (!doit (&sinfo
->fun
[i
], info
, param
))
3244 /* Transfer call info attached to struct function_info entries for
3245 all of a given function's sections to the first entry. */
3248 transfer_calls (struct function_info
*fun
,
3249 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3250 void *param ATTRIBUTE_UNUSED
)
3252 struct function_info
*start
= fun
->start
;
3256 struct call_info
*call
, *call_next
;
3258 while (start
->start
!= NULL
)
3259 start
= start
->start
;
3260 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
3262 call_next
= call
->next
;
3263 if (!insert_callee (start
, call
))
3266 fun
->call_list
= NULL
;
3271 /* Mark nodes in the call graph that are called by some other node. */
3274 mark_non_root (struct function_info
*fun
,
3275 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3276 void *param ATTRIBUTE_UNUSED
)
3278 struct call_info
*call
;
3283 for (call
= fun
->call_list
; call
; call
= call
->next
)
3285 call
->fun
->non_root
= TRUE
;
3286 mark_non_root (call
->fun
, 0, 0);
3291 /* Remove cycles from the call graph. Set depth of nodes. */
3294 remove_cycles (struct function_info
*fun
,
3295 struct bfd_link_info
*info
,
3298 struct call_info
**callp
, *call
;
3299 unsigned int depth
= *(unsigned int *) param
;
3300 unsigned int max_depth
= depth
;
3304 fun
->marking
= TRUE
;
3306 callp
= &fun
->call_list
;
3307 while ((call
= *callp
) != NULL
)
3309 call
->max_depth
= depth
+ !call
->is_pasted
;
3310 if (!call
->fun
->visit2
)
3312 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
3314 if (max_depth
< call
->max_depth
)
3315 max_depth
= call
->max_depth
;
3317 else if (call
->fun
->marking
)
3319 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3321 if (!htab
->params
->auto_overlay
3322 && htab
->params
->stack_analysis
)
3324 const char *f1
= func_name (fun
);
3325 const char *f2
= func_name (call
->fun
);
3327 /* xgettext:c-format */
3328 info
->callbacks
->info (_("stack analysis will ignore the call "
3333 call
->broken_cycle
= TRUE
;
3335 callp
= &call
->next
;
3337 fun
->marking
= FALSE
;
3338 *(unsigned int *) param
= max_depth
;
3342 /* Check that we actually visited all nodes in remove_cycles. If we
3343 didn't, then there is some cycle in the call graph not attached to
3344 any root node. Arbitrarily choose a node in the cycle as a new
3345 root and break the cycle. */
3348 mark_detached_root (struct function_info
*fun
,
3349 struct bfd_link_info
*info
,
3354 fun
->non_root
= FALSE
;
3355 *(unsigned int *) param
= 0;
3356 return remove_cycles (fun
, info
, param
);
3359 /* Populate call_list for each function. */
3362 build_call_tree (struct bfd_link_info
*info
)
3367 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3369 extern const bfd_target spu_elf32_vec
;
3372 if (ibfd
->xvec
!= &spu_elf32_vec
)
3375 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3376 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
3380 /* Transfer call info from hot/cold section part of function
3382 if (!spu_hash_table (info
)->params
->auto_overlay
3383 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
3386 /* Find the call graph root(s). */
3387 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
3390 /* Remove cycles from the call graph. We start from the root node(s)
3391 so that we break cycles in a reasonable place. */
3393 if (!for_each_node (remove_cycles
, info
, &depth
, TRUE
))
3396 return for_each_node (mark_detached_root
, info
, &depth
, FALSE
);
3399 /* qsort predicate to sort calls by priority, max_depth then count. */
3402 sort_calls (const void *a
, const void *b
)
3404 struct call_info
*const *c1
= a
;
3405 struct call_info
*const *c2
= b
;
3408 delta
= (*c2
)->priority
- (*c1
)->priority
;
3412 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
3416 delta
= (*c2
)->count
- (*c1
)->count
;
3420 return (char *) c1
- (char *) c2
;
3424 unsigned int max_overlay_size
;
3427 /* Set linker_mark and gc_mark on any sections that we will put in
3428 overlays. These flags are used by the generic ELF linker, but we
3429 won't be continuing on to bfd_elf_final_link so it is OK to use
3430 them. linker_mark is clear before we get here. Set segment_mark
3431 on sections that are part of a pasted function (excluding the last
3434 Set up function rodata section if --overlay-rodata. We don't
3435 currently include merged string constant rodata sections since
3437 Sort the call graph so that the deepest nodes will be visited
3441 mark_overlay_section (struct function_info
*fun
,
3442 struct bfd_link_info
*info
,
3445 struct call_info
*call
;
3447 struct _mos_param
*mos_param
= param
;
3448 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3454 if (!fun
->sec
->linker_mark
3455 && (htab
->params
->ovly_flavour
!= ovly_soft_icache
3456 || htab
->params
->non_ia_text
3457 || strncmp (fun
->sec
->name
, ".text.ia.", 9) == 0
3458 || strcmp (fun
->sec
->name
, ".init") == 0
3459 || strcmp (fun
->sec
->name
, ".fini") == 0))
3463 fun
->sec
->linker_mark
= 1;
3464 fun
->sec
->gc_mark
= 1;
3465 fun
->sec
->segment_mark
= 0;
3466 /* Ensure SEC_CODE is set on this text section (it ought to
3467 be!), and SEC_CODE is clear on rodata sections. We use
3468 this flag to differentiate the two overlay section types. */
3469 fun
->sec
->flags
|= SEC_CODE
;
3471 size
= fun
->sec
->size
;
3472 if (htab
->params
->auto_overlay
& OVERLAY_RODATA
)
3476 /* Find the rodata section corresponding to this function's
3478 if (strcmp (fun
->sec
->name
, ".text") == 0)
3480 name
= bfd_malloc (sizeof (".rodata"));
3483 memcpy (name
, ".rodata", sizeof (".rodata"));
3485 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
3487 size_t len
= strlen (fun
->sec
->name
);
3488 name
= bfd_malloc (len
+ 3);
3491 memcpy (name
, ".rodata", sizeof (".rodata"));
3492 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
3494 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
3496 size_t len
= strlen (fun
->sec
->name
) + 1;
3497 name
= bfd_malloc (len
);
3500 memcpy (name
, fun
->sec
->name
, len
);
3506 asection
*rodata
= NULL
;
3507 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
3508 if (group_sec
== NULL
)
3509 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
3511 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
3513 if (strcmp (group_sec
->name
, name
) == 0)
3518 group_sec
= elf_section_data (group_sec
)->next_in_group
;
3520 fun
->rodata
= rodata
;
3523 size
+= fun
->rodata
->size
;
3524 if (htab
->params
->line_size
!= 0
3525 && size
> htab
->params
->line_size
)
3527 size
-= fun
->rodata
->size
;
3532 fun
->rodata
->linker_mark
= 1;
3533 fun
->rodata
->gc_mark
= 1;
3534 fun
->rodata
->flags
&= ~SEC_CODE
;
3540 if (mos_param
->max_overlay_size
< size
)
3541 mos_param
->max_overlay_size
= size
;
3544 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3549 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
3553 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3554 calls
[count
++] = call
;
3556 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
3558 fun
->call_list
= NULL
;
3562 calls
[count
]->next
= fun
->call_list
;
3563 fun
->call_list
= calls
[count
];
3568 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3570 if (call
->is_pasted
)
3572 /* There can only be one is_pasted call per function_info. */
3573 BFD_ASSERT (!fun
->sec
->segment_mark
);
3574 fun
->sec
->segment_mark
= 1;
3576 if (!call
->broken_cycle
3577 && !mark_overlay_section (call
->fun
, info
, param
))
3581 /* Don't put entry code into an overlay. The overlay manager needs
3582 a stack! Also, don't mark .ovl.init as an overlay. */
3583 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
3584 == info
->output_bfd
->start_address
3585 || strncmp (fun
->sec
->output_section
->name
, ".ovl.init", 9) == 0)
3587 fun
->sec
->linker_mark
= 0;
3588 if (fun
->rodata
!= NULL
)
3589 fun
->rodata
->linker_mark
= 0;
3594 /* If non-zero then unmark functions called from those within sections
3595 that we need to unmark. Unfortunately this isn't reliable since the
3596 call graph cannot know the destination of function pointer calls. */
3597 #define RECURSE_UNMARK 0
3600 asection
*exclude_input_section
;
3601 asection
*exclude_output_section
;
3602 unsigned long clearing
;
3605 /* Undo some of mark_overlay_section's work. */
3608 unmark_overlay_section (struct function_info
*fun
,
3609 struct bfd_link_info
*info
,
3612 struct call_info
*call
;
3613 struct _uos_param
*uos_param
= param
;
3614 unsigned int excluded
= 0;
3622 if (fun
->sec
== uos_param
->exclude_input_section
3623 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3627 uos_param
->clearing
+= excluded
;
3629 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3631 fun
->sec
->linker_mark
= 0;
3633 fun
->rodata
->linker_mark
= 0;
3636 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3637 if (!call
->broken_cycle
3638 && !unmark_overlay_section (call
->fun
, info
, param
))
3642 uos_param
->clearing
-= excluded
;
3647 unsigned int lib_size
;
3648 asection
**lib_sections
;
3651 /* Add sections we have marked as belonging to overlays to an array
3652 for consideration as non-overlay sections. The array consist of
3653 pairs of sections, (text,rodata), for functions in the call graph. */
3656 collect_lib_sections (struct function_info
*fun
,
3657 struct bfd_link_info
*info
,
3660 struct _cl_param
*lib_param
= param
;
3661 struct call_info
*call
;
3668 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3671 size
= fun
->sec
->size
;
3673 size
+= fun
->rodata
->size
;
3675 if (size
<= lib_param
->lib_size
)
3677 *lib_param
->lib_sections
++ = fun
->sec
;
3678 fun
->sec
->gc_mark
= 0;
3679 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3681 *lib_param
->lib_sections
++ = fun
->rodata
;
3682 fun
->rodata
->gc_mark
= 0;
3685 *lib_param
->lib_sections
++ = NULL
;
3688 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3689 if (!call
->broken_cycle
)
3690 collect_lib_sections (call
->fun
, info
, param
);
3695 /* qsort predicate to sort sections by call count. */
3698 sort_lib (const void *a
, const void *b
)
3700 asection
*const *s1
= a
;
3701 asection
*const *s2
= b
;
3702 struct _spu_elf_section_data
*sec_data
;
3703 struct spu_elf_stack_info
*sinfo
;
3707 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3708 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3711 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3712 delta
-= sinfo
->fun
[i
].call_count
;
3715 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3716 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3719 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3720 delta
+= sinfo
->fun
[i
].call_count
;
3729 /* Remove some sections from those marked to be in overlays. Choose
3730 those that are called from many places, likely library functions. */
3733 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3736 asection
**lib_sections
;
3737 unsigned int i
, lib_count
;
3738 struct _cl_param collect_lib_param
;
3739 struct function_info dummy_caller
;
3740 struct spu_link_hash_table
*htab
;
3742 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3744 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3746 extern const bfd_target spu_elf32_vec
;
3749 if (ibfd
->xvec
!= &spu_elf32_vec
)
3752 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3753 if (sec
->linker_mark
3754 && sec
->size
< lib_size
3755 && (sec
->flags
& SEC_CODE
) != 0)
3758 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3759 if (lib_sections
== NULL
)
3760 return (unsigned int) -1;
3761 collect_lib_param
.lib_size
= lib_size
;
3762 collect_lib_param
.lib_sections
= lib_sections
;
3763 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3765 return (unsigned int) -1;
3766 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3768 /* Sort sections so that those with the most calls are first. */
3770 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3772 htab
= spu_hash_table (info
);
3773 for (i
= 0; i
< lib_count
; i
++)
3775 unsigned int tmp
, stub_size
;
3777 struct _spu_elf_section_data
*sec_data
;
3778 struct spu_elf_stack_info
*sinfo
;
3780 sec
= lib_sections
[2 * i
];
3781 /* If this section is OK, its size must be less than lib_size. */
3783 /* If it has a rodata section, then add that too. */
3784 if (lib_sections
[2 * i
+ 1])
3785 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3786 /* Add any new overlay call stubs needed by the section. */
3789 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3790 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3793 struct call_info
*call
;
3795 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3796 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3797 if (call
->fun
->sec
->linker_mark
)
3799 struct call_info
*p
;
3800 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3801 if (p
->fun
== call
->fun
)
3804 stub_size
+= ovl_stub_size (htab
->params
);
3807 if (tmp
+ stub_size
< lib_size
)
3809 struct call_info
**pp
, *p
;
3811 /* This section fits. Mark it as non-overlay. */
3812 lib_sections
[2 * i
]->linker_mark
= 0;
3813 if (lib_sections
[2 * i
+ 1])
3814 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3815 lib_size
-= tmp
+ stub_size
;
3816 /* Call stubs to the section we just added are no longer
3818 pp
= &dummy_caller
.call_list
;
3819 while ((p
= *pp
) != NULL
)
3820 if (!p
->fun
->sec
->linker_mark
)
3822 lib_size
+= ovl_stub_size (htab
->params
);
3828 /* Add new call stubs to dummy_caller. */
3829 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3830 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3833 struct call_info
*call
;
3835 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3836 for (call
= sinfo
->fun
[k
].call_list
;
3839 if (call
->fun
->sec
->linker_mark
)
3841 struct call_info
*callee
;
3842 callee
= bfd_malloc (sizeof (*callee
));
3844 return (unsigned int) -1;
3846 if (!insert_callee (&dummy_caller
, callee
))
3852 while (dummy_caller
.call_list
!= NULL
)
3854 struct call_info
*call
= dummy_caller
.call_list
;
3855 dummy_caller
.call_list
= call
->next
;
3858 for (i
= 0; i
< 2 * lib_count
; i
++)
3859 if (lib_sections
[i
])
3860 lib_sections
[i
]->gc_mark
= 1;
3861 free (lib_sections
);
3865 /* Build an array of overlay sections. The deepest node's section is
3866 added first, then its parent node's section, then everything called
3867 from the parent section. The idea being to group sections to
3868 minimise calls between different overlays. */
3871 collect_overlays (struct function_info
*fun
,
3872 struct bfd_link_info
*info
,
3875 struct call_info
*call
;
3876 bfd_boolean added_fun
;
3877 asection
***ovly_sections
= param
;
3883 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3884 if (!call
->is_pasted
&& !call
->broken_cycle
)
3886 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3892 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3894 fun
->sec
->gc_mark
= 0;
3895 *(*ovly_sections
)++ = fun
->sec
;
3896 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3898 fun
->rodata
->gc_mark
= 0;
3899 *(*ovly_sections
)++ = fun
->rodata
;
3902 *(*ovly_sections
)++ = NULL
;
3905 /* Pasted sections must stay with the first section. We don't
3906 put pasted sections in the array, just the first section.
3907 Mark subsequent sections as already considered. */
3908 if (fun
->sec
->segment_mark
)
3910 struct function_info
*call_fun
= fun
;
3913 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3914 if (call
->is_pasted
)
3916 call_fun
= call
->fun
;
3917 call_fun
->sec
->gc_mark
= 0;
3918 if (call_fun
->rodata
)
3919 call_fun
->rodata
->gc_mark
= 0;
3925 while (call_fun
->sec
->segment_mark
);
3929 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3930 if (!call
->broken_cycle
3931 && !collect_overlays (call
->fun
, info
, ovly_sections
))
3936 struct _spu_elf_section_data
*sec_data
;
3937 struct spu_elf_stack_info
*sinfo
;
3939 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3940 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3943 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3944 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3952 struct _sum_stack_param
{
3954 size_t overall_stack
;
3955 bfd_boolean emit_stack_syms
;
3958 /* Descend the call graph for FUN, accumulating total stack required. */
3961 sum_stack (struct function_info
*fun
,
3962 struct bfd_link_info
*info
,
3965 struct call_info
*call
;
3966 struct function_info
*max
;
3967 size_t stack
, cum_stack
;
3969 bfd_boolean has_call
;
3970 struct _sum_stack_param
*sum_stack_param
= param
;
3971 struct spu_link_hash_table
*htab
;
3973 cum_stack
= fun
->stack
;
3974 sum_stack_param
->cum_stack
= cum_stack
;
3980 for (call
= fun
->call_list
; call
; call
= call
->next
)
3982 if (call
->broken_cycle
)
3984 if (!call
->is_pasted
)
3986 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3988 stack
= sum_stack_param
->cum_stack
;
3989 /* Include caller stack for normal calls, don't do so for
3990 tail calls. fun->stack here is local stack usage for
3992 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3993 stack
+= fun
->stack
;
3994 if (cum_stack
< stack
)
4001 sum_stack_param
->cum_stack
= cum_stack
;
4003 /* Now fun->stack holds cumulative stack. */
4004 fun
->stack
= cum_stack
;
4008 && sum_stack_param
->overall_stack
< cum_stack
)
4009 sum_stack_param
->overall_stack
= cum_stack
;
4011 htab
= spu_hash_table (info
);
4012 if (htab
->params
->auto_overlay
)
4015 f1
= func_name (fun
);
4016 if (htab
->params
->stack_analysis
)
4019 info
->callbacks
->info (" %s: 0x%v\n", f1
, (bfd_vma
) cum_stack
);
4020 info
->callbacks
->minfo ("%s: 0x%v 0x%v\n",
4021 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
4025 info
->callbacks
->minfo (_(" calls:\n"));
4026 for (call
= fun
->call_list
; call
; call
= call
->next
)
4027 if (!call
->is_pasted
&& !call
->broken_cycle
)
4029 const char *f2
= func_name (call
->fun
);
4030 const char *ann1
= call
->fun
== max
? "*" : " ";
4031 const char *ann2
= call
->is_tail
? "t" : " ";
4033 info
->callbacks
->minfo (" %s%s %s\n", ann1
, ann2
, f2
);
4038 if (sum_stack_param
->emit_stack_syms
)
4040 char *name
= bfd_malloc (18 + strlen (f1
));
4041 struct elf_link_hash_entry
*h
;
4046 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
4047 sprintf (name
, "__stack_%s", f1
);
4049 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
4051 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
4054 && (h
->root
.type
== bfd_link_hash_new
4055 || h
->root
.type
== bfd_link_hash_undefined
4056 || h
->root
.type
== bfd_link_hash_undefweak
))
4058 h
->root
.type
= bfd_link_hash_defined
;
4059 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
4060 h
->root
.u
.def
.value
= cum_stack
;
4065 h
->ref_regular_nonweak
= 1;
4066 h
->forced_local
= 1;
4074 /* SEC is part of a pasted function. Return the call_info for the
4075 next section of this function. */
4077 static struct call_info
*
4078 find_pasted_call (asection
*sec
)
4080 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
4081 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
4082 struct call_info
*call
;
4085 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4086 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
4087 if (call
->is_pasted
)
4093 /* qsort predicate to sort bfds by file name. */
4096 sort_bfds (const void *a
, const void *b
)
4098 bfd
*const *abfd1
= a
;
4099 bfd
*const *abfd2
= b
;
4101 return filename_cmp (bfd_get_filename (*abfd1
), bfd_get_filename (*abfd2
));
4105 print_one_overlay_section (FILE *script
,
4108 unsigned int ovlynum
,
4109 unsigned int *ovly_map
,
4110 asection
**ovly_sections
,
4111 struct bfd_link_info
*info
)
4115 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4117 asection
*sec
= ovly_sections
[2 * j
];
4119 if (fprintf (script
, " %s%c%s (%s)\n",
4120 (sec
->owner
->my_archive
!= NULL
4121 ? bfd_get_filename (sec
->owner
->my_archive
) : ""),
4122 info
->path_separator
,
4123 bfd_get_filename (sec
->owner
),
4126 if (sec
->segment_mark
)
4128 struct call_info
*call
= find_pasted_call (sec
);
4129 while (call
!= NULL
)
4131 struct function_info
*call_fun
= call
->fun
;
4132 sec
= call_fun
->sec
;
4133 if (fprintf (script
, " %s%c%s (%s)\n",
4134 (sec
->owner
->my_archive
!= NULL
4135 ? bfd_get_filename (sec
->owner
->my_archive
) : ""),
4136 info
->path_separator
,
4137 bfd_get_filename (sec
->owner
),
4140 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4141 if (call
->is_pasted
)
4147 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4149 asection
*sec
= ovly_sections
[2 * j
+ 1];
4151 && fprintf (script
, " %s%c%s (%s)\n",
4152 (sec
->owner
->my_archive
!= NULL
4153 ? bfd_get_filename (sec
->owner
->my_archive
) : ""),
4154 info
->path_separator
,
4155 bfd_get_filename (sec
->owner
),
4159 sec
= ovly_sections
[2 * j
];
4160 if (sec
->segment_mark
)
4162 struct call_info
*call
= find_pasted_call (sec
);
4163 while (call
!= NULL
)
4165 struct function_info
*call_fun
= call
->fun
;
4166 sec
= call_fun
->rodata
;
4168 && fprintf (script
, " %s%c%s (%s)\n",
4169 (sec
->owner
->my_archive
!= NULL
4170 ? bfd_get_filename (sec
->owner
->my_archive
) : ""),
4171 info
->path_separator
,
4172 bfd_get_filename (sec
->owner
),
4175 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4176 if (call
->is_pasted
)
4185 /* Handle --auto-overlay. */
4188 spu_elf_auto_overlay (struct bfd_link_info
*info
)
4192 struct elf_segment_map
*m
;
4193 unsigned int fixed_size
, lo
, hi
;
4194 unsigned int reserved
;
4195 struct spu_link_hash_table
*htab
;
4196 unsigned int base
, i
, count
, bfd_count
;
4197 unsigned int region
, ovlynum
;
4198 asection
**ovly_sections
, **ovly_p
;
4199 unsigned int *ovly_map
;
4201 unsigned int total_overlay_size
, overlay_size
;
4202 const char *ovly_mgr_entry
;
4203 struct elf_link_hash_entry
*h
;
4204 struct _mos_param mos_param
;
4205 struct _uos_param uos_param
;
4206 struct function_info dummy_caller
;
4208 /* Find the extents of our loadable image. */
4209 lo
= (unsigned int) -1;
4211 for (m
= elf_seg_map (info
->output_bfd
); m
!= NULL
; m
= m
->next
)
4212 if (m
->p_type
== PT_LOAD
)
4213 for (i
= 0; i
< m
->count
; i
++)
4214 if (m
->sections
[i
]->size
!= 0)
4216 if (m
->sections
[i
]->vma
< lo
)
4217 lo
= m
->sections
[i
]->vma
;
4218 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
4219 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
4221 fixed_size
= hi
+ 1 - lo
;
4223 if (!discover_functions (info
))
4226 if (!build_call_tree (info
))
4229 htab
= spu_hash_table (info
);
4230 reserved
= htab
->params
->auto_overlay_reserved
;
4233 struct _sum_stack_param sum_stack_param
;
4235 sum_stack_param
.emit_stack_syms
= 0;
4236 sum_stack_param
.overall_stack
= 0;
4237 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4239 reserved
= (sum_stack_param
.overall_stack
4240 + htab
->params
->extra_stack_space
);
4243 /* No need for overlays if everything already fits. */
4244 if (fixed_size
+ reserved
<= htab
->local_store
4245 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
4247 htab
->params
->auto_overlay
= 0;
4251 uos_param
.exclude_input_section
= 0;
4252 uos_param
.exclude_output_section
4253 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
4255 ovly_mgr_entry
= "__ovly_load";
4256 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4257 ovly_mgr_entry
= "__icache_br_handler";
4258 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
4259 FALSE
, FALSE
, FALSE
);
4261 && (h
->root
.type
== bfd_link_hash_defined
4262 || h
->root
.type
== bfd_link_hash_defweak
)
4265 /* We have a user supplied overlay manager. */
4266 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
4270 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4271 builtin version to .text, and will adjust .text size. */
4272 fixed_size
+= (*htab
->params
->spu_elf_load_ovl_mgr
) ();
4275 /* Mark overlay sections, and find max overlay section size. */
4276 mos_param
.max_overlay_size
= 0;
4277 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
4280 /* We can't put the overlay manager or interrupt routines in
4282 uos_param
.clearing
= 0;
4283 if ((uos_param
.exclude_input_section
4284 || uos_param
.exclude_output_section
)
4285 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
4289 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
4291 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
4292 if (bfd_arr
== NULL
)
4295 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4298 total_overlay_size
= 0;
4299 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
4301 extern const bfd_target spu_elf32_vec
;
4303 unsigned int old_count
;
4305 if (ibfd
->xvec
!= &spu_elf32_vec
)
4309 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4310 if (sec
->linker_mark
)
4312 if ((sec
->flags
& SEC_CODE
) != 0)
4314 fixed_size
-= sec
->size
;
4315 total_overlay_size
+= sec
->size
;
4317 else if ((sec
->flags
& (SEC_ALLOC
| SEC_LOAD
)) == (SEC_ALLOC
| SEC_LOAD
)
4318 && sec
->output_section
->owner
== info
->output_bfd
4319 && strncmp (sec
->output_section
->name
, ".ovl.init", 9) == 0)
4320 fixed_size
-= sec
->size
;
4321 if (count
!= old_count
)
4322 bfd_arr
[bfd_count
++] = ibfd
;
4325 /* Since the overlay link script selects sections by file name and
4326 section name, ensure that file names are unique. */
4329 bfd_boolean ok
= TRUE
;
4331 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
4332 for (i
= 1; i
< bfd_count
; ++i
)
4333 if (filename_cmp (bfd_get_filename (bfd_arr
[i
- 1]),
4334 bfd_get_filename (bfd_arr
[i
])) == 0)
4336 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
4338 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
4339 /* xgettext:c-format */
4340 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
4341 bfd_get_filename (bfd_arr
[i
]),
4342 bfd_get_filename (bfd_arr
[i
]->my_archive
));
4344 info
->callbacks
->einfo (_("%s duplicated\n"),
4345 bfd_get_filename (bfd_arr
[i
]));
4351 info
->callbacks
->einfo (_("sorry, no support for duplicate "
4352 "object files in auto-overlay script\n"));
4353 bfd_set_error (bfd_error_bad_value
);
4359 fixed_size
+= reserved
;
4360 fixed_size
+= htab
->non_ovly_stub
* ovl_stub_size (htab
->params
);
4361 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
4363 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4365 /* Stubs in the non-icache area are bigger. */
4366 fixed_size
+= htab
->non_ovly_stub
* 16;
4367 /* Space for icache manager tables.
4368 a) Tag array, one quadword per cache line.
4369 - word 0: ia address of present line, init to zero. */
4370 fixed_size
+= 16 << htab
->num_lines_log2
;
4371 /* b) Rewrite "to" list, one quadword per cache line. */
4372 fixed_size
+= 16 << htab
->num_lines_log2
;
4373 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4374 to a power-of-two number of full quadwords) per cache line. */
4375 fixed_size
+= 16 << (htab
->fromelem_size_log2
4376 + htab
->num_lines_log2
);
4377 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4382 /* Guess number of overlays. Assuming overlay buffer is on
4383 average only half full should be conservative. */
4384 ovlynum
= (total_overlay_size
* 2 * htab
->params
->num_lines
4385 / (htab
->local_store
- fixed_size
));
4386 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4387 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
4391 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
4392 /* xgettext:c-format */
4393 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4394 "size of 0x%v exceeds local store\n"),
4395 (bfd_vma
) fixed_size
,
4396 (bfd_vma
) mos_param
.max_overlay_size
);
4398 /* Now see if we should put some functions in the non-overlay area. */
4399 else if (fixed_size
< htab
->params
->auto_overlay_fixed
)
4401 unsigned int max_fixed
, lib_size
;
4403 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
4404 if (max_fixed
> htab
->params
->auto_overlay_fixed
)
4405 max_fixed
= htab
->params
->auto_overlay_fixed
;
4406 lib_size
= max_fixed
- fixed_size
;
4407 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
4408 if (lib_size
== (unsigned int) -1)
4410 fixed_size
= max_fixed
- lib_size
;
4413 /* Build an array of sections, suitably sorted to place into
4415 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
4416 if (ovly_sections
== NULL
)
4418 ovly_p
= ovly_sections
;
4419 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
4421 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
4422 ovly_map
= bfd_malloc (count
* sizeof (*ovly_map
));
4423 if (ovly_map
== NULL
)
4426 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
4427 overlay_size
= (htab
->local_store
- fixed_size
) / htab
->params
->num_lines
;
4428 if (htab
->params
->line_size
!= 0)
4429 overlay_size
= htab
->params
->line_size
;
4432 while (base
< count
)
4434 unsigned int size
= 0, rosize
= 0, roalign
= 0;
4436 for (i
= base
; i
< count
; i
++)
4438 asection
*sec
, *rosec
;
4439 unsigned int tmp
, rotmp
;
4440 unsigned int num_stubs
;
4441 struct call_info
*call
, *pasty
;
4442 struct _spu_elf_section_data
*sec_data
;
4443 struct spu_elf_stack_info
*sinfo
;
4446 /* See whether we can add this section to the current
4447 overlay without overflowing our overlay buffer. */
4448 sec
= ovly_sections
[2 * i
];
4449 tmp
= align_power (size
, sec
->alignment_power
) + sec
->size
;
4451 rosec
= ovly_sections
[2 * i
+ 1];
4454 rotmp
= align_power (rotmp
, rosec
->alignment_power
) + rosec
->size
;
4455 if (roalign
< rosec
->alignment_power
)
4456 roalign
= rosec
->alignment_power
;
4458 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4460 if (sec
->segment_mark
)
4462 /* Pasted sections must stay together, so add their
4464 pasty
= find_pasted_call (sec
);
4465 while (pasty
!= NULL
)
4467 struct function_info
*call_fun
= pasty
->fun
;
4468 tmp
= (align_power (tmp
, call_fun
->sec
->alignment_power
)
4469 + call_fun
->sec
->size
);
4470 if (call_fun
->rodata
)
4472 rotmp
= (align_power (rotmp
,
4473 call_fun
->rodata
->alignment_power
)
4474 + call_fun
->rodata
->size
);
4475 if (roalign
< rosec
->alignment_power
)
4476 roalign
= rosec
->alignment_power
;
4478 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
4479 if (pasty
->is_pasted
)
4483 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4486 /* If we add this section, we might need new overlay call
4487 stubs. Add any overlay section calls to dummy_call. */
4489 sec_data
= spu_elf_section_data (sec
);
4490 sinfo
= sec_data
->u
.i
.stack_info
;
4491 for (k
= 0; k
< (unsigned) sinfo
->num_fun
; ++k
)
4492 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
4493 if (call
->is_pasted
)
4495 BFD_ASSERT (pasty
== NULL
);
4498 else if (call
->fun
->sec
->linker_mark
)
4500 if (!copy_callee (&dummy_caller
, call
))
4503 while (pasty
!= NULL
)
4505 struct function_info
*call_fun
= pasty
->fun
;
4507 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4508 if (call
->is_pasted
)
4510 BFD_ASSERT (pasty
== NULL
);
4513 else if (!copy_callee (&dummy_caller
, call
))
4517 /* Calculate call stub size. */
4519 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
4521 unsigned int stub_delta
= 1;
4523 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4524 stub_delta
= call
->count
;
4525 num_stubs
+= stub_delta
;
4527 /* If the call is within this overlay, we won't need a
4529 for (k
= base
; k
< i
+ 1; k
++)
4530 if (call
->fun
->sec
== ovly_sections
[2 * k
])
4532 num_stubs
-= stub_delta
;
4536 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4537 && num_stubs
> htab
->params
->max_branch
)
4539 if (align_power (tmp
, roalign
) + rotmp
4540 + num_stubs
* ovl_stub_size (htab
->params
) > overlay_size
)
4548 /* xgettext:c-format */
4549 info
->callbacks
->einfo (_("%pB:%pA%s exceeds overlay size\n"),
4550 ovly_sections
[2 * i
]->owner
,
4551 ovly_sections
[2 * i
],
4552 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
4553 bfd_set_error (bfd_error_bad_value
);
4557 while (dummy_caller
.call_list
!= NULL
)
4559 struct call_info
*call
= dummy_caller
.call_list
;
4560 dummy_caller
.call_list
= call
->next
;
4566 ovly_map
[base
++] = ovlynum
;
4569 script
= htab
->params
->spu_elf_open_overlay_script ();
4571 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4573 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4576 if (fprintf (script
,
4577 " . = ALIGN (%u);\n"
4578 " .ovl.init : { *(.ovl.init) }\n"
4579 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4580 htab
->params
->line_size
) <= 0)
4585 while (base
< count
)
4587 unsigned int indx
= ovlynum
- 1;
4588 unsigned int vma
, lma
;
4590 vma
= (indx
& (htab
->params
->num_lines
- 1)) << htab
->line_size_log2
;
4591 lma
= vma
+ (((indx
>> htab
->num_lines_log2
) + 1) << 18);
4593 if (fprintf (script
, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4594 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4595 ovlynum
, vma
, lma
) <= 0)
4598 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4599 ovly_map
, ovly_sections
, info
);
4600 if (base
== (unsigned) -1)
4603 if (fprintf (script
, " }\n") <= 0)
4609 if (fprintf (script
, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4610 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
)) <= 0)
4613 if (fprintf (script
, "}\nINSERT AFTER .toe;\n") <= 0)
4618 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4621 if (fprintf (script
,
4622 " . = ALIGN (16);\n"
4623 " .ovl.init : { *(.ovl.init) }\n"
4624 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4627 for (region
= 1; region
<= htab
->params
->num_lines
; region
++)
4631 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4639 /* We need to set lma since we are overlaying .ovl.init. */
4640 if (fprintf (script
,
4641 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4646 if (fprintf (script
, " OVERLAY :\n {\n") <= 0)
4650 while (base
< count
)
4652 if (fprintf (script
, " .ovly%u {\n", ovlynum
) <= 0)
4655 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4656 ovly_map
, ovly_sections
, info
);
4657 if (base
== (unsigned) -1)
4660 if (fprintf (script
, " }\n") <= 0)
4663 ovlynum
+= htab
->params
->num_lines
;
4664 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4668 if (fprintf (script
, " }\n") <= 0)
4672 if (fprintf (script
, "}\nINSERT BEFORE .text;\n") <= 0)
4677 free (ovly_sections
);
4679 if (fclose (script
) != 0)
4682 if (htab
->params
->auto_overlay
& AUTO_RELINK
)
4683 (*htab
->params
->spu_elf_relink
) ();
4688 bfd_set_error (bfd_error_system_call
);
4690 info
->callbacks
->einfo (_("%F%P: auto overlay error: %E\n"));
4694 /* Provide an estimate of total stack required. */
4697 spu_elf_stack_analysis (struct bfd_link_info
*info
)
4699 struct spu_link_hash_table
*htab
;
4700 struct _sum_stack_param sum_stack_param
;
4702 if (!discover_functions (info
))
4705 if (!build_call_tree (info
))
4708 htab
= spu_hash_table (info
);
4709 if (htab
->params
->stack_analysis
)
4711 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
4712 info
->callbacks
->minfo (_("\nStack size for functions. "
4713 "Annotations: '*' max stack, 't' tail call\n"));
4716 sum_stack_param
.emit_stack_syms
= htab
->params
->emit_stack_syms
;
4717 sum_stack_param
.overall_stack
= 0;
4718 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4721 if (htab
->params
->stack_analysis
)
4722 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
4723 (bfd_vma
) sum_stack_param
.overall_stack
);
4727 /* Perform a final link. */
4730 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
4732 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4734 if (htab
->params
->auto_overlay
)
4735 spu_elf_auto_overlay (info
);
4737 if ((htab
->params
->stack_analysis
4738 || (htab
->params
->ovly_flavour
== ovly_soft_icache
4739 && htab
->params
->lrlive_analysis
))
4740 && !spu_elf_stack_analysis (info
))
4741 info
->callbacks
->einfo (_("%X%P: stack/lrlive analysis error: %E\n"));
4743 if (!spu_elf_build_stubs (info
))
4744 info
->callbacks
->einfo (_("%F%P: can not build overlay stubs: %E\n"));
4746 return bfd_elf_final_link (output_bfd
, info
);
4749 /* Called when not normally emitting relocs, ie. !bfd_link_relocatable (info)
4750 and !info->emitrelocations. Returns a count of special relocs
4751 that need to be emitted. */
4754 spu_elf_count_relocs (struct bfd_link_info
*info
, asection
*sec
)
4756 Elf_Internal_Rela
*relocs
;
4757 unsigned int count
= 0;
4759 relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
4763 Elf_Internal_Rela
*rel
;
4764 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
4766 for (rel
= relocs
; rel
< relend
; rel
++)
4768 int r_type
= ELF32_R_TYPE (rel
->r_info
);
4769 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4773 if (elf_section_data (sec
)->relocs
!= relocs
)
4780 /* Functions for adding fixup records to .fixup */
4782 #define FIXUP_RECORD_SIZE 4
4784 #define FIXUP_PUT(output_bfd,htab,index,addr) \
4785 bfd_put_32 (output_bfd, addr, \
4786 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4787 #define FIXUP_GET(output_bfd,htab,index) \
4788 bfd_get_32 (output_bfd, \
4789 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4791 /* Store OFFSET in .fixup. This assumes it will be called with an
4792 increasing OFFSET. When this OFFSET fits with the last base offset,
4793 it just sets a bit, otherwise it adds a new fixup record. */
4795 spu_elf_emit_fixup (bfd
* output_bfd
, struct bfd_link_info
*info
,
4798 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4799 asection
*sfixup
= htab
->sfixup
;
4800 bfd_vma qaddr
= offset
& ~(bfd_vma
) 15;
4801 bfd_vma bit
= ((bfd_vma
) 8) >> ((offset
& 15) >> 2);
4802 if (sfixup
->reloc_count
== 0)
4804 FIXUP_PUT (output_bfd
, htab
, 0, qaddr
| bit
);
4805 sfixup
->reloc_count
++;
4809 bfd_vma base
= FIXUP_GET (output_bfd
, htab
, sfixup
->reloc_count
- 1);
4810 if (qaddr
!= (base
& ~(bfd_vma
) 15))
4812 if ((sfixup
->reloc_count
+ 1) * FIXUP_RECORD_SIZE
> sfixup
->size
)
4813 _bfd_error_handler (_("fatal error while creating .fixup"));
4814 FIXUP_PUT (output_bfd
, htab
, sfixup
->reloc_count
, qaddr
| bit
);
4815 sfixup
->reloc_count
++;
4818 FIXUP_PUT (output_bfd
, htab
, sfixup
->reloc_count
- 1, base
| bit
);
4822 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4825 spu_elf_relocate_section (bfd
*output_bfd
,
4826 struct bfd_link_info
*info
,
4828 asection
*input_section
,
4830 Elf_Internal_Rela
*relocs
,
4831 Elf_Internal_Sym
*local_syms
,
4832 asection
**local_sections
)
4834 Elf_Internal_Shdr
*symtab_hdr
;
4835 struct elf_link_hash_entry
**sym_hashes
;
4836 Elf_Internal_Rela
*rel
, *relend
;
4837 struct spu_link_hash_table
*htab
;
4840 bfd_boolean emit_these_relocs
= FALSE
;
4841 bfd_boolean is_ea_sym
;
4843 unsigned int iovl
= 0;
4845 htab
= spu_hash_table (info
);
4846 stubs
= (htab
->stub_sec
!= NULL
4847 && maybe_needs_stubs (input_section
));
4848 iovl
= overlay_index (input_section
);
4849 ea
= bfd_get_section_by_name (output_bfd
, "._ea");
4850 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
4851 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
4854 relend
= relocs
+ input_section
->reloc_count
;
4855 for (; rel
< relend
; rel
++)
4858 reloc_howto_type
*howto
;
4859 unsigned int r_symndx
;
4860 Elf_Internal_Sym
*sym
;
4862 struct elf_link_hash_entry
*h
;
4863 const char *sym_name
;
4866 bfd_reloc_status_type r
;
4867 bfd_boolean unresolved_reloc
;
4868 enum _stub_type stub_type
;
4870 r_symndx
= ELF32_R_SYM (rel
->r_info
);
4871 r_type
= ELF32_R_TYPE (rel
->r_info
);
4872 howto
= elf_howto_table
+ r_type
;
4873 unresolved_reloc
= FALSE
;
4877 if (r_symndx
< symtab_hdr
->sh_info
)
4879 sym
= local_syms
+ r_symndx
;
4880 sec
= local_sections
[r_symndx
];
4881 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
4882 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4886 if (sym_hashes
== NULL
)
4889 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
4891 if (info
->wrap_hash
!= NULL
4892 && (input_section
->flags
& SEC_DEBUGGING
) != 0)
4893 h
= ((struct elf_link_hash_entry
*)
4894 unwrap_hash_lookup (info
, input_bfd
, &h
->root
));
4896 while (h
->root
.type
== bfd_link_hash_indirect
4897 || h
->root
.type
== bfd_link_hash_warning
)
4898 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
4901 if (h
->root
.type
== bfd_link_hash_defined
4902 || h
->root
.type
== bfd_link_hash_defweak
)
4904 sec
= h
->root
.u
.def
.section
;
4906 || sec
->output_section
== NULL
)
4907 /* Set a flag that will be cleared later if we find a
4908 relocation value for this symbol. output_section
4909 is typically NULL for symbols satisfied by a shared
4911 unresolved_reloc
= TRUE
;
4913 relocation
= (h
->root
.u
.def
.value
4914 + sec
->output_section
->vma
4915 + sec
->output_offset
);
4917 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4919 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4920 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4922 else if (!bfd_link_relocatable (info
)
4923 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4927 err
= (info
->unresolved_syms_in_objects
== RM_DIAGNOSE
4928 && !info
->warn_unresolved_syms
)
4929 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
;
4931 info
->callbacks
->undefined_symbol
4932 (info
, h
->root
.root
.string
, input_bfd
,
4933 input_section
, rel
->r_offset
, err
);
4935 sym_name
= h
->root
.root
.string
;
4938 if (sec
!= NULL
&& discarded_section (sec
))
4939 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
4940 rel
, 1, relend
, howto
, 0, contents
);
4942 if (bfd_link_relocatable (info
))
4945 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4946 if (r_type
== R_SPU_ADD_PIC
4948 && !(h
->def_regular
|| ELF_COMMON_DEF_P (h
)))
4950 bfd_byte
*loc
= contents
+ rel
->r_offset
;
4956 is_ea_sym
= (ea
!= NULL
4958 && sec
->output_section
== ea
);
4960 /* If this symbol is in an overlay area, we may need to relocate
4961 to the overlay stub. */
4962 addend
= rel
->r_addend
;
4965 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4966 contents
, info
)) != no_stub
)
4968 unsigned int ovl
= 0;
4969 struct got_entry
*g
, **head
;
4971 if (stub_type
!= nonovl_stub
)
4975 head
= &h
->got
.glist
;
4977 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4979 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4980 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4982 && g
->br_addr
== (rel
->r_offset
4983 + input_section
->output_offset
4984 + input_section
->output_section
->vma
))
4985 : g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4990 relocation
= g
->stub_addr
;
4995 /* For soft icache, encode the overlay index into addresses. */
4996 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4997 && (r_type
== R_SPU_ADDR16_HI
4998 || r_type
== R_SPU_ADDR32
|| r_type
== R_SPU_REL32
)
5001 unsigned int ovl
= overlay_index (sec
);
5004 unsigned int set_id
= ((ovl
- 1) >> htab
->num_lines_log2
) + 1;
5005 relocation
+= set_id
<< 18;
5010 if (htab
->params
->emit_fixups
&& !bfd_link_relocatable (info
)
5011 && (input_section
->flags
& SEC_ALLOC
) != 0
5012 && r_type
== R_SPU_ADDR32
)
5015 offset
= rel
->r_offset
+ input_section
->output_section
->vma
5016 + input_section
->output_offset
;
5017 spu_elf_emit_fixup (output_bfd
, info
, offset
);
5020 if (unresolved_reloc
)
5022 else if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
5026 /* ._ea is a special section that isn't allocated in SPU
5027 memory, but rather occupies space in PPU memory as
5028 part of an embedded ELF image. If this reloc is
5029 against a symbol defined in ._ea, then transform the
5030 reloc into an equivalent one without a symbol
5031 relative to the start of the ELF image. */
5032 rel
->r_addend
+= (relocation
5034 + elf_section_data (ea
)->this_hdr
.sh_offset
);
5035 rel
->r_info
= ELF32_R_INFO (0, r_type
);
5037 emit_these_relocs
= TRUE
;
5041 unresolved_reloc
= TRUE
;
5043 if (unresolved_reloc
5044 && _bfd_elf_section_offset (output_bfd
, info
, input_section
,
5045 rel
->r_offset
) != (bfd_vma
) -1)
5048 /* xgettext:c-format */
5049 (_("%pB(%s+%#" PRIx64
"): "
5050 "unresolvable %s relocation against symbol `%s'"),
5052 bfd_section_name (input_section
),
5053 (uint64_t) rel
->r_offset
,
5059 r
= _bfd_final_link_relocate (howto
,
5063 rel
->r_offset
, relocation
, addend
);
5065 if (r
!= bfd_reloc_ok
)
5067 const char *msg
= (const char *) 0;
5071 case bfd_reloc_overflow
:
5072 (*info
->callbacks
->reloc_overflow
)
5073 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
5074 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
);
5077 case bfd_reloc_undefined
:
5078 (*info
->callbacks
->undefined_symbol
)
5079 (info
, sym_name
, input_bfd
, input_section
, rel
->r_offset
, TRUE
);
5082 case bfd_reloc_outofrange
:
5083 msg
= _("internal error: out of range error");
5086 case bfd_reloc_notsupported
:
5087 msg
= _("internal error: unsupported relocation error");
5090 case bfd_reloc_dangerous
:
5091 msg
= _("internal error: dangerous error");
5095 msg
= _("internal error: unknown error");
5100 (*info
->callbacks
->warning
) (info
, msg
, sym_name
, input_bfd
,
5101 input_section
, rel
->r_offset
);
5108 && emit_these_relocs
5109 && !info
->emitrelocations
)
5111 Elf_Internal_Rela
*wrel
;
5112 Elf_Internal_Shdr
*rel_hdr
;
5114 wrel
= rel
= relocs
;
5115 relend
= relocs
+ input_section
->reloc_count
;
5116 for (; rel
< relend
; rel
++)
5120 r_type
= ELF32_R_TYPE (rel
->r_info
);
5121 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
5124 input_section
->reloc_count
= wrel
- relocs
;
5125 /* Backflips for _bfd_elf_link_output_relocs. */
5126 rel_hdr
= _bfd_elf_single_rel_hdr (input_section
);
5127 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
5135 spu_elf_finish_dynamic_sections (bfd
*output_bfd ATTRIBUTE_UNUSED
,
5136 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
5141 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5144 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
5145 const char *sym_name ATTRIBUTE_UNUSED
,
5146 Elf_Internal_Sym
*sym
,
5147 asection
*sym_sec ATTRIBUTE_UNUSED
,
5148 struct elf_link_hash_entry
*h
)
5150 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5152 if (!bfd_link_relocatable (info
)
5153 && htab
->stub_sec
!= NULL
5155 && (h
->root
.type
== bfd_link_hash_defined
5156 || h
->root
.type
== bfd_link_hash_defweak
)
5158 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
5160 struct got_entry
*g
;
5162 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
5163 if (htab
->params
->ovly_flavour
== ovly_soft_icache
5164 ? g
->br_addr
== g
->stub_addr
5165 : g
->addend
== 0 && g
->ovl
== 0)
5167 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
5168 (htab
->stub_sec
[0]->output_section
->owner
,
5169 htab
->stub_sec
[0]->output_section
));
5170 sym
->st_value
= g
->stub_addr
;
/* Non-zero when linking an SPU image to be embedded in a PPU program
   (a "plugin").  */
static int spu_plugin = 0;

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5186 /* Set ELF header e_type for plugins. */
5189 spu_elf_init_file_header (bfd
*abfd
, struct bfd_link_info
*info
)
5191 if (!_bfd_elf_init_file_header (abfd
, info
))
5196 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
5198 i_ehdrp
->e_type
= ET_DYN
;
5203 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5204 segments for overlays. */
5207 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5214 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5215 extra
= htab
->num_overlays
;
5221 sec
= bfd_get_section_by_name (abfd
, ".toe");
5222 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
5228 /* Remove .toe section from other PT_LOAD segments and put it in
5229 a segment of its own. Put overlays in separate segments too. */
5232 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
5235 struct elf_segment_map
*m
, *m_overlay
;
5236 struct elf_segment_map
**p
, **p_overlay
, **first_load
;
5242 toe
= bfd_get_section_by_name (abfd
, ".toe");
5243 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
5244 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
5245 for (i
= 0; i
< m
->count
; i
++)
5246 if ((s
= m
->sections
[i
]) == toe
5247 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
5249 struct elf_segment_map
*m2
;
5252 if (i
+ 1 < m
->count
)
5254 amt
= sizeof (struct elf_segment_map
);
5255 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
5256 m2
= bfd_zalloc (abfd
, amt
);
5259 m2
->count
= m
->count
- (i
+ 1);
5260 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
5261 m2
->count
* sizeof (m
->sections
[0]));
5262 m2
->p_type
= PT_LOAD
;
5270 amt
= sizeof (struct elf_segment_map
);
5271 m2
= bfd_zalloc (abfd
, amt
);
5274 m2
->p_type
= PT_LOAD
;
5276 m2
->sections
[0] = s
;
5284 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5285 PT_LOAD segments. This can cause the .ovl.init section to be
5286 overwritten with the contents of some overlay segment. To work
5287 around this issue, we ensure that all PF_OVERLAY segments are
5288 sorted first amongst the program headers; this ensures that even
5289 with a broken loader, the .ovl.init section (which is not marked
5290 as PF_OVERLAY) will be placed into SPU local store on startup. */
5292 /* Move all overlay segments onto a separate list. */
5293 p
= &elf_seg_map (abfd
);
5294 p_overlay
= &m_overlay
;
5299 if ((*p
)->p_type
== PT_LOAD
)
5303 if ((*p
)->count
== 1
5304 && spu_elf_section_data ((*p
)->sections
[0])->u
.o
.ovl_index
!= 0)
5310 p_overlay
= &m
->next
;
5317 /* Re-insert overlay segments at the head of the segment map. */
5318 if (m_overlay
!= NULL
)
5321 if (*p
!= NULL
&& (*p
)->p_type
== PT_LOAD
&& (*p
)->includes_filehdr
)
5322 /* It doesn't really make sense for someone to include the ELF
5323 file header into an spu image, but if they do the code that
5324 assigns p_offset needs to see the segment containing the
5334 /* Tweak the section type of .note.spu_name. */
5337 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
5338 Elf_Internal_Shdr
*hdr
,
5341 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
5342 hdr
->sh_type
= SHT_NOTE
;
5346 /* Tweak phdrs before writing them out. */
/* elf_backend_modify_headers hook.  From the visible fragments this
   function does two things when linking with overlays:
     1. marks each program header whose first section is an overlay
	(non-zero ovl_index) with the SPU-specific PF_OVERLAY flag, and
	records the segment's file offset either in the _ovly_table
	(non-soft-icache flavours) or in .ovl.init (soft-icache);
     2. rounds p_filesz/p_memsz of PT_LOAD segments up to a multiple of
	16 (the SPU DMA granule), first verifying in a scan that the
	rounding cannot make segments overlap.
   It finishes by delegating to the generic _bfd_elf_modify_headers.
   NOTE(review): the extraction dropped many interior lines (gaps in the
   embedded numbering, e.g. 5350-5352, 5357-5359, 5405-5407, 5421-5422,
   5438-5440) — local declarations such as the loop index, the phdr
   count and the `adjust' temporaries, plus braces and `break'
   statements, are missing here.  Restore from the upstream file.  */
5349 spu_elf_modify_headers (bfd
*abfd
, struct bfd_link_info
*info
)
/* Locals visible in the fragments: backend data, the object tdata that
   holds the phdr array, a cursor/`last' pair over Elf_Internal_Phdr,
   and the SPU link hash table.  */
5353 const struct elf_backend_data
*bed
;
5354 struct elf_obj_tdata
*tdata
;
5355 Elf_Internal_Phdr
*phdr
, *last
;
5356 struct spu_link_hash_table
*htab
;
5360 bed
= get_elf_backend_data (abfd
);
5361 tdata
= elf_tdata (abfd
);
/* Number of program headers = total phdr bytes / per-entry size.  */
5363 count
= elf_program_header_size (abfd
) / bed
->s
->sizeof_phdr
;
5364 htab
= spu_hash_table (info
);
/* Overlay bookkeeping is only needed when overlays exist.  */
5365 if (htab
->num_overlays
!= 0)
5367 struct elf_segment_map
*m
;
/* Walk the segment map in parallel with the phdr array (index i).  */
5370 for (i
= 0, m
= elf_seg_map (abfd
); m
; ++i
, m
= m
->next
)
/* Missing line 5371 presumably carried the first half of this
   condition (a check on the segment before reading sections[0]) —
   confirm against the upstream file.  */
5372 && ((o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
)
5375 /* Mark this as an overlay header. */
5376 phdr
[i
].p_flags
|= PF_OVERLAY
;
/* For the classic (non-soft-icache) overlay flavour, patch the
   segment's file offset into the in-memory _ovly_table image.  */
5378 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0
5379 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
5381 bfd_byte
*p
= htab
->ovtab
->contents
;
/* Each _ovly_table entry is 16 bytes; file_off lives at offset 8
   within entry `o' (entries are 1-based per the multiply).  */
5382 unsigned int off
= o
* 16 + 8;
5384 /* Write file_off into _ovly_table. */
5385 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
5388 /* Soft-icache has its file offset put in .ovl.init. */
5389 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
/* Missing line 5391 presumably declared `val' (bfd_vma) — the
   section file offset of the first overlay section.  */
5392 = elf_section_data (htab
->ovl_sec
[0])->this_hdr
.sh_offset
;
5394 bfd_put_32 (htab
->init
->owner
, val
, htab
->init
->contents
+ 4);
5398 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5399 of 16. This should always be possible when using the standard
5400 linker scripts, but don't create overlapping segments if
5401 someone is playing games with linker scripts. */
/* First pass (highest index downward): verify that rounding each
   PT_LOAD up to 16 cannot collide with the following segment
   (`last').  The pass presumably breaks out early on conflict,
   leaving i != (unsigned int) -1 — the break statements fall in the
   missing lines.  */
5403 for (i
= count
; i
-- != 0; )
5404 if (phdr
[i
].p_type
== PT_LOAD
)
/* adjust = number of bytes needed to round p_filesz up to 16.  */
5408 adjust
= -phdr
[i
].p_filesz
& 15;
5411 && (phdr
[i
].p_offset
+ phdr
[i
].p_filesz
5412 > last
->p_offset
- adjust
))
/* Same check for p_memsz against the next segment's vaddr.  */
5415 adjust
= -phdr
[i
].p_memsz
& 15;
5418 && phdr
[i
].p_filesz
!= 0
5419 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
5420 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
/* Remember the lowest-addressed segment with contents seen so far.  */
5423 if (phdr
[i
].p_filesz
!= 0)
/* Second pass: only if the scan above ran to completion (i wrapped
   to -1) actually apply the rounding.  */
5427 if (i
== (unsigned int) -1)
5428 for (i
= count
; i
-- != 0; )
5429 if (phdr
[i
].p_type
== PT_LOAD
)
5433 adjust
= -phdr
[i
].p_filesz
& 15;
5434 phdr
[i
].p_filesz
+= adjust
;
5436 adjust
= -phdr
[i
].p_memsz
& 15;
5437 phdr
[i
].p_memsz
+= adjust
;
/* Let the generic ELF code do its own header tweaks afterwards.  */
5441 return _bfd_elf_modify_headers (abfd
, info
);
/* Size the .fixup section when --emit-fixups is in effect: scan every
   allocated, relocated input section for R_SPU_ADDR32 relocations and
   count one fixup record per 16-byte quadword that contains at least
   one such reloc (a quadword can hold up to four ADDR32 words, encoded
   as one record), then allocate zeroed contents for the section with
   room for a trailing NULL sentinel record.
   NOTE(review): extraction-mangled — the embedded numbering has gaps
   (e.g. 5446, 5449, 5452-5454, 5493, 5495-5499, 5506-5510), so local
   declarations (ibfd, isec, base_end, size), `continue' statements,
   braces and the final return are missing here.  Restore from the
   upstream file.  */
5445 spu_elf_size_sections (bfd
*obfd ATTRIBUTE_UNUSED
, struct bfd_link_info
*info
)
5447 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
/* Nothing to do unless the user asked for fixup emission.  */
5448 if (htab
->params
->emit_fixups
)
5450 asection
*sfixup
= htab
->sfixup
;
5451 int fixup_count
= 0;
/* Iterate over every input bfd on the link.  */
5455 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
/* Skip non-ELF inputs (presumably via a `continue' in the missing
   lines).  */
5459 if (bfd_get_flavour (ibfd
) != bfd_target_elf_flavour
)
5462 /* Walk over each section attached to the input bfd. */
5463 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
5465 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
5468 /* If there aren't any relocs, then there's nothing more
5470 if ((isec
->flags
& SEC_ALLOC
) == 0
5471 || (isec
->flags
& SEC_RELOC
) == 0
5472 || isec
->reloc_count
== 0)
5475 /* Get the relocs. */
5477 _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
5479 if (internal_relocs
== NULL
)
5482 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5483 relocations. They are stored in a single word by
5484 saving the upper 28 bits of the address and setting the
5485 lower 4 bits to a bit mask of the words that have the
5486 relocation. BASE_END keeps track of the next quadword. */
5487 irela
= internal_relocs
;
5488 irelaend
= irela
+ isec
->reloc_count
;
/* Count one record per quadword that holds an ADDR32 reloc: only
   relocs at or past the current quadword boundary start a new
   record (fixup_count increment falls in the missing lines).  */
5490 for (; irela
< irelaend
; irela
++)
5491 if (ELF32_R_TYPE (irela
->r_info
) == R_SPU_ADDR32
5492 && irela
->r_offset
>= base_end
)
/* Advance base_end to the start of the next 16-byte quadword.  */
5494 base_end
= (irela
->r_offset
& ~(bfd_vma
) 15) + 16;
5500 /* We always have a NULL fixup as a sentinel */
5501 size
= (fixup_count
+ 1) * FIXUP_RECORD_SIZE
;
5502 if (!bfd_set_section_size (sfixup
, size
))
/* Zero-filled so the sentinel record is already in place.  */
5504 sfixup
->contents
= (bfd_byte
*) bfd_zalloc (info
->input_bfds
, size
);
5505 if (sfixup
->contents
== NULL
)
/* Target vector configuration for the 32-bit big-endian SPU ELF
   backend.  These macros parameterise the generic template pulled in
   by elf32-target.h at the end of the file.  */
5511 #define TARGET_BIG_SYM spu_elf32_vec
5512 #define TARGET_BIG_NAME "elf32-spu"
5513 #define ELF_ARCH bfd_arch_spu
5514 #define ELF_TARGET_ID SPU_ELF_DATA
5515 #define ELF_MACHINE_CODE EM_SPU
5516 /* This matches the alignment need for DMA. */
5517 #define ELF_MAXPAGESIZE 0x80
5518 #define elf_backend_rela_normal 1
5519 #define elf_backend_can_gc_sections 1
/* Relocation lookup and processing hooks.  */
5521 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5522 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5523 #define elf_info_to_howto spu_elf_info_to_howto
5524 #define elf_backend_count_relocs spu_elf_count_relocs
5525 #define elf_backend_relocate_section spu_elf_relocate_section
5526 #define elf_backend_finish_dynamic_sections spu_elf_finish_dynamic_sections
/* Symbol and object-file hooks.  */
5527 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5528 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5529 #define elf_backend_object_p spu_elf_object_p
5530 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5531 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
/* Program-header / section-header tweaking hooks (defined above).  */
5533 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5534 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5535 #define elf_backend_modify_headers spu_elf_modify_headers
5536 #define elf_backend_init_file_header spu_elf_init_file_header
5537 #define elf_backend_fake_sections spu_elf_fake_sections
5538 #define elf_backend_special_sections spu_elf_special_sections
5539 #define bfd_elf32_bfd_final_link spu_elf_final_link
/* Instantiate the target vector from the generic 32-bit ELF template.  */
5541 #include "elf32-target.h"