1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
93 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
94 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
95 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
99 static enum elf_spu_reloc_type
100 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
106 case BFD_RELOC_SPU_IMM10W
:
108 case BFD_RELOC_SPU_IMM16W
:
110 case BFD_RELOC_SPU_LO16
:
111 return R_SPU_ADDR16_LO
;
112 case BFD_RELOC_SPU_HI16
:
113 return R_SPU_ADDR16_HI
;
114 case BFD_RELOC_SPU_IMM18
:
116 case BFD_RELOC_SPU_PCREL16
:
118 case BFD_RELOC_SPU_IMM7
:
120 case BFD_RELOC_SPU_IMM8
:
122 case BFD_RELOC_SPU_PCREL9a
:
124 case BFD_RELOC_SPU_PCREL9b
:
126 case BFD_RELOC_SPU_IMM10
:
127 return R_SPU_ADDR10I
;
128 case BFD_RELOC_SPU_IMM16
:
129 return R_SPU_ADDR16I
;
132 case BFD_RELOC_32_PCREL
:
134 case BFD_RELOC_SPU_PPU32
:
136 case BFD_RELOC_SPU_PPU64
:
142 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
144 Elf_Internal_Rela
*dst
)
146 enum elf_spu_reloc_type r_type
;
148 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
149 BFD_ASSERT (r_type
< R_SPU_max
);
150 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
153 static reloc_howto_type
*
154 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
155 bfd_reloc_code_real_type code
)
157 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
159 if (r_type
== R_SPU_NONE
)
162 return elf_howto_table
+ r_type
;
165 static reloc_howto_type
*
166 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
171 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
172 if (elf_howto_table
[i
].name
!= NULL
173 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
174 return &elf_howto_table
[i
];
179 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
181 static bfd_reloc_status_type
182 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
183 void *data
, asection
*input_section
,
184 bfd
*output_bfd
, char **error_message
)
186 bfd_size_type octets
;
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
193 if (output_bfd
!= NULL
)
194 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
195 input_section
, output_bfd
, error_message
);
197 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
198 return bfd_reloc_outofrange
;
199 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
201 /* Get symbol value. */
203 if (!bfd_is_com_section (symbol
->section
))
205 if (symbol
->section
->output_section
)
206 val
+= symbol
->section
->output_section
->vma
;
208 val
+= reloc_entry
->addend
;
210 /* Make it pc-relative. */
211 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
214 if (val
+ 256 >= 512)
215 return bfd_reloc_overflow
;
217 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
222 insn
&= ~reloc_entry
->howto
->dst_mask
;
223 insn
|= val
& reloc_entry
->howto
->dst_mask
;
224 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
229 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
231 if (!sec
->used_by_bfd
)
233 struct _spu_elf_section_data
*sdata
;
235 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
238 sec
->used_by_bfd
= sdata
;
241 return _bfd_elf_new_section_hook (abfd
, sec
);
244 /* Set up overlay info for executables. */
247 spu_elf_object_p (bfd
*abfd
)
249 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
251 unsigned int i
, num_ovl
, num_buf
;
252 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
253 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
254 Elf_Internal_Phdr
*last_phdr
= NULL
;
256 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
257 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
262 if (last_phdr
== NULL
263 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
266 for (j
= 1; j
< elf_numsections (abfd
); j
++)
268 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
270 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr
, phdr
))
272 asection
*sec
= shdr
->bfd_section
;
273 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
274 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
282 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
286 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
288 if (sym
->name
!= NULL
289 && sym
->section
!= bfd_abs_section_ptr
290 && strncmp (sym
->name
, "_EAR_", 5) == 0)
291 sym
->flags
|= BSF_KEEP
;
294 /* SPU ELF linker hash table. */
296 struct spu_link_hash_table
298 struct elf_link_hash_table elf
;
300 struct spu_elf_params
*params
;
302 /* Shortcuts to overlay sections. */
308 /* Count of stubs in each overlay section. */
309 unsigned int *stub_count
;
311 /* The stub section for each overlay section. */
314 struct elf_link_hash_entry
*ovly_load
;
315 struct elf_link_hash_entry
*ovly_return
;
316 unsigned long ovly_load_r_symndx
;
318 /* Number of overlay buffers. */
319 unsigned int num_buf
;
321 /* Total number of overlays. */
322 unsigned int num_overlays
;
324 /* For soft icache. */
325 unsigned int line_size_log2
;
326 unsigned int num_lines_log2
;
328 /* How much memory we have. */
329 unsigned int local_store
;
330 /* Local store --auto-overlay should reserve for non-overlay
331 functions and data. */
332 unsigned int overlay_fixed
;
333 /* Local store --auto-overlay should reserve for stack and heap. */
334 unsigned int reserved
;
335 /* If reserved is not specified, stack analysis will calculate a value
336 for the stack. This parameter adjusts that value to allow for
337 negative sp access (the ABI says 2000 bytes below sp are valid,
338 and the overlay manager uses some of this area). */
339 int extra_stack_space
;
340 /* Count of overlay stubs needed in non-overlay area. */
341 unsigned int non_ovly_stub
;
344 unsigned int stub_err
: 1;
347 /* Hijack the generic got fields for overlay stub accounting. */
351 struct got_entry
*next
;
360 #define spu_hash_table(p) \
361 ((struct spu_link_hash_table *) ((p)->hash))
365 struct function_info
*fun
;
366 struct call_info
*next
;
368 unsigned int max_depth
;
369 unsigned int is_tail
: 1;
370 unsigned int is_pasted
: 1;
371 unsigned int priority
: 13;
376 /* List of functions called. Also branches to hot/cold part of
378 struct call_info
*call_list
;
379 /* For hot/cold part of function, point to owner. */
380 struct function_info
*start
;
381 /* Symbol at start of function. */
383 Elf_Internal_Sym
*sym
;
384 struct elf_link_hash_entry
*h
;
386 /* Function section. */
389 /* Where last called from, and number of sections called from. */
390 asection
*last_caller
;
391 unsigned int call_count
;
392 /* Address range of (this part of) function. */
394 /* Offset where we found a store of lr, or -1 if none found. */
396 /* Offset where we found the stack adjustment insn. */
400 /* Distance from root of call tree. Tail and hot/cold branches
401 count as one deeper. We aren't counting stack frames here. */
403 /* Set if global symbol. */
404 unsigned int global
: 1;
405 /* Set if known to be start of function (as distinct from a hunk
406 in hot/cold section. */
407 unsigned int is_func
: 1;
408 /* Set if not a root node. */
409 unsigned int non_root
: 1;
410 /* Flags used during call tree traversal. It's cheaper to replicate
411 the visit flags than have one which needs clearing after a traversal. */
412 unsigned int visit1
: 1;
413 unsigned int visit2
: 1;
414 unsigned int marking
: 1;
415 unsigned int visit3
: 1;
416 unsigned int visit4
: 1;
417 unsigned int visit5
: 1;
418 unsigned int visit6
: 1;
419 unsigned int visit7
: 1;
422 struct spu_elf_stack_info
426 /* Variable size array describing functions, one per contiguous
427 address range belonging to a function. */
428 struct function_info fun
[1];
431 static struct function_info
*find_function (asection
*, bfd_vma
,
432 struct bfd_link_info
*);
434 /* Create a spu ELF linker hash table. */
436 static struct bfd_link_hash_table
*
437 spu_elf_link_hash_table_create (bfd
*abfd
)
439 struct spu_link_hash_table
*htab
;
441 htab
= bfd_malloc (sizeof (*htab
));
445 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
446 _bfd_elf_link_hash_newfunc
,
447 sizeof (struct elf_link_hash_entry
)))
453 memset (&htab
->ovtab
, 0,
454 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
456 htab
->elf
.init_got_refcount
.refcount
= 0;
457 htab
->elf
.init_got_refcount
.glist
= NULL
;
458 htab
->elf
.init_got_offset
.offset
= 0;
459 htab
->elf
.init_got_offset
.glist
= NULL
;
460 return &htab
->elf
.root
;
464 spu_elf_setup (struct bfd_link_info
*info
, struct spu_elf_params
*params
)
466 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
467 htab
->params
= params
;
468 htab
->line_size_log2
= bfd_log2 (htab
->params
->line_size
);
469 htab
->num_lines_log2
= bfd_log2 (htab
->params
->num_lines
);
472 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
473 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
474 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
477 get_sym_h (struct elf_link_hash_entry
**hp
,
478 Elf_Internal_Sym
**symp
,
480 Elf_Internal_Sym
**locsymsp
,
481 unsigned long r_symndx
,
484 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
486 if (r_symndx
>= symtab_hdr
->sh_info
)
488 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
489 struct elf_link_hash_entry
*h
;
491 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
492 while (h
->root
.type
== bfd_link_hash_indirect
493 || h
->root
.type
== bfd_link_hash_warning
)
494 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
504 asection
*symsec
= NULL
;
505 if (h
->root
.type
== bfd_link_hash_defined
506 || h
->root
.type
== bfd_link_hash_defweak
)
507 symsec
= h
->root
.u
.def
.section
;
513 Elf_Internal_Sym
*sym
;
514 Elf_Internal_Sym
*locsyms
= *locsymsp
;
518 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
520 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
522 0, NULL
, NULL
, NULL
);
527 sym
= locsyms
+ r_symndx
;
536 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
542 /* Create the note section if not already present. This is done early so
543 that the linker maps the sections to the right place in the output. */
546 spu_elf_create_sections (struct bfd_link_info
*info
)
550 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
551 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
556 /* Make SPU_PTNOTE_SPUNAME section. */
563 ibfd
= info
->input_bfds
;
564 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
565 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
567 || !bfd_set_section_alignment (ibfd
, s
, 4))
570 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
571 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
572 size
+= (name_len
+ 3) & -4;
574 if (!bfd_set_section_size (ibfd
, s
, size
))
577 data
= bfd_zalloc (ibfd
, size
);
581 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
582 bfd_put_32 (ibfd
, name_len
, data
+ 4);
583 bfd_put_32 (ibfd
, 1, data
+ 8);
584 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
585 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
586 bfd_get_filename (info
->output_bfd
), name_len
);
593 /* qsort predicate to sort sections by vma. */
596 sort_sections (const void *a
, const void *b
)
598 const asection
*const *s1
= a
;
599 const asection
*const *s2
= b
;
600 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
603 return delta
< 0 ? -1 : 1;
605 return (*s1
)->index
- (*s2
)->index
;
608 /* Identify overlays in the output bfd, and number them. */
611 spu_elf_find_overlays (struct bfd_link_info
*info
)
613 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
614 asection
**alloc_sec
;
615 unsigned int i
, n
, ovl_index
, num_buf
;
618 const char *ovly_mgr_entry
;
620 if (info
->output_bfd
->section_count
< 2)
624 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
625 if (alloc_sec
== NULL
)
628 /* Pick out all the alloced sections. */
629 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
630 if ((s
->flags
& SEC_ALLOC
) != 0
631 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
641 /* Sort them by vma. */
642 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
644 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
645 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
647 /* Look for an overlapping vma to find the first overlay section. */
648 bfd_vma vma_start
= 0;
649 bfd_vma lma_start
= 0;
651 for (i
= 1; i
< n
; i
++)
654 if (s
->vma
< ovl_end
)
656 asection
*s0
= alloc_sec
[i
- 1];
661 << (htab
->num_lines_log2
+ htab
->line_size_log2
)));
666 ovl_end
= s
->vma
+ s
->size
;
669 /* Now find any sections within the cache area. */
670 for (ovl_index
= 0, num_buf
= 0; i
< n
; i
++)
673 if (s
->vma
>= ovl_end
)
676 /* A section in an overlay area called .ovl.init is not
677 an overlay, in the sense that it might be loaded in
678 by the overlay manager, but rather the initial
679 section contents for the overlay buffer. */
680 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
682 num_buf
= ((s
->vma
- vma_start
) >> htab
->line_size_log2
) + 1;
683 if (((s
->vma
- vma_start
) & (htab
->params
->line_size
- 1))
684 || ((s
->lma
- lma_start
) & (htab
->params
->line_size
- 1)))
686 info
->callbacks
->einfo (_("%X%P: overlay section %A "
687 "does not start on a cache line.\n"),
689 bfd_set_error (bfd_error_bad_value
);
692 else if (s
->size
> htab
->params
->line_size
)
694 info
->callbacks
->einfo (_("%X%P: overlay section %A "
695 "is larger than a cache line.\n"),
697 bfd_set_error (bfd_error_bad_value
);
701 alloc_sec
[ovl_index
++] = s
;
702 spu_elf_section_data (s
)->u
.o
.ovl_index
703 = ((s
->lma
- lma_start
) >> htab
->line_size_log2
) + 1;
704 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
708 /* Ensure there are no more overlay sections. */
712 if (s
->vma
< ovl_end
)
714 info
->callbacks
->einfo (_("%X%P: overlay section %A "
715 "is not in cache area.\n"),
717 bfd_set_error (bfd_error_bad_value
);
721 ovl_end
= s
->vma
+ s
->size
;
726 /* Look for overlapping vmas. Any with overlap must be overlays.
727 Count them. Also count the number of overlay regions. */
728 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
731 if (s
->vma
< ovl_end
)
733 asection
*s0
= alloc_sec
[i
- 1];
735 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
738 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
740 alloc_sec
[ovl_index
] = s0
;
741 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
742 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= num_buf
;
745 ovl_end
= s
->vma
+ s
->size
;
747 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
749 alloc_sec
[ovl_index
] = s
;
750 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
751 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
752 if (s0
->vma
!= s
->vma
)
754 info
->callbacks
->einfo (_("%X%P: overlay sections %A "
755 "and %A do not start at the "
758 bfd_set_error (bfd_error_bad_value
);
761 if (ovl_end
< s
->vma
+ s
->size
)
762 ovl_end
= s
->vma
+ s
->size
;
766 ovl_end
= s
->vma
+ s
->size
;
770 htab
->num_overlays
= ovl_index
;
771 htab
->num_buf
= num_buf
;
772 htab
->ovl_sec
= alloc_sec
;
773 ovly_mgr_entry
= "__ovly_load";
774 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
775 ovly_mgr_entry
= "__icache_br_handler";
776 htab
->ovly_load
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
777 FALSE
, FALSE
, FALSE
);
778 if (htab
->params
->ovly_flavour
!= ovly_soft_icache
)
779 htab
->ovly_return
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return",
780 FALSE
, FALSE
, FALSE
);
781 return ovl_index
!= 0;
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

/* SPU instruction opcodes used when emitting overlay stubs.  */
#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
795 /* Return true for all relative and absolute branch instructions.
803 brhnz 00100011 0.. */
806 is_branch (const unsigned char *insn
)
808 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
811 /* Return true for all indirect branch instructions.
819 bihnz 00100101 011 */
822 is_indirect_branch (const unsigned char *insn
)
824 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
827 /* Return true for branch hint instructions.
832 is_hint (const unsigned char *insn
)
834 return (insn
[0] & 0xfc) == 0x10;
837 /* True if INPUT_SECTION might need overlay stubs. */
840 maybe_needs_stubs (asection
*input_section
)
842 /* No stubs for debug sections and suchlike. */
843 if ((input_section
->flags
& SEC_ALLOC
) == 0)
846 /* No stubs for link-once sections that will be discarded. */
847 if (input_section
->output_section
== bfd_abs_section_ptr
)
850 /* Don't create stubs for .eh_frame references. */
851 if (strcmp (input_section
->name
, ".eh_frame") == 0)
873 /* Return non-zero if this reloc symbol should go via an overlay stub.
874 Return 2 if the stub must be in non-overlay area. */
876 static enum _stub_type
877 needs_ovl_stub (struct elf_link_hash_entry
*h
,
878 Elf_Internal_Sym
*sym
,
880 asection
*input_section
,
881 Elf_Internal_Rela
*irela
,
883 struct bfd_link_info
*info
)
885 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
886 enum elf_spu_reloc_type r_type
;
887 unsigned int sym_type
;
888 bfd_boolean branch
, hint
, call
;
889 enum _stub_type ret
= no_stub
;
893 || sym_sec
->output_section
== bfd_abs_section_ptr
894 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
899 /* Ensure no stubs for user supplied overlay manager syms. */
900 if (h
== htab
->ovly_load
|| h
== htab
->ovly_return
)
903 /* setjmp always goes via an overlay stub, because then the return
904 and hence the longjmp goes via __ovly_return. That magically
905 makes setjmp/longjmp between overlays work. */
906 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
907 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
914 sym_type
= ELF_ST_TYPE (sym
->st_info
);
916 r_type
= ELF32_R_TYPE (irela
->r_info
);
920 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
922 if (contents
== NULL
)
925 if (!bfd_get_section_contents (input_section
->owner
,
932 contents
+= irela
->r_offset
;
934 branch
= is_branch (contents
);
935 hint
= is_hint (contents
);
938 call
= (contents
[0] & 0xfd) == 0x31;
940 && sym_type
!= STT_FUNC
943 /* It's common for people to write assembly and forget
944 to give function symbols the right type. Handle
945 calls to such symbols, but warn so that (hopefully)
946 people will fix their code. We need the symbol
947 type to be correct to distinguish function pointer
948 initialisation from other pointer initialisations. */
949 const char *sym_name
;
952 sym_name
= h
->root
.root
.string
;
955 Elf_Internal_Shdr
*symtab_hdr
;
956 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
957 sym_name
= bfd_elf_sym_name (input_section
->owner
,
962 (*_bfd_error_handler
) (_("warning: call to non-function"
963 " symbol %s defined in %B"),
964 sym_sec
->owner
, sym_name
);
970 if ((!branch
&& htab
->params
->ovly_flavour
== ovly_soft_icache
)
971 || (sym_type
!= STT_FUNC
973 && (sym_sec
->flags
& SEC_CODE
) == 0))
976 /* Usually, symbols in non-overlay sections don't need stubs. */
977 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
978 && !htab
->params
->non_overlay_stubs
)
981 /* A reference from some other section to a symbol in an overlay
982 section needs a stub. */
983 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
984 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
986 if (call
|| sym_type
== STT_FUNC
)
990 ret
= br000_ovl_stub
;
994 unsigned int lrlive
= (contents
[1] & 0x70) >> 4;
1000 /* If this insn isn't a branch then we are possibly taking the
1001 address of a function and passing it out somehow. Soft-icache code
1002 always generates inline code to do indirect branches. */
1003 if (!(branch
|| hint
)
1004 && sym_type
== STT_FUNC
1005 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
1012 count_stub (struct spu_link_hash_table
*htab
,
1015 enum _stub_type stub_type
,
1016 struct elf_link_hash_entry
*h
,
1017 const Elf_Internal_Rela
*irela
)
1019 unsigned int ovl
= 0;
1020 struct got_entry
*g
, **head
;
1023 /* If this instruction is a branch or call, we need a stub
1024 for it. One stub per function per overlay.
1025 If it isn't a branch, then we are taking the address of
1026 this function so need a stub in the non-overlay area
1027 for it. One stub per function. */
1028 if (stub_type
!= nonovl_stub
)
1029 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1032 head
= &h
->got
.glist
;
1035 if (elf_local_got_ents (ibfd
) == NULL
)
1037 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
1038 * sizeof (*elf_local_got_ents (ibfd
)));
1039 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
1040 if (elf_local_got_ents (ibfd
) == NULL
)
1043 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1046 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1048 htab
->stub_count
[ovl
] += 1;
1054 addend
= irela
->r_addend
;
1058 struct got_entry
*gnext
;
1060 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1061 if (g
->addend
== addend
&& g
->ovl
== 0)
1066 /* Need a new non-overlay area stub. Zap other stubs. */
1067 for (g
= *head
; g
!= NULL
; g
= gnext
)
1070 if (g
->addend
== addend
)
1072 htab
->stub_count
[g
->ovl
] -= 1;
1080 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1081 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1087 g
= bfd_malloc (sizeof *g
);
1092 g
->stub_addr
= (bfd_vma
) -1;
1096 htab
->stub_count
[ovl
] += 1;
1102 /* Support two sizes of overlay stubs, a slower more compact stub of two
1103 intructions, and a faster stub of four instructions. */
1106 ovl_stub_size (enum _ovly_flavour ovly_flavour
)
1108 return 8 << ovly_flavour
;
1111 /* Two instruction overlay stubs look like:
1113 brsl $75,__ovly_load
1114 .word target_ovl_and_address
1116 ovl_and_address is a word with the overlay number in the top 14 bits
1117 and local store address in the bottom 18 bits.
1119 Four instruction overlay stubs look like:
1123 ila $79,target_address
1126 Software icache stubs are:
1130 .word lrlive_branchlocalstoreaddr;
1131 brasl $75,__icache_br_handler
1136 build_stub (struct bfd_link_info
*info
,
1139 enum _stub_type stub_type
,
1140 struct elf_link_hash_entry
*h
,
1141 const Elf_Internal_Rela
*irela
,
1145 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1146 unsigned int ovl
, dest_ovl
, set_id
;
1147 struct got_entry
*g
, **head
;
1149 bfd_vma addend
, from
, to
, br_dest
, patt
;
1150 unsigned int lrlive
;
1153 if (stub_type
!= nonovl_stub
)
1154 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1157 head
= &h
->got
.glist
;
1159 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1163 addend
= irela
->r_addend
;
1165 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1167 g
= bfd_malloc (sizeof *g
);
1173 g
->br_addr
= (irela
->r_offset
1174 + isec
->output_offset
1175 + isec
->output_section
->vma
);
1181 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1182 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1187 if (g
->ovl
== 0 && ovl
!= 0)
1190 if (g
->stub_addr
!= (bfd_vma
) -1)
1194 sec
= htab
->stub_sec
[ovl
];
1195 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
1196 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
1197 g
->stub_addr
= from
;
1198 to
= (htab
->ovly_load
->root
.u
.def
.value
1199 + htab
->ovly_load
->root
.u
.def
.section
->output_offset
1200 + htab
->ovly_load
->root
.u
.def
.section
->output_section
->vma
);
1202 if (((dest
| to
| from
) & 3) != 0)
1207 dest_ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
1209 switch (htab
->params
->ovly_flavour
)
1212 bfd_put_32 (sec
->owner
, ILA
+ ((dest_ovl
<< 7) & 0x01ffff80) + 78,
1213 sec
->contents
+ sec
->size
);
1214 bfd_put_32 (sec
->owner
, LNOP
,
1215 sec
->contents
+ sec
->size
+ 4);
1216 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
1217 sec
->contents
+ sec
->size
+ 8);
1219 bfd_put_32 (sec
->owner
, BR
+ (((to
- (from
+ 12)) << 5) & 0x007fff80),
1220 sec
->contents
+ sec
->size
+ 12);
1222 bfd_put_32 (sec
->owner
, BRA
+ ((to
<< 5) & 0x007fff80),
1223 sec
->contents
+ sec
->size
+ 12);
1228 bfd_put_32 (sec
->owner
, BRSL
+ (((to
- from
) << 5) & 0x007fff80) + 75,
1229 sec
->contents
+ sec
->size
);
1231 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1232 sec
->contents
+ sec
->size
);
1233 bfd_put_32 (sec
->owner
, (dest
& 0x3ffff) | (dest_ovl
<< 18),
1234 sec
->contents
+ sec
->size
+ 4);
1237 case ovly_soft_icache
:
1239 if (stub_type
== nonovl_stub
)
1241 else if (stub_type
== call_ovl_stub
)
1242 /* A brsl makes lr live and *(*sp+16) is live.
1243 Tail calls have the same liveness. */
1245 else if (!htab
->params
->lrlive_analysis
)
1246 /* Assume stack frame and lr save. */
1248 else if (irela
!= NULL
)
1250 /* Analyse branch instructions. */
1251 struct function_info
*caller
;
1254 caller
= find_function (isec
, irela
->r_offset
, info
);
1255 if (caller
->start
== NULL
)
1256 off
= irela
->r_offset
;
1259 struct function_info
*found
= NULL
;
1261 /* Find the earliest piece of this function that
1262 has frame adjusting instructions. We might
1263 see dynamic frame adjustment (eg. for alloca)
1264 in some later piece, but functions using
1265 alloca always set up a frame earlier. Frame
1266 setup instructions are always in one piece. */
1267 if (caller
->lr_store
!= (bfd_vma
) -1
1268 || caller
->sp_adjust
!= (bfd_vma
) -1)
1270 while (caller
->start
!= NULL
)
1272 caller
= caller
->start
;
1273 if (caller
->lr_store
!= (bfd_vma
) -1
1274 || caller
->sp_adjust
!= (bfd_vma
) -1)
1282 if (off
> caller
->sp_adjust
)
1284 if (off
> caller
->lr_store
)
1285 /* Only *(*sp+16) is live. */
1288 /* If no lr save, then we must be in a
1289 leaf function with a frame.
1290 lr is still live. */
1293 else if (off
> caller
->lr_store
)
1295 /* Between lr save and stack adjust. */
1297 /* This should never happen since prologues won't
1302 /* On entry to function. */
1305 if (stub_type
!= br000_ovl_stub
1306 && lrlive
!= stub_type
- br000_ovl_stub
)
1307 info
->callbacks
->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1308 "from analysis (%u)\n"),
1309 isec
, irela
->r_offset
, lrlive
,
1310 stub_type
- br000_ovl_stub
);
1313 /* If given lrlive info via .brinfo, use it. */
1314 if (stub_type
> br000_ovl_stub
)
1315 lrlive
= stub_type
- br000_ovl_stub
;
1317 /* The branch that uses this stub goes to stub_addr + 12. We'll
1318 set up an xor pattern that can be used by the icache manager
1319 to modify this branch to go directly to its destination. */
1321 br_dest
= g
->stub_addr
;
1324 /* Except in the case of _SPUEAR_ stubs, the branch in
1325 question is the one in the stub itself. */
1326 BFD_ASSERT (stub_type
== nonovl_stub
);
1327 g
->br_addr
= g
->stub_addr
;
1331 bfd_put_32 (sec
->owner
, dest_ovl
- 1,
1332 sec
->contents
+ sec
->size
+ 0);
1333 set_id
= (dest_ovl
- 1) >> htab
->num_lines_log2
;
1334 bfd_put_32 (sec
->owner
, (set_id
<< 18) | (dest
& 0x3ffff),
1335 sec
->contents
+ sec
->size
+ 4);
1336 bfd_put_32 (sec
->owner
, (lrlive
<< 29) | (g
->br_addr
& 0x3ffff),
1337 sec
->contents
+ sec
->size
+ 8);
1338 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1339 sec
->contents
+ sec
->size
+ 12);
1340 patt
= dest
^ br_dest
;
1341 if (irela
!= NULL
&& ELF32_R_TYPE (irela
->r_info
) == R_SPU_REL16
)
1342 patt
= (dest
- g
->br_addr
) ^ (br_dest
- g
->br_addr
);
1343 bfd_put_32 (sec
->owner
, (patt
<< 5) & 0x007fff80,
1344 sec
->contents
+ sec
->size
+ 16 + (g
->br_addr
& 0xf));
1346 /* Extra space for linked list entries. */
1353 sec
->size
+= ovl_stub_size (htab
->params
->ovly_flavour
);
1355 if (htab
->params
->emit_stub_syms
)
1361 len
= 8 + sizeof (".ovl_call.") - 1;
1363 len
+= strlen (h
->root
.root
.string
);
1368 add
= (int) irela
->r_addend
& 0xffffffff;
1371 name
= bfd_malloc (len
);
1375 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1377 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1379 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1380 dest_sec
->id
& 0xffffffff,
1381 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1383 sprintf (name
+ len
- 9, "+%x", add
);
1385 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1389 if (h
->root
.type
== bfd_link_hash_new
)
1391 h
->root
.type
= bfd_link_hash_defined
;
1392 h
->root
.u
.def
.section
= sec
;
1393 h
->size
= ovl_stub_size (htab
->params
->ovly_flavour
);
1394 h
->root
.u
.def
.value
= sec
->size
- h
->size
;
1398 h
->ref_regular_nonweak
= 1;
1399 h
->forced_local
= 1;
1407 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1411 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1413 /* Symbols starting with _SPUEAR_ need a stub because they may be
1414 invoked by the PPU. */
1415 struct bfd_link_info
*info
= inf
;
1416 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1419 if ((h
->root
.type
== bfd_link_hash_defined
1420 || h
->root
.type
== bfd_link_hash_defweak
)
1422 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1423 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1424 && sym_sec
->output_section
!= bfd_abs_section_ptr
1425 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1426 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1427 || htab
->params
->non_overlay_stubs
))
1429 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1436 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1438 /* Symbols starting with _SPUEAR_ need a stub because they may be
1439 invoked by the PPU. */
1440 struct bfd_link_info
*info
= inf
;
1441 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1444 if ((h
->root
.type
== bfd_link_hash_defined
1445 || h
->root
.type
== bfd_link_hash_defweak
)
1447 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1448 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1449 && sym_sec
->output_section
!= bfd_abs_section_ptr
1450 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1451 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1452 || htab
->params
->non_overlay_stubs
))
1454 return build_stub (info
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1455 h
->root
.u
.def
.value
, sym_sec
);
1461 /* Size or build stubs. */
1464 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1466 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1469 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1471 extern const bfd_target bfd_elf32_spu_vec
;
1472 Elf_Internal_Shdr
*symtab_hdr
;
1474 Elf_Internal_Sym
*local_syms
= NULL
;
1476 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1479 /* We'll need the symbol table in a second. */
1480 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1481 if (symtab_hdr
->sh_info
== 0)
1484 /* Walk over each section attached to the input bfd. */
1485 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1487 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1489 /* If there aren't any relocs, then there's nothing more to do. */
1490 if ((isec
->flags
& SEC_RELOC
) == 0
1491 || isec
->reloc_count
== 0)
1494 if (!maybe_needs_stubs (isec
))
1497 /* Get the relocs. */
1498 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1500 if (internal_relocs
== NULL
)
1501 goto error_ret_free_local
;
1503 /* Now examine each relocation. */
1504 irela
= internal_relocs
;
1505 irelaend
= irela
+ isec
->reloc_count
;
1506 for (; irela
< irelaend
; irela
++)
1508 enum elf_spu_reloc_type r_type
;
1509 unsigned int r_indx
;
1511 Elf_Internal_Sym
*sym
;
1512 struct elf_link_hash_entry
*h
;
1513 enum _stub_type stub_type
;
1515 r_type
= ELF32_R_TYPE (irela
->r_info
);
1516 r_indx
= ELF32_R_SYM (irela
->r_info
);
1518 if (r_type
>= R_SPU_max
)
1520 bfd_set_error (bfd_error_bad_value
);
1521 error_ret_free_internal
:
1522 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1523 free (internal_relocs
);
1524 error_ret_free_local
:
1525 if (local_syms
!= NULL
1526 && (symtab_hdr
->contents
1527 != (unsigned char *) local_syms
))
1532 /* Determine the reloc target section. */
1533 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1534 goto error_ret_free_internal
;
1536 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1538 if (stub_type
== no_stub
)
1540 else if (stub_type
== stub_error
)
1541 goto error_ret_free_internal
;
1543 if (htab
->stub_count
== NULL
)
1546 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1547 htab
->stub_count
= bfd_zmalloc (amt
);
1548 if (htab
->stub_count
== NULL
)
1549 goto error_ret_free_internal
;
1554 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1555 goto error_ret_free_internal
;
1562 dest
= h
->root
.u
.def
.value
;
1564 dest
= sym
->st_value
;
1565 dest
+= irela
->r_addend
;
1566 if (!build_stub (info
, ibfd
, isec
, stub_type
, h
, irela
,
1568 goto error_ret_free_internal
;
1572 /* We're done with the internal relocs, free them. */
1573 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1574 free (internal_relocs
);
1577 if (local_syms
!= NULL
1578 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1580 if (!info
->keep_memory
)
1583 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1590 /* Allocate space for overlay call and return stubs. */
1593 spu_elf_size_stubs (struct bfd_link_info
*info
)
1595 struct spu_link_hash_table
*htab
;
1603 if (!process_stubs (info
, FALSE
))
1606 htab
= spu_hash_table (info
);
1607 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1611 if (htab
->stub_count
== NULL
)
1614 ibfd
= info
->input_bfds
;
1615 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1616 htab
->stub_sec
= bfd_zmalloc (amt
);
1617 if (htab
->stub_sec
== NULL
)
1620 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1621 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1622 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1623 htab
->stub_sec
[0] = stub
;
1625 || !bfd_set_section_alignment (ibfd
, stub
,
1626 htab
->params
->ovly_flavour
+ 3))
1628 stub
->size
= htab
->stub_count
[0] * ovl_stub_size (htab
->params
->ovly_flavour
);
1629 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1630 /* Extra space for linked list entries. */
1631 stub
->size
+= htab
->stub_count
[0] * 16;
1632 (*htab
->params
->place_spu_section
) (stub
, NULL
, ".text");
1634 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1636 asection
*osec
= htab
->ovl_sec
[i
];
1637 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1638 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1639 htab
->stub_sec
[ovl
] = stub
;
1641 || !bfd_set_section_alignment (ibfd
, stub
,
1642 htab
->params
->ovly_flavour
+ 3))
1644 stub
->size
= htab
->stub_count
[ovl
] * ovl_stub_size (htab
->params
->ovly_flavour
);
1645 (*htab
->params
->place_spu_section
) (stub
, osec
, NULL
);
1648 flags
= (SEC_ALLOC
| SEC_LOAD
1649 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1650 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1651 if (htab
->ovtab
== NULL
1652 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1655 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1657 /* Space for icache manager tables.
1658 a) Tag array, one quadword per cache line.
1659 b) Linked list elements, max_branch per line quadwords.
1660 c) Indirect branch descriptors, 8 quadwords. */
1661 htab
->ovtab
->size
= 16 * (((1 + htab
->params
->max_branch
)
1662 << htab
->num_lines_log2
)
1665 htab
->init
= bfd_make_section_anyway_with_flags (ibfd
, ".ovini", flags
);
1666 if (htab
->init
== NULL
1667 || !bfd_set_section_alignment (ibfd
, htab
->init
, 4))
1670 htab
->init
->size
= 16;
1671 (*htab
->params
->place_spu_section
) (htab
->init
, NULL
, ".ovl.init");
1675 /* htab->ovtab consists of two arrays.
1685 . } _ovly_buf_table[];
1688 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1691 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1692 ovout
= ".data.icache";
1693 (*htab
->params
->place_spu_section
) (htab
->ovtab
, NULL
, ovout
);
1695 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1696 if (htab
->toe
== NULL
1697 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1699 htab
->toe
->size
= htab
->params
->ovly_flavour
== ovly_soft_icache
? 256 : 16;
1700 (*htab
->params
->place_spu_section
) (htab
->toe
, NULL
, ".toe");
1705 /* Functions to handle embedded spu_ovl.o object. */
1708 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1714 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1720 struct _ovl_stream
*os
;
1724 os
= (struct _ovl_stream
*) stream
;
1725 max
= (const char *) os
->end
- (const char *) os
->start
;
1727 if ((ufile_ptr
) offset
>= max
)
1731 if (count
> max
- offset
)
1732 count
= max
- offset
;
1734 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1739 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1741 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1748 return *ovl_bfd
!= NULL
;
1752 overlay_index (asection
*sec
)
1755 || sec
->output_section
== bfd_abs_section_ptr
)
1757 return spu_elf_section_data (sec
->output_section
)->u
.o
.ovl_index
;
1760 /* Define an STT_OBJECT symbol. */
1762 static struct elf_link_hash_entry
*
1763 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1765 struct elf_link_hash_entry
*h
;
1767 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1771 if (h
->root
.type
!= bfd_link_hash_defined
1774 h
->root
.type
= bfd_link_hash_defined
;
1775 h
->root
.u
.def
.section
= htab
->ovtab
;
1776 h
->type
= STT_OBJECT
;
1779 h
->ref_regular_nonweak
= 1;
1782 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1784 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1785 h
->root
.u
.def
.section
->owner
,
1786 h
->root
.root
.string
);
1787 bfd_set_error (bfd_error_bad_value
);
1792 (*_bfd_error_handler
) (_("you are not allowed to define %s in a script"),
1793 h
->root
.root
.string
);
1794 bfd_set_error (bfd_error_bad_value
);
1801 /* Fill in all stubs and the overlay tables. */
1804 spu_elf_build_stubs (struct bfd_link_info
*info
)
1806 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1807 struct elf_link_hash_entry
*h
;
1813 if (htab
->stub_count
== NULL
)
1816 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1817 if (htab
->stub_sec
[i
]->size
!= 0)
1819 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1820 htab
->stub_sec
[i
]->size
);
1821 if (htab
->stub_sec
[i
]->contents
== NULL
)
1823 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1824 htab
->stub_sec
[i
]->size
= 0;
1827 h
= htab
->ovly_load
;
1830 const char *ovly_mgr_entry
= "__ovly_load";
1832 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1833 ovly_mgr_entry
= "__icache_br_handler";
1834 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
1835 FALSE
, FALSE
, FALSE
);
1836 htab
->ovly_load
= h
;
1838 BFD_ASSERT (h
!= NULL
1839 && (h
->root
.type
== bfd_link_hash_defined
1840 || h
->root
.type
== bfd_link_hash_defweak
)
1843 s
= h
->root
.u
.def
.section
->output_section
;
1844 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1846 (*_bfd_error_handler
) (_("%s in overlay section"),
1847 h
->root
.root
.string
);
1848 bfd_set_error (bfd_error_bad_value
);
1852 h
= htab
->ovly_return
;
1853 if (h
== NULL
&& htab
->params
->ovly_flavour
!= ovly_soft_icache
)
1855 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return",
1856 FALSE
, FALSE
, FALSE
);
1857 htab
->ovly_return
= h
;
1860 /* Fill in all the stubs. */
1861 process_stubs (info
, TRUE
);
1862 if (!htab
->stub_err
)
1863 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1867 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1868 bfd_set_error (bfd_error_bad_value
);
1872 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1874 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1876 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1877 bfd_set_error (bfd_error_bad_value
);
1880 htab
->stub_sec
[i
]->rawsize
= 0;
1883 if (htab
->ovtab
== NULL
|| htab
->ovtab
->size
== 0)
1886 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1887 if (htab
->ovtab
->contents
== NULL
)
1890 p
= htab
->ovtab
->contents
;
1891 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1893 #define BI_HANDLER "__icache_ptr_handler0"
1894 char name
[sizeof (BI_HANDLER
)];
1895 bfd_vma off
, icache_base
, linklist
, bihand
;
1897 h
= define_ovtab_symbol (htab
, "__icache_tagbase");
1900 h
->root
.u
.def
.value
= 0;
1901 h
->size
= 16 << htab
->num_lines_log2
;
1903 icache_base
= htab
->ovl_sec
[0]->vma
;
1904 linklist
= (htab
->ovtab
->output_section
->vma
1905 + htab
->ovtab
->output_offset
1907 for (i
= 0; i
< htab
->params
->num_lines
; i
++)
1909 bfd_vma line_end
= icache_base
+ ((i
+ 1) << htab
->line_size_log2
);
1910 bfd_vma stub_base
= line_end
- htab
->params
->max_branch
* 32;
1911 bfd_vma link_elem
= linklist
+ i
* htab
->params
->max_branch
* 16;
1912 bfd_vma locator
= link_elem
- stub_base
/ 2;
1914 bfd_put_32 (htab
->ovtab
->owner
, locator
, p
+ 4);
1915 bfd_put_16 (htab
->ovtab
->owner
, link_elem
, p
+ 8);
1916 bfd_put_16 (htab
->ovtab
->owner
, link_elem
, p
+ 10);
1917 bfd_put_16 (htab
->ovtab
->owner
, link_elem
, p
+ 12);
1918 bfd_put_16 (htab
->ovtab
->owner
, link_elem
, p
+ 14);
1922 h
= define_ovtab_symbol (htab
, "__icache_linked_list");
1925 h
->root
.u
.def
.value
= off
;
1926 h
->size
= htab
->params
->max_branch
<< (htab
->num_lines_log2
+ 4);
1930 h
= elf_link_hash_lookup (&htab
->elf
, "__icache_bi_handler",
1931 FALSE
, FALSE
, FALSE
);
1934 && (h
->root
.type
== bfd_link_hash_defined
1935 || h
->root
.type
== bfd_link_hash_defweak
)
1937 bihand
= (h
->root
.u
.def
.value
1938 + h
->root
.u
.def
.section
->output_offset
1939 + h
->root
.u
.def
.section
->output_section
->vma
);
1940 memcpy (name
, BI_HANDLER
, sizeof (BI_HANDLER
));
1941 for (i
= 0; i
< 8; i
++)
1943 name
[sizeof (BI_HANDLER
) - 2] = '0' + i
;
1944 h
= define_ovtab_symbol (htab
, name
);
1947 h
->root
.u
.def
.value
= off
;
1949 bfd_put_32 (htab
->ovtab
->owner
, bihand
, p
);
1950 bfd_put_32 (htab
->ovtab
->owner
, i
<< 28, p
+ 8);
1955 h
= define_ovtab_symbol (htab
, "__icache_base");
1958 h
->root
.u
.def
.value
= htab
->ovl_sec
[0]->vma
;
1959 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1960 h
->size
= htab
->num_buf
<< htab
->line_size_log2
;
1962 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_linesize");
1965 h
->root
.u
.def
.value
= -htab
->line_size_log2
;
1966 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1968 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
1970 htab
->init
->contents
= bfd_zalloc (htab
->init
->owner
,
1972 if (htab
->init
->contents
== NULL
)
1975 h
= define_ovtab_symbol (htab
, "__icache_fileoff");
1978 h
->root
.u
.def
.value
= 0;
1979 h
->root
.u
.def
.section
= htab
->init
;
1985 /* Write out _ovly_table. */
1986 /* set low bit of .size to mark non-overlay area as present. */
1988 obfd
= htab
->ovtab
->output_section
->owner
;
1989 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1991 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
1995 unsigned long off
= ovl_index
* 16;
1996 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
1998 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1999 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16,
2001 /* file_off written later in spu_elf_modify_program_headers. */
2002 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
2006 h
= define_ovtab_symbol (htab
, "_ovly_table");
2009 h
->root
.u
.def
.value
= 16;
2010 h
->size
= htab
->num_overlays
* 16;
2012 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
2015 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2018 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
2021 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2022 h
->size
= htab
->num_buf
* 4;
2024 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
2027 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
2031 h
= define_ovtab_symbol (htab
, "_EAR_");
2034 h
->root
.u
.def
.section
= htab
->toe
;
2035 h
->root
.u
.def
.value
= 0;
2036 h
->size
= htab
->params
->ovly_flavour
== ovly_soft_icache
? 16 * 16 : 16;
2041 /* Check that all loadable section VMAs lie in the range
2042 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2045 spu_elf_check_vma (struct bfd_link_info
*info
)
2047 struct elf_segment_map
*m
;
2049 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2050 bfd
*abfd
= info
->output_bfd
;
2051 bfd_vma hi
= htab
->params
->local_store_hi
;
2052 bfd_vma lo
= htab
->params
->local_store_lo
;
2054 htab
->local_store
= hi
+ 1 - lo
;
2056 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
2057 if (m
->p_type
== PT_LOAD
)
2058 for (i
= 0; i
< m
->count
; i
++)
2059 if (m
->sections
[i
]->size
!= 0
2060 && (m
->sections
[i
]->vma
< lo
2061 || m
->sections
[i
]->vma
> hi
2062 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
2063 return m
->sections
[i
];
2065 /* No need for overlays if it all fits. */
2066 if (htab
->params
->ovly_flavour
!= ovly_soft_icache
)
2067 htab
->params
->auto_overlay
= 0;
2071 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2072 Search for stack adjusting insns, and return the sp delta.
2073 If a store of lr is found save the instruction offset to *LR_STORE.
2074 If a stack adjusting instruction is found, save that offset to
2078 find_function_stack_adjust (asection
*sec
,
2085 memset (reg
, 0, sizeof (reg
));
2086 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
2088 unsigned char buf
[4];
2092 /* Assume no relocs on stack adjusing insns. */
2093 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
2097 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
2099 if (buf
[0] == 0x24 /* stqd */)
2101 if (rt
== 0 /* lr */ && ra
== 1 /* sp */)
2106 /* Partly decoded immediate field. */
2107 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
2109 if (buf
[0] == 0x1c /* ai */)
2112 imm
= (imm
^ 0x200) - 0x200;
2113 reg
[rt
] = reg
[ra
] + imm
;
2115 if (rt
== 1 /* sp */)
2119 *sp_adjust
= offset
;
2123 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
2125 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2127 reg
[rt
] = reg
[ra
] + reg
[rb
];
2132 *sp_adjust
= offset
;
2136 else if (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */)
2138 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2140 reg
[rt
] = reg
[rb
] - reg
[ra
];
2145 *sp_adjust
= offset
;
2149 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2151 if (buf
[0] >= 0x42 /* ila */)
2152 imm
|= (buf
[0] & 1) << 17;
2157 if (buf
[0] == 0x40 /* il */)
2159 if ((buf
[1] & 0x80) == 0)
2161 imm
= (imm
^ 0x8000) - 0x8000;
2163 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
2169 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
2171 reg
[rt
] |= imm
& 0xffff;
2174 else if (buf
[0] == 0x04 /* ori */)
2177 imm
= (imm
^ 0x200) - 0x200;
2178 reg
[rt
] = reg
[ra
] | imm
;
2181 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
2183 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
2184 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
2185 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
2186 | ((imm
& 0x1000) ? 0x000000ff : 0));
2189 else if (buf
[0] == 0x16 /* andbi */)
2195 reg
[rt
] = reg
[ra
] & imm
;
2198 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
2200 /* Used in pic reg load. Say rt is trashed. Won't be used
2201 in stack adjust, but we need to continue past this branch. */
2205 else if (is_branch (buf
) || is_indirect_branch (buf
))
2206 /* If we hit a branch then we must be out of the prologue. */
2213 /* qsort predicate to sort symbols by section and value. */
2215 static Elf_Internal_Sym
*sort_syms_syms
;
2216 static asection
**sort_syms_psecs
;
2219 sort_syms (const void *a
, const void *b
)
2221 Elf_Internal_Sym
*const *s1
= a
;
2222 Elf_Internal_Sym
*const *s2
= b
;
2223 asection
*sec1
,*sec2
;
2224 bfd_signed_vma delta
;
2226 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
2227 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
2230 return sec1
->index
- sec2
->index
;
2232 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
2234 return delta
< 0 ? -1 : 1;
2236 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
2238 return delta
< 0 ? -1 : 1;
2240 return *s1
< *s2
? -1 : 1;
2243 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2244 entries for section SEC. */
2246 static struct spu_elf_stack_info
*
2247 alloc_stack_info (asection
*sec
, int max_fun
)
2249 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2252 amt
= sizeof (struct spu_elf_stack_info
);
2253 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
2254 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
2255 if (sec_data
->u
.i
.stack_info
!= NULL
)
2256 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
2257 return sec_data
->u
.i
.stack_info
;
2260 /* Add a new struct function_info describing a (part of a) function
2261 starting at SYM_H. Keep the array sorted by address. */
2263 static struct function_info
*
2264 maybe_insert_function (asection
*sec
,
2267 bfd_boolean is_func
)
2269 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2270 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2276 sinfo
= alloc_stack_info (sec
, 20);
2283 Elf_Internal_Sym
*sym
= sym_h
;
2284 off
= sym
->st_value
;
2285 size
= sym
->st_size
;
2289 struct elf_link_hash_entry
*h
= sym_h
;
2290 off
= h
->root
.u
.def
.value
;
2294 for (i
= sinfo
->num_fun
; --i
>= 0; )
2295 if (sinfo
->fun
[i
].lo
<= off
)
2300 /* Don't add another entry for an alias, but do update some
2302 if (sinfo
->fun
[i
].lo
== off
)
2304 /* Prefer globals over local syms. */
2305 if (global
&& !sinfo
->fun
[i
].global
)
2307 sinfo
->fun
[i
].global
= TRUE
;
2308 sinfo
->fun
[i
].u
.h
= sym_h
;
2311 sinfo
->fun
[i
].is_func
= TRUE
;
2312 return &sinfo
->fun
[i
];
2314 /* Ignore a zero-size symbol inside an existing function. */
2315 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
2316 return &sinfo
->fun
[i
];
2319 if (sinfo
->num_fun
>= sinfo
->max_fun
)
2321 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
2322 bfd_size_type old
= amt
;
2324 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2325 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
2326 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2327 sinfo
= bfd_realloc (sinfo
, amt
);
2330 memset ((char *) sinfo
+ old
, 0, amt
- old
);
2331 sec_data
->u
.i
.stack_info
= sinfo
;
2334 if (++i
< sinfo
->num_fun
)
2335 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
2336 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
2337 sinfo
->fun
[i
].is_func
= is_func
;
2338 sinfo
->fun
[i
].global
= global
;
2339 sinfo
->fun
[i
].sec
= sec
;
2341 sinfo
->fun
[i
].u
.h
= sym_h
;
2343 sinfo
->fun
[i
].u
.sym
= sym_h
;
2344 sinfo
->fun
[i
].lo
= off
;
2345 sinfo
->fun
[i
].hi
= off
+ size
;
2346 sinfo
->fun
[i
].lr_store
= -1;
2347 sinfo
->fun
[i
].sp_adjust
= -1;
2348 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
,
2349 &sinfo
->fun
[i
].lr_store
,
2350 &sinfo
->fun
[i
].sp_adjust
);
2351 sinfo
->num_fun
+= 1;
2352 return &sinfo
->fun
[i
];
2355 /* Return the name of FUN. */
2358 func_name (struct function_info
*fun
)
2362 Elf_Internal_Shdr
*symtab_hdr
;
2364 while (fun
->start
!= NULL
)
2368 return fun
->u
.h
->root
.root
.string
;
2371 if (fun
->u
.sym
->st_name
== 0)
2373 size_t len
= strlen (sec
->name
);
2374 char *name
= bfd_malloc (len
+ 10);
2377 sprintf (name
, "%s+%lx", sec
->name
,
2378 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
2382 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2383 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
2386 /* Read the instruction at OFF in SEC. Return true iff the instruction
2387 is a nop, lnop, or stop 0 (all zero insn). */
2390 is_nop (asection
*sec
, bfd_vma off
)
2392 unsigned char insn
[4];
2394 if (off
+ 4 > sec
->size
2395 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
2397 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
2399 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
2404 /* Extend the range of FUN to cover nop padding up to LIMIT.
2405 Return TRUE iff some instruction other than a NOP was found. */
2408 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
2410 bfd_vma off
= (fun
->hi
+ 3) & -4;
2412 while (off
< limit
&& is_nop (fun
->sec
, off
))
2423 /* Check and fix overlapping function ranges. Return TRUE iff there
2424 are gaps in the current info we have about functions in SEC. */
2427 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
2429 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2430 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2432 bfd_boolean gaps
= FALSE
;
2437 for (i
= 1; i
< sinfo
->num_fun
; i
++)
2438 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
2440 /* Fix overlapping symbols. */
2441 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
2442 const char *f2
= func_name (&sinfo
->fun
[i
]);
2444 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
2445 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
2447 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
2450 if (sinfo
->num_fun
== 0)
2454 if (sinfo
->fun
[0].lo
!= 0)
2456 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
2458 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
2460 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
2461 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
2463 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2469 /* Search current function info for a function that contains address
2470 OFFSET in section SEC. */
2472 static struct function_info
*
2473 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2475 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2476 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2480 hi
= sinfo
->num_fun
;
2483 mid
= (lo
+ hi
) / 2;
2484 if (offset
< sinfo
->fun
[mid
].lo
)
2486 else if (offset
>= sinfo
->fun
[mid
].hi
)
2489 return &sinfo
->fun
[mid
];
2491 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2493 bfd_set_error (bfd_error_bad_value
);
2497 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2498 if CALLEE was new. If this function return FALSE, CALLEE should
2502 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2504 struct call_info
**pp
, *p
;
2506 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2507 if (p
->fun
== callee
->fun
)
2509 /* Tail calls use less stack than normal calls. Retain entry
2510 for normal call over one for tail call. */
2511 p
->is_tail
&= callee
->is_tail
;
2514 p
->fun
->start
= NULL
;
2515 p
->fun
->is_func
= TRUE
;
2518 /* Reorder list so most recent call is first. */
2520 p
->next
= caller
->call_list
;
2521 caller
->call_list
= p
;
2524 callee
->next
= caller
->call_list
;
2526 caller
->call_list
= callee
;
2530 /* Copy CALL and insert the copy into CALLER. */
2533 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2535 struct call_info
*callee
;
2536 callee
= bfd_malloc (sizeof (*callee
));
2540 if (!insert_callee (caller
, callee
))
2545 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2546 overlay stub sections. */
2549 interesting_section (asection
*s
)
2551 return (s
->output_section
!= bfd_abs_section_ptr
2552 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2553 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2557 /* Rummage through the relocs for SEC, looking for function calls.
2558 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2559 mark destination symbols on calls as being functions. Also
2560 look at branches, which may be tail calls or go to hot/cold
2561 section part of same function. */
2564 mark_functions_via_relocs (asection
*sec
,
2565 struct bfd_link_info
*info
,
2568 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2569 Elf_Internal_Shdr
*symtab_hdr
;
2571 unsigned int priority
= 0;
2572 static bfd_boolean warned
;
2574 if (!interesting_section (sec
)
2575 || sec
->reloc_count
== 0)
2578 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2580 if (internal_relocs
== NULL
)
2583 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2584 psyms
= &symtab_hdr
->contents
;
2585 irela
= internal_relocs
;
2586 irelaend
= irela
+ sec
->reloc_count
;
2587 for (; irela
< irelaend
; irela
++)
2589 enum elf_spu_reloc_type r_type
;
2590 unsigned int r_indx
;
2592 Elf_Internal_Sym
*sym
;
2593 struct elf_link_hash_entry
*h
;
2595 bfd_boolean reject
, is_call
;
2596 struct function_info
*caller
;
2597 struct call_info
*callee
;
2600 r_type
= ELF32_R_TYPE (irela
->r_info
);
2601 if (r_type
!= R_SPU_REL16
2602 && r_type
!= R_SPU_ADDR16
)
2605 if (!(call_tree
&& spu_hash_table (info
)->params
->auto_overlay
))
2609 r_indx
= ELF32_R_SYM (irela
->r_info
);
2610 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2614 || sym_sec
->output_section
== bfd_abs_section_ptr
)
2620 unsigned char insn
[4];
2622 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2623 irela
->r_offset
, 4))
2625 if (is_branch (insn
))
2627 is_call
= (insn
[0] & 0xfd) == 0x31;
2628 priority
= insn
[1] & 0x0f;
2630 priority
|= insn
[2];
2632 priority
|= insn
[3];
2634 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2635 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2638 info
->callbacks
->einfo
2639 (_("%B(%A+0x%v): call to non-code section"
2640 " %B(%A), analysis incomplete\n"),
2641 sec
->owner
, sec
, irela
->r_offset
,
2642 sym_sec
->owner
, sym_sec
);
2650 if (!(call_tree
&& spu_hash_table (info
)->params
->auto_overlay
)
2658 /* For --auto-overlay, count possible stubs we need for
2659 function pointer references. */
2660 unsigned int sym_type
;
2664 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2665 if (sym_type
== STT_FUNC
)
2666 spu_hash_table (info
)->non_ovly_stub
+= 1;
2671 val
= h
->root
.u
.def
.value
;
2673 val
= sym
->st_value
;
2674 val
+= irela
->r_addend
;
2678 struct function_info
*fun
;
2680 if (irela
->r_addend
!= 0)
2682 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2685 fake
->st_value
= val
;
2687 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2691 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2693 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2696 if (irela
->r_addend
!= 0
2697 && fun
->u
.sym
!= sym
)
2702 caller
= find_function (sec
, irela
->r_offset
, info
);
2705 callee
= bfd_malloc (sizeof *callee
);
2709 callee
->fun
= find_function (sym_sec
, val
, info
);
2710 if (callee
->fun
== NULL
)
2712 callee
->is_tail
= !is_call
;
2713 callee
->is_pasted
= FALSE
;
2714 callee
->priority
= priority
;
2716 if (callee
->fun
->last_caller
!= sec
)
2718 callee
->fun
->last_caller
= sec
;
2719 callee
->fun
->call_count
+= 1;
2721 if (!insert_callee (caller
, callee
))
2724 && !callee
->fun
->is_func
2725 && callee
->fun
->stack
== 0)
2727 /* This is either a tail call or a branch from one part of
2728 the function to another, ie. hot/cold section. If the
2729 destination has been called by some other function then
2730 it is a separate function. We also assume that functions
2731 are not split across input files. */
2732 if (sec
->owner
!= sym_sec
->owner
)
2734 callee
->fun
->start
= NULL
;
2735 callee
->fun
->is_func
= TRUE
;
2737 else if (callee
->fun
->start
== NULL
)
2738 callee
->fun
->start
= caller
;
2741 struct function_info
*callee_start
;
2742 struct function_info
*caller_start
;
2743 callee_start
= callee
->fun
;
2744 while (callee_start
->start
)
2745 callee_start
= callee_start
->start
;
2746 caller_start
= caller
;
2747 while (caller_start
->start
)
2748 caller_start
= caller_start
->start
;
2749 if (caller_start
!= callee_start
)
2751 callee
->fun
->start
= NULL
;
2752 callee
->fun
->is_func
= TRUE
;
2761 /* Handle something like .init or .fini, which has a piece of a function.
2762 These sections are pasted together to form a single function. */
2765 pasted_function (asection
*sec
)
2767 struct bfd_link_order
*l
;
2768 struct _spu_elf_section_data
*sec_data
;
2769 struct spu_elf_stack_info
*sinfo
;
2770 Elf_Internal_Sym
*fake
;
2771 struct function_info
*fun
, *fun_start
;
2773 fake
= bfd_zmalloc (sizeof (*fake
));
2777 fake
->st_size
= sec
->size
;
2779 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2780 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2784 /* Find a function immediately preceding this section. */
2786 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2788 if (l
->u
.indirect
.section
== sec
)
2790 if (fun_start
!= NULL
)
2792 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2796 fun
->start
= fun_start
;
2798 callee
->is_tail
= TRUE
;
2799 callee
->is_pasted
= TRUE
;
2801 if (!insert_callee (fun_start
, callee
))
2807 if (l
->type
== bfd_indirect_link_order
2808 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2809 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2810 && sinfo
->num_fun
!= 0)
2811 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2814 /* Don't return an error if we did not find a function preceding this
2815 section. The section may have incorrect flags. */
2819 /* Map address ranges in code sections to functions. */
2822 discover_functions (struct bfd_link_info
*info
)
2826 Elf_Internal_Sym
***psym_arr
;
2827 asection
***sec_arr
;
2828 bfd_boolean gaps
= FALSE
;
2831 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2834 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2835 if (psym_arr
== NULL
)
2837 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2838 if (sec_arr
== NULL
)
2841 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2843 ibfd
= ibfd
->link_next
, bfd_idx
++)
2845 extern const bfd_target bfd_elf32_spu_vec
;
2846 Elf_Internal_Shdr
*symtab_hdr
;
2849 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2850 asection
**psecs
, **p
;
2852 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2855 /* Read all the symbols. */
2856 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2857 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2861 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2862 if (interesting_section (sec
))
2870 if (symtab_hdr
->contents
!= NULL
)
2872 /* Don't use cached symbols since the generic ELF linker
2873 code only reads local symbols, and we need globals too. */
2874 free (symtab_hdr
->contents
);
2875 symtab_hdr
->contents
= NULL
;
2877 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2879 symtab_hdr
->contents
= (void *) syms
;
2883 /* Select defined function symbols that are going to be output. */
2884 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2887 psym_arr
[bfd_idx
] = psyms
;
2888 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2891 sec_arr
[bfd_idx
] = psecs
;
2892 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2893 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2894 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2898 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2899 if (s
!= NULL
&& interesting_section (s
))
2902 symcount
= psy
- psyms
;
2905 /* Sort them by section and offset within section. */
2906 sort_syms_syms
= syms
;
2907 sort_syms_psecs
= psecs
;
2908 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2910 /* Now inspect the function symbols. */
2911 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2913 asection
*s
= psecs
[*psy
- syms
];
2914 Elf_Internal_Sym
**psy2
;
2916 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2917 if (psecs
[*psy2
- syms
] != s
)
2920 if (!alloc_stack_info (s
, psy2
- psy
))
2925 /* First install info about properly typed and sized functions.
2926 In an ideal world this will cover all code sections, except
2927 when partitioning functions into hot and cold sections,
2928 and the horrible pasted together .init and .fini functions. */
2929 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2932 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2934 asection
*s
= psecs
[sy
- syms
];
2935 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2940 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2941 if (interesting_section (sec
))
2942 gaps
|= check_function_ranges (sec
, info
);
2947 /* See if we can discover more function symbols by looking at
2949 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2951 ibfd
= ibfd
->link_next
, bfd_idx
++)
2955 if (psym_arr
[bfd_idx
] == NULL
)
2958 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2959 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2963 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2965 ibfd
= ibfd
->link_next
, bfd_idx
++)
2967 Elf_Internal_Shdr
*symtab_hdr
;
2969 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2972 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2975 psecs
= sec_arr
[bfd_idx
];
2977 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2978 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2981 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2982 if (interesting_section (sec
))
2983 gaps
|= check_function_ranges (sec
, info
);
2987 /* Finally, install all globals. */
2988 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2992 s
= psecs
[sy
- syms
];
2994 /* Global syms might be improperly typed functions. */
2995 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2996 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2998 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
3004 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3006 extern const bfd_target bfd_elf32_spu_vec
;
3009 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3012 /* Some of the symbols we've installed as marking the
3013 beginning of functions may have a size of zero. Extend
3014 the range of such functions to the beginning of the
3015 next symbol of interest. */
3016 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3017 if (interesting_section (sec
))
3019 struct _spu_elf_section_data
*sec_data
;
3020 struct spu_elf_stack_info
*sinfo
;
3022 sec_data
= spu_elf_section_data (sec
);
3023 sinfo
= sec_data
->u
.i
.stack_info
;
3024 if (sinfo
!= NULL
&& sinfo
->num_fun
!= 0)
3027 bfd_vma hi
= sec
->size
;
3029 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
3031 sinfo
->fun
[fun_idx
].hi
= hi
;
3032 hi
= sinfo
->fun
[fun_idx
].lo
;
3035 sinfo
->fun
[0].lo
= 0;
3037 /* No symbols in this section. Must be .init or .fini
3038 or something similar. */
3039 else if (!pasted_function (sec
))
3045 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3047 ibfd
= ibfd
->link_next
, bfd_idx
++)
3049 if (psym_arr
[bfd_idx
] == NULL
)
3052 free (psym_arr
[bfd_idx
]);
3053 free (sec_arr
[bfd_idx
]);
3062 /* Iterate over all function_info we have collected, calling DOIT on
3063 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3067 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
3068 struct bfd_link_info
*,
3070 struct bfd_link_info
*info
,
3076 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3078 extern const bfd_target bfd_elf32_spu_vec
;
3081 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3084 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3086 struct _spu_elf_section_data
*sec_data
;
3087 struct spu_elf_stack_info
*sinfo
;
3089 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3090 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3093 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3094 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
3095 if (!doit (&sinfo
->fun
[i
], info
, param
))
3103 /* Transfer call info attached to struct function_info entries for
3104 all of a given function's sections to the first entry. */
3107 transfer_calls (struct function_info
*fun
,
3108 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3109 void *param ATTRIBUTE_UNUSED
)
3111 struct function_info
*start
= fun
->start
;
3115 struct call_info
*call
, *call_next
;
3117 while (start
->start
!= NULL
)
3118 start
= start
->start
;
3119 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
3121 call_next
= call
->next
;
3122 if (!insert_callee (start
, call
))
3125 fun
->call_list
= NULL
;
3130 /* Mark nodes in the call graph that are called by some other node. */
3133 mark_non_root (struct function_info
*fun
,
3134 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3135 void *param ATTRIBUTE_UNUSED
)
3137 struct call_info
*call
;
3142 for (call
= fun
->call_list
; call
; call
= call
->next
)
3144 call
->fun
->non_root
= TRUE
;
3145 mark_non_root (call
->fun
, 0, 0);
3150 /* Remove cycles from the call graph. Set depth of nodes. */
3153 remove_cycles (struct function_info
*fun
,
3154 struct bfd_link_info
*info
,
3157 struct call_info
**callp
, *call
;
3158 unsigned int depth
= *(unsigned int *) param
;
3159 unsigned int max_depth
= depth
;
3163 fun
->marking
= TRUE
;
3165 callp
= &fun
->call_list
;
3166 while ((call
= *callp
) != NULL
)
3168 call
->max_depth
= depth
+ !call
->is_pasted
;
3169 if (!call
->fun
->visit2
)
3171 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
3173 if (max_depth
< call
->max_depth
)
3174 max_depth
= call
->max_depth
;
3176 else if (call
->fun
->marking
)
3178 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3180 if (!htab
->params
->auto_overlay
3181 && htab
->params
->stack_analysis
)
3183 const char *f1
= func_name (fun
);
3184 const char *f2
= func_name (call
->fun
);
3186 info
->callbacks
->info (_("Stack analysis will ignore the call "
3190 *callp
= call
->next
;
3194 callp
= &call
->next
;
3196 fun
->marking
= FALSE
;
3197 *(unsigned int *) param
= max_depth
;
3201 /* Check that we actually visited all nodes in remove_cycles. If we
3202 didn't, then there is some cycle in the call graph not attached to
3203 any root node. Arbitrarily choose a node in the cycle as a new
3204 root and break the cycle. */
3207 mark_detached_root (struct function_info
*fun
,
3208 struct bfd_link_info
*info
,
3213 fun
->non_root
= FALSE
;
3214 *(unsigned int *) param
= 0;
3215 return remove_cycles (fun
, info
, param
);
3218 /* Populate call_list for each function. */
3221 build_call_tree (struct bfd_link_info
*info
)
3226 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3228 extern const bfd_target bfd_elf32_spu_vec
;
3231 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3234 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3235 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
3239 /* Transfer call info from hot/cold section part of function
3241 if (!spu_hash_table (info
)->params
->auto_overlay
3242 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
3245 /* Find the call graph root(s). */
3246 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
3249 /* Remove cycles from the call graph. We start from the root node(s)
3250 so that we break cycles in a reasonable place. */
3252 if (!for_each_node (remove_cycles
, info
, &depth
, TRUE
))
3255 return for_each_node (mark_detached_root
, info
, &depth
, FALSE
);
3258 /* qsort predicate to sort calls by priority, max_depth then count. */
3261 sort_calls (const void *a
, const void *b
)
3263 struct call_info
*const *c1
= a
;
3264 struct call_info
*const *c2
= b
;
3267 delta
= (*c2
)->priority
- (*c1
)->priority
;
3271 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
3275 delta
= (*c2
)->count
- (*c1
)->count
;
3279 return (char *) c1
- (char *) c2
;
3283 unsigned int max_overlay_size
;
3286 /* Set linker_mark and gc_mark on any sections that we will put in
3287 overlays. These flags are used by the generic ELF linker, but we
3288 won't be continuing on to bfd_elf_final_link so it is OK to use
3289 them. linker_mark is clear before we get here. Set segment_mark
3290 on sections that are part of a pasted function (excluding the last
3293 Set up function rodata section if --overlay-rodata. We don't
3294 currently include merged string constant rodata sections since
3296 Sort the call graph so that the deepest nodes will be visited
3300 mark_overlay_section (struct function_info
*fun
,
3301 struct bfd_link_info
*info
,
3304 struct call_info
*call
;
3306 struct _mos_param
*mos_param
= param
;
3307 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3313 if (!fun
->sec
->linker_mark
3314 && (htab
->params
->ovly_flavour
!= ovly_soft_icache
3315 || htab
->params
->non_ia_text
3316 || strncmp (fun
->sec
->name
, ".text.ia.", 9) == 0))
3320 fun
->sec
->linker_mark
= 1;
3321 fun
->sec
->gc_mark
= 1;
3322 fun
->sec
->segment_mark
= 0;
3323 /* Ensure SEC_CODE is set on this text section (it ought to
3324 be!), and SEC_CODE is clear on rodata sections. We use
3325 this flag to differentiate the two overlay section types. */
3326 fun
->sec
->flags
|= SEC_CODE
;
3328 size
= fun
->sec
->size
;
3329 if (htab
->params
->auto_overlay
& OVERLAY_RODATA
)
3333 /* Find the rodata section corresponding to this function's
3335 if (strcmp (fun
->sec
->name
, ".text") == 0)
3337 name
= bfd_malloc (sizeof (".rodata"));
3340 memcpy (name
, ".rodata", sizeof (".rodata"));
3342 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
3344 size_t len
= strlen (fun
->sec
->name
);
3345 name
= bfd_malloc (len
+ 3);
3348 memcpy (name
, ".rodata", sizeof (".rodata"));
3349 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
3351 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
3353 size_t len
= strlen (fun
->sec
->name
) + 1;
3354 name
= bfd_malloc (len
);
3357 memcpy (name
, fun
->sec
->name
, len
);
3363 asection
*rodata
= NULL
;
3364 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
3365 if (group_sec
== NULL
)
3366 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
3368 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
3370 if (strcmp (group_sec
->name
, name
) == 0)
3375 group_sec
= elf_section_data (group_sec
)->next_in_group
;
3377 fun
->rodata
= rodata
;
3380 size
+= fun
->rodata
->size
;
3381 if (htab
->params
->line_size
!= 0
3382 && size
> htab
->params
->line_size
)
3384 size
-= fun
->rodata
->size
;
3389 fun
->rodata
->linker_mark
= 1;
3390 fun
->rodata
->gc_mark
= 1;
3391 fun
->rodata
->flags
&= ~SEC_CODE
;
3397 if (mos_param
->max_overlay_size
< size
)
3398 mos_param
->max_overlay_size
= size
;
3401 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3406 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
3410 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3411 calls
[count
++] = call
;
3413 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
3415 fun
->call_list
= NULL
;
3419 calls
[count
]->next
= fun
->call_list
;
3420 fun
->call_list
= calls
[count
];
3425 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3427 if (call
->is_pasted
)
3429 /* There can only be one is_pasted call per function_info. */
3430 BFD_ASSERT (!fun
->sec
->segment_mark
);
3431 fun
->sec
->segment_mark
= 1;
3433 if (!mark_overlay_section (call
->fun
, info
, param
))
3437 /* Don't put entry code into an overlay. The overlay manager needs
3438 a stack! Also, don't mark .ovl.init as an overlay. */
3439 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
3440 == info
->output_bfd
->start_address
3441 || strncmp (fun
->sec
->output_section
->name
, ".ovl.init", 9) == 0)
3443 fun
->sec
->linker_mark
= 0;
3444 if (fun
->rodata
!= NULL
)
3445 fun
->rodata
->linker_mark
= 0;
3450 /* If non-zero then unmark functions called from those within sections
3451 that we need to unmark. Unfortunately this isn't reliable since the
3452 call graph cannot know the destination of function pointer calls. */
3453 #define RECURSE_UNMARK 0
3456 asection
*exclude_input_section
;
3457 asection
*exclude_output_section
;
3458 unsigned long clearing
;
3461 /* Undo some of mark_overlay_section's work. */
3464 unmark_overlay_section (struct function_info
*fun
,
3465 struct bfd_link_info
*info
,
3468 struct call_info
*call
;
3469 struct _uos_param
*uos_param
= param
;
3470 unsigned int excluded
= 0;
3478 if (fun
->sec
== uos_param
->exclude_input_section
3479 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3483 uos_param
->clearing
+= excluded
;
3485 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3487 fun
->sec
->linker_mark
= 0;
3489 fun
->rodata
->linker_mark
= 0;
3492 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3493 if (!unmark_overlay_section (call
->fun
, info
, param
))
3497 uos_param
->clearing
-= excluded
;
3502 unsigned int lib_size
;
3503 asection
**lib_sections
;
3506 /* Add sections we have marked as belonging to overlays to an array
3507 for consideration as non-overlay sections. The array consist of
3508 pairs of sections, (text,rodata), for functions in the call graph. */
3511 collect_lib_sections (struct function_info
*fun
,
3512 struct bfd_link_info
*info
,
3515 struct _cl_param
*lib_param
= param
;
3516 struct call_info
*call
;
3523 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3526 size
= fun
->sec
->size
;
3528 size
+= fun
->rodata
->size
;
3530 if (size
<= lib_param
->lib_size
)
3532 *lib_param
->lib_sections
++ = fun
->sec
;
3533 fun
->sec
->gc_mark
= 0;
3534 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3536 *lib_param
->lib_sections
++ = fun
->rodata
;
3537 fun
->rodata
->gc_mark
= 0;
3540 *lib_param
->lib_sections
++ = NULL
;
3543 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3544 collect_lib_sections (call
->fun
, info
, param
);
3549 /* qsort predicate to sort sections by call count. */
3552 sort_lib (const void *a
, const void *b
)
3554 asection
*const *s1
= a
;
3555 asection
*const *s2
= b
;
3556 struct _spu_elf_section_data
*sec_data
;
3557 struct spu_elf_stack_info
*sinfo
;
3561 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3562 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3565 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3566 delta
-= sinfo
->fun
[i
].call_count
;
3569 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3570 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3573 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3574 delta
+= sinfo
->fun
[i
].call_count
;
3583 /* Remove some sections from those marked to be in overlays. Choose
3584 those that are called from many places, likely library functions. */
3587 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3590 asection
**lib_sections
;
3591 unsigned int i
, lib_count
;
3592 struct _cl_param collect_lib_param
;
3593 struct function_info dummy_caller
;
3594 struct spu_link_hash_table
*htab
;
3596 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3598 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3600 extern const bfd_target bfd_elf32_spu_vec
;
3603 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3606 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3607 if (sec
->linker_mark
3608 && sec
->size
< lib_size
3609 && (sec
->flags
& SEC_CODE
) != 0)
3612 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3613 if (lib_sections
== NULL
)
3614 return (unsigned int) -1;
3615 collect_lib_param
.lib_size
= lib_size
;
3616 collect_lib_param
.lib_sections
= lib_sections
;
3617 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3619 return (unsigned int) -1;
3620 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3622 /* Sort sections so that those with the most calls are first. */
3624 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3626 htab
= spu_hash_table (info
);
3627 for (i
= 0; i
< lib_count
; i
++)
3629 unsigned int tmp
, stub_size
;
3631 struct _spu_elf_section_data
*sec_data
;
3632 struct spu_elf_stack_info
*sinfo
;
3634 sec
= lib_sections
[2 * i
];
3635 /* If this section is OK, its size must be less than lib_size. */
3637 /* If it has a rodata section, then add that too. */
3638 if (lib_sections
[2 * i
+ 1])
3639 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3640 /* Add any new overlay call stubs needed by the section. */
3643 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3644 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3647 struct call_info
*call
;
3649 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3650 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3651 if (call
->fun
->sec
->linker_mark
)
3653 struct call_info
*p
;
3654 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3655 if (p
->fun
== call
->fun
)
3658 stub_size
+= ovl_stub_size (htab
->params
->ovly_flavour
);
3661 if (tmp
+ stub_size
< lib_size
)
3663 struct call_info
**pp
, *p
;
3665 /* This section fits. Mark it as non-overlay. */
3666 lib_sections
[2 * i
]->linker_mark
= 0;
3667 if (lib_sections
[2 * i
+ 1])
3668 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3669 lib_size
-= tmp
+ stub_size
;
3670 /* Call stubs to the section we just added are no longer
3672 pp
= &dummy_caller
.call_list
;
3673 while ((p
= *pp
) != NULL
)
3674 if (!p
->fun
->sec
->linker_mark
)
3676 lib_size
+= ovl_stub_size (htab
->params
->ovly_flavour
);
3682 /* Add new call stubs to dummy_caller. */
3683 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3684 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3687 struct call_info
*call
;
3689 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3690 for (call
= sinfo
->fun
[k
].call_list
;
3693 if (call
->fun
->sec
->linker_mark
)
3695 struct call_info
*callee
;
3696 callee
= bfd_malloc (sizeof (*callee
));
3698 return (unsigned int) -1;
3700 if (!insert_callee (&dummy_caller
, callee
))
3706 while (dummy_caller
.call_list
!= NULL
)
3708 struct call_info
*call
= dummy_caller
.call_list
;
3709 dummy_caller
.call_list
= call
->next
;
3712 for (i
= 0; i
< 2 * lib_count
; i
++)
3713 if (lib_sections
[i
])
3714 lib_sections
[i
]->gc_mark
= 1;
3715 free (lib_sections
);
3719 /* Build an array of overlay sections. The deepest node's section is
3720 added first, then its parent node's section, then everything called
3721 from the parent section. The idea being to group sections to
3722 minimise calls between different overlays. */
3725 collect_overlays (struct function_info
*fun
,
3726 struct bfd_link_info
*info
,
3729 struct call_info
*call
;
3730 bfd_boolean added_fun
;
3731 asection
***ovly_sections
= param
;
3737 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3738 if (!call
->is_pasted
)
3740 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3746 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3748 fun
->sec
->gc_mark
= 0;
3749 *(*ovly_sections
)++ = fun
->sec
;
3750 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3752 fun
->rodata
->gc_mark
= 0;
3753 *(*ovly_sections
)++ = fun
->rodata
;
3756 *(*ovly_sections
)++ = NULL
;
3759 /* Pasted sections must stay with the first section. We don't
3760 put pasted sections in the array, just the first section.
3761 Mark subsequent sections as already considered. */
3762 if (fun
->sec
->segment_mark
)
3764 struct function_info
*call_fun
= fun
;
3767 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3768 if (call
->is_pasted
)
3770 call_fun
= call
->fun
;
3771 call_fun
->sec
->gc_mark
= 0;
3772 if (call_fun
->rodata
)
3773 call_fun
->rodata
->gc_mark
= 0;
3779 while (call_fun
->sec
->segment_mark
);
3783 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3784 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3789 struct _spu_elf_section_data
*sec_data
;
3790 struct spu_elf_stack_info
*sinfo
;
3792 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3793 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3796 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3797 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3805 struct _sum_stack_param
{
3807 size_t overall_stack
;
3808 bfd_boolean emit_stack_syms
;
3811 /* Descend the call graph for FUN, accumulating total stack required. */
3814 sum_stack (struct function_info
*fun
,
3815 struct bfd_link_info
*info
,
3818 struct call_info
*call
;
3819 struct function_info
*max
;
3820 size_t stack
, cum_stack
;
3822 bfd_boolean has_call
;
3823 struct _sum_stack_param
*sum_stack_param
= param
;
3824 struct spu_link_hash_table
*htab
;
3826 cum_stack
= fun
->stack
;
3827 sum_stack_param
->cum_stack
= cum_stack
;
3833 for (call
= fun
->call_list
; call
; call
= call
->next
)
3835 if (!call
->is_pasted
)
3837 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3839 stack
= sum_stack_param
->cum_stack
;
3840 /* Include caller stack for normal calls, don't do so for
3841 tail calls. fun->stack here is local stack usage for
3843 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3844 stack
+= fun
->stack
;
3845 if (cum_stack
< stack
)
3852 sum_stack_param
->cum_stack
= cum_stack
;
3854 /* Now fun->stack holds cumulative stack. */
3855 fun
->stack
= cum_stack
;
3859 && sum_stack_param
->overall_stack
< cum_stack
)
3860 sum_stack_param
->overall_stack
= cum_stack
;
3862 htab
= spu_hash_table (info
);
3863 if (htab
->params
->auto_overlay
)
3866 f1
= func_name (fun
);
3867 if (htab
->params
->stack_analysis
)
3870 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3871 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3872 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3876 info
->callbacks
->minfo (_(" calls:\n"));
3877 for (call
= fun
->call_list
; call
; call
= call
->next
)
3878 if (!call
->is_pasted
)
3880 const char *f2
= func_name (call
->fun
);
3881 const char *ann1
= call
->fun
== max
? "*" : " ";
3882 const char *ann2
= call
->is_tail
? "t" : " ";
3884 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
3889 if (sum_stack_param
->emit_stack_syms
)
3891 char *name
= bfd_malloc (18 + strlen (f1
));
3892 struct elf_link_hash_entry
*h
;
3897 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
3898 sprintf (name
, "__stack_%s", f1
);
3900 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
3902 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
3905 && (h
->root
.type
== bfd_link_hash_new
3906 || h
->root
.type
== bfd_link_hash_undefined
3907 || h
->root
.type
== bfd_link_hash_undefweak
))
3909 h
->root
.type
= bfd_link_hash_defined
;
3910 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
3911 h
->root
.u
.def
.value
= cum_stack
;
3916 h
->ref_regular_nonweak
= 1;
3917 h
->forced_local
= 1;
3925 /* SEC is part of a pasted function. Return the call_info for the
3926 next section of this function. */
3928 static struct call_info
*
3929 find_pasted_call (asection
*sec
)
3931 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
3932 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
3933 struct call_info
*call
;
3936 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3937 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
3938 if (call
->is_pasted
)
3944 /* qsort predicate to sort bfds by file name. */
3947 sort_bfds (const void *a
, const void *b
)
3949 bfd
*const *abfd1
= a
;
3950 bfd
*const *abfd2
= b
;
3952 return strcmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
3956 print_one_overlay_section (FILE *script
,
3959 unsigned int ovlynum
,
3960 unsigned int *ovly_map
,
3961 asection
**ovly_sections
,
3962 struct bfd_link_info
*info
)
3966 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
3968 asection
*sec
= ovly_sections
[2 * j
];
3970 if (fprintf (script
, " %s%c%s (%s)\n",
3971 (sec
->owner
->my_archive
!= NULL
3972 ? sec
->owner
->my_archive
->filename
: ""),
3973 info
->path_separator
,
3974 sec
->owner
->filename
,
3977 if (sec
->segment_mark
)
3979 struct call_info
*call
= find_pasted_call (sec
);
3980 while (call
!= NULL
)
3982 struct function_info
*call_fun
= call
->fun
;
3983 sec
= call_fun
->sec
;
3984 if (fprintf (script
, " %s%c%s (%s)\n",
3985 (sec
->owner
->my_archive
!= NULL
3986 ? sec
->owner
->my_archive
->filename
: ""),
3987 info
->path_separator
,
3988 sec
->owner
->filename
,
3991 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3992 if (call
->is_pasted
)
3998 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4000 asection
*sec
= ovly_sections
[2 * j
+ 1];
4002 && fprintf (script
, " %s%c%s (%s)\n",
4003 (sec
->owner
->my_archive
!= NULL
4004 ? sec
->owner
->my_archive
->filename
: ""),
4005 info
->path_separator
,
4006 sec
->owner
->filename
,
4010 sec
= ovly_sections
[2 * j
];
4011 if (sec
->segment_mark
)
4013 struct call_info
*call
= find_pasted_call (sec
);
4014 while (call
!= NULL
)
4016 struct function_info
*call_fun
= call
->fun
;
4017 sec
= call_fun
->rodata
;
4019 && fprintf (script
, " %s%c%s (%s)\n",
4020 (sec
->owner
->my_archive
!= NULL
4021 ? sec
->owner
->my_archive
->filename
: ""),
4022 info
->path_separator
,
4023 sec
->owner
->filename
,
4026 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4027 if (call
->is_pasted
)
4036 /* Handle --auto-overlay. */
4038 static void spu_elf_auto_overlay (struct bfd_link_info
*)
4042 spu_elf_auto_overlay (struct bfd_link_info
*info
)
4046 struct elf_segment_map
*m
;
4047 unsigned int fixed_size
, lo
, hi
;
4048 struct spu_link_hash_table
*htab
;
4049 unsigned int base
, i
, count
, bfd_count
;
4050 unsigned int region
, ovlynum
;
4051 asection
**ovly_sections
, **ovly_p
;
4052 unsigned int *ovly_map
;
4054 unsigned int total_overlay_size
, overlay_size
;
4055 const char *ovly_mgr_entry
;
4056 struct elf_link_hash_entry
*h
;
4057 struct _mos_param mos_param
;
4058 struct _uos_param uos_param
;
4059 struct function_info dummy_caller
;
4061 /* Find the extents of our loadable image. */
4062 lo
= (unsigned int) -1;
4064 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4065 if (m
->p_type
== PT_LOAD
)
4066 for (i
= 0; i
< m
->count
; i
++)
4067 if (m
->sections
[i
]->size
!= 0)
4069 if (m
->sections
[i
]->vma
< lo
)
4070 lo
= m
->sections
[i
]->vma
;
4071 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
4072 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
4074 fixed_size
= hi
+ 1 - lo
;
4076 if (!discover_functions (info
))
4079 if (!build_call_tree (info
))
4082 uos_param
.exclude_input_section
= 0;
4083 uos_param
.exclude_output_section
4084 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
4086 htab
= spu_hash_table (info
);
4087 ovly_mgr_entry
= "__ovly_load";
4088 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4089 ovly_mgr_entry
= "__icache_br_handler";
4090 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
4091 FALSE
, FALSE
, FALSE
);
4093 && (h
->root
.type
== bfd_link_hash_defined
4094 || h
->root
.type
== bfd_link_hash_defweak
)
4097 /* We have a user supplied overlay manager. */
4098 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
4102 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4103 builtin version to .text, and will adjust .text size. */
4104 fixed_size
+= (*htab
->params
->spu_elf_load_ovl_mgr
) ();
4107 /* Mark overlay sections, and find max overlay section size. */
4108 mos_param
.max_overlay_size
= 0;
4109 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
4112 /* We can't put the overlay manager or interrupt routines in
4114 uos_param
.clearing
= 0;
4115 if ((uos_param
.exclude_input_section
4116 || uos_param
.exclude_output_section
)
4117 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
4121 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4123 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
4124 if (bfd_arr
== NULL
)
4127 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4130 total_overlay_size
= 0;
4131 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4133 extern const bfd_target bfd_elf32_spu_vec
;
4135 unsigned int old_count
;
4137 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
4141 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4142 if (sec
->linker_mark
)
4144 if ((sec
->flags
& SEC_CODE
) != 0)
4146 fixed_size
-= sec
->size
;
4147 total_overlay_size
+= sec
->size
;
4149 else if ((sec
->flags
& (SEC_ALLOC
| SEC_LOAD
)) == (SEC_ALLOC
| SEC_LOAD
)
4150 && sec
->output_section
->owner
== info
->output_bfd
4151 && strncmp (sec
->output_section
->name
, ".ovl.init", 9) == 0)
4152 fixed_size
-= sec
->size
;
4153 if (count
!= old_count
)
4154 bfd_arr
[bfd_count
++] = ibfd
;
4157 /* Since the overlay link script selects sections by file name and
4158 section name, ensure that file names are unique. */
4161 bfd_boolean ok
= TRUE
;
4163 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
4164 for (i
= 1; i
< bfd_count
; ++i
)
4165 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
4167 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
4169 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
4170 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
4171 bfd_arr
[i
]->filename
,
4172 bfd_arr
[i
]->my_archive
->filename
);
4174 info
->callbacks
->einfo (_("%s duplicated\n"),
4175 bfd_arr
[i
]->filename
);
4181 info
->callbacks
->einfo (_("sorry, no support for duplicate "
4182 "object files in auto-overlay script\n"));
4183 bfd_set_error (bfd_error_bad_value
);
4189 if (htab
->reserved
== 0)
4191 struct _sum_stack_param sum_stack_param
;
4193 sum_stack_param
.emit_stack_syms
= 0;
4194 sum_stack_param
.overall_stack
= 0;
4195 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4197 htab
->reserved
= sum_stack_param
.overall_stack
+ htab
->extra_stack_space
;
4199 fixed_size
+= htab
->reserved
;
4200 fixed_size
+= htab
->non_ovly_stub
* ovl_stub_size (htab
->params
->ovly_flavour
);
4201 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
4203 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4205 /* Stubs in the non-icache area are bigger. */
4206 fixed_size
+= htab
->non_ovly_stub
* 16;
4207 /* Space for icache manager tables.
4208 a) Tag array, one quadword per cache line.
4209 - word 0: ia address of present line, init to zero.
4210 - word 1: link locator. link_elem=stub_addr/2+locator
4211 - halfwords 4-7: head/tail pointers for linked lists. */
4212 fixed_size
+= 16 << htab
->num_lines_log2
;
4213 /* b) Linked list elements, max_branch per line. */
4214 fixed_size
+= htab
->params
->max_branch
<< (htab
->num_lines_log2
+ 4);
4215 /* c) Indirect branch descriptors, 8 quadwords. */
4216 fixed_size
+= 8 * 16;
4217 /* d) Pointers to __ea backing store, 16 quadwords. */
4218 fixed_size
+= 16 * 16;
4222 /* Guess number of overlays. Assuming overlay buffer is on
4223 average only half full should be conservative. */
4224 ovlynum
= (total_overlay_size
* 2 * htab
->params
->num_lines
4225 / (htab
->local_store
- fixed_size
));
4226 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4227 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
4231 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
4232 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4233 "size of 0x%v exceeds local store\n"),
4234 (bfd_vma
) fixed_size
,
4235 (bfd_vma
) mos_param
.max_overlay_size
);
4237 /* Now see if we should put some functions in the non-overlay area. */
4238 else if (fixed_size
< htab
->overlay_fixed
)
4240 unsigned int max_fixed
, lib_size
;
4242 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
4243 if (max_fixed
> htab
->overlay_fixed
)
4244 max_fixed
= htab
->overlay_fixed
;
4245 lib_size
= max_fixed
- fixed_size
;
4246 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
4247 if (lib_size
== (unsigned int) -1)
4249 fixed_size
= max_fixed
- lib_size
;
4252 /* Build an array of sections, suitably sorted to place into
4254 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
4255 if (ovly_sections
== NULL
)
4257 ovly_p
= ovly_sections
;
4258 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
4260 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
4261 ovly_map
= bfd_malloc (count
* sizeof (*ovly_map
));
4262 if (ovly_map
== NULL
)
4265 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
4266 overlay_size
= (htab
->local_store
- fixed_size
) / htab
->params
->num_lines
;
4267 if (htab
->params
->line_size
!= 0)
4268 overlay_size
= htab
->params
->line_size
;
4271 while (base
< count
)
4273 unsigned int size
= 0;
4275 for (i
= base
; i
< count
; i
++)
4279 unsigned int num_stubs
;
4280 struct call_info
*call
, *pasty
;
4281 struct _spu_elf_section_data
*sec_data
;
4282 struct spu_elf_stack_info
*sinfo
;
4285 /* See whether we can add this section to the current
4286 overlay without overflowing our overlay buffer. */
4287 sec
= ovly_sections
[2 * i
];
4288 tmp
= size
+ sec
->size
;
4289 if (ovly_sections
[2 * i
+ 1])
4290 tmp
+= ovly_sections
[2 * i
+ 1]->size
;
4291 if (tmp
> overlay_size
)
4293 if (sec
->segment_mark
)
4295 /* Pasted sections must stay together, so add their
4297 struct call_info
*pasty
= find_pasted_call (sec
);
4298 while (pasty
!= NULL
)
4300 struct function_info
*call_fun
= pasty
->fun
;
4301 tmp
+= call_fun
->sec
->size
;
4302 if (call_fun
->rodata
)
4303 tmp
+= call_fun
->rodata
->size
;
4304 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
4305 if (pasty
->is_pasted
)
4309 if (tmp
> overlay_size
)
4312 /* If we add this section, we might need new overlay call
4313 stubs. Add any overlay section calls to dummy_call. */
4315 sec_data
= spu_elf_section_data (sec
);
4316 sinfo
= sec_data
->u
.i
.stack_info
;
4317 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4318 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
4319 if (call
->is_pasted
)
4321 BFD_ASSERT (pasty
== NULL
);
4324 else if (call
->fun
->sec
->linker_mark
)
4326 if (!copy_callee (&dummy_caller
, call
))
4329 while (pasty
!= NULL
)
4331 struct function_info
*call_fun
= pasty
->fun
;
4333 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4334 if (call
->is_pasted
)
4336 BFD_ASSERT (pasty
== NULL
);
4339 else if (!copy_callee (&dummy_caller
, call
))
4343 /* Calculate call stub size. */
4345 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
4350 /* If the call is within this overlay, we won't need a
4352 for (k
= base
; k
< i
+ 1; k
++)
4353 if (call
->fun
->sec
== ovly_sections
[2 * k
])
4359 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4360 && num_stubs
> htab
->params
->max_branch
)
4362 if (tmp
+ num_stubs
* ovl_stub_size (htab
->params
->ovly_flavour
)
4370 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
4371 ovly_sections
[2 * i
]->owner
,
4372 ovly_sections
[2 * i
],
4373 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
4374 bfd_set_error (bfd_error_bad_value
);
4378 while (dummy_caller
.call_list
!= NULL
)
4380 struct call_info
*call
= dummy_caller
.call_list
;
4381 dummy_caller
.call_list
= call
->next
;
4387 ovly_map
[base
++] = ovlynum
;
4390 script
= htab
->params
->spu_elf_open_overlay_script ();
4392 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4395 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4397 if (fprintf (script
,
4398 " .data.icache ALIGN (16) : { *(.ovtab) *(.data.icache) }\n"
4399 " . = ALIGN (%u);\n"
4400 " .ovl.init : { *(.ovl.init) }\n"
4401 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4402 htab
->params
->line_size
) <= 0)
4407 while (base
< count
)
4409 unsigned int indx
= ovlynum
- 1;
4410 unsigned int vma
, lma
;
4412 vma
= (indx
& (htab
->params
->num_lines
- 1)) << htab
->line_size_log2
;
4413 lma
= indx
<< htab
->line_size_log2
;
4415 if (fprintf (script
, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4416 ": AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16) + %u) {\n",
4417 ovlynum
, vma
, lma
) <= 0)
4420 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4421 ovly_map
, ovly_sections
, info
);
4422 if (base
== (unsigned) -1)
4425 if (fprintf (script
, " }\n") <= 0)
4431 if (fprintf (script
, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4432 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
)) <= 0)
4437 if (fprintf (script
,
4438 " . = ALIGN (16);\n"
4439 " .ovl.init : { *(.ovl.init) }\n"
4440 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4443 for (region
= 1; region
<= htab
->params
->num_lines
; region
++)
4447 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4455 /* We need to set lma since we are overlaying .ovl.init. */
4456 if (fprintf (script
,
4457 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4462 if (fprintf (script
, " OVERLAY :\n {\n") <= 0)
4466 while (base
< count
)
4468 if (fprintf (script
, " .ovly%u {\n", ovlynum
) <= 0)
4471 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4472 ovly_map
, ovly_sections
, info
);
4473 if (base
== (unsigned) -1)
4476 if (fprintf (script
, " }\n") <= 0)
4479 ovlynum
+= htab
->params
->num_lines
;
4480 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4484 if (fprintf (script
, " }\n") <= 0)
4491 free (ovly_sections
);
4493 if (fprintf (script
, "}\nINSERT BEFORE .text;\n") <= 0)
4495 if (fclose (script
) != 0)
4498 if (htab
->params
->auto_overlay
& AUTO_RELINK
)
4499 (*htab
->params
->spu_elf_relink
) ();
4504 bfd_set_error (bfd_error_system_call
);
4506 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
4510 /* Provide an estimate of total stack required. */
4513 spu_elf_stack_analysis (struct bfd_link_info
*info
)
4515 struct spu_link_hash_table
*htab
;
4516 struct _sum_stack_param sum_stack_param
;
4518 if (!discover_functions (info
))
4521 if (!build_call_tree (info
))
4524 htab
= spu_hash_table (info
);
4525 if (htab
->params
->stack_analysis
)
4527 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
4528 info
->callbacks
->minfo (_("\nStack size for functions. "
4529 "Annotations: '*' max stack, 't' tail call\n"));
4532 sum_stack_param
.emit_stack_syms
= htab
->params
->emit_stack_syms
;
4533 sum_stack_param
.overall_stack
= 0;
4534 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4537 if (htab
->params
->stack_analysis
)
4538 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
4539 (bfd_vma
) sum_stack_param
.overall_stack
);
4543 /* Perform a final link. */
4546 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
4548 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4550 if (htab
->params
->auto_overlay
)
4551 spu_elf_auto_overlay (info
);
4553 if ((htab
->params
->stack_analysis
4554 || (htab
->params
->ovly_flavour
== ovly_soft_icache
4555 && htab
->params
->lrlive_analysis
))
4556 && !spu_elf_stack_analysis (info
))
4557 info
->callbacks
->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4559 if (!spu_elf_build_stubs (info
))
4560 info
->callbacks
->einfo ("%F%P: can not build overlay stubs: %E\n");
4562 return bfd_elf_final_link (output_bfd
, info
);
4565 /* Called when not normally emitting relocs, ie. !info->relocatable
4566 and !info->emitrelocations. Returns a count of special relocs
4567 that need to be emitted. */
4570 spu_elf_count_relocs (struct bfd_link_info
*info
, asection
*sec
)
4572 Elf_Internal_Rela
*relocs
;
4573 unsigned int count
= 0;
4575 relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
4579 Elf_Internal_Rela
*rel
;
4580 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
4582 for (rel
= relocs
; rel
< relend
; rel
++)
4584 int r_type
= ELF32_R_TYPE (rel
->r_info
);
4585 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4589 if (elf_section_data (sec
)->relocs
!= relocs
)
4596 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4599 spu_elf_relocate_section (bfd
*output_bfd
,
4600 struct bfd_link_info
*info
,
4602 asection
*input_section
,
4604 Elf_Internal_Rela
*relocs
,
4605 Elf_Internal_Sym
*local_syms
,
4606 asection
**local_sections
)
4608 Elf_Internal_Shdr
*symtab_hdr
;
4609 struct elf_link_hash_entry
**sym_hashes
;
4610 Elf_Internal_Rela
*rel
, *relend
;
4611 struct spu_link_hash_table
*htab
;
4614 bfd_boolean emit_these_relocs
= FALSE
;
4615 bfd_boolean is_ea_sym
;
4617 unsigned int iovl
= 0;
4619 htab
= spu_hash_table (info
);
4620 stubs
= (htab
->stub_sec
!= NULL
4621 && maybe_needs_stubs (input_section
));
4622 iovl
= overlay_index (input_section
);
4623 ea
= bfd_get_section_by_name (output_bfd
, "._ea");
4624 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
4625 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
4628 relend
= relocs
+ input_section
->reloc_count
;
4629 for (; rel
< relend
; rel
++)
4632 reloc_howto_type
*howto
;
4633 unsigned int r_symndx
;
4634 Elf_Internal_Sym
*sym
;
4636 struct elf_link_hash_entry
*h
;
4637 const char *sym_name
;
4640 bfd_reloc_status_type r
;
4641 bfd_boolean unresolved_reloc
;
4643 bfd_boolean overlay_encoded
;
4644 enum _stub_type stub_type
;
4646 r_symndx
= ELF32_R_SYM (rel
->r_info
);
4647 r_type
= ELF32_R_TYPE (rel
->r_info
);
4648 howto
= elf_howto_table
+ r_type
;
4649 unresolved_reloc
= FALSE
;
4654 if (r_symndx
< symtab_hdr
->sh_info
)
4656 sym
= local_syms
+ r_symndx
;
4657 sec
= local_sections
[r_symndx
];
4658 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
4659 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4663 if (sym_hashes
== NULL
)
4666 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
4668 while (h
->root
.type
== bfd_link_hash_indirect
4669 || h
->root
.type
== bfd_link_hash_warning
)
4670 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
4673 if (h
->root
.type
== bfd_link_hash_defined
4674 || h
->root
.type
== bfd_link_hash_defweak
)
4676 sec
= h
->root
.u
.def
.section
;
4678 || sec
->output_section
== NULL
)
4679 /* Set a flag that will be cleared later if we find a
4680 relocation value for this symbol. output_section
4681 is typically NULL for symbols satisfied by a shared
4683 unresolved_reloc
= TRUE
;
4685 relocation
= (h
->root
.u
.def
.value
4686 + sec
->output_section
->vma
4687 + sec
->output_offset
);
4689 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4691 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4692 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4694 else if (!info
->relocatable
4695 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4698 err
= (info
->unresolved_syms_in_objects
== RM_GENERATE_ERROR
4699 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
);
4700 if (!info
->callbacks
->undefined_symbol (info
,
4701 h
->root
.root
.string
,
4704 rel
->r_offset
, err
))
4708 sym_name
= h
->root
.root
.string
;
4711 if (sec
!= NULL
&& elf_discarded_section (sec
))
4713 /* For relocs against symbols from removed linkonce sections,
4714 or sections discarded by a linker script, we just want the
4715 section contents zeroed. Avoid any special processing. */
4716 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
4722 if (info
->relocatable
)
4725 is_ea_sym
= (ea
!= NULL
4727 && sec
->output_section
== ea
);
4728 overlay_encoded
= FALSE
;
4730 /* If this symbol is in an overlay area, we may need to relocate
4731 to the overlay stub. */
4732 addend
= rel
->r_addend
;
4735 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4736 contents
, info
)) != no_stub
)
4738 unsigned int ovl
= 0;
4739 struct got_entry
*g
, **head
;
4741 if (stub_type
!= nonovl_stub
)
4745 head
= &h
->got
.glist
;
4747 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4749 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4750 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4751 ? g
->br_addr
== (rel
->r_offset
4752 + input_section
->output_offset
4753 + input_section
->output_section
->vma
)
4754 : g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4759 relocation
= g
->stub_addr
;
4764 /* For soft icache, encode the overlay index into addresses. */
4765 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4768 unsigned int ovl
= overlay_index (sec
);
4771 unsigned int set_id
= (ovl
- 1) >> htab
->num_lines_log2
;
4772 relocation
+= set_id
<< 18;
4773 overlay_encoded
= set_id
!= 0;
4778 if (unresolved_reloc
)
4780 else if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4784 /* ._ea is a special section that isn't allocated in SPU
4785 memory, but rather occupies space in PPU memory as
4786 part of an embedded ELF image. If this reloc is
4787 against a symbol defined in ._ea, then transform the
4788 reloc into an equivalent one without a symbol
4789 relative to the start of the ELF image. */
4790 rel
->r_addend
+= (relocation
4792 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4793 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4795 emit_these_relocs
= TRUE
;
4799 unresolved_reloc
= TRUE
;
4801 if (unresolved_reloc
)
4803 (*_bfd_error_handler
)
4804 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4806 bfd_get_section_name (input_bfd
, input_section
),
4807 (long) rel
->r_offset
,
4813 r
= _bfd_final_link_relocate (howto
,
4817 rel
->r_offset
, relocation
, addend
);
4819 if (r
!= bfd_reloc_ok
)
4821 const char *msg
= (const char *) 0;
4825 case bfd_reloc_overflow
:
4826 /* FIXME: We don't want to warn on most references
4827 within an overlay to itself, but this may silence a
4828 warning that should be reported. */
4829 if (overlay_encoded
&& sec
== input_section
)
4831 if (!((*info
->callbacks
->reloc_overflow
)
4832 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
4833 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
4837 case bfd_reloc_undefined
:
4838 if (!((*info
->callbacks
->undefined_symbol
)
4839 (info
, sym_name
, input_bfd
, input_section
,
4840 rel
->r_offset
, TRUE
)))
4844 case bfd_reloc_outofrange
:
4845 msg
= _("internal error: out of range error");
4848 case bfd_reloc_notsupported
:
4849 msg
= _("internal error: unsupported relocation error");
4852 case bfd_reloc_dangerous
:
4853 msg
= _("internal error: dangerous error");
4857 msg
= _("internal error: unknown error");
4862 if (!((*info
->callbacks
->warning
)
4863 (info
, msg
, sym_name
, input_bfd
, input_section
,
4872 && emit_these_relocs
4873 && !info
->emitrelocations
)
4875 Elf_Internal_Rela
*wrel
;
4876 Elf_Internal_Shdr
*rel_hdr
;
4878 wrel
= rel
= relocs
;
4879 relend
= relocs
+ input_section
->reloc_count
;
4880 for (; rel
< relend
; rel
++)
4884 r_type
= ELF32_R_TYPE (rel
->r_info
);
4885 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4888 input_section
->reloc_count
= wrel
- relocs
;
4889 /* Backflips for _bfd_elf_link_output_relocs. */
4890 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
4891 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
4898 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4901 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
4902 const char *sym_name ATTRIBUTE_UNUSED
,
4903 Elf_Internal_Sym
*sym
,
4904 asection
*sym_sec ATTRIBUTE_UNUSED
,
4905 struct elf_link_hash_entry
*h
)
4907 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4909 if (!info
->relocatable
4910 && htab
->stub_sec
!= NULL
4912 && (h
->root
.type
== bfd_link_hash_defined
4913 || h
->root
.type
== bfd_link_hash_defweak
)
4915 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
4917 struct got_entry
*g
;
4919 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
4920 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4921 ? g
->br_addr
== g
->stub_addr
4922 : g
->addend
== 0 && g
->ovl
== 0)
4924 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
4925 (htab
->stub_sec
[0]->output_section
->owner
,
4926 htab
->stub_sec
[0]->output_section
));
4927 sym
->st_value
= g
->stub_addr
;
4935 static int spu_plugin
= 0;
4938 spu_elf_plugin (int val
)
4943 /* Set ELF header e_type for plugins. */
4946 spu_elf_post_process_headers (bfd
*abfd
,
4947 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
4951 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
4953 i_ehdrp
->e_type
= ET_DYN
;
4957 /* We may add an extra PT_LOAD segment for .toe. We also need extra
4958 segments for overlays. */
4961 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4968 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4969 extra
= htab
->num_overlays
;
4975 sec
= bfd_get_section_by_name (abfd
, ".toe");
4976 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
4982 /* Remove .toe section from other PT_LOAD segments and put it in
4983 a segment of its own. Put overlays in separate segments too. */
4986 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
4989 struct elf_segment_map
*m
;
4995 toe
= bfd_get_section_by_name (abfd
, ".toe");
4996 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4997 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
4998 for (i
= 0; i
< m
->count
; i
++)
4999 if ((s
= m
->sections
[i
]) == toe
5000 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
5002 struct elf_segment_map
*m2
;
5005 if (i
+ 1 < m
->count
)
5007 amt
= sizeof (struct elf_segment_map
);
5008 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
5009 m2
= bfd_zalloc (abfd
, amt
);
5012 m2
->count
= m
->count
- (i
+ 1);
5013 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
5014 m2
->count
* sizeof (m
->sections
[0]));
5015 m2
->p_type
= PT_LOAD
;
5023 amt
= sizeof (struct elf_segment_map
);
5024 m2
= bfd_zalloc (abfd
, amt
);
5027 m2
->p_type
= PT_LOAD
;
5029 m2
->sections
[0] = s
;
5039 /* Tweak the section type of .note.spu_name. */
5042 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
5043 Elf_Internal_Shdr
*hdr
,
5046 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
5047 hdr
->sh_type
= SHT_NOTE
;
5051 /* Tweak phdrs before writing them out. */
5054 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5056 const struct elf_backend_data
*bed
;
5057 struct elf_obj_tdata
*tdata
;
5058 Elf_Internal_Phdr
*phdr
, *last
;
5059 struct spu_link_hash_table
*htab
;
5066 bed
= get_elf_backend_data (abfd
);
5067 tdata
= elf_tdata (abfd
);
5069 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
5070 htab
= spu_hash_table (info
);
5071 if (htab
->num_overlays
!= 0)
5073 struct elf_segment_map
*m
;
5076 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
5078 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
5080 /* Mark this as an overlay header. */
5081 phdr
[i
].p_flags
|= PF_OVERLAY
;
5083 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0
5084 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
5086 bfd_byte
*p
= htab
->ovtab
->contents
;
5087 unsigned int off
= o
* 16 + 8;
5089 /* Write file_off into _ovly_table. */
5090 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
5093 /* Soft-icache has its file offset put in .ovl.init. */
5094 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
5096 bfd_vma val
= elf_section_data (htab
->ovl_sec
[0])->this_hdr
.sh_offset
;
5098 bfd_put_32 (htab
->init
->owner
, val
, htab
->init
->contents
+ 4);
5102 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5103 of 16. This should always be possible when using the standard
5104 linker scripts, but don't create overlapping segments if
5105 someone is playing games with linker scripts. */
5107 for (i
= count
; i
-- != 0; )
5108 if (phdr
[i
].p_type
== PT_LOAD
)
5112 adjust
= -phdr
[i
].p_filesz
& 15;
5115 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
5118 adjust
= -phdr
[i
].p_memsz
& 15;
5121 && phdr
[i
].p_filesz
!= 0
5122 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
5123 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
5126 if (phdr
[i
].p_filesz
!= 0)
5130 if (i
== (unsigned int) -1)
5131 for (i
= count
; i
-- != 0; )
5132 if (phdr
[i
].p_type
== PT_LOAD
)
5136 adjust
= -phdr
[i
].p_filesz
& 15;
5137 phdr
[i
].p_filesz
+= adjust
;
5139 adjust
= -phdr
[i
].p_memsz
& 15;
5140 phdr
[i
].p_memsz
+= adjust
;
5146 #define TARGET_BIG_SYM bfd_elf32_spu_vec
5147 #define TARGET_BIG_NAME "elf32-spu"
5148 #define ELF_ARCH bfd_arch_spu
5149 #define ELF_MACHINE_CODE EM_SPU
5150 /* This matches the alignment need for DMA. */
5151 #define ELF_MAXPAGESIZE 0x80
5152 #define elf_backend_rela_normal 1
5153 #define elf_backend_can_gc_sections 1
5155 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5156 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5157 #define elf_info_to_howto spu_elf_info_to_howto
5158 #define elf_backend_count_relocs spu_elf_count_relocs
5159 #define elf_backend_relocate_section spu_elf_relocate_section
5160 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5161 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5162 #define elf_backend_object_p spu_elf_object_p
5163 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5164 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5166 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5167 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5168 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
5169 #define elf_backend_post_process_headers spu_elf_post_process_headers
5170 #define elf_backend_fake_sections spu_elf_fake_sections
5171 #define elf_backend_special_sections spu_elf_special_sections
5172 #define bfd_elf32_bfd_final_link spu_elf_final_link
5174 #include "elf32-target.h"