5be8f1cf5e538fcfc342a0485fca007ad4ce0086
[deliverable/binutils-gdb.git] / bfd / elf32-spu.c
1 /* SPU specific support for 32-bit ELF
2
3 Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include "bfd.h"
23 #include "bfdlink.h"
24 #include "libbfd.h"
25 #include "elf-bfd.h"
26 #include "elf/spu.h"
27 #include "elf32-spu.h"
28
29 /* We use RELA style relocs. Don't define USE_REL. */
30
31 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
32 void *, asection *,
33 bfd *, char **);
34
35 /* Values of type 'enum elf_spu_reloc_type' are used to index this
36 array, so it must be declared in the order of that type. */
37
/* Relocation howto table, indexed by 'enum elf_spu_reloc_type' — the
   entries must stay in exactly that enum order.  HOWTO fields used here:
   (type, rightshift, size, bitsize, pc_relative, bitpos, complain,
   special_function, name, partial_inplace, src_mask, dst_mask,
   pcrel_offset).  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The two 9-bit pc-relative relocs have split fields, so they need
     the special function spu_elf_rel9 rather than the generic one.  */
  HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
};
91
/* Sections that get special ELF section header treatment.  The .toe
   (table of effective addresses) section is alloc'd NOBITS with 16-byte
   alignment (the "4" is log2).  The table is NULL-terminated.  */

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
96
97 static enum elf_spu_reloc_type
98 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
99 {
100 switch (code)
101 {
102 default:
103 return R_SPU_NONE;
104 case BFD_RELOC_SPU_IMM10W:
105 return R_SPU_ADDR10;
106 case BFD_RELOC_SPU_IMM16W:
107 return R_SPU_ADDR16;
108 case BFD_RELOC_SPU_LO16:
109 return R_SPU_ADDR16_LO;
110 case BFD_RELOC_SPU_HI16:
111 return R_SPU_ADDR16_HI;
112 case BFD_RELOC_SPU_IMM18:
113 return R_SPU_ADDR18;
114 case BFD_RELOC_SPU_PCREL16:
115 return R_SPU_REL16;
116 case BFD_RELOC_SPU_IMM7:
117 return R_SPU_ADDR7;
118 case BFD_RELOC_SPU_IMM8:
119 return R_SPU_NONE;
120 case BFD_RELOC_SPU_PCREL9a:
121 return R_SPU_REL9;
122 case BFD_RELOC_SPU_PCREL9b:
123 return R_SPU_REL9I;
124 case BFD_RELOC_SPU_IMM10:
125 return R_SPU_ADDR10I;
126 case BFD_RELOC_SPU_IMM16:
127 return R_SPU_ADDR16I;
128 case BFD_RELOC_32:
129 return R_SPU_ADDR32;
130 case BFD_RELOC_32_PCREL:
131 return R_SPU_REL32;
132 case BFD_RELOC_SPU_PPU32:
133 return R_SPU_PPU32;
134 case BFD_RELOC_SPU_PPU64:
135 return R_SPU_PPU64;
136 }
137 }
138
139 static void
140 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
141 arelent *cache_ptr,
142 Elf_Internal_Rela *dst)
143 {
144 enum elf_spu_reloc_type r_type;
145
146 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
147 BFD_ASSERT (r_type < R_SPU_max);
148 cache_ptr->howto = &elf_howto_table[(int) r_type];
149 }
150
151 static reloc_howto_type *
152 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
153 bfd_reloc_code_real_type code)
154 {
155 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
156
157 if (r_type == R_SPU_NONE)
158 return NULL;
159
160 return elf_howto_table + r_type;
161 }
162
163 static reloc_howto_type *
164 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
165 const char *r_name)
166 {
167 unsigned int i;
168
169 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
170 if (elf_howto_table[i].name != NULL
171 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
172 return &elf_howto_table[i];
173
174 return NULL;
175 }
176
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  These have a 9-bit signed
   word offset split into two fields of the instruction, so the generic
   reloc function cannot handle them.  Returns a bfd_reloc_* status.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  Common symbols contribute no value of their own.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* Branch offsets are in words, not bytes.  */
  val >>= 2;
  /* Unsigned trick for a signed 9-bit range check: val must lie in
     [-256, 255].  */
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
225
226 static bfd_boolean
227 spu_elf_new_section_hook (bfd *abfd, asection *sec)
228 {
229 if (!sec->used_by_bfd)
230 {
231 struct _spu_elf_section_data *sdata;
232
233 sdata = bfd_zalloc (abfd, sizeof (*sdata));
234 if (sdata == NULL)
235 return FALSE;
236 sec->used_by_bfd = sdata;
237 }
238
239 return _bfd_elf_new_section_hook (abfd, sec);
240 }
241
242 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
243 strip --strip-unneeded will not remove them. */
244
245 static void
246 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
247 {
248 if (sym->name != NULL
249 && sym->section != bfd_abs_section_ptr
250 && strncmp (sym->name, "_EAR_", 5) == 0)
251 sym->flags |= BSF_KEEP;
252 }
253
/* SPU ELF linker hash table.  Extends the generic ELF hash table with
   overlay bookkeeping.  */

struct spu_link_hash_table
{
  /* Generic ELF linker hash table; must come first.  */
  struct elf_link_hash_table elf;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;			/* Overlay table section.  */
  asection *toe;			/* Table of effective addresses.  */
  asection **ovl_sec;			/* Overlay sections, sorted by vma.  */

  /* Count of stubs in each overlay section.  Index 0 counts stubs in
     the non-overlay area.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  /* Overlay manager entry points supplied by the overlay manager.  */
  struct elf_link_hash_entry *ovly_load;
  struct elf_link_hash_entry *ovly_return;
  unsigned long ovly_load_r_symndx;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* Set if we should emit symbols for stubs.  */
  unsigned int emit_stub_syms:1;

  /* Set if we want stubs on calls out of overlay regions to
     non-overlay regions.  */
  unsigned int non_overlay_stubs : 1;

  /* Set on error.  */
  unsigned int stub_err : 1;

  /* Set if stack size analysis should be done.  */
  unsigned int stack_analysis : 1;

  /* Set if __stack_* syms will be emitted.  */
  unsigned int emit_stack_syms : 1;
};
297
/* Hijack the generic got fields for overlay stub accounting.  Each
   entry records one stub: OVL is the overlay index the call comes from
   (0 means the non-overlay area), ADDEND distinguishes stubs for the
   same symbol with different reloc addends, and STUB_ADDR is the
   stub's address once built ((bfd_vma) -1 until then).  */

struct got_entry
{
  struct got_entry *next;
  unsigned int ovl;
  bfd_vma addend;
  bfd_vma stub_addr;
};

/* Downcast the generic link hash table in INFO to our SPU variant.
   Safe because spu_link_hash_table embeds elf_link_hash_table first.  */
#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
310
/* Create a spu ELF linker hash table.  Returns the embedded generic
   hash table root, or NULL on allocation failure.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
				      _bfd_elf_link_hash_newfunc,
				      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

  /* _bfd_elf_link_hash_table_init only initialised the generic part;
     zero every SPU-specific field, i.e. everything from 'ovtab' to the
     end of the struct.  */
  memset (&htab->ovtab, 0,
	  sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  /* The got fields are reused for stub accounting (struct got_entry
     lists), so start them out as empty list pointers, not counts.  */
  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}
339
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.
   Any of HP, SYMP, SYMSECP may be NULL if the caller doesn't need that
   result.  Returns FALSE only if reading the symbol table fails.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      /* Global symbol: index past sh_info selects from the hash array.  */
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Follow indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  /* Only defined symbols have a section.  */
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      /* Local symbol: read (and cache) the local symbol table.  */
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    {
	      size_t symcount = symtab_hdr->sh_info;

	      /* If we are reading symbols into the contents, then
		 read the global syms too.  This is done to cache
		 syms for later stack analysis.  */
	      if ((unsigned char **) locsymsp == &symtab_hdr->contents)
		symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
	      locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
					      NULL, NULL, NULL);
	    }
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}
417
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.
   Also stashes STACK_ANALYSIS and EMIT_STACK_SYMS in the hash table.
   Returns FALSE on allocation failure.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info,
			 int stack_analysis,
			 int emit_stack_syms)
{
  bfd *ibfd;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Stash some options away where we can get at them later.  */
  htab->stack_analysis = stack_analysis;
  htab->emit_stack_syms = emit_stack_syms;

  /* If any input bfd already supplies the note section, use that.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      /* ELF note layout: 12-byte header (namesz, descsz, type), then
	 name and descriptor, each padded to a 4-byte boundary.  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      /* Header: namesz, descsz, type = 1.  */
      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}
475
476 /* qsort predicate to sort sections by vma. */
477
478 static int
479 sort_sections (const void *a, const void *b)
480 {
481 const asection *const *s1 = a;
482 const asection *const *s2 = b;
483 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
484
485 if (delta != 0)
486 return delta < 0 ? -1 : 1;
487
488 return (*s1)->index - (*s2)->index;
489 }
490
491 /* Identify overlays in the output bfd, and number them. */
492
493 bfd_boolean
494 spu_elf_find_overlays (struct bfd_link_info *info)
495 {
496 struct spu_link_hash_table *htab = spu_hash_table (info);
497 asection **alloc_sec;
498 unsigned int i, n, ovl_index, num_buf;
499 asection *s;
500 bfd_vma ovl_end;
501
502 if (info->output_bfd->section_count < 2)
503 return FALSE;
504
505 alloc_sec
506 = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
507 if (alloc_sec == NULL)
508 return FALSE;
509
510 /* Pick out all the alloced sections. */
511 for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
512 if ((s->flags & SEC_ALLOC) != 0
513 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
514 && s->size != 0)
515 alloc_sec[n++] = s;
516
517 if (n == 0)
518 {
519 free (alloc_sec);
520 return FALSE;
521 }
522
523 /* Sort them by vma. */
524 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
525
526 /* Look for overlapping vmas. Any with overlap must be overlays.
527 Count them. Also count the number of overlay regions. */
528 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
529 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
530 {
531 s = alloc_sec[i];
532 if (s->vma < ovl_end)
533 {
534 asection *s0 = alloc_sec[i - 1];
535
536 if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
537 {
538 alloc_sec[ovl_index] = s0;
539 spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
540 spu_elf_section_data (s0)->u.o.ovl_buf = ++num_buf;
541 }
542 alloc_sec[ovl_index] = s;
543 spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
544 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
545 if (s0->vma != s->vma)
546 {
547 info->callbacks->einfo (_("%X%P: overlay sections %A and %A "
548 "do not start at the same address.\n"),
549 s0, s);
550 return FALSE;
551 }
552 if (ovl_end < s->vma + s->size)
553 ovl_end = s->vma + s->size;
554 }
555 else
556 ovl_end = s->vma + s->size;
557 }
558
559 htab->num_overlays = ovl_index;
560 htab->num_buf = num_buf;
561 htab->ovl_sec = alloc_sec;
562 htab->ovly_load = elf_link_hash_lookup (&htab->elf, "__ovly_load",
563 FALSE, FALSE, FALSE);
564 htab->ovly_return = elf_link_hash_lookup (&htab->elf, "__ovly_return",
565 FALSE, FALSE, FALSE);
566 return ovl_index != 0;
567 }
568
569 /* Support two sizes of overlay stubs, a slower more compact stub of two
570 intructions, and a faster stub of four instructions. */
571 #ifndef OVL_STUB_SIZE
572 /* Default to faster. */
573 #define OVL_STUB_SIZE 16
574 /* #define OVL_STUB_SIZE 8 */
575 #endif
576 #define BRSL 0x33000000
577 #define BR 0x32000000
578 #define NOP 0x40200000
579 #define LNOP 0x00200000
580 #define ILA 0x42000000
581
582 /* Return true for all relative and absolute branch instructions.
583 bra 00110000 0..
584 brasl 00110001 0..
585 br 00110010 0..
586 brsl 00110011 0..
587 brz 00100000 0..
588 brnz 00100001 0..
589 brhz 00100010 0..
590 brhnz 00100011 0.. */
591
592 static bfd_boolean
593 is_branch (const unsigned char *insn)
594 {
595 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
596 }
597
598 /* Return true for all indirect branch instructions.
599 bi 00110101 000
600 bisl 00110101 001
601 iret 00110101 010
602 bisled 00110101 011
603 biz 00100101 000
604 binz 00100101 001
605 bihz 00100101 010
606 bihnz 00100101 011 */
607
608 static bfd_boolean
609 is_indirect_branch (const unsigned char *insn)
610 {
611 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
612 }
613
614 /* Return true for branch hint instructions.
615 hbra 0001000..
616 hbrr 0001001.. */
617
618 static bfd_boolean
619 is_hint (const unsigned char *insn)
620 {
621 return (insn[0] & 0xfc) == 0x10;
622 }
623
624 /* True if INPUT_SECTION might need overlay stubs. */
625
626 static bfd_boolean
627 maybe_needs_stubs (asection *input_section, bfd *output_bfd)
628 {
629 /* No stubs for debug sections and suchlike. */
630 if ((input_section->flags & SEC_ALLOC) == 0)
631 return FALSE;
632
633 /* No stubs for link-once sections that will be discarded. */
634 if (input_section->output_section == NULL
635 || input_section->output_section->owner != output_bfd)
636 return FALSE;
637
638 /* Don't create stubs for .eh_frame references. */
639 if (strcmp (input_section->name, ".eh_frame") == 0)
640 return FALSE;
641
642 return TRUE;
643 }
644
/* Classification of a reloc's stub requirement.  */
enum _stub_type
{
  no_stub,	/* No stub needed for this reloc.  */
  ovl_stub,	/* Stub goes in the caller's overlay area.  */
  nonovl_stub,	/* Stub must be in the non-overlay area.  */
  stub_error	/* Couldn't read the insn; propagate failure.  */
};
652
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  H/SYM describe the
   reloc target (exactly one is non-NULL), SYM_SEC its section, IRELA the
   reloc, and CONTENTS the section contents if already read (may be NULL,
   in which case the four insn bytes are fetched on demand).  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch;
  enum _stub_type ret = no_stub;

  /* Undefined or discarded targets can't be reached via a stub.  */
  if (sym_sec == NULL
      || sym_sec->output_section == NULL
      || sym_sec->output_section->owner != info->output_bfd
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_load || h == htab->ovly_return)
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = ovl_stub;
    }

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->non_overlay_stubs)
    return ret;

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  /* Only these two reloc types appear on branch instructions.  */
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      bfd_byte insn[4];

      if (contents == NULL)
	{
	  /* Read just the four bytes of the candidate insn.  */
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      if (is_branch (contents) || is_hint (contents))
	{
	  branch = TRUE;
	  /* 0x31 masked with 0xfd matches brasl/brsl, i.e. calls.
	     "contents == insn" means we read the bytes ourselves, so
	     this warning fires at most once per reloc scan pass.  */
	  if ((contents[0] & 0xfd) == 0x31
	      && sym_type != STT_FUNC
	      && contents == insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      /* NOTE(review): the bfd arg precedes the string even
		 though %s comes first in the format -- presumably the
		 bfd error handler hoists %A/%B args; confirm against
		 _bfd_default_error_handler.  */
	      (*_bfd_error_handler) (_("warning: call to non-function"
				       " symbol %s defined in %B"),
				     sym_sec->owner, sym_name);

	    }
	}
    }

  /* Non-branch references to data need no stub.  */
  if (sym_type != STT_FUNC
      && !branch
      && (sym_sec->flags & SEC_CODE) == 0)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
      != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    return ovl_stub;

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  */
  return !branch && sym_type == STT_FUNC ? nonovl_stub : ret;
}
768
/* Account for one stub needed from section ISEC in IBFD to the symbol
   described by H (global) or IRELA's symbol index (local).  Stubs are
   tracked on got_entry lists hung off the symbol's got field; this
   bumps htab->stub_count for the owning overlay.  Returns FALSE on
   allocation failure.  */

static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      /* Lazily create the per-local-symbol got_entry array.  */
      if (elf_local_got_ents (ibfd) == NULL)
	{
	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
			       * sizeof (*elf_local_got_ents (ibfd)));
	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
	  if (elf_local_got_ents (ibfd) == NULL)
	    return FALSE;
	}
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      /* A non-overlay stub (ovl 0) serves calls from every overlay,
	 so any existing per-overlay stubs for this target become
	 redundant and are removed.  */
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && g->ovl == 0)
	  break;

      if (g == NULL)
	{
	  /* Need a new non-overlay area stub.  Zap other stubs.  */
	  for (g = *head; g != NULL; g = gnext)
	    {
	      gnext = g->next;
	      if (g->addend == addend)
		{
		  htab->stub_count[g->ovl] -= 1;
		  free (g);
		}
	    }
	}
    }
  else
    {
      /* A stub in overlay OVL, or an existing non-overlay stub,
	 already covers this call.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}
853
/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load  */

/* Emit the stub previously counted by count_stub for the same target,
   appending OVL_STUB_SIZE bytes to the stub section and recording the
   stub address in the matching got_entry.  DEST/DEST_SEC give the call
   target.  Optionally emits a <ovl>.ovl_call.<name> symbol for the
   stub.  Returns FALSE on error (also sets htab->stub_err on overflow
   or misalignment).  */

static bfd_boolean
build_stub (struct spu_link_hash_table *htab,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  unsigned int ovl;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, val, from, to;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  /* count_stub must have recorded this stub already.  */
  for (g = *head; g != NULL; g = g->next)
    if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
      break;
  if (g == NULL)
    abort ();

  /* A non-overlay stub serves this call; nothing to build here.  */
  if (g->ovl == 0 && ovl != 0)
    return TRUE;

  /* Already built on an earlier call.  */
  if (g->stub_addr != (bfd_vma) -1)
    return TRUE;

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_load->root.u.def.value
	+ htab->ovly_load->root.u.def.section->output_offset
	+ htab->ovly_load->root.u.def.section->output_section->vma);
  val = to - from;
  /* The branch to __ovly_load is the fourth insn of the big stub,
     12 bytes past the stub start.  */
  if (OVL_STUB_SIZE == 16)
    val -= 12;
  /* Everything must be word aligned and the branch in signed 18-bit
     (word offset) range.  */
  if (((dest | to | from) & 3) != 0
      || val + 0x20000 >= 0x40000)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  /* From here on OVL is the *destination* overlay index.  */
  ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (OVL_STUB_SIZE == 16)
    {
      bfd_put_32 (sec->owner, ILA + ((ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
		  sec->contents + sec->size + 12);
    }
  else if (OVL_STUB_SIZE == 8)
    {
      bfd_put_32 (sec->owner, BRSL + ((val << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size);

      /* Data word: overlay number in top 14 bits, LS address below.  */
      val = (dest & 0x3ffff) | (ovl << 14);
      bfd_put_32 (sec->owner, val,
		  sec->contents + sec->size + 4);
    }
  else
    abort ();
  sec->size += OVL_STUB_SIZE;

  if (htab->emit_stub_syms)
    {
      size_t len;
      char *name;
      int add;

      /* Name is "<8-hex-digit ovl>.ovl_call." + symbol name (or
	 "<sec id>:<sym index>" for locals) + optional "+<addend>".  */
      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len);
      if (name == NULL)
	return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->root.u.def.value = sec->size - OVL_STUB_SIZE;
	  h->size = OVL_STUB_SIZE;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
1002
1003 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1004 symbols. */
1005
1006 static bfd_boolean
1007 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1008 {
1009 /* Symbols starting with _SPUEAR_ need a stub because they may be
1010 invoked by the PPU. */
1011 if ((h->root.type == bfd_link_hash_defined
1012 || h->root.type == bfd_link_hash_defweak)
1013 && h->def_regular
1014 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
1015 {
1016 struct spu_link_hash_table *htab = inf;
1017
1018 count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1019 }
1020
1021 return TRUE;
1022 }
1023
1024 static bfd_boolean
1025 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1026 {
1027 /* Symbols starting with _SPUEAR_ need a stub because they may be
1028 invoked by the PPU. */
1029 if ((h->root.type == bfd_link_hash_defined
1030 || h->root.type == bfd_link_hash_defweak)
1031 && h->def_regular
1032 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
1033 {
1034 struct spu_link_hash_table *htab = inf;
1035
1036 build_stub (htab, NULL, NULL, nonovl_stub, h, NULL,
1037 h->root.u.def.value, h->root.u.def.section);
1038 }
1039
1040 return TRUE;
1041 }
1042
/* Size or build stubs.  Walks every reloc in every SPU input section;
   when BUILD is false, counts the needed stubs (count_stub), otherwise
   emits them (build_stub).  Returns FALSE on any error, freeing the
   relocs and local symbols read along the way.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;
      void *psyms;

      /* Only look at SPU objects.  */
      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Arrange to read and keep global syms for later stack analysis.
	 get_sym_h treats &symtab_hdr->contents specially: it reads the
	 whole symbol table and caches it there.  */
      psyms = &local_syms;
      if (htab->stack_analysis)
	psyms = &symtab_hdr->contents;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((isec->flags & SEC_RELOC) == 0
	      || isec->reloc_count == 0)
	    continue;

	  if (!maybe_needs_stubs (isec, info->output_bfd))
	    continue;

	  /* Get the relocs.  */
	  internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
						       info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + isec->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      enum _stub_type stub_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		  /* Shared cleanup labels: free relocs (unless cached),
		     then fall through to freeing local syms.  */
		error_ret_free_internal:
		  if (elf_section_data (isec)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (local_syms != NULL
		      && (symtab_hdr->contents
			  != (unsigned char *) local_syms))
		    free (local_syms);
		  return FALSE;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
		goto error_ret_free_internal;

	      stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
					  NULL, info);
	      if (stub_type == no_stub)
		continue;
	      else if (stub_type == stub_error)
		goto error_ret_free_internal;

	      /* Lazily allocate the per-overlay stub counters (index 0
		 is the non-overlay area).  */
	      if (htab->stub_count == NULL)
		{
		  bfd_size_type amt;
		  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
		  htab->stub_count = bfd_zmalloc (amt);
		  if (htab->stub_count == NULL)
		    goto error_ret_free_internal;
		}

	      if (!build)
		{
		  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
		    goto error_ret_free_internal;
		}
	      else
		{
		  bfd_vma dest;

		  if (h != NULL)
		    dest = h->root.u.def.value;
		  else
		    dest = sym->st_value;
		  dest += irela->r_addend;
		  if (!build_stub (htab, ibfd, isec, stub_type, h, irela,
				   dest, sym_sec))
		    goto error_ret_free_internal;
		}
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (isec)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      /* Free the local syms unless we should cache them in the symtab
	 header for reuse (stack analysis, or keep_memory).  */
      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  return TRUE;
}
1177
/* Allocate space for overlay call and return stubs.  Counts the
   required stubs (process_stubs sizing pass), then creates one .stub
   section for non-overlay code plus one per overlay, the .ovtab
   overlay tables section, and the .toe section.  PLACE_SPU_SECTION is
   a callback placing each linker-created section in the output.
   Returns 0 on error, 1 if no stubs are needed, 2 if stub sections
   were created.  */

int
spu_elf_size_stubs (struct bfd_link_info *info,
		    void (*place_spu_section) (asection *, asection *,
					       const char *),
		    int non_overlay_stubs)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  htab->non_overlay_stubs = non_overlay_stubs;
  /* Sizing pass: fills in htab->stub_count.  */
  if (!process_stubs (info, FALSE))
    return 0;

  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, htab);
  if (htab->stub_err)
    return 0;

  /* No stubs were counted at all.  */
  if (htab->stub_count == NULL)
    return 1;

  ibfd = info->input_bfds;
  /* One stub section for non-overlay code plus one per overlay.  */
  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
  htab->stub_sec = bfd_zmalloc (amt);
  if (htab->stub_sec == NULL)
    return 0;

  flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	   | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
  htab->stub_sec[0] = stub;
  if (stub == NULL
      || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
    return 0;
  stub->size = htab->stub_count[0] * OVL_STUB_SIZE;
  /* Stubs for calls from non-overlay code are placed near .text.  */
  (*place_spu_section) (stub, NULL, ".text");

  for (i = 0; i < htab->num_overlays; ++i)
    {
      asection *osec = htab->ovl_sec[i];
      unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[ovl] = stub;
      if (stub == NULL
	  || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
	return 0;
      stub->size = htab->stub_count[ovl] * OVL_STUB_SIZE;
      /* Stubs for calls from an overlay are placed with that overlay.  */
      (*place_spu_section) (stub, osec, NULL);
    }

  /* htab->ovtab consists of two arrays.
     .	struct {
     .	  u32 vma;
     .	  u32 size;
     .	  u32 file_off;
     .	  u32 buf;
     .	} _ovly_table[];
     .
     .	struct {
     .	  u32 mapped;
     .	} _ovly_buf_table[];
     .	*/

  flags = (SEC_ALLOC | SEC_LOAD
	   | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
  if (htab->ovtab == NULL
      || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
    return 0;

  /* 16 bytes per overlay entry, one 16-byte header slot, and one
     "mapped" word per overlay buffer.  */
  htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  (*place_spu_section) (htab->ovtab, NULL, ".data");

  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;
  (*place_spu_section) (htab->toe, NULL, ".toe");

  return 2;
}
1265
1266 /* Functions to handle embedded spu_ovl.o object. */
1267
1268 static void *
1269 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1270 {
1271 return stream;
1272 }
1273
1274 static file_ptr
1275 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1276 void *stream,
1277 void *buf,
1278 file_ptr nbytes,
1279 file_ptr offset)
1280 {
1281 struct _ovl_stream *os;
1282 size_t count;
1283 size_t max;
1284
1285 os = (struct _ovl_stream *) stream;
1286 max = (const char *) os->end - (const char *) os->start;
1287
1288 if ((ufile_ptr) offset >= max)
1289 return 0;
1290
1291 count = nbytes;
1292 if (count > max - offset)
1293 count = max - offset;
1294
1295 memcpy (buf, (const char *) os->start + offset, count);
1296 return count;
1297 }
1298
1299 bfd_boolean
1300 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1301 {
1302 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1303 "elf32-spu",
1304 ovl_mgr_open,
1305 (void *) stream,
1306 ovl_mgr_pread,
1307 NULL,
1308 NULL);
1309 return *ovl_bfd != NULL;
1310 }
1311
1312 /* Define an STT_OBJECT symbol. */
1313
1314 static struct elf_link_hash_entry *
1315 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1316 {
1317 struct elf_link_hash_entry *h;
1318
1319 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1320 if (h == NULL)
1321 return NULL;
1322
1323 if (h->root.type != bfd_link_hash_defined
1324 || !h->def_regular)
1325 {
1326 h->root.type = bfd_link_hash_defined;
1327 h->root.u.def.section = htab->ovtab;
1328 h->type = STT_OBJECT;
1329 h->ref_regular = 1;
1330 h->def_regular = 1;
1331 h->ref_regular_nonweak = 1;
1332 h->non_elf = 0;
1333 }
1334 else
1335 {
1336 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1337 h->root.u.def.section->owner,
1338 h->root.root.string);
1339 bfd_set_error (bfd_error_bad_value);
1340 return NULL;
1341 }
1342
1343 return h;
1344 }
1345
1346 /* Fill in all stubs and the overlay tables. */
1347
1348 bfd_boolean
1349 spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms)
1350 {
1351 struct spu_link_hash_table *htab = spu_hash_table (info);
1352 struct elf_link_hash_entry *h;
1353 bfd_byte *p;
1354 asection *s;
1355 bfd *obfd;
1356 unsigned int i;
1357
1358 htab->emit_stub_syms = emit_syms;
1359 if (htab->stub_count == NULL)
1360 return TRUE;
1361
1362 for (i = 0; i <= htab->num_overlays; i++)
1363 if (htab->stub_sec[i]->size != 0)
1364 {
1365 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1366 htab->stub_sec[i]->size);
1367 if (htab->stub_sec[i]->contents == NULL)
1368 return FALSE;
1369 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1370 htab->stub_sec[i]->size = 0;
1371 }
1372
1373 h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
1374 htab->ovly_load = h;
1375 BFD_ASSERT (h != NULL
1376 && (h->root.type == bfd_link_hash_defined
1377 || h->root.type == bfd_link_hash_defweak)
1378 && h->def_regular);
1379
1380 s = h->root.u.def.section->output_section;
1381 if (spu_elf_section_data (s)->u.o.ovl_index)
1382 {
1383 (*_bfd_error_handler) (_("%s in overlay section"),
1384 h->root.u.def.section->owner);
1385 bfd_set_error (bfd_error_bad_value);
1386 return FALSE;
1387 }
1388
1389 h = elf_link_hash_lookup (&htab->elf, "__ovly_return", FALSE, FALSE, FALSE);
1390 htab->ovly_return = h;
1391
1392 /* Fill in all the stubs. */
1393 process_stubs (info, TRUE);
1394
1395 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, htab);
1396 if (htab->stub_err)
1397 return FALSE;
1398
1399 for (i = 0; i <= htab->num_overlays; i++)
1400 {
1401 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1402 {
1403 (*_bfd_error_handler) (_("stubs don't match calculated size"));
1404 bfd_set_error (bfd_error_bad_value);
1405 return FALSE;
1406 }
1407 htab->stub_sec[i]->rawsize = 0;
1408 }
1409
1410 if (htab->stub_err)
1411 {
1412 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1413 bfd_set_error (bfd_error_bad_value);
1414 return FALSE;
1415 }
1416
1417 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1418 if (htab->ovtab->contents == NULL)
1419 return FALSE;
1420
1421 /* Write out _ovly_table. */
1422 p = htab->ovtab->contents;
1423 /* set low bit of .size to mark non-overlay area as present. */
1424 p[7] = 1;
1425 obfd = htab->ovtab->output_section->owner;
1426 for (s = obfd->sections; s != NULL; s = s->next)
1427 {
1428 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
1429
1430 if (ovl_index != 0)
1431 {
1432 unsigned long off = ovl_index * 16;
1433 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
1434
1435 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1436 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1437 /* file_off written later in spu_elf_modify_program_headers. */
1438 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
1439 }
1440 }
1441
1442 h = define_ovtab_symbol (htab, "_ovly_table");
1443 if (h == NULL)
1444 return FALSE;
1445 h->root.u.def.value = 16;
1446 h->size = htab->num_overlays * 16;
1447
1448 h = define_ovtab_symbol (htab, "_ovly_table_end");
1449 if (h == NULL)
1450 return FALSE;
1451 h->root.u.def.value = htab->num_overlays * 16 + 16;
1452 h->size = 0;
1453
1454 h = define_ovtab_symbol (htab, "_ovly_buf_table");
1455 if (h == NULL)
1456 return FALSE;
1457 h->root.u.def.value = htab->num_overlays * 16 + 16;
1458 h->size = htab->num_buf * 4;
1459
1460 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1461 if (h == NULL)
1462 return FALSE;
1463 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1464 h->size = 0;
1465
1466 h = define_ovtab_symbol (htab, "_EAR_");
1467 if (h == NULL)
1468 return FALSE;
1469 h->root.u.def.section = htab->toe;
1470 h->root.u.def.value = 0;
1471 h->size = 16;
1472
1473 return TRUE;
1474 }
1475
1476 /* Check that all loadable section VMAs lie in the range
1477 LO .. HI inclusive. */
1478
1479 asection *
1480 spu_elf_check_vma (struct bfd_link_info *info, bfd_vma lo, bfd_vma hi)
1481 {
1482 struct elf_segment_map *m;
1483 unsigned int i;
1484 bfd *abfd = info->output_bfd;
1485
1486 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
1487 if (m->p_type == PT_LOAD)
1488 for (i = 0; i < m->count; i++)
1489 if (m->sections[i]->size != 0
1490 && (m->sections[i]->vma < lo
1491 || m->sections[i]->vma > hi
1492 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
1493 return m->sections[i];
1494
1495 return NULL;
1496 }
1497
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta (zero or
   negative for a frame allocation; 0 when none is found).  Gives up
   at the first branch or after 32 unrecognised instructions.  */

static int
find_function_stack_adjust (asection *sec, bfd_vma offset)
{
  int unrecog;
  /* Tracked register values; reg[1] is the stack pointer.  */
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* Stores don't affect the stack pointer.  */
      if (buf[0] == 0x24 /* stqd */)
	continue;

      /* Extract target and first source register fields.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  /* Sign-extend the 10-bit immediate.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive sp adjustment deallocates the frame, so we
		 have run past the prologue.  */
	      if (imm > 0)
		break;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  /* Register add; handles frames too big for an immediate.  */
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    return reg[rt];
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  /* Immediate load; track the constant for a later add.  */
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    goto unknown_insn;
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  /* OR halfword low: completes an ilhu/iohl pair.  */
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	       || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
	{
	  /* Used in pic reg load.  Say rt is trashed.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    unknown_insn:
      ++unrecog;
    }

  return 0;
}
1595
1596 /* qsort predicate to sort symbols by section and value. */
1597
/* Shared state for the sort_syms comparator (qsort passes no user
   data): SORT_SYMS_SYMS is the base of the symbol array being
   sorted, SORT_SYMS_PSECS the parallel array giving each symbol's
   section.  */
static Elf_Internal_Sym *sort_syms_syms;
static asection **sort_syms_psecs;
1600
1601 static int
1602 sort_syms (const void *a, const void *b)
1603 {
1604 Elf_Internal_Sym *const *s1 = a;
1605 Elf_Internal_Sym *const *s2 = b;
1606 asection *sec1,*sec2;
1607 bfd_signed_vma delta;
1608
1609 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1610 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1611
1612 if (sec1 != sec2)
1613 return sec1->index - sec2->index;
1614
1615 delta = (*s1)->st_value - (*s2)->st_value;
1616 if (delta != 0)
1617 return delta < 0 ? -1 : 1;
1618
1619 delta = (*s2)->st_size - (*s1)->st_size;
1620 if (delta != 0)
1621 return delta < 0 ? -1 : 1;
1622
1623 return *s1 < *s2 ? -1 : 1;
1624 }
1625
/* One edge in the call graph: a call or branch to FUN, kept on the
   calling function's call_list.  */
struct call_info
{
  /* The function called or branched to.  */
  struct function_info *fun;
  /* Next entry on the caller's call list.  */
  struct call_info *next;
  /* Non-zero for a tail call (plain branch rather than brsl/brasl).  */
  unsigned int is_tail : 1;
};
1632
/* Describes one contiguous address range belonging to a function,
   used by the stack analysis code.  */
struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function: u.h when GLOBAL is set, else
     u.sym.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Stack usage, a non-negative byte count in the usual case.  */
  int stack;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section.  */
  unsigned int is_func : 1;
  /* Flags used during call tree traversal.  */
  unsigned int visit1 : 1;
  unsigned int non_root : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
};
1663
/* Per-section record of the functions found in that section, hung
   off the section's SPU-specific data.  */
struct spu_elf_stack_info
{
  /* Number of fun entries in use.  */
  int num_fun;
  /* Allocated capacity of the fun array.  */
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
1672
1673 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1674 entries for section SEC. */
1675
1676 static struct spu_elf_stack_info *
1677 alloc_stack_info (asection *sec, int max_fun)
1678 {
1679 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1680 bfd_size_type amt;
1681
1682 amt = sizeof (struct spu_elf_stack_info);
1683 amt += (max_fun - 1) * sizeof (struct function_info);
1684 sec_data->u.i.stack_info = bfd_zmalloc (amt);
1685 if (sec_data->u.i.stack_info != NULL)
1686 sec_data->u.i.stack_info->max_fun = max_fun;
1687 return sec_data->u.i.stack_info;
1688 }
1689
1690 /* Add a new struct function_info describing a (part of a) function
1691 starting at SYM_H. Keep the array sorted by address. */
1692
1693 static struct function_info *
1694 maybe_insert_function (asection *sec,
1695 void *sym_h,
1696 bfd_boolean global,
1697 bfd_boolean is_func)
1698 {
1699 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1700 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1701 int i;
1702 bfd_vma off, size;
1703
1704 if (sinfo == NULL)
1705 {
1706 sinfo = alloc_stack_info (sec, 20);
1707 if (sinfo == NULL)
1708 return NULL;
1709 }
1710
1711 if (!global)
1712 {
1713 Elf_Internal_Sym *sym = sym_h;
1714 off = sym->st_value;
1715 size = sym->st_size;
1716 }
1717 else
1718 {
1719 struct elf_link_hash_entry *h = sym_h;
1720 off = h->root.u.def.value;
1721 size = h->size;
1722 }
1723
1724 for (i = sinfo->num_fun; --i >= 0; )
1725 if (sinfo->fun[i].lo <= off)
1726 break;
1727
1728 if (i >= 0)
1729 {
1730 /* Don't add another entry for an alias, but do update some
1731 info. */
1732 if (sinfo->fun[i].lo == off)
1733 {
1734 /* Prefer globals over local syms. */
1735 if (global && !sinfo->fun[i].global)
1736 {
1737 sinfo->fun[i].global = TRUE;
1738 sinfo->fun[i].u.h = sym_h;
1739 }
1740 if (is_func)
1741 sinfo->fun[i].is_func = TRUE;
1742 return &sinfo->fun[i];
1743 }
1744 /* Ignore a zero-size symbol inside an existing function. */
1745 else if (sinfo->fun[i].hi > off && size == 0)
1746 return &sinfo->fun[i];
1747 }
1748
1749 if (++i < sinfo->num_fun)
1750 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
1751 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
1752 else if (i >= sinfo->max_fun)
1753 {
1754 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
1755 bfd_size_type old = amt;
1756
1757 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
1758 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
1759 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
1760 sinfo = bfd_realloc (sinfo, amt);
1761 if (sinfo == NULL)
1762 return NULL;
1763 memset ((char *) sinfo + old, 0, amt - old);
1764 sec_data->u.i.stack_info = sinfo;
1765 }
1766 sinfo->fun[i].is_func = is_func;
1767 sinfo->fun[i].global = global;
1768 sinfo->fun[i].sec = sec;
1769 if (global)
1770 sinfo->fun[i].u.h = sym_h;
1771 else
1772 sinfo->fun[i].u.sym = sym_h;
1773 sinfo->fun[i].lo = off;
1774 sinfo->fun[i].hi = off + size;
1775 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
1776 sinfo->num_fun += 1;
1777 return &sinfo->fun[i];
1778 }
1779
/* Return the name of FUN.  For the hot/cold part of a function, the
   owning function's name is returned.  A nameless local symbol gets
   a malloc'd "section+offset" string; NOTE(review): that buffer is
   never freed — it appears intended to live for the duration of the
   link.  */

static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  /* Chase up to the function this fragment belongs to.  */
  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      /* len + '+' + at most 8 hex digits + NUL fits in len + 10.  */
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
	return "(null)";
      sprintf (name, "%s+%lx", sec->name,
	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
1810
1811 /* Read the instruction at OFF in SEC. Return true iff the instruction
1812 is a nop, lnop, or stop 0 (all zero insn). */
1813
1814 static bfd_boolean
1815 is_nop (asection *sec, bfd_vma off)
1816 {
1817 unsigned char insn[4];
1818
1819 if (off + 4 > sec->size
1820 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1821 return FALSE;
1822 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1823 return TRUE;
1824 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1825 return TRUE;
1826 return FALSE;
1827 }
1828
1829 /* Extend the range of FUN to cover nop padding up to LIMIT.
1830 Return TRUE iff some instruction other than a NOP was found. */
1831
1832 static bfd_boolean
1833 insns_at_end (struct function_info *fun, bfd_vma limit)
1834 {
1835 bfd_vma off = (fun->hi + 3) & -4;
1836
1837 while (off < limit && is_nop (fun->sec, off))
1838 off += 4;
1839 if (off < limit)
1840 {
1841 fun->hi = off;
1842 return TRUE;
1843 }
1844 fun->hi = limit;
1845 return FALSE;
1846 }
1847
/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC, ie.
   address ranges covered by no known function.  */

static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  /* No function info at all means nothing to check (the caller
     treats sections of interest with no info separately).  */
  if (sinfo == NULL)
    return FALSE;

  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  /* Also look at the stretches before the first function and after
     the last one.  */
  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      if (sinfo->fun[0].lo != 0)
	gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = TRUE;
    }
  return gaps;
}
1893
1894 /* Search current function info for a function that contains address
1895 OFFSET in section SEC. */
1896
1897 static struct function_info *
1898 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
1899 {
1900 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1901 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1902 int lo, hi, mid;
1903
1904 lo = 0;
1905 hi = sinfo->num_fun;
1906 while (lo < hi)
1907 {
1908 mid = (lo + hi) / 2;
1909 if (offset < sinfo->fun[mid].lo)
1910 hi = mid;
1911 else if (offset >= sinfo->fun[mid].hi)
1912 lo = mid + 1;
1913 else
1914 return &sinfo->fun[mid];
1915 }
1916 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
1917 sec, offset);
1918 return NULL;
1919 }
1920
1921 /* Add CALLEE to CALLER call list if not already present. */
1922
1923 static bfd_boolean
1924 insert_callee (struct function_info *caller, struct call_info *callee)
1925 {
1926 struct call_info **pp, *p;
1927
1928 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
1929 if (p->fun == callee->fun)
1930 {
1931 /* Tail calls use less stack than normal calls. Retain entry
1932 for normal call over one for tail call. */
1933 p->is_tail &= callee->is_tail;
1934 if (!p->is_tail)
1935 {
1936 p->fun->start = NULL;
1937 p->fun->is_func = TRUE;
1938 }
1939 /* Reorder list so most recent call is first. */
1940 *pp = p->next;
1941 p->next = caller->call_list;
1942 caller->call_list = p;
1943 return FALSE;
1944 }
1945 callee->next = caller->call_list;
1946 caller->call_list = callee;
1947 return TRUE;
1948 }
1949
1950 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
1951 overlay stub sections. */
1952
1953 static bfd_boolean
1954 interesting_section (asection *s, bfd *obfd)
1955 {
1956 return (s->output_section != NULL
1957 && s->output_section->owner == obfd
1958 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
1959 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
1960 && s->size != 0);
1961 }
1962
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  */

static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  Elf_Internal_Sym *syms;
  void *psyms;
  /* Function-static: the non-code-section warning is set during the
     marking pass and then suppresses the same warning on the
     call-tree pass.  */
  static bfd_boolean warned;

  if (!interesting_section (sec, info->output_bfd)
      || sec->reloc_count == 0)
    return TRUE;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  /* Symbols were read and cached in symtab_hdr->contents earlier;
     this re-fetch yields the same header as the initializer above.  */
  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  syms = *(Elf_Internal_Sym **) psyms;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      unsigned char insn[4];
      bfd_boolean is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* Only the reloc types used on branch targets are of
	 interest.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if (r_type != R_SPU_REL16
	  && r_type != R_SPU_ADDR16)
	continue;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      if (sym_sec == NULL
	  || sym_sec->output_section == NULL
	  || sym_sec->output_section->owner != info->output_bfd)
	continue;

      /* Read the insn the reloc applies to; skip relocs that are
	 not on branch instructions.  */
      if (!bfd_get_section_contents (sec->owner, sec, insn,
				     irela->r_offset, 4))
	return FALSE;
      if (!is_branch (insn))
	continue;

      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	{
	  if (!call_tree)
	    warned = TRUE;
	  if (!call_tree || !warned)
	    info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
				      " %B(%A), stack analysis incomplete\n"),
				    sec->owner, sec, irela->r_offset,
				    sym_sec->owner, sym_sec);
	  continue;
	}

      /* brsl/brasl set the link register; other branches are tail
	 calls or intra-function jumps.  */
      is_call = (insn[0] & 0xfd) == 0x31;

      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  struct function_info *fun;

	  /* A non-zero addend targets the middle of a symbol's range;
	     fabricate a symbol at the real destination.  */
	  if (irela->r_addend != 0)
	    {
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake symbol unless maybe_insert_function kept
	     it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Call-tree pass: record the edge caller -> callee.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else if (callee->fun->start == NULL)
	    callee->fun->start = caller;
	  else
	    {
	      /* Both ends already belong to larger functions; if
		 their roots differ the callee is really a separate
		 function.  */
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = TRUE;
		}
	    }
	}
    }

  return TRUE;
}
2124
/* Handle something like .init or .fini, which has a piece of a function.
   These sections are pasted together to form a single function.
   Records the whole of SEC as a function fragment and links it to
   the function fragment laid out immediately before it in the output
   section.  Returns FALSE on error or if SEC has no link_order.  */

static bfd_boolean
pasted_function (asection *sec, struct bfd_link_info *info)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Create a dummy symbol covering the whole section and enter it
     as a (non-function-start) fragment.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    fun->start = fun_start;
	  return TRUE;
	}
      /* Remember the last function of each input section laid out
	 before SEC.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  info->callbacks->einfo (_("%A link_order not found\n"), sec);
  return FALSE;
}
2168
2169 /* Map address ranges in code sections to functions. */
2170
static bfd_boolean
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  /* Per-input-bfd array of sorted function symbol pointer vectors.  */
  Elf_Internal_Sym ***psym_arr;
  /* Per-input-bfd array mapping symbol index to its section.  */
  asection ***sec_arr;
  bfd_boolean gaps = FALSE;

  /* Count the input bfds so we can size the per-bfd arrays.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    bfd_idx++;

  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return FALSE;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return FALSE;


  /* Pass 1: collect, sort and install properly typed function syms.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	{
	  /* No symbols at all: any interesting code section here is a
	     coverage gap that pass 2 must fill in via relocs.  */
	  if (!gaps)
	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	      if (interesting_section (sec, info->output_bfd))
		{
		  gaps = TRUE;
		  break;
		}
	  continue;
	}

      /* Cache the symbols on symtab_hdr->contents so later passes (and
	 other parts of the linker) reuse them rather than re-reading.  */
      syms = (Elf_Internal_Sym *) symtab_hdr->contents;
      if (syms == NULL)
	{
	  syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				       NULL, NULL, NULL);
	  symtab_hdr->contents = (void *) syms;
	  if (syms == NULL)
	    return FALSE;
	}

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return FALSE;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return FALSE;
      sec_arr[bfd_idx] = psecs;
      /* STT_NOTYPE syms are kept too; they may turn out to be
	 improperly typed global functions (see pass 2).  */
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s, info->output_bfd))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      /* NULL terminator lets pass 2 walk the vector without a count.  */
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  Reserve stack_info room for
	 each run of symbols belonging to the same section.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return FALSE;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
		return FALSE;
	    }
	}

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec, info->output_bfd))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (!mark_functions_via_relocs (sec, info, FALSE))
	      return FALSE;
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  /* Re-check ranges; reloc marking above may have filled some
	     gaps for this bfd.  */
	  gaps = FALSE;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec, info->output_bfd))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
		    return FALSE;
		}
	    }
	}

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
	{
	  extern const bfd_target bfd_elf32_spu_vec;
	  asection *sec;

	  if (ibfd->xvec != &bfd_elf32_spu_vec)
	    continue;

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec, info->output_bfd))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->u.i.stack_info;
		if (sinfo != NULL)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    /* Walk backwards so each function's hi bound is the
		       following function's lo bound.  */
		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec, info))
		  return FALSE;
	      }
	}
    }

  /* Release the per-bfd scratch vectors.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return TRUE;
}
2407
2408 /* Iterate over all function_info we have collected, calling DOIT on
2409 each node if ROOT_ONLY is false. Only call DOIT on root nodes
2410 if ROOT_ONLY. */
2411
2412 static bfd_boolean
2413 for_each_node (bfd_boolean (*doit) (struct function_info *,
2414 struct bfd_link_info *,
2415 void *),
2416 struct bfd_link_info *info,
2417 void *param,
2418 int root_only)
2419 {
2420 bfd *ibfd;
2421
2422 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2423 {
2424 extern const bfd_target bfd_elf32_spu_vec;
2425 asection *sec;
2426
2427 if (ibfd->xvec != &bfd_elf32_spu_vec)
2428 continue;
2429
2430 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2431 {
2432 struct _spu_elf_section_data *sec_data;
2433 struct spu_elf_stack_info *sinfo;
2434
2435 if ((sec_data = spu_elf_section_data (sec)) != NULL
2436 && (sinfo = sec_data->u.i.stack_info) != NULL)
2437 {
2438 int i;
2439 for (i = 0; i < sinfo->num_fun; ++i)
2440 if (!root_only || !sinfo->fun[i].non_root)
2441 if (!doit (&sinfo->fun[i], info, param))
2442 return FALSE;
2443 }
2444 }
2445 }
2446 return TRUE;
2447 }
2448
2449 /* Transfer call info attached to struct function_info entries for
2450 all of a given function's sections to the first entry. */
2451
2452 static bfd_boolean
2453 transfer_calls (struct function_info *fun,
2454 struct bfd_link_info *info ATTRIBUTE_UNUSED,
2455 void *param ATTRIBUTE_UNUSED)
2456 {
2457 struct function_info *start = fun->start;
2458
2459 if (start != NULL)
2460 {
2461 struct call_info *call, *call_next;
2462
2463 while (start->start != NULL)
2464 start = start->start;
2465 for (call = fun->call_list; call != NULL; call = call_next)
2466 {
2467 call_next = call->next;
2468 if (!insert_callee (start, call))
2469 free (call);
2470 }
2471 fun->call_list = NULL;
2472 }
2473 return TRUE;
2474 }
2475
2476 /* Mark nodes in the call graph that are called by some other node. */
2477
2478 static bfd_boolean
2479 mark_non_root (struct function_info *fun,
2480 struct bfd_link_info *info ATTRIBUTE_UNUSED,
2481 void *param ATTRIBUTE_UNUSED)
2482 {
2483 struct call_info *call;
2484
2485 if (fun->visit1)
2486 return TRUE;
2487 fun->visit1 = TRUE;
2488 for (call = fun->call_list; call; call = call->next)
2489 {
2490 call->fun->non_root = TRUE;
2491 mark_non_root (call->fun, 0, 0);
2492 }
2493 return TRUE;
2494 }
2495
2496 /* Remove cycles from the call graph. */
2497
2498 static bfd_boolean
2499 remove_cycles (struct function_info *fun,
2500 struct bfd_link_info *info,
2501 void *param ATTRIBUTE_UNUSED)
2502 {
2503 struct call_info **callp, *call;
2504
2505 fun->visit2 = TRUE;
2506 fun->marking = TRUE;
2507
2508 callp = &fun->call_list;
2509 while ((call = *callp) != NULL)
2510 {
2511 if (!call->fun->visit2)
2512 {
2513 if (!remove_cycles (call->fun, info, 0))
2514 return FALSE;
2515 }
2516 else if (call->fun->marking)
2517 {
2518 const char *f1 = func_name (fun);
2519 const char *f2 = func_name (call->fun);
2520
2521 info->callbacks->info (_("Stack analysis will ignore the call "
2522 "from %s to %s\n"),
2523 f1, f2);
2524 *callp = call->next;
2525 free (call);
2526 continue;
2527 }
2528 callp = &call->next;
2529 }
2530 fun->marking = FALSE;
2531 return TRUE;
2532 }
2533
2534 /* Populate call_list for each function. */
2535
2536 static bfd_boolean
2537 build_call_tree (struct bfd_link_info *info)
2538 {
2539 bfd *ibfd;
2540
2541 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2542 {
2543 extern const bfd_target bfd_elf32_spu_vec;
2544 asection *sec;
2545
2546 if (ibfd->xvec != &bfd_elf32_spu_vec)
2547 continue;
2548
2549 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2550 if (!mark_functions_via_relocs (sec, info, TRUE))
2551 return FALSE;
2552 }
2553
2554 /* Transfer call info from hot/cold section part of function
2555 to main entry. */
2556 if (!for_each_node (transfer_calls, info, 0, FALSE))
2557 return FALSE;
2558
2559 /* Find the call graph root(s). */
2560 if (!for_each_node (mark_non_root, info, 0, FALSE))
2561 return FALSE;
2562
2563 /* Remove cycles from the call graph. We start from the root node(s)
2564 so that we break cycles in a reasonable place. */
2565 return for_each_node (remove_cycles, info, 0, TRUE);
2566 }
2567
/* State threaded through sum_stack via for_each_node's PARAM.  */
struct _sum_stack_param {
  /* Cumulative stack of the node just processed (out-param used to
     pass results back up the recursion).  */
  size_t cum_stack;
  /* Maximum cumulative stack seen over all call-graph root nodes.  */
  size_t overall_stack;
  /* Whether to define __stack_* symbols recording per-function usage.  */
  bfd_boolean emit_stack_syms;
};
2573
2574 /* Descend the call graph for FUN, accumulating total stack required. */
2575
/* Descend the call graph for FUN, accumulating total stack required.
   PARAM is a struct _sum_stack_param; on return its cum_stack holds
   FUN's cumulative stack need.  Also reports per-function figures via
   the linker callbacks, and optionally emits __stack_* symbols.  */

static bfd_boolean
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  struct _sum_stack_param *sum_stack_param = param;

  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  /* visit3: node already summed; fun->stack already holds its
     cumulative figure (see below), which we just returned.  */
  if (fun->visit3)
    return TRUE;

  /* MAX tracks the callee on the deepest (stack-wise) path.  */
  max = NULL;
  for (call = fun->call_list; call; call = call->next)
    {
      if (!sum_stack (call->fun, info, sum_stack_param))
	return FALSE;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail)
	stack += fun->stack;
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = TRUE;

  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  /* Report: terse (info) for roots only, verbose (minfo) for all.  */
  f1 = func_name (fun);
  if (!fun->non_root)
    info->callbacks->info (_("  %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
			  f1, (bfd_vma) stack, (bfd_vma) cum_stack);

  if (fun->call_list)
    {
      info->callbacks->minfo (_("  calls:\n"));
      for (call = fun->call_list; call; call = call->next)
	{
	  const char *f2 = func_name (call->fun);
	  /* '*' marks the max-stack callee, 't' marks tail calls.  */
	  const char *ann1 = call->fun == max ? "*" : " ";
	  const char *ann2 = call->is_tail ? "t" : " ";

	  info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
	}
    }

  if (sum_stack_param->emit_stack_syms)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      /* 18 covers "__stack_" + 8 hex digits + '_' + NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return FALSE;

      /* Local syms get the section id in the name to disambiguate.  */
      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      /* Only define the symbol if the user hasn't already.  */
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
2675
2676 /* Provide an estimate of total stack required. */
2677
2678 static bfd_boolean
2679 spu_elf_stack_analysis (struct bfd_link_info *info, int emit_stack_syms)
2680 {
2681 struct _sum_stack_param sum_stack_param;
2682
2683 if (!discover_functions (info))
2684 return FALSE;
2685
2686 if (!build_call_tree (info))
2687 return FALSE;
2688
2689 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
2690 info->callbacks->minfo (_("\nStack size for functions. "
2691 "Annotations: '*' max stack, 't' tail call\n"));
2692
2693 sum_stack_param.emit_stack_syms = emit_stack_syms;
2694 sum_stack_param.overall_stack = 0;
2695 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
2696 return FALSE;
2697
2698 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
2699 (bfd_vma) sum_stack_param.overall_stack);
2700 return TRUE;
2701 }
2702
2703 /* Perform a final link. */
2704
2705 static bfd_boolean
2706 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
2707 {
2708 struct spu_link_hash_table *htab = spu_hash_table (info);
2709
2710 if (htab->stack_analysis
2711 && !spu_elf_stack_analysis (info, htab->emit_stack_syms))
2712 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
2713
2714 return bfd_elf_final_link (output_bfd, info);
2715 }
2716
2717 /* Called when not normally emitting relocs, ie. !info->relocatable
2718 and !info->emitrelocations. Returns a count of special relocs
2719 that need to be emitted. */
2720
2721 static unsigned int
2722 spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
2723 {
2724 unsigned int count = 0;
2725 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
2726
2727 for (; relocs < relend; relocs++)
2728 {
2729 int r_type = ELF32_R_TYPE (relocs->r_info);
2730 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2731 ++count;
2732 }
2733
2734 return count;
2735 }
2736
2737 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
2738
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.
   Returns TRUE/FALSE for success/failure, or 2 to tell the generic
   linker that some R_SPU_PPU* relocs were kept for output.  */

static int
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean stubs;

  htab = spu_hash_table (info);
  /* Only look for overlay stubs if any were built and this section
     could plausibly need them.  */
  stubs = (htab->stub_sec != NULL
	   && maybe_needs_stubs (input_section, output_bfd));
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned long r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  /* PPU-side relocs are not applied here; they are emitted
	     into the output instead (see the fixup loop below).  */
	  emit_these_relocs = TRUE;
	  continue;
	}

      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      warned = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      /* Resolve the symbol: local syms by index, globals through the
	 hash table (which also handles undefined sym reporting).  */
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned);
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && elf_discarded_section (sec))
	{
	  /* For relocs against symbols from removed linkonce sections,
	     or sections discarded by a linker script, we just want the
	     section contents zeroed.  Avoid any special processing.  */
	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
	  rel->r_info = 0;
	  rel->r_addend = 0;
	  continue;
	}

      if (info->relocatable)
	continue;

      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs)
	{
	  enum _stub_type stub_type;

	  stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
				      contents, info);
	  if (stub_type != no_stub)
	    {
	      unsigned int ovl = 0;
	      struct got_entry *g, **head;

	      /* ovl 0 means a stub usable from any overlay.  */
	      if (stub_type != nonovl_stub)
		ovl = (spu_elf_section_data (input_section->output_section)
		       ->u.o.ovl_index);

	      if (h != NULL)
		head = &h->got.glist;
	      else
		head = elf_local_got_ents (input_bfd) + r_symndx;

	      /* Find the stub built earlier for this sym/addend/ovl;
		 it must exist, hence the abort.  */
	      for (g = *head; g != NULL; g = g->next)
		if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
		  break;
	      if (g == NULL)
		abort ();

	      /* Redirect the reloc at the stub; the addend was already
		 folded into the stub, so drop it here.  */
	      relocation = g->stub_addr;
	      addend = 0;
	    }
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = FALSE;
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->relocatable
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      /* Compact the reloc array down to just the PPU relocs that must
	 be written to the output.  */
      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      /* 2 tells the generic linker we kept relocs for output.  */
      ret = 2;
    }

  return ret;
}
2950
2951 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
2952
2953 static bfd_boolean
2954 spu_elf_output_symbol_hook (struct bfd_link_info *info,
2955 const char *sym_name ATTRIBUTE_UNUSED,
2956 Elf_Internal_Sym *sym,
2957 asection *sym_sec ATTRIBUTE_UNUSED,
2958 struct elf_link_hash_entry *h)
2959 {
2960 struct spu_link_hash_table *htab = spu_hash_table (info);
2961
2962 if (!info->relocatable
2963 && htab->stub_sec != NULL
2964 && h != NULL
2965 && (h->root.type == bfd_link_hash_defined
2966 || h->root.type == bfd_link_hash_defweak)
2967 && h->def_regular
2968 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
2969 {
2970 struct got_entry *g;
2971
2972 for (g = h->got.glist; g != NULL; g = g->next)
2973 if (g->addend == 0 && g->ovl == 0)
2974 {
2975 sym->st_shndx = (_bfd_elf_section_from_bfd_section
2976 (htab->stub_sec[0]->output_section->owner,
2977 htab->stub_sec[0]->output_section));
2978 sym->st_value = g->stub_addr;
2979 break;
2980 }
2981 }
2982
2983 return TRUE;
2984 }
2985
/* Nonzero when the output is being linked as a "plugin"; checked in
   spu_elf_post_process_headers below.  */
static int spu_plugin = 0;

/* Record whether we are linking a plugin (VAL nonzero) or not.  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
2993
2994 /* Set ELF header e_type for plugins. */
2995
2996 static void
2997 spu_elf_post_process_headers (bfd *abfd,
2998 struct bfd_link_info *info ATTRIBUTE_UNUSED)
2999 {
3000 if (spu_plugin)
3001 {
3002 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
3003
3004 i_ehdrp->e_type = ET_DYN;
3005 }
3006 }
3007
3008 /* We may add an extra PT_LOAD segment for .toe. We also need extra
3009 segments for overlays. */
3010
3011 static int
3012 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
3013 {
3014 struct spu_link_hash_table *htab = spu_hash_table (info);
3015 int extra = htab->num_overlays;
3016 asection *sec;
3017
3018 if (extra)
3019 ++extra;
3020
3021 sec = bfd_get_section_by_name (abfd, ".toe");
3022 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
3023 ++extra;
3024
3025 return extra;
3026 }
3027
3028 /* Remove .toe section from other PT_LOAD segments and put it in
3029 a segment of its own. Put overlays in separate segments too. */
3030
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  /* Called with info == NULL from objcopy; nothing to do then.  */
  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Split the sections after S (if any) into a new PT_LOAD
	       segment inserted right after M.  */
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    /* If S wasn't first, keep the preceding sections in M and
	       give S its own single-section segment after it.  */
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* M now has a single section; the split-off segments are
	       revisited by the outer loop via m->next.  */
	    break;
	  }

  return TRUE;
}
3084
3085 /* Tweak the section type of .note.spu_name. */
3086
3087 static bfd_boolean
3088 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
3089 Elf_Internal_Shdr *hdr,
3090 asection *sec)
3091 {
3092 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
3093 hdr->sh_type = SHT_NOTE;
3094 return TRUE;
3095 }
3096
3097 /* Tweak phdrs before writing them out. */
3098
/* Tweak phdrs before writing them out.  Marks overlay segments with
   PF_OVERLAY (recording their file offsets in _ovly_table), and rounds
   PT_LOAD sizes up to multiples of 16 for DMA when safe.  */

static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  /* Called with info == NULL from objcopy; nothing to do then.  */
  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* segment_map entries and phdr[] entries run in parallel, so I
	 indexes both.  */
      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* Each _ovly_table entry is 16 bytes; file_off is the
		   third word.  Overlay indices are 1-based.  */
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	/* Break out (leaving i != -1u) if padding this segment would
	   run into the next PT_LOAD in the file...  */
	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	/* ...or in memory.  */
	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* Only apply the rounding if the first pass completed without
     detecting a possible overlap (i wrapped to -1).  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
3183
/* Target vector definition and elf_backend hook overrides for the
   32-bit big-endian SPU target; elf32-target.h instantiates the
   backend from these macros.  */

#define TARGET_BIG_SYM bfd_elf32_spu_vec
#define TARGET_BIG_NAME "elf32-spu"
#define ELF_ARCH bfd_arch_spu
#define ELF_MACHINE_CODE EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE 0x80
#define elf_backend_rela_normal 1
#define elf_backend_can_gc_sections 1

#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
#define elf_info_to_howto spu_elf_info_to_howto
#define elf_backend_count_relocs spu_elf_count_relocs
#define elf_backend_relocate_section spu_elf_relocate_section
#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
#define bfd_elf32_new_section_hook spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers spu_elf_additional_program_headers
#define elf_backend_modify_segment_map spu_elf_modify_segment_map
#define elf_backend_modify_program_headers spu_elf_modify_program_headers
#define elf_backend_post_process_headers spu_elf_post_process_headers
#define elf_backend_fake_sections spu_elf_fake_sections
#define elf_backend_special_sections spu_elf_special_sections
#define bfd_elf32_bfd_final_link spu_elf_final_link

#include "elf32-target.h"
This page took 0.099617 seconds and 4 git commands to generate.