[deliverable/binutils-gdb.git] / bfd / elf32-arm.c
1/* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22#include "sysdep.h"
23#include <limits.h>
24
25#include "bfd.h"
26#include "libiberty.h"
27#include "libbfd.h"
28#include "elf-bfd.h"
29#include "elf-vxworks.h"
30#include "elf/arm.h"
31
32/* Return the relocation section associated with NAME. HTAB is the
 33 bfd's elf32_arm_link_hash_table. */
34#define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
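/* For illustration (the ".text" argument is an assumption, not a call
   taken from this file): with use_rel set, RELOC_SECTION (htab, ".text")
   expands to ".rel" ".text", which the compiler concatenates into the
   string ".rel.text"; otherwise it yields ".rela.text".  */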
36
37/* Return size of a relocation entry. HTAB is the bfd's
 38 elf32_arm_link_hash_table. */
39#define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44/* Return function to swap relocations in. HTAB is the bfd's
 45 elf32_arm_link_hash_table. */
46#define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51/* Return function to swap relocations out. HTAB is the bfd's
 52 elf32_arm_link_hash_table. */
53#define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
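/* A minimal usage sketch of the macros above (illustrative only; the
   names `htab', `sreloc', `rel' and `output_bfd' are assumptions, not
   taken from this file).  Code emitting a dynamic relocation can stay
   agnostic of REL vs. RELA by sizing and swapping through the macros:

     Elf_Internal_Rela rel;
     bfd_byte *loc = sreloc->contents
                     + sreloc->reloc_count++ * RELOC_SIZE (htab);
     SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);  */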
57
58#define elf_info_to_howto 0
59#define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61#define ARM_ELF_ABI_VERSION 0
62#define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
65 struct bfd_link_info *link_info,
66 asection *sec,
67 bfd_byte *contents);
68
 69/* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
 70 R_ARM_PC24 as an index into this table, and to find the R_ARM_PC24
 71 HOWTO in that slot. */
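/* Concretely, the intended invariant is
   elf32_arm_howto_table_1[R_ARM_PC24].type == R_ARM_PC24 for every
   allocated entry, which is why unallocated numbers below are kept as
   EMPTY_HOWTO placeholders rather than being omitted.  */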
72
73static reloc_howto_type elf32_arm_howto_table_1[] =
74{
75 /* No relocation. */
76 HOWTO (R_ARM_NONE, /* type */
77 0, /* rightshift */
78 0, /* size (0 = byte, 1 = short, 2 = long) */
79 0, /* bitsize */
80 FALSE, /* pc_relative */
81 0, /* bitpos */
82 complain_overflow_dont,/* complain_on_overflow */
83 bfd_elf_generic_reloc, /* special_function */
84 "R_ARM_NONE", /* name */
85 FALSE, /* partial_inplace */
86 0, /* src_mask */
87 0, /* dst_mask */
88 FALSE), /* pcrel_offset */
89
90 HOWTO (R_ARM_PC24, /* type */
91 2, /* rightshift */
92 2, /* size (0 = byte, 1 = short, 2 = long) */
93 24, /* bitsize */
94 TRUE, /* pc_relative */
95 0, /* bitpos */
96 complain_overflow_signed,/* complain_on_overflow */
97 bfd_elf_generic_reloc, /* special_function */
98 "R_ARM_PC24", /* name */
99 FALSE, /* partial_inplace */
100 0x00ffffff, /* src_mask */
101 0x00ffffff, /* dst_mask */
102 TRUE), /* pcrel_offset */
103
104 /* 32 bit absolute */
105 HOWTO (R_ARM_ABS32, /* type */
106 0, /* rightshift */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
108 32, /* bitsize */
109 FALSE, /* pc_relative */
110 0, /* bitpos */
111 complain_overflow_bitfield,/* complain_on_overflow */
112 bfd_elf_generic_reloc, /* special_function */
113 "R_ARM_ABS32", /* name */
114 FALSE, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 FALSE), /* pcrel_offset */
118
119 /* standard 32bit pc-relative reloc */
120 HOWTO (R_ARM_REL32, /* type */
121 0, /* rightshift */
122 2, /* size (0 = byte, 1 = short, 2 = long) */
123 32, /* bitsize */
124 TRUE, /* pc_relative */
125 0, /* bitpos */
126 complain_overflow_bitfield,/* complain_on_overflow */
127 bfd_elf_generic_reloc, /* special_function */
128 "R_ARM_REL32", /* name */
129 FALSE, /* partial_inplace */
130 0xffffffff, /* src_mask */
131 0xffffffff, /* dst_mask */
132 TRUE), /* pcrel_offset */
133
134 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
135 HOWTO (R_ARM_LDR_PC_G0, /* type */
136 0, /* rightshift */
137 0, /* size (0 = byte, 1 = short, 2 = long) */
138 32, /* bitsize */
139 TRUE, /* pc_relative */
140 0, /* bitpos */
141 complain_overflow_dont,/* complain_on_overflow */
142 bfd_elf_generic_reloc, /* special_function */
143 "R_ARM_LDR_PC_G0", /* name */
144 FALSE, /* partial_inplace */
145 0xffffffff, /* src_mask */
146 0xffffffff, /* dst_mask */
147 TRUE), /* pcrel_offset */
148
149 /* 16 bit absolute */
150 HOWTO (R_ARM_ABS16, /* type */
151 0, /* rightshift */
152 1, /* size (0 = byte, 1 = short, 2 = long) */
153 16, /* bitsize */
154 FALSE, /* pc_relative */
155 0, /* bitpos */
156 complain_overflow_bitfield,/* complain_on_overflow */
157 bfd_elf_generic_reloc, /* special_function */
158 "R_ARM_ABS16", /* name */
159 FALSE, /* partial_inplace */
160 0x0000ffff, /* src_mask */
161 0x0000ffff, /* dst_mask */
162 FALSE), /* pcrel_offset */
163
164 /* 12 bit absolute */
165 HOWTO (R_ARM_ABS12, /* type */
166 0, /* rightshift */
167 2, /* size (0 = byte, 1 = short, 2 = long) */
168 12, /* bitsize */
169 FALSE, /* pc_relative */
170 0, /* bitpos */
171 complain_overflow_bitfield,/* complain_on_overflow */
172 bfd_elf_generic_reloc, /* special_function */
173 "R_ARM_ABS12", /* name */
174 FALSE, /* partial_inplace */
175 0x00000fff, /* src_mask */
176 0x00000fff, /* dst_mask */
177 FALSE), /* pcrel_offset */
178
179 HOWTO (R_ARM_THM_ABS5, /* type */
180 6, /* rightshift */
181 1, /* size (0 = byte, 1 = short, 2 = long) */
182 5, /* bitsize */
183 FALSE, /* pc_relative */
184 0, /* bitpos */
185 complain_overflow_bitfield,/* complain_on_overflow */
186 bfd_elf_generic_reloc, /* special_function */
187 "R_ARM_THM_ABS5", /* name */
188 FALSE, /* partial_inplace */
189 0x000007e0, /* src_mask */
190 0x000007e0, /* dst_mask */
191 FALSE), /* pcrel_offset */
192
193 /* 8 bit absolute */
194 HOWTO (R_ARM_ABS8, /* type */
195 0, /* rightshift */
196 0, /* size (0 = byte, 1 = short, 2 = long) */
197 8, /* bitsize */
198 FALSE, /* pc_relative */
199 0, /* bitpos */
200 complain_overflow_bitfield,/* complain_on_overflow */
201 bfd_elf_generic_reloc, /* special_function */
202 "R_ARM_ABS8", /* name */
203 FALSE, /* partial_inplace */
204 0x000000ff, /* src_mask */
205 0x000000ff, /* dst_mask */
206 FALSE), /* pcrel_offset */
207
208 HOWTO (R_ARM_SBREL32, /* type */
209 0, /* rightshift */
210 2, /* size (0 = byte, 1 = short, 2 = long) */
211 32, /* bitsize */
212 FALSE, /* pc_relative */
213 0, /* bitpos */
214 complain_overflow_dont,/* complain_on_overflow */
215 bfd_elf_generic_reloc, /* special_function */
216 "R_ARM_SBREL32", /* name */
217 FALSE, /* partial_inplace */
218 0xffffffff, /* src_mask */
219 0xffffffff, /* dst_mask */
220 FALSE), /* pcrel_offset */
221
222 HOWTO (R_ARM_THM_CALL, /* type */
223 1, /* rightshift */
224 2, /* size (0 = byte, 1 = short, 2 = long) */
225 24, /* bitsize */
226 TRUE, /* pc_relative */
227 0, /* bitpos */
228 complain_overflow_signed,/* complain_on_overflow */
229 bfd_elf_generic_reloc, /* special_function */
230 "R_ARM_THM_CALL", /* name */
231 FALSE, /* partial_inplace */
232 0x07ff07ff, /* src_mask */
233 0x07ff07ff, /* dst_mask */
234 TRUE), /* pcrel_offset */
235
236 HOWTO (R_ARM_THM_PC8, /* type */
237 1, /* rightshift */
238 1, /* size (0 = byte, 1 = short, 2 = long) */
239 8, /* bitsize */
240 TRUE, /* pc_relative */
241 0, /* bitpos */
242 complain_overflow_signed,/* complain_on_overflow */
243 bfd_elf_generic_reloc, /* special_function */
244 "R_ARM_THM_PC8", /* name */
245 FALSE, /* partial_inplace */
246 0x000000ff, /* src_mask */
247 0x000000ff, /* dst_mask */
248 TRUE), /* pcrel_offset */
249
250 HOWTO (R_ARM_BREL_ADJ, /* type */
251 1, /* rightshift */
252 1, /* size (0 = byte, 1 = short, 2 = long) */
253 32, /* bitsize */
254 FALSE, /* pc_relative */
255 0, /* bitpos */
256 complain_overflow_signed,/* complain_on_overflow */
257 bfd_elf_generic_reloc, /* special_function */
258 "R_ARM_BREL_ADJ", /* name */
259 FALSE, /* partial_inplace */
260 0xffffffff, /* src_mask */
261 0xffffffff, /* dst_mask */
262 FALSE), /* pcrel_offset */
263
264 HOWTO (R_ARM_SWI24, /* type */
265 0, /* rightshift */
266 0, /* size (0 = byte, 1 = short, 2 = long) */
267 0, /* bitsize */
268 FALSE, /* pc_relative */
269 0, /* bitpos */
270 complain_overflow_signed,/* complain_on_overflow */
271 bfd_elf_generic_reloc, /* special_function */
272 "R_ARM_SWI24", /* name */
273 FALSE, /* partial_inplace */
274 0x00000000, /* src_mask */
275 0x00000000, /* dst_mask */
276 FALSE), /* pcrel_offset */
277
278 HOWTO (R_ARM_THM_SWI8, /* type */
279 0, /* rightshift */
280 0, /* size (0 = byte, 1 = short, 2 = long) */
281 0, /* bitsize */
282 FALSE, /* pc_relative */
283 0, /* bitpos */
284 complain_overflow_signed,/* complain_on_overflow */
285 bfd_elf_generic_reloc, /* special_function */
286 "R_ARM_SWI8", /* name */
287 FALSE, /* partial_inplace */
288 0x00000000, /* src_mask */
289 0x00000000, /* dst_mask */
290 FALSE), /* pcrel_offset */
291
292 /* BLX instruction for the ARM. */
293 HOWTO (R_ARM_XPC25, /* type */
294 2, /* rightshift */
295 2, /* size (0 = byte, 1 = short, 2 = long) */
296 25, /* bitsize */
297 TRUE, /* pc_relative */
298 0, /* bitpos */
299 complain_overflow_signed,/* complain_on_overflow */
300 bfd_elf_generic_reloc, /* special_function */
301 "R_ARM_XPC25", /* name */
302 FALSE, /* partial_inplace */
303 0x00ffffff, /* src_mask */
304 0x00ffffff, /* dst_mask */
305 TRUE), /* pcrel_offset */
306
307 /* BLX instruction for the Thumb. */
308 HOWTO (R_ARM_THM_XPC22, /* type */
309 2, /* rightshift */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
311 22, /* bitsize */
312 TRUE, /* pc_relative */
313 0, /* bitpos */
314 complain_overflow_signed,/* complain_on_overflow */
315 bfd_elf_generic_reloc, /* special_function */
316 "R_ARM_THM_XPC22", /* name */
317 FALSE, /* partial_inplace */
318 0x07ff07ff, /* src_mask */
319 0x07ff07ff, /* dst_mask */
320 TRUE), /* pcrel_offset */
321
322 /* Dynamic TLS relocations. */
323
324 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
325 0, /* rightshift */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
327 32, /* bitsize */
328 FALSE, /* pc_relative */
329 0, /* bitpos */
330 complain_overflow_bitfield,/* complain_on_overflow */
331 bfd_elf_generic_reloc, /* special_function */
332 "R_ARM_TLS_DTPMOD32", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
337
338 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
339 0, /* rightshift */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
341 32, /* bitsize */
342 FALSE, /* pc_relative */
343 0, /* bitpos */
344 complain_overflow_bitfield,/* complain_on_overflow */
345 bfd_elf_generic_reloc, /* special_function */
346 "R_ARM_TLS_DTPOFF32", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
351
352 HOWTO (R_ARM_TLS_TPOFF32, /* type */
353 0, /* rightshift */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
355 32, /* bitsize */
356 FALSE, /* pc_relative */
357 0, /* bitpos */
358 complain_overflow_bitfield,/* complain_on_overflow */
359 bfd_elf_generic_reloc, /* special_function */
360 "R_ARM_TLS_TPOFF32", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
365
366 /* Relocs used in ARM Linux */
367
368 HOWTO (R_ARM_COPY, /* type */
369 0, /* rightshift */
370 2, /* size (0 = byte, 1 = short, 2 = long) */
371 32, /* bitsize */
372 FALSE, /* pc_relative */
373 0, /* bitpos */
374 complain_overflow_bitfield,/* complain_on_overflow */
375 bfd_elf_generic_reloc, /* special_function */
376 "R_ARM_COPY", /* name */
377 TRUE, /* partial_inplace */
378 0xffffffff, /* src_mask */
379 0xffffffff, /* dst_mask */
380 FALSE), /* pcrel_offset */
381
382 HOWTO (R_ARM_GLOB_DAT, /* type */
383 0, /* rightshift */
384 2, /* size (0 = byte, 1 = short, 2 = long) */
385 32, /* bitsize */
386 FALSE, /* pc_relative */
387 0, /* bitpos */
388 complain_overflow_bitfield,/* complain_on_overflow */
389 bfd_elf_generic_reloc, /* special_function */
390 "R_ARM_GLOB_DAT", /* name */
391 TRUE, /* partial_inplace */
392 0xffffffff, /* src_mask */
393 0xffffffff, /* dst_mask */
394 FALSE), /* pcrel_offset */
395
396 HOWTO (R_ARM_JUMP_SLOT, /* type */
397 0, /* rightshift */
398 2, /* size (0 = byte, 1 = short, 2 = long) */
399 32, /* bitsize */
400 FALSE, /* pc_relative */
401 0, /* bitpos */
402 complain_overflow_bitfield,/* complain_on_overflow */
403 bfd_elf_generic_reloc, /* special_function */
404 "R_ARM_JUMP_SLOT", /* name */
405 TRUE, /* partial_inplace */
406 0xffffffff, /* src_mask */
407 0xffffffff, /* dst_mask */
408 FALSE), /* pcrel_offset */
409
410 HOWTO (R_ARM_RELATIVE, /* type */
411 0, /* rightshift */
412 2, /* size (0 = byte, 1 = short, 2 = long) */
413 32, /* bitsize */
414 FALSE, /* pc_relative */
415 0, /* bitpos */
416 complain_overflow_bitfield,/* complain_on_overflow */
417 bfd_elf_generic_reloc, /* special_function */
418 "R_ARM_RELATIVE", /* name */
419 TRUE, /* partial_inplace */
420 0xffffffff, /* src_mask */
421 0xffffffff, /* dst_mask */
422 FALSE), /* pcrel_offset */
423
424 HOWTO (R_ARM_GOTOFF32, /* type */
425 0, /* rightshift */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
427 32, /* bitsize */
428 FALSE, /* pc_relative */
429 0, /* bitpos */
430 complain_overflow_bitfield,/* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 "R_ARM_GOTOFF32", /* name */
433 TRUE, /* partial_inplace */
434 0xffffffff, /* src_mask */
435 0xffffffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
437
438 HOWTO (R_ARM_GOTPC, /* type */
439 0, /* rightshift */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
441 32, /* bitsize */
442 TRUE, /* pc_relative */
443 0, /* bitpos */
444 complain_overflow_bitfield,/* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 "R_ARM_GOTPC", /* name */
447 TRUE, /* partial_inplace */
448 0xffffffff, /* src_mask */
449 0xffffffff, /* dst_mask */
450 TRUE), /* pcrel_offset */
451
452 HOWTO (R_ARM_GOT32, /* type */
453 0, /* rightshift */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
455 32, /* bitsize */
456 FALSE, /* pc_relative */
457 0, /* bitpos */
458 complain_overflow_bitfield,/* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 "R_ARM_GOT32", /* name */
461 TRUE, /* partial_inplace */
462 0xffffffff, /* src_mask */
463 0xffffffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
465
466 HOWTO (R_ARM_PLT32, /* type */
467 2, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 24, /* bitsize */
470 TRUE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_bitfield,/* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 "R_ARM_PLT32", /* name */
475 FALSE, /* partial_inplace */
476 0x00ffffff, /* src_mask */
477 0x00ffffff, /* dst_mask */
478 TRUE), /* pcrel_offset */
479
480 HOWTO (R_ARM_CALL, /* type */
481 2, /* rightshift */
482 2, /* size (0 = byte, 1 = short, 2 = long) */
483 24, /* bitsize */
484 TRUE, /* pc_relative */
485 0, /* bitpos */
486 complain_overflow_signed,/* complain_on_overflow */
487 bfd_elf_generic_reloc, /* special_function */
488 "R_ARM_CALL", /* name */
489 FALSE, /* partial_inplace */
490 0x00ffffff, /* src_mask */
491 0x00ffffff, /* dst_mask */
492 TRUE), /* pcrel_offset */
493
494 HOWTO (R_ARM_JUMP24, /* type */
495 2, /* rightshift */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
497 24, /* bitsize */
498 TRUE, /* pc_relative */
499 0, /* bitpos */
500 complain_overflow_signed,/* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 "R_ARM_JUMP24", /* name */
503 FALSE, /* partial_inplace */
504 0x00ffffff, /* src_mask */
505 0x00ffffff, /* dst_mask */
506 TRUE), /* pcrel_offset */
507
508 HOWTO (R_ARM_THM_JUMP24, /* type */
509 1, /* rightshift */
510 2, /* size (0 = byte, 1 = short, 2 = long) */
511 24, /* bitsize */
512 TRUE, /* pc_relative */
513 0, /* bitpos */
514 complain_overflow_signed,/* complain_on_overflow */
515 bfd_elf_generic_reloc, /* special_function */
516 "R_ARM_THM_JUMP24", /* name */
517 FALSE, /* partial_inplace */
518 0x07ff2fff, /* src_mask */
519 0x07ff2fff, /* dst_mask */
520 TRUE), /* pcrel_offset */
521
522 HOWTO (R_ARM_BASE_ABS, /* type */
523 0, /* rightshift */
524 2, /* size (0 = byte, 1 = short, 2 = long) */
525 32, /* bitsize */
526 FALSE, /* pc_relative */
527 0, /* bitpos */
528 complain_overflow_dont,/* complain_on_overflow */
529 bfd_elf_generic_reloc, /* special_function */
530 "R_ARM_BASE_ABS", /* name */
531 FALSE, /* partial_inplace */
532 0xffffffff, /* src_mask */
533 0xffffffff, /* dst_mask */
534 FALSE), /* pcrel_offset */
535
536 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
537 0, /* rightshift */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
539 12, /* bitsize */
540 TRUE, /* pc_relative */
541 0, /* bitpos */
542 complain_overflow_dont,/* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 "R_ARM_ALU_PCREL_7_0", /* name */
545 FALSE, /* partial_inplace */
546 0x00000fff, /* src_mask */
547 0x00000fff, /* dst_mask */
548 TRUE), /* pcrel_offset */
549
550 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
551 0, /* rightshift */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
553 12, /* bitsize */
554 TRUE, /* pc_relative */
555 8, /* bitpos */
556 complain_overflow_dont,/* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_ARM_ALU_PCREL_15_8",/* name */
559 FALSE, /* partial_inplace */
560 0x00000fff, /* src_mask */
561 0x00000fff, /* dst_mask */
562 TRUE), /* pcrel_offset */
563
564 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
565 0, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 12, /* bitsize */
568 TRUE, /* pc_relative */
569 16, /* bitpos */
570 complain_overflow_dont,/* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_ARM_ALU_PCREL_23_15",/* name */
573 FALSE, /* partial_inplace */
574 0x00000fff, /* src_mask */
575 0x00000fff, /* dst_mask */
576 TRUE), /* pcrel_offset */
577
578 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
579 0, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 12, /* bitsize */
582 FALSE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_dont,/* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 "R_ARM_LDR_SBREL_11_0",/* name */
587 FALSE, /* partial_inplace */
588 0x00000fff, /* src_mask */
589 0x00000fff, /* dst_mask */
590 FALSE), /* pcrel_offset */
591
592 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
593 0, /* rightshift */
594 2, /* size (0 = byte, 1 = short, 2 = long) */
595 8, /* bitsize */
596 FALSE, /* pc_relative */
597 12, /* bitpos */
598 complain_overflow_dont,/* complain_on_overflow */
599 bfd_elf_generic_reloc, /* special_function */
600 "R_ARM_ALU_SBREL_19_12",/* name */
601 FALSE, /* partial_inplace */
602 0x000ff000, /* src_mask */
603 0x000ff000, /* dst_mask */
604 FALSE), /* pcrel_offset */
605
606 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
607 0, /* rightshift */
608 2, /* size (0 = byte, 1 = short, 2 = long) */
609 8, /* bitsize */
610 FALSE, /* pc_relative */
611 20, /* bitpos */
612 complain_overflow_dont,/* complain_on_overflow */
613 bfd_elf_generic_reloc, /* special_function */
614 "R_ARM_ALU_SBREL_27_20",/* name */
615 FALSE, /* partial_inplace */
616 0x0ff00000, /* src_mask */
617 0x0ff00000, /* dst_mask */
618 FALSE), /* pcrel_offset */
619
620 HOWTO (R_ARM_TARGET1, /* type */
621 0, /* rightshift */
622 2, /* size (0 = byte, 1 = short, 2 = long) */
623 32, /* bitsize */
624 FALSE, /* pc_relative */
625 0, /* bitpos */
626 complain_overflow_dont,/* complain_on_overflow */
627 bfd_elf_generic_reloc, /* special_function */
628 "R_ARM_TARGET1", /* name */
629 FALSE, /* partial_inplace */
630 0xffffffff, /* src_mask */
631 0xffffffff, /* dst_mask */
632 FALSE), /* pcrel_offset */
633
634 HOWTO (R_ARM_ROSEGREL32, /* type */
635 0, /* rightshift */
636 2, /* size (0 = byte, 1 = short, 2 = long) */
637 32, /* bitsize */
638 FALSE, /* pc_relative */
639 0, /* bitpos */
640 complain_overflow_dont,/* complain_on_overflow */
641 bfd_elf_generic_reloc, /* special_function */
642 "R_ARM_ROSEGREL32", /* name */
643 FALSE, /* partial_inplace */
644 0xffffffff, /* src_mask */
645 0xffffffff, /* dst_mask */
646 FALSE), /* pcrel_offset */
647
648 HOWTO (R_ARM_V4BX, /* type */
649 0, /* rightshift */
650 2, /* size (0 = byte, 1 = short, 2 = long) */
651 32, /* bitsize */
652 FALSE, /* pc_relative */
653 0, /* bitpos */
654 complain_overflow_dont,/* complain_on_overflow */
655 bfd_elf_generic_reloc, /* special_function */
656 "R_ARM_V4BX", /* name */
657 FALSE, /* partial_inplace */
658 0xffffffff, /* src_mask */
659 0xffffffff, /* dst_mask */
660 FALSE), /* pcrel_offset */
661
662 HOWTO (R_ARM_TARGET2, /* type */
663 0, /* rightshift */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
665 32, /* bitsize */
666 FALSE, /* pc_relative */
667 0, /* bitpos */
668 complain_overflow_signed,/* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_ARM_TARGET2", /* name */
671 FALSE, /* partial_inplace */
672 0xffffffff, /* src_mask */
673 0xffffffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
675
676 HOWTO (R_ARM_PREL31, /* type */
677 0, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 31, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed,/* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_ARM_PREL31", /* name */
685 FALSE, /* partial_inplace */
686 0x7fffffff, /* src_mask */
687 0x7fffffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
691 0, /* rightshift */
692 2, /* size (0 = byte, 1 = short, 2 = long) */
693 16, /* bitsize */
694 FALSE, /* pc_relative */
695 0, /* bitpos */
696 complain_overflow_dont,/* complain_on_overflow */
697 bfd_elf_generic_reloc, /* special_function */
698 "R_ARM_MOVW_ABS_NC", /* name */
699 FALSE, /* partial_inplace */
700 0x000f0fff, /* src_mask */
701 0x000f0fff, /* dst_mask */
702 FALSE), /* pcrel_offset */
703
704 HOWTO (R_ARM_MOVT_ABS, /* type */
705 0, /* rightshift */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
707 16, /* bitsize */
708 FALSE, /* pc_relative */
709 0, /* bitpos */
710 complain_overflow_bitfield,/* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 "R_ARM_MOVT_ABS", /* name */
713 FALSE, /* partial_inplace */
714 0x000f0fff, /* src_mask */
715 0x000f0fff, /* dst_mask */
716 FALSE), /* pcrel_offset */
717
718 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
719 0, /* rightshift */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
721 16, /* bitsize */
722 TRUE, /* pc_relative */
723 0, /* bitpos */
724 complain_overflow_dont,/* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 "R_ARM_MOVW_PREL_NC", /* name */
727 FALSE, /* partial_inplace */
728 0x000f0fff, /* src_mask */
729 0x000f0fff, /* dst_mask */
730 TRUE), /* pcrel_offset */
731
732 HOWTO (R_ARM_MOVT_PREL, /* type */
733 0, /* rightshift */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
735 16, /* bitsize */
736 TRUE, /* pc_relative */
737 0, /* bitpos */
738 complain_overflow_bitfield,/* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 "R_ARM_MOVT_PREL", /* name */
741 FALSE, /* partial_inplace */
742 0x000f0fff, /* src_mask */
743 0x000f0fff, /* dst_mask */
744 TRUE), /* pcrel_offset */
745
746 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
747 0, /* rightshift */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
749 16, /* bitsize */
750 FALSE, /* pc_relative */
751 0, /* bitpos */
752 complain_overflow_dont,/* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 "R_ARM_THM_MOVW_ABS_NC",/* name */
755 FALSE, /* partial_inplace */
756 0x040f70ff, /* src_mask */
757 0x040f70ff, /* dst_mask */
758 FALSE), /* pcrel_offset */
759
760 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
761 0, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 16, /* bitsize */
764 FALSE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_bitfield,/* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 "R_ARM_THM_MOVT_ABS", /* name */
769 FALSE, /* partial_inplace */
770 0x040f70ff, /* src_mask */
771 0x040f70ff, /* dst_mask */
772 FALSE), /* pcrel_offset */
773
774 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
775 0, /* rightshift */
776 2, /* size (0 = byte, 1 = short, 2 = long) */
777 16, /* bitsize */
778 TRUE, /* pc_relative */
779 0, /* bitpos */
780 complain_overflow_dont,/* complain_on_overflow */
781 bfd_elf_generic_reloc, /* special_function */
782 "R_ARM_THM_MOVW_PREL_NC",/* name */
783 FALSE, /* partial_inplace */
784 0x040f70ff, /* src_mask */
785 0x040f70ff, /* dst_mask */
786 TRUE), /* pcrel_offset */
787
788 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
789 0, /* rightshift */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
791 16, /* bitsize */
792 TRUE, /* pc_relative */
793 0, /* bitpos */
794 complain_overflow_bitfield,/* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 "R_ARM_THM_MOVT_PREL", /* name */
797 FALSE, /* partial_inplace */
798 0x040f70ff, /* src_mask */
799 0x040f70ff, /* dst_mask */
800 TRUE), /* pcrel_offset */
801
802 HOWTO (R_ARM_THM_JUMP19, /* type */
803 1, /* rightshift */
804 2, /* size (0 = byte, 1 = short, 2 = long) */
805 19, /* bitsize */
806 TRUE, /* pc_relative */
807 0, /* bitpos */
808 complain_overflow_signed,/* complain_on_overflow */
809 bfd_elf_generic_reloc, /* special_function */
810 "R_ARM_THM_JUMP19", /* name */
811 FALSE, /* partial_inplace */
812 0x043f2fff, /* src_mask */
813 0x043f2fff, /* dst_mask */
814 TRUE), /* pcrel_offset */
815
816 HOWTO (R_ARM_THM_JUMP6, /* type */
817 1, /* rightshift */
818 1, /* size (0 = byte, 1 = short, 2 = long) */
819 6, /* bitsize */
820 TRUE, /* pc_relative */
821 0, /* bitpos */
822 complain_overflow_unsigned,/* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_ARM_THM_JUMP6", /* name */
825 FALSE, /* partial_inplace */
826 0x02f8, /* src_mask */
827 0x02f8, /* dst_mask */
828 TRUE), /* pcrel_offset */
829
830 /* These are declared as 13-bit signed relocations because we can
831 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
832 versa. */
833 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
834 0, /* rightshift */
835 2, /* size (0 = byte, 1 = short, 2 = long) */
836 13, /* bitsize */
837 TRUE, /* pc_relative */
838 0, /* bitpos */
839 complain_overflow_dont,/* complain_on_overflow */
840 bfd_elf_generic_reloc, /* special_function */
841 "R_ARM_THM_ALU_PREL_11_0",/* name */
842 FALSE, /* partial_inplace */
843 0xffffffff, /* src_mask */
844 0xffffffff, /* dst_mask */
845 TRUE), /* pcrel_offset */
846
847 HOWTO (R_ARM_THM_PC12, /* type */
848 0, /* rightshift */
849 2, /* size (0 = byte, 1 = short, 2 = long) */
850 13, /* bitsize */
851 TRUE, /* pc_relative */
852 0, /* bitpos */
853 complain_overflow_dont,/* complain_on_overflow */
854 bfd_elf_generic_reloc, /* special_function */
855 "R_ARM_THM_PC12", /* name */
856 FALSE, /* partial_inplace */
857 0xffffffff, /* src_mask */
858 0xffffffff, /* dst_mask */
859 TRUE), /* pcrel_offset */
860
861 HOWTO (R_ARM_ABS32_NOI, /* type */
862 0, /* rightshift */
863 2, /* size (0 = byte, 1 = short, 2 = long) */
864 32, /* bitsize */
865 FALSE, /* pc_relative */
866 0, /* bitpos */
867 complain_overflow_dont,/* complain_on_overflow */
868 bfd_elf_generic_reloc, /* special_function */
869 "R_ARM_ABS32_NOI", /* name */
870 FALSE, /* partial_inplace */
871 0xffffffff, /* src_mask */
872 0xffffffff, /* dst_mask */
873 FALSE), /* pcrel_offset */
874
875 HOWTO (R_ARM_REL32_NOI, /* type */
876 0, /* rightshift */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
878 32, /* bitsize */
879 TRUE, /* pc_relative */
880 0, /* bitpos */
881 complain_overflow_dont,/* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_ARM_REL32_NOI", /* name */
884 FALSE, /* partial_inplace */
885 0xffffffff, /* src_mask */
886 0xffffffff, /* dst_mask */
887 FALSE), /* pcrel_offset */
888
889 /* Group relocations. */
890
891 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
892 0, /* rightshift */
893 2, /* size (0 = byte, 1 = short, 2 = long) */
894 32, /* bitsize */
895 TRUE, /* pc_relative */
896 0, /* bitpos */
897 complain_overflow_dont,/* complain_on_overflow */
898 bfd_elf_generic_reloc, /* special_function */
899 "R_ARM_ALU_PC_G0_NC", /* name */
900 FALSE, /* partial_inplace */
901 0xffffffff, /* src_mask */
902 0xffffffff, /* dst_mask */
903 TRUE), /* pcrel_offset */
904
905 HOWTO (R_ARM_ALU_PC_G0, /* type */
906 0, /* rightshift */
907 2, /* size (0 = byte, 1 = short, 2 = long) */
908 32, /* bitsize */
909 TRUE, /* pc_relative */
910 0, /* bitpos */
911 complain_overflow_dont,/* complain_on_overflow */
912 bfd_elf_generic_reloc, /* special_function */
913 "R_ARM_ALU_PC_G0", /* name */
914 FALSE, /* partial_inplace */
915 0xffffffff, /* src_mask */
916 0xffffffff, /* dst_mask */
917 TRUE), /* pcrel_offset */
918
919 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
920 0, /* rightshift */
921 2, /* size (0 = byte, 1 = short, 2 = long) */
922 32, /* bitsize */
923 TRUE, /* pc_relative */
924 0, /* bitpos */
925 complain_overflow_dont,/* complain_on_overflow */
926 bfd_elf_generic_reloc, /* special_function */
927 "R_ARM_ALU_PC_G1_NC", /* name */
928 FALSE, /* partial_inplace */
929 0xffffffff, /* src_mask */
930 0xffffffff, /* dst_mask */
931 TRUE), /* pcrel_offset */
932
933 HOWTO (R_ARM_ALU_PC_G1, /* type */
934 0, /* rightshift */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
936 32, /* bitsize */
937 TRUE, /* pc_relative */
938 0, /* bitpos */
939 complain_overflow_dont,/* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_ARM_ALU_PC_G1", /* name */
942 FALSE, /* partial_inplace */
943 0xffffffff, /* src_mask */
944 0xffffffff, /* dst_mask */
945 TRUE), /* pcrel_offset */
946
947 HOWTO (R_ARM_ALU_PC_G2, /* type */
948 0, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 32, /* bitsize */
951 TRUE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont,/* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_ARM_ALU_PC_G2", /* name */
956 FALSE, /* partial_inplace */
957 0xffffffff, /* src_mask */
958 0xffffffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
960
961 HOWTO (R_ARM_LDR_PC_G1, /* type */
962 0, /* rightshift */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
964 32, /* bitsize */
965 TRUE, /* pc_relative */
966 0, /* bitpos */
967 complain_overflow_dont,/* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 "R_ARM_LDR_PC_G1", /* name */
970 FALSE, /* partial_inplace */
971 0xffffffff, /* src_mask */
972 0xffffffff, /* dst_mask */
973 TRUE), /* pcrel_offset */
974
975 HOWTO (R_ARM_LDR_PC_G2, /* type */
976 0, /* rightshift */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
978 32, /* bitsize */
979 TRUE, /* pc_relative */
980 0, /* bitpos */
981 complain_overflow_dont,/* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 "R_ARM_LDR_PC_G2", /* name */
984 FALSE, /* partial_inplace */
985 0xffffffff, /* src_mask */
986 0xffffffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
988
989 HOWTO (R_ARM_LDRS_PC_G0, /* type */
990 0, /* rightshift */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
992 32, /* bitsize */
993 TRUE, /* pc_relative */
994 0, /* bitpos */
995 complain_overflow_dont,/* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 "R_ARM_LDRS_PC_G0", /* name */
998 FALSE, /* partial_inplace */
999 0xffffffff, /* src_mask */
1000 0xffffffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1002
1003 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1004 0, /* rightshift */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1006 32, /* bitsize */
1007 TRUE, /* pc_relative */
1008 0, /* bitpos */
1009 complain_overflow_dont,/* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 "R_ARM_LDRS_PC_G1", /* name */
1012 FALSE, /* partial_inplace */
1013 0xffffffff, /* src_mask */
1014 0xffffffff, /* dst_mask */
1015 TRUE), /* pcrel_offset */
1016
1017 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1018 0, /* rightshift */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1020 32, /* bitsize */
1021 TRUE, /* pc_relative */
1022 0, /* bitpos */
1023 complain_overflow_dont,/* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 "R_ARM_LDRS_PC_G2", /* name */
1026 FALSE, /* partial_inplace */
1027 0xffffffff, /* src_mask */
1028 0xffffffff, /* dst_mask */
1029 TRUE), /* pcrel_offset */
1030
1031 HOWTO (R_ARM_LDC_PC_G0, /* type */
1032 0, /* rightshift */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1034 32, /* bitsize */
1035 TRUE, /* pc_relative */
1036 0, /* bitpos */
1037 complain_overflow_dont,/* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 "R_ARM_LDC_PC_G0", /* name */
1040 FALSE, /* partial_inplace */
1041 0xffffffff, /* src_mask */
1042 0xffffffff, /* dst_mask */
1043 TRUE), /* pcrel_offset */
1044
1045 HOWTO (R_ARM_LDC_PC_G1, /* type */
1046 0, /* rightshift */
1047 2, /* size (0 = byte, 1 = short, 2 = long) */
1048 32, /* bitsize */
1049 TRUE, /* pc_relative */
1050 0, /* bitpos */
1051 complain_overflow_dont,/* complain_on_overflow */
1052 bfd_elf_generic_reloc, /* special_function */
1053 "R_ARM_LDC_PC_G1", /* name */
1054 FALSE, /* partial_inplace */
1055 0xffffffff, /* src_mask */
1056 0xffffffff, /* dst_mask */
1057 TRUE), /* pcrel_offset */
1058
1059 HOWTO (R_ARM_LDC_PC_G2, /* type */
1060 0, /* rightshift */
1061 2, /* size (0 = byte, 1 = short, 2 = long) */
1062 32, /* bitsize */
1063 TRUE, /* pc_relative */
1064 0, /* bitpos */
1065 complain_overflow_dont,/* complain_on_overflow */
1066 bfd_elf_generic_reloc, /* special_function */
1067 "R_ARM_LDC_PC_G2", /* name */
1068 FALSE, /* partial_inplace */
1069 0xffffffff, /* src_mask */
1070 0xffffffff, /* dst_mask */
1071 TRUE), /* pcrel_offset */
1072
1073 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1074 0, /* rightshift */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1076 32, /* bitsize */
1077 TRUE, /* pc_relative */
1078 0, /* bitpos */
1079 complain_overflow_dont,/* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 "R_ARM_ALU_SB_G0_NC", /* name */
1082 FALSE, /* partial_inplace */
1083 0xffffffff, /* src_mask */
1084 0xffffffff, /* dst_mask */
1085 TRUE), /* pcrel_offset */
1086
1087 HOWTO (R_ARM_ALU_SB_G0, /* type */
1088 0, /* rightshift */
1089 2, /* size (0 = byte, 1 = short, 2 = long) */
1090 32, /* bitsize */
1091 TRUE, /* pc_relative */
1092 0, /* bitpos */
1093 complain_overflow_dont,/* complain_on_overflow */
1094 bfd_elf_generic_reloc, /* special_function */
1095 "R_ARM_ALU_SB_G0", /* name */
1096 FALSE, /* partial_inplace */
1097 0xffffffff, /* src_mask */
1098 0xffffffff, /* dst_mask */
1099 TRUE), /* pcrel_offset */
1100
1101 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1102 0, /* rightshift */
1103 2, /* size (0 = byte, 1 = short, 2 = long) */
1104 32, /* bitsize */
1105 TRUE, /* pc_relative */
1106 0, /* bitpos */
1107 complain_overflow_dont,/* complain_on_overflow */
1108 bfd_elf_generic_reloc, /* special_function */
1109 "R_ARM_ALU_SB_G1_NC", /* name */
1110 FALSE, /* partial_inplace */
1111 0xffffffff, /* src_mask */
1112 0xffffffff, /* dst_mask */
1113 TRUE), /* pcrel_offset */
1114
1115 HOWTO (R_ARM_ALU_SB_G1, /* type */
1116 0, /* rightshift */
1117 2, /* size (0 = byte, 1 = short, 2 = long) */
1118 32, /* bitsize */
1119 TRUE, /* pc_relative */
1120 0, /* bitpos */
1121 complain_overflow_dont,/* complain_on_overflow */
1122 bfd_elf_generic_reloc, /* special_function */
1123 "R_ARM_ALU_SB_G1", /* name */
1124 FALSE, /* partial_inplace */
1125 0xffffffff, /* src_mask */
1126 0xffffffff, /* dst_mask */
1127 TRUE), /* pcrel_offset */
1128
1129 HOWTO (R_ARM_ALU_SB_G2, /* type */
1130 0, /* rightshift */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1132 32, /* bitsize */
1133 TRUE, /* pc_relative */
1134 0, /* bitpos */
1135 complain_overflow_dont,/* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 "R_ARM_ALU_SB_G2", /* name */
1138 FALSE, /* partial_inplace */
1139 0xffffffff, /* src_mask */
1140 0xffffffff, /* dst_mask */
1141 TRUE), /* pcrel_offset */
1142
1143 HOWTO (R_ARM_LDR_SB_G0, /* type */
1144 0, /* rightshift */
1145 2, /* size (0 = byte, 1 = short, 2 = long) */
1146 32, /* bitsize */
1147 TRUE, /* pc_relative */
1148 0, /* bitpos */
1149 complain_overflow_dont,/* complain_on_overflow */
1150 bfd_elf_generic_reloc, /* special_function */
1151 "R_ARM_LDR_SB_G0", /* name */
1152 FALSE, /* partial_inplace */
1153 0xffffffff, /* src_mask */
1154 0xffffffff, /* dst_mask */
1155 TRUE), /* pcrel_offset */
1156
1157 HOWTO (R_ARM_LDR_SB_G1, /* type */
1158 0, /* rightshift */
1159 2, /* size (0 = byte, 1 = short, 2 = long) */
1160 32, /* bitsize */
1161 TRUE, /* pc_relative */
1162 0, /* bitpos */
1163 complain_overflow_dont,/* complain_on_overflow */
1164 bfd_elf_generic_reloc, /* special_function */
1165 "R_ARM_LDR_SB_G1", /* name */
1166 FALSE, /* partial_inplace */
1167 0xffffffff, /* src_mask */
1168 0xffffffff, /* dst_mask */
1169 TRUE), /* pcrel_offset */
1170
1171 HOWTO (R_ARM_LDR_SB_G2, /* type */
1172 0, /* rightshift */
1173 2, /* size (0 = byte, 1 = short, 2 = long) */
1174 32, /* bitsize */
1175 TRUE, /* pc_relative */
1176 0, /* bitpos */
1177 complain_overflow_dont,/* complain_on_overflow */
1178 bfd_elf_generic_reloc, /* special_function */
1179 "R_ARM_LDR_SB_G2", /* name */
1180 FALSE, /* partial_inplace */
1181 0xffffffff, /* src_mask */
1182 0xffffffff, /* dst_mask */
1183 TRUE), /* pcrel_offset */
1184
1185 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1186 0, /* rightshift */
1187 2, /* size (0 = byte, 1 = short, 2 = long) */
1188 32, /* bitsize */
1189 TRUE, /* pc_relative */
1190 0, /* bitpos */
1191 complain_overflow_dont,/* complain_on_overflow */
1192 bfd_elf_generic_reloc, /* special_function */
1193 "R_ARM_LDRS_SB_G0", /* name */
1194 FALSE, /* partial_inplace */
1195 0xffffffff, /* src_mask */
1196 0xffffffff, /* dst_mask */
1197 TRUE), /* pcrel_offset */
1198
1199 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1200 0, /* rightshift */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1202 32, /* bitsize */
1203 TRUE, /* pc_relative */
1204 0, /* bitpos */
1205 complain_overflow_dont,/* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 "R_ARM_LDRS_SB_G1", /* name */
1208 FALSE, /* partial_inplace */
1209 0xffffffff, /* src_mask */
1210 0xffffffff, /* dst_mask */
1211 TRUE), /* pcrel_offset */
1212
1213 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1214 0, /* rightshift */
1215 2, /* size (0 = byte, 1 = short, 2 = long) */
1216 32, /* bitsize */
1217 TRUE, /* pc_relative */
1218 0, /* bitpos */
1219 complain_overflow_dont,/* complain_on_overflow */
1220 bfd_elf_generic_reloc, /* special_function */
1221 "R_ARM_LDRS_SB_G2", /* name */
1222 FALSE, /* partial_inplace */
1223 0xffffffff, /* src_mask */
1224 0xffffffff, /* dst_mask */
1225 TRUE), /* pcrel_offset */
1226
1227 HOWTO (R_ARM_LDC_SB_G0, /* type */
1228 0, /* rightshift */
1229 2, /* size (0 = byte, 1 = short, 2 = long) */
1230 32, /* bitsize */
1231 TRUE, /* pc_relative */
1232 0, /* bitpos */
1233 complain_overflow_dont,/* complain_on_overflow */
1234 bfd_elf_generic_reloc, /* special_function */
1235 "R_ARM_LDC_SB_G0", /* name */
1236 FALSE, /* partial_inplace */
1237 0xffffffff, /* src_mask */
1238 0xffffffff, /* dst_mask */
1239 TRUE), /* pcrel_offset */
1240
1241 HOWTO (R_ARM_LDC_SB_G1, /* type */
1242 0, /* rightshift */
1243 2, /* size (0 = byte, 1 = short, 2 = long) */
1244 32, /* bitsize */
1245 TRUE, /* pc_relative */
1246 0, /* bitpos */
1247 complain_overflow_dont,/* complain_on_overflow */
1248 bfd_elf_generic_reloc, /* special_function */
1249 "R_ARM_LDC_SB_G1", /* name */
1250 FALSE, /* partial_inplace */
1251 0xffffffff, /* src_mask */
1252 0xffffffff, /* dst_mask */
1253 TRUE), /* pcrel_offset */
1254
1255 HOWTO (R_ARM_LDC_SB_G2, /* type */
1256 0, /* rightshift */
1257 2, /* size (0 = byte, 1 = short, 2 = long) */
1258 32, /* bitsize */
1259 TRUE, /* pc_relative */
1260 0, /* bitpos */
1261 complain_overflow_dont,/* complain_on_overflow */
1262 bfd_elf_generic_reloc, /* special_function */
1263 "R_ARM_LDC_SB_G2", /* name */
1264 FALSE, /* partial_inplace */
1265 0xffffffff, /* src_mask */
1266 0xffffffff, /* dst_mask */
1267 TRUE), /* pcrel_offset */
1268
1269 /* End of group relocations. */
1270
1271 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1272 0, /* rightshift */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1274 16, /* bitsize */
1275 FALSE, /* pc_relative */
1276 0, /* bitpos */
1277 complain_overflow_dont,/* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 "R_ARM_MOVW_BREL_NC", /* name */
1280 FALSE, /* partial_inplace */
1281 0x0000ffff, /* src_mask */
1282 0x0000ffff, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1284
1285 HOWTO (R_ARM_MOVT_BREL, /* type */
1286 0, /* rightshift */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1288 16, /* bitsize */
1289 FALSE, /* pc_relative */
1290 0, /* bitpos */
1291 complain_overflow_bitfield,/* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 "R_ARM_MOVT_BREL", /* name */
1294 FALSE, /* partial_inplace */
1295 0x0000ffff, /* src_mask */
1296 0x0000ffff, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1298
1299 HOWTO (R_ARM_MOVW_BREL, /* type */
1300 0, /* rightshift */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1302 16, /* bitsize */
1303 FALSE, /* pc_relative */
1304 0, /* bitpos */
1305 complain_overflow_dont,/* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 "R_ARM_MOVW_BREL", /* name */
1308 FALSE, /* partial_inplace */
1309 0x0000ffff, /* src_mask */
1310 0x0000ffff, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1312
1313 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1314 0, /* rightshift */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1316 16, /* bitsize */
1317 FALSE, /* pc_relative */
1318 0, /* bitpos */
1319 complain_overflow_dont,/* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 "R_ARM_THM_MOVW_BREL_NC",/* name */
1322 FALSE, /* partial_inplace */
1323 0x040f70ff, /* src_mask */
1324 0x040f70ff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1326
1327 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1328 0, /* rightshift */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1330 16, /* bitsize */
1331 FALSE, /* pc_relative */
1332 0, /* bitpos */
1333 complain_overflow_bitfield,/* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 "R_ARM_THM_MOVT_BREL", /* name */
1336 FALSE, /* partial_inplace */
1337 0x040f70ff, /* src_mask */
1338 0x040f70ff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1340
1341 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1342 0, /* rightshift */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1344 16, /* bitsize */
1345 FALSE, /* pc_relative */
1346 0, /* bitpos */
1347 complain_overflow_dont,/* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 "R_ARM_THM_MOVW_BREL", /* name */
1350 FALSE, /* partial_inplace */
1351 0x040f70ff, /* src_mask */
1352 0x040f70ff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1354
1355 EMPTY_HOWTO (90), /* Unallocated. */
1356 EMPTY_HOWTO (91),
1357 EMPTY_HOWTO (92),
1358 EMPTY_HOWTO (93),
1359
1360 HOWTO (R_ARM_PLT32_ABS, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 FALSE, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_dont,/* complain_on_overflow */
1367 bfd_elf_generic_reloc, /* special_function */
1368 "R_ARM_PLT32_ABS", /* name */
1369 FALSE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1373
1374 HOWTO (R_ARM_GOT_ABS, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 32, /* bitsize */
1378 FALSE, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_GOT_ABS", /* name */
1383 FALSE, /* partial_inplace */
1384 0xffffffff, /* src_mask */
1385 0xffffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1387
1388 HOWTO (R_ARM_GOT_PREL, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 32, /* bitsize */
1392 TRUE, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_dont, /* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_GOT_PREL", /* name */
1397 FALSE, /* partial_inplace */
1398 0xffffffff, /* src_mask */
1399 0xffffffff, /* dst_mask */
1400 TRUE), /* pcrel_offset */
1401
1402 HOWTO (R_ARM_GOT_BREL12, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 12, /* bitsize */
1406 FALSE, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_bitfield,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_GOT_BREL12", /* name */
1411 FALSE, /* partial_inplace */
1412 0x00000fff, /* src_mask */
1413 0x00000fff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1415
1416 HOWTO (R_ARM_GOTOFF12, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 12, /* bitsize */
1420 FALSE, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_bitfield,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_GOTOFF12", /* name */
1425 FALSE, /* partial_inplace */
1426 0x00000fff, /* src_mask */
1427 0x00000fff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1429
1430 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1431
1432 /* GNU extension to record C++ vtable member usage */
1433 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1434 0, /* rightshift */
1435 2, /* size (0 = byte, 1 = short, 2 = long) */
1436 0, /* bitsize */
1437 FALSE, /* pc_relative */
1438 0, /* bitpos */
1439 complain_overflow_dont, /* complain_on_overflow */
1440 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1441 "R_ARM_GNU_VTENTRY", /* name */
1442 FALSE, /* partial_inplace */
1443 0, /* src_mask */
1444 0, /* dst_mask */
1445 FALSE), /* pcrel_offset */
1446
1447 /* GNU extension to record C++ vtable hierarchy */
1448 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1449 0, /* rightshift */
1450 2, /* size (0 = byte, 1 = short, 2 = long) */
1451 0, /* bitsize */
1452 FALSE, /* pc_relative */
1453 0, /* bitpos */
1454 complain_overflow_dont, /* complain_on_overflow */
1455 NULL, /* special_function */
1456 "R_ARM_GNU_VTINHERIT", /* name */
1457 FALSE, /* partial_inplace */
1458 0, /* src_mask */
1459 0, /* dst_mask */
1460 FALSE), /* pcrel_offset */
1461
1462 HOWTO (R_ARM_THM_JUMP11, /* type */
1463 1, /* rightshift */
1464 1, /* size (0 = byte, 1 = short, 2 = long) */
1465 11, /* bitsize */
1466 TRUE, /* pc_relative */
1467 0, /* bitpos */
1468 complain_overflow_signed, /* complain_on_overflow */
1469 bfd_elf_generic_reloc, /* special_function */
1470 "R_ARM_THM_JUMP11", /* name */
1471 FALSE, /* partial_inplace */
1472 0x000007ff, /* src_mask */
1473 0x000007ff, /* dst_mask */
1474 TRUE), /* pcrel_offset */
1475
1476 HOWTO (R_ARM_THM_JUMP8, /* type */
1477 1, /* rightshift */
1478 1, /* size (0 = byte, 1 = short, 2 = long) */
1479 8, /* bitsize */
1480 TRUE, /* pc_relative */
1481 0, /* bitpos */
1482 complain_overflow_signed, /* complain_on_overflow */
1483 bfd_elf_generic_reloc, /* special_function */
1484 "R_ARM_THM_JUMP8", /* name */
1485 FALSE, /* partial_inplace */
1486 0x000000ff, /* src_mask */
1487 0x000000ff, /* dst_mask */
1488 TRUE), /* pcrel_offset */
1489
1490 /* TLS relocations */
1491 HOWTO (R_ARM_TLS_GD32, /* type */
1492 0, /* rightshift */
1493 2, /* size (0 = byte, 1 = short, 2 = long) */
1494 32, /* bitsize */
1495 FALSE, /* pc_relative */
1496 0, /* bitpos */
1497 complain_overflow_bitfield,/* complain_on_overflow */
1498 NULL, /* special_function */
1499 "R_ARM_TLS_GD32", /* name */
1500 TRUE, /* partial_inplace */
1501 0xffffffff, /* src_mask */
1502 0xffffffff, /* dst_mask */
1503 FALSE), /* pcrel_offset */
1504
1505 HOWTO (R_ARM_TLS_LDM32, /* type */
1506 0, /* rightshift */
1507 2, /* size (0 = byte, 1 = short, 2 = long) */
1508 32, /* bitsize */
1509 FALSE, /* pc_relative */
1510 0, /* bitpos */
1511 complain_overflow_bitfield,/* complain_on_overflow */
1512 bfd_elf_generic_reloc, /* special_function */
1513 "R_ARM_TLS_LDM32", /* name */
1514 TRUE, /* partial_inplace */
1515 0xffffffff, /* src_mask */
1516 0xffffffff, /* dst_mask */
1517 FALSE), /* pcrel_offset */
1518
1519 HOWTO (R_ARM_TLS_LDO32, /* type */
1520 0, /* rightshift */
1521 2, /* size (0 = byte, 1 = short, 2 = long) */
1522 32, /* bitsize */
1523 FALSE, /* pc_relative */
1524 0, /* bitpos */
1525 complain_overflow_bitfield,/* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 "R_ARM_TLS_LDO32", /* name */
1528 TRUE, /* partial_inplace */
1529 0xffffffff, /* src_mask */
1530 0xffffffff, /* dst_mask */
1531 FALSE), /* pcrel_offset */
1532
1533 HOWTO (R_ARM_TLS_IE32, /* type */
1534 0, /* rightshift */
1535 2, /* size (0 = byte, 1 = short, 2 = long) */
1536 32, /* bitsize */
1537 FALSE, /* pc_relative */
1538 0, /* bitpos */
1539 complain_overflow_bitfield,/* complain_on_overflow */
1540 NULL, /* special_function */
1541 "R_ARM_TLS_IE32", /* name */
1542 TRUE, /* partial_inplace */
1543 0xffffffff, /* src_mask */
1544 0xffffffff, /* dst_mask */
1545 FALSE), /* pcrel_offset */
1546
1547 HOWTO (R_ARM_TLS_LE32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 FALSE, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 bfd_elf_generic_reloc, /* special_function */
1555 "R_ARM_TLS_LE32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1560
1561 HOWTO (R_ARM_TLS_LDO12, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 12, /* bitsize */
1565 FALSE, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDO12", /* name */
1570 FALSE, /* partial_inplace */
1571 0x00000fff, /* src_mask */
1572 0x00000fff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1574
1575 HOWTO (R_ARM_TLS_LE12, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 12, /* bitsize */
1579 FALSE, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LE12", /* name */
1584 FALSE, /* partial_inplace */
1585 0x00000fff, /* src_mask */
1586 0x00000fff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1588
1589 HOWTO (R_ARM_TLS_IE12GP, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 12, /* bitsize */
1593 FALSE, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 bfd_elf_generic_reloc, /* special_function */
1597 "R_ARM_TLS_IE12GP", /* name */
1598 FALSE, /* partial_inplace */
1599 0x00000fff, /* src_mask */
1600 0x00000fff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1602};
1603
1604/* 112-127 private relocations
1605 128 R_ARM_ME_TOO, obsolete
1606 129-255 unallocated in AAELF.
1607
1608 249-255 extended, currently unused, relocations: */
1609
1610static reloc_howto_type elf32_arm_howto_table_2[4] =
1611{
1612 HOWTO (R_ARM_RREL32, /* type */
1613 0, /* rightshift */
1614 0, /* size (0 = byte, 1 = short, 2 = long) */
1615 0, /* bitsize */
1616 FALSE, /* pc_relative */
1617 0, /* bitpos */
1618 complain_overflow_dont,/* complain_on_overflow */
1619 bfd_elf_generic_reloc, /* special_function */
1620 "R_ARM_RREL32", /* name */
1621 FALSE, /* partial_inplace */
1622 0, /* src_mask */
1623 0, /* dst_mask */
1624 FALSE), /* pcrel_offset */
1625
1626 HOWTO (R_ARM_RABS32, /* type */
1627 0, /* rightshift */
1628 0, /* size (0 = byte, 1 = short, 2 = long) */
1629 0, /* bitsize */
1630 FALSE, /* pc_relative */
1631 0, /* bitpos */
1632 complain_overflow_dont,/* complain_on_overflow */
1633 bfd_elf_generic_reloc, /* special_function */
1634 "R_ARM_RABS32", /* name */
1635 FALSE, /* partial_inplace */
1636 0, /* src_mask */
1637 0, /* dst_mask */
1638 FALSE), /* pcrel_offset */
1639
1640 HOWTO (R_ARM_RPC24, /* type */
1641 0, /* rightshift */
1642 0, /* size (0 = byte, 1 = short, 2 = long) */
1643 0, /* bitsize */
1644 FALSE, /* pc_relative */
1645 0, /* bitpos */
1646 complain_overflow_dont,/* complain_on_overflow */
1647 bfd_elf_generic_reloc, /* special_function */
1648 "R_ARM_RPC24", /* name */
1649 FALSE, /* partial_inplace */
1650 0, /* src_mask */
1651 0, /* dst_mask */
1652 FALSE), /* pcrel_offset */
1653
1654 HOWTO (R_ARM_RBASE, /* type */
1655 0, /* rightshift */
1656 0, /* size (0 = byte, 1 = short, 2 = long) */
1657 0, /* bitsize */
1658 FALSE, /* pc_relative */
1659 0, /* bitpos */
1660 complain_overflow_dont,/* complain_on_overflow */
1661 bfd_elf_generic_reloc, /* special_function */
1662 "R_ARM_RBASE", /* name */
1663 FALSE, /* partial_inplace */
1664 0, /* src_mask */
1665 0, /* dst_mask */
1666 FALSE) /* pcrel_offset */
1667};
1668
1669static reloc_howto_type *
1670elf32_arm_howto_from_type (unsigned int r_type)
1671{
1672 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1673 return &elf32_arm_howto_table_1[r_type];
1674
1675 if (r_type >= R_ARM_RREL32
1676 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1677 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1678
1679 return NULL;
1680}
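/* Caller sketch (illustrative, not code from this file): the result must
   be checked, since relocation numbers covered by neither table yield
   NULL:

     reloc_howto_type *howto = elf32_arm_howto_from_type (r_type);
     if (howto == NULL)
       return bfd_reloc_notsupported;  */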
1681
1682static void
1683elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1684 Elf_Internal_Rela * elf_reloc)
1685{
1686 unsigned int r_type;
1687
1688 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1689 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1690}
1691
1692struct elf32_arm_reloc_map
1693 {
1694 bfd_reloc_code_real_type bfd_reloc_val;
1695 unsigned char elf_reloc_val;
1696 };
1697
1698/* All entries in this list must also be present in elf32_arm_howto_table. */
1699static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1700 {
1701 {BFD_RELOC_NONE, R_ARM_NONE},
1702 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1703 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1704 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1705 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1706 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1707 {BFD_RELOC_32, R_ARM_ABS32},
1708 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1709 {BFD_RELOC_8, R_ARM_ABS8},
1710 {BFD_RELOC_16, R_ARM_ABS16},
1711 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1712 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1713 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1714 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1719 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1720 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1721 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1722 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1723 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1724 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1725 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1726 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1727 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1728 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1729 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1730 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1731 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1732 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1733 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1734 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1735 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1736 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1737 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1738 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1739 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1740 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1741 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1742 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1743 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1744 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1745 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1746 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1747 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1748 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1749 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1750 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1751 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1752 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1753 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1754 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1755 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1756 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1757 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1758 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1759 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1760 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1761 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1762 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1763 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1764 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1765 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1766 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1767 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1768 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1769 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1770 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1771 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1772 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1773 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1774 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1775 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1776 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1777 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1778 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1779 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1780 };
1781
1782static reloc_howto_type *
1783elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1784 bfd_reloc_code_real_type code)
1785{
1786 unsigned int i;
1787
1788 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1789 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1790 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1791
1792 return NULL;
1793}
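
/* Illustrative sketch, not part of the original source: the lookup above is
   a linear scan of elf32_arm_reloc_map, so for example BFD_RELOC_32 matches
   the {BFD_RELOC_32, R_ARM_ABS32} entry and the howto returned is
   &elf32_arm_howto_table_1[R_ARM_ABS32].  */
#if 0
static void
example_reloc_lookup (bfd *abfd)
{
  reloc_howto_type *howto
    = elf32_arm_reloc_type_lookup (abfd, BFD_RELOC_32);

  /* The returned entry describes R_ARM_ABS32.  */
  BFD_ASSERT (howto != NULL
              && strcmp (howto->name, "R_ARM_ABS32") == 0);
}
#endif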
1794
1795static reloc_howto_type *
1796elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1797 const char *r_name)
1798{
1799 unsigned int i;
1800
1801 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1802 if (elf32_arm_howto_table_1[i].name != NULL
1803 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1804 return &elf32_arm_howto_table_1[i];
1805
1806 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1807 if (elf32_arm_howto_table_2[i].name != NULL
1808 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1809 return &elf32_arm_howto_table_2[i];
1810
1811 return NULL;
1812}
1813
1814/* Support for core dump NOTE sections. */
1815
1816static bfd_boolean
1817elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1818{
1819 int offset;
1820 size_t size;
1821
1822 switch (note->descsz)
1823 {
1824 default:
1825 return FALSE;
1826
1827 case 148: /* Linux/ARM 32-bit. */
1828 /* pr_cursig */
1829 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1830
1831 /* pr_pid */
1832 elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
1833
1834 /* pr_reg */
1835 offset = 72;
1836 size = 72;
1837
1838 break;
1839 }
1840
1841 /* Make a ".reg/999" section. */
1842 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1843 size, note->descpos + offset);
1844}
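
/* For reference (consistent with the offsets used above, on the usual
   Linux/ARM layout): in the 148-byte elf_prstatus note, pr_reg starts
   72 bytes in and is itself 72 bytes long, i.e. 18 registers of 4 bytes
   each (r0-r15, cpsr and the original r0).  */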
1845
1846static bfd_boolean
1847elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1848{
1849 switch (note->descsz)
1850 {
1851 default:
1852 return FALSE;
1853
1854 case 124: /* Linux/ARM elf_prpsinfo. */
1855 elf_tdata (abfd)->core_program
1856 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1857 elf_tdata (abfd)->core_command
1858 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1859 }
1860
1861 /* Note that for some reason, a spurious space is tacked
1862 onto the end of the args in some implementations (at least
1863 one, anyway), so strip it off if it exists. */
1864 {
1865 char *command = elf_tdata (abfd)->core_command;
1866 int n = strlen (command);
1867
1868 if (0 < n && command[n - 1] == ' ')
1869 command[n - 1] = '\0';
1870 }
1871
1872 return TRUE;
1873}
1874
1875#define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1876#define TARGET_LITTLE_NAME "elf32-littlearm"
1877#define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1878#define TARGET_BIG_NAME "elf32-bigarm"
1879
1880#define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1881#define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1882
1883typedef unsigned long int insn32;
1884typedef unsigned short int insn16;
1885
1886/* In lieu of proper flags, assume all EABIv4 or later objects are
1887 interworkable. */
1888#define INTERWORK_FLAG(abfd) \
1889 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1890 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1891 || ((abfd)->flags & BFD_LINKER_CREATED))
1892
1893/* The linker script knows the section names for placement.
1894 The entry names are used to do simple name mangling on the stubs:
1895 given a function name and its type, the stub can be found. The
1896 name can be changed; the only requirement is that the %s be present. */
1897#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1898#define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1899
1900#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1901#define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1902
1903#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1904#define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1905
1906#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1907#define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1908
1909#define STUB_ENTRY_NAME "__%s_veneer"
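
/* For illustration (not part of the original source): for a function "foo",
   the %s patterns above yield glue/veneer symbols such as "__foo_from_arm"
   (ARM to Thumb glue), "__foo_from_thumb" (Thumb to ARM glue) and
   "__foo_veneer" (long-branch stubs).  The BX and VFP11 veneers are numbered
   rather than named, e.g. "__bx_r3" and "__vfp11_veneer_0".  */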
1910
1911/* The name of the dynamic interpreter. This is put in the .interp
1912 section. */
1913#define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1914
1915#ifdef FOUR_WORD_PLT
1916
1917/* The first entry in a procedure linkage table looks like
1918 this. It is set up so that any shared library function that is
1919 called before the relocation has been set up calls the dynamic
1920 linker first. */
1921static const bfd_vma elf32_arm_plt0_entry [] =
1922 {
1923 0xe52de004, /* str lr, [sp, #-4]! */
1924 0xe59fe010, /* ldr lr, [pc, #16] */
1925 0xe08fe00e, /* add lr, pc, lr */
1926 0xe5bef008, /* ldr pc, [lr, #8]! */
1927 };
1928
1929/* Subsequent entries in a procedure linkage table look like
1930 this. */
1931static const bfd_vma elf32_arm_plt_entry [] =
1932 {
1933 0xe28fc600, /* add ip, pc, #NN */
1934 0xe28cca00, /* add ip, ip, #NN */
1935 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1936 0x00000000, /* unused */
1937 };
1938
1939#else
1940
1941/* The first entry in a procedure linkage table looks like
1942 this. It is set up so that any shared library function that is
1943 called before the relocation has been set up calls the dynamic
1944 linker first. */
1945static const bfd_vma elf32_arm_plt0_entry [] =
1946 {
1947 0xe52de004, /* str lr, [sp, #-4]! */
1948 0xe59fe004, /* ldr lr, [pc, #4] */
1949 0xe08fe00e, /* add lr, pc, lr */
1950 0xe5bef008, /* ldr pc, [lr, #8]! */
1951 0x00000000, /* &GOT[0] - . */
1952 };
1953
1954/* Subsequent entries in a procedure linkage table look like
1955 this. */
1956static const bfd_vma elf32_arm_plt_entry [] =
1957 {
1958 0xe28fc600, /* add ip, pc, #0xNN00000 */
1959 0xe28cca00, /* add ip, ip, #0xNN000 */
1960 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1961 };
1962
1963#endif
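
/* Illustrative sketch (an assumption about how the #0xNN00000 / #0xNN000 /
   #0xNNN placeholders above are filled in; the real code appears later in
   this file): the displacement from the PLT entry to its GOT slot is split
   into an 8-bit high field, an 8-bit middle field and a 12-bit low field,
   along these lines.  */
#if 0
  bfd_vma got_displacement, insn0, insn1, insn2;

  /* got_entry_address and plt_entry_address are hypothetical names for the
     addresses of this symbol's GOT slot and PLT entry.  */
  got_displacement = got_entry_address - (plt_entry_address + 8);
  insn0 = 0xe28fc600 | ((got_displacement & 0x0ff00000) >> 20);
  insn1 = 0xe28cca00 | ((got_displacement & 0x000ff000) >> 12);
  insn2 = 0xe5bcf000 | (got_displacement & 0x00000fff);
#endif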
1964
1965/* The format of the first entry in the procedure linkage table
1966 for a VxWorks executable. */
1967static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1968 {
1969 0xe52dc008, /* str ip,[sp,#-8]! */
1970 0xe59fc000, /* ldr ip,[pc] */
1971 0xe59cf008, /* ldr pc,[ip,#8] */
1972 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1973 };
1974
1975/* The format of subsequent entries in a VxWorks executable. */
1976static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1977 {
1978 0xe59fc000, /* ldr ip,[pc] */
1979 0xe59cf000, /* ldr pc,[ip] */
1980 0x00000000, /* .long @got */
1981 0xe59fc000, /* ldr ip,[pc] */
1982 0xea000000, /* b _PLT */
1983 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1984 };
1985
1986/* The format of entries in a VxWorks shared library. */
1987static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1988 {
1989 0xe59fc000, /* ldr ip,[pc] */
1990 0xe79cf009, /* ldr pc,[ip,r9] */
1991 0x00000000, /* .long @got */
1992 0xe59fc000, /* ldr ip,[pc] */
1993 0xe599f008, /* ldr pc,[r9,#8] */
1994 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1995 };
1996
1997/* An initial stub used if the PLT entry is referenced from Thumb code. */
1998#define PLT_THUMB_STUB_SIZE 4
1999static const bfd_vma elf32_arm_plt_thumb_stub [] =
2000 {
2001 0x4778, /* bx pc */
2002 0x46c0 /* nop */
2003 };
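
/* For reference: in Thumb state "bx pc" reads the PC as the address of the
   instruction plus 4, which (the stub being 32-bit aligned and four bytes
   long) is the word that follows it, so a stub laid down immediately before
   an ARM PLT entry simply switches to ARM state and falls through into that
   entry.  */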
2004
2005/* The entries in a PLT when using a DLL-based target with multiple
2006 address spaces. */
2007static const bfd_vma elf32_arm_symbian_plt_entry [] =
2008 {
2009 0xe51ff004, /* ldr pc, [pc, #-4] */
2010 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2011 };
2012
2013#define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2014#define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2015#define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2016#define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2017#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2018#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
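
/* For reference, these limits work out to roughly +/-32MB for ARM B/BL
   (a 24-bit field shifted left by 2), +/-4MB for Thumb-1 BL (22 bits) and
   +/-16MB for Thumb-2 B.W/BL (24 bits); the +8 and +4 terms account for the
   PC reading as the instruction address plus 8 in ARM state and plus 4 in
   Thumb state.  For example:

     ARM_MAX_FWD_BRANCH_OFFSET  = ((1 << 23) - 1) * 4 + 8 = 0x2000004
     THM2_MAX_FWD_BRANCH_OFFSET = (1 << 24) - 2 + 4       = 0x1000002  */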
2019
2020enum stub_insn_type
2021 {
2022 THUMB16_TYPE = 1,
2023 THUMB32_TYPE,
2024 ARM_TYPE,
2025 DATA_TYPE
2026 };
2027
2028#define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2029/* A bit of a hack: a Thumb conditional branch, in which the proper
2030 condition is inserted by arm_build_one_stub(). */
2031#define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2032#define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2033#define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2034#define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2035#define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2036#define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2037
2038typedef struct
2039{
2040 bfd_vma data;
2041 enum stub_insn_type type;
2042 unsigned int r_type;
2043 int reloc_addend;
2044} insn_sequence;
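
/* For illustration (not part of the original source): each macro above fills
   in one insn_sequence element.  For example, ARM_REL_INSN(0xea000000, -8)
   stands for {0xea000000, ARM_TYPE, R_ARM_JUMP24, -8}, an ARM 'b' whose
   target field is later relocated to (X - 8), while DATA_WORD(0, R_ARM_ABS32, 0)
   is a literal pool word that receives the absolute address of the stub
   target.  */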
2045
2046/* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2047 to reach the stub if necessary. */
2048static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2049 {
2050 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2051 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2052 };
2053
2054/* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2055 available. */
2056static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2057 {
2058 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2059 ARM_INSN(0xe12fff1c), /* bx ip */
2060 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2061 };
2062
2063/* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2064static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2065 {
2066 THUMB16_INSN(0xb401), /* push {r0} */
2067 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2068 THUMB16_INSN(0x4684), /* mov ip, r0 */
2069 THUMB16_INSN(0xbc01), /* pop {r0} */
2070 THUMB16_INSN(0x4760), /* bx ip */
2071 THUMB16_INSN(0xbf00), /* nop */
2072 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2073 };
2074
2075/* V4T Thumb -> Thumb long branch stub. Using the stack is not
2076 allowed. */
2077static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2078 {
2079 THUMB16_INSN(0x4778), /* bx pc */
2080 THUMB16_INSN(0x46c0), /* nop */
2081 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2082 ARM_INSN(0xe12fff1c), /* bx ip */
2083 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2084 };
2085
2086/* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2087 available. */
2088static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2089 {
2090 THUMB16_INSN(0x4778), /* bx pc */
2091 THUMB16_INSN(0x46c0), /* nop */
2092 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2093 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2094 };
2095
2096/* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2097 one, when the destination is close enough. */
2098static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2099 {
2100 THUMB16_INSN(0x4778), /* bx pc */
2101 THUMB16_INSN(0x46c0), /* nop */
2102 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2103 };
2104
2105/* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2106 blx to reach the stub if necessary. */
2107static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2108 {
2109 ARM_INSN(0xe59fc000), /* ldr ip, [pc] */
2110 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2111 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2112 };
2113
2114/* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2115 blx to reach the stub if necessary. We cannot add into pc;
2116 doing so is not guaranteed to switch mode (the behaviour differs
2117 between ARMv6 and ARMv7). */
2118static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2119 {
2120 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2121 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2122 ARM_INSN(0xe12fff1c), /* bx ip */
2123 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2124 };
2125
2126/* V4T ARM -> Thumb long branch stub, PIC. */
2127static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2128 {
2129 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2130 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2131 ARM_INSN(0xe12fff1c), /* bx ip */
2132 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2133 };
2134
2135/* V4T Thumb -> ARM long branch stub, PIC. */
2136static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2137 {
2138 THUMB16_INSN(0x4778), /* bx pc */
2139 THUMB16_INSN(0x46c0), /* nop */
2140 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2141 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2142 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2143 };
2144
2145/* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2146 architectures. */
2147static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2148 {
2149 THUMB16_INSN(0xb401), /* push {r0} */
2150 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2151 THUMB16_INSN(0x46fc), /* mov ip, pc */
2152 THUMB16_INSN(0x4484), /* add ip, r0 */
2153 THUMB16_INSN(0xbc01), /* pop {r0} */
2154 THUMB16_INSN(0x4760), /* bx ip */
2155 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X+4) */
2156 };
2157
2158/* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2159 allowed. */
2160static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2161 {
2162 THUMB16_INSN(0x4778), /* bx pc */
2163 THUMB16_INSN(0x46c0), /* nop */
2164 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2165 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2166 ARM_INSN(0xe12fff1c), /* bx ip */
2167 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2168 };
2169
2170/* Cortex-A8 erratum-workaround stubs. */
2171
2172/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2173 can't use a conditional branch to reach this stub). */
2174
2175static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2176 {
2177 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2178 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2179 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2180 };
2181
2182/* Stub used for b.w and bl.w instructions. */
2183
2184static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2185 {
2186 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2187 };
2188
2189static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2190 {
2191 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2192 };
2193
2194/* Stub used for Thumb-2 blx.w instructions. The original blx.w instruction
2195 (which switches to ARM mode) is modified to point to this stub; the stub
2196 jumps to the real destination using an ARM-mode branch. */
2197
2198static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2199 {
2200 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2201 };
2202
2203/* Section name for stubs is the associated section name plus this
2204 string. */
2205#define STUB_SUFFIX ".stub"
2206
2207/* One entry per long/short branch stub defined above. */
2208#define DEF_STUBS \
2209 DEF_STUB(long_branch_any_any) \
2210 DEF_STUB(long_branch_v4t_arm_thumb) \
2211 DEF_STUB(long_branch_thumb_only) \
2212 DEF_STUB(long_branch_v4t_thumb_thumb) \
2213 DEF_STUB(long_branch_v4t_thumb_arm) \
2214 DEF_STUB(short_branch_v4t_thumb_arm) \
2215 DEF_STUB(long_branch_any_arm_pic) \
2216 DEF_STUB(long_branch_any_thumb_pic) \
2217 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2220 DEF_STUB(long_branch_thumb_only_pic) \
2221 DEF_STUB(a8_veneer_b_cond) \
2222 DEF_STUB(a8_veneer_b) \
2223 DEF_STUB(a8_veneer_bl) \
2224 DEF_STUB(a8_veneer_blx)
2225
2226#define DEF_STUB(x) arm_stub_##x,
2227enum elf32_arm_stub_type {
2228 arm_stub_none,
2229 DEF_STUBS
2230 /* Note the first a8_veneer type. */
2231 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2232};
2233#undef DEF_STUB
2234
2235typedef struct
2236{
2237 const insn_sequence* template_sequence;
2238 int template_size;
2239} stub_def;
2240
2241#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2242static const stub_def stub_definitions[] = {
2243 {NULL, 0},
2244 DEF_STUBS
2245};
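
/* For illustration (not part of the original source): DEF_STUBS is expanded
   twice with different definitions of DEF_STUB.  For long_branch_any_any:

     enum:   DEF_STUB(long_branch_any_any)
               -> arm_stub_long_branch_any_any,
     table:  DEF_STUB(long_branch_any_any)
               -> {elf32_arm_stub_long_branch_any_any,
                   ARRAY_SIZE(elf32_arm_stub_long_branch_any_any)},

   so stub_definitions[arm_stub_long_branch_any_any] is that stub's template;
   the leading {NULL, 0} entry corresponds to arm_stub_none.  */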
2246
2247struct elf32_arm_stub_hash_entry
2248{
2249 /* Base hash table entry structure. */
2250 struct bfd_hash_entry root;
2251
2252 /* The stub section. */
2253 asection *stub_sec;
2254
2255 /* Offset within stub_sec of the beginning of this stub. */
2256 bfd_vma stub_offset;
2257
2258 /* Given the symbol's value and its section we can determine its final
2259 value when building the stubs (so the stub knows where to jump). */
2260 bfd_vma target_value;
2261 asection *target_section;
2262
2263 /* Offset to apply to relocation referencing target_value. */
2264 bfd_vma target_addend;
2265
2266 /* The instruction which caused this stub to be generated (only valid for
2267 Cortex-A8 erratum workaround stubs at present). */
2268 unsigned long orig_insn;
2269
2270 /* The stub type. */
2271 enum elf32_arm_stub_type stub_type;
2272 /* Its encoding size in bytes. */
2273 int stub_size;
2274 /* Its template. */
2275 const insn_sequence *stub_template;
2276 /* The size of the template (number of entries). */
2277 int stub_template_size;
2278
2279 /* The symbol table entry, if any, that this was derived from. */
2280 struct elf32_arm_link_hash_entry *h;
2281
2282 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2283 unsigned char st_type;
2284
2285 /* Where this stub is being called from, or, in the case of combined
2286 stub sections, the first input section in the group. */
2287 asection *id_sec;
2288
2289 /* The name for the local symbol at the start of this stub. The
2290 stub name in the hash table has to be unique; this does not, so
2291 it can be friendlier. */
2292 char *output_name;
2293};
2294
2295/* Used to build a map of a section. This is required for mixed-endian
2296 code/data. */
2297
2298typedef struct elf32_elf_section_map
2299{
2300 bfd_vma vma;
2301 char type;
2302}
2303elf32_arm_section_map;
2304
2305/* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2306
2307typedef enum
2308{
2309 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2310 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2311 VFP11_ERRATUM_ARM_VENEER,
2312 VFP11_ERRATUM_THUMB_VENEER
2313}
2314elf32_vfp11_erratum_type;
2315
2316typedef struct elf32_vfp11_erratum_list
2317{
2318 struct elf32_vfp11_erratum_list *next;
2319 bfd_vma vma;
2320 union
2321 {
2322 struct
2323 {
2324 struct elf32_vfp11_erratum_list *veneer;
2325 unsigned int vfp_insn;
2326 } b;
2327 struct
2328 {
2329 struct elf32_vfp11_erratum_list *branch;
2330 unsigned int id;
2331 } v;
2332 } u;
2333 elf32_vfp11_erratum_type type;
2334}
2335elf32_vfp11_erratum_list;
2336
2337typedef enum
2338{
2339 DELETE_EXIDX_ENTRY,
2340 INSERT_EXIDX_CANTUNWIND_AT_END
2341}
2342arm_unwind_edit_type;
2343
2344/* A (sorted) list of edits to apply to an unwind table. */
2345typedef struct arm_unwind_table_edit
2346{
2347 arm_unwind_edit_type type;
2348 /* Note: we sometimes want to insert an unwind entry corresponding to a
2349 section different from the one we're currently writing out, so record the
2350 (text) section this edit relates to here. */
2351 asection *linked_section;
2352 unsigned int index;
2353 struct arm_unwind_table_edit *next;
2354}
2355arm_unwind_table_edit;
2356
2357typedef struct _arm_elf_section_data
2358{
2359 /* Information about mapping symbols. */
2360 struct bfd_elf_section_data elf;
2361 unsigned int mapcount;
2362 unsigned int mapsize;
2363 elf32_arm_section_map *map;
2364 /* Information about CPU errata. */
2365 unsigned int erratumcount;
2366 elf32_vfp11_erratum_list *erratumlist;
2367 /* Information about unwind tables. */
2368 union
2369 {
2370 /* Unwind info attached to a text section. */
2371 struct
2372 {
2373 asection *arm_exidx_sec;
2374 } text;
2375
2376 /* Unwind info attached to an .ARM.exidx section. */
2377 struct
2378 {
2379 arm_unwind_table_edit *unwind_edit_list;
2380 arm_unwind_table_edit *unwind_edit_tail;
2381 } exidx;
2382 } u;
2383}
2384_arm_elf_section_data;
2385
2386#define elf32_arm_section_data(sec) \
2387 ((_arm_elf_section_data *) elf_section_data (sec))
2388
2389/* A fix which might be required for the Cortex-A8 Thumb-2 branch/TLB erratum.
2390 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2391 so they may be created multiple times: whilst relaxing we keep these entries
2392 in an array which we can refresh easily, then create stubs for each
2393 potentially erratum-triggering instruction once we've settled on a solution. */
2394
2395struct a8_erratum_fix {
2396 bfd *input_bfd;
2397 asection *section;
2398 bfd_vma offset;
2399 bfd_vma addend;
2400 unsigned long orig_insn;
2401 char *stub_name;
2402 enum elf32_arm_stub_type stub_type;
2403 int st_type;
2404};
2405
2406/* A table of relocs applied to branches which might trigger the
2407 Cortex-A8 erratum. */
2408
2409struct a8_erratum_reloc {
2410 bfd_vma from;
2411 bfd_vma destination;
2412 struct elf32_arm_link_hash_entry *hash;
2413 const char *sym_name;
2414 unsigned int r_type;
2415 unsigned char st_type;
2416 bfd_boolean non_a8_stub;
2417};
2418
2419/* The size of the thread control block. */
2420#define TCB_SIZE 8
2421
2422struct elf_arm_obj_tdata
2423{
2424 struct elf_obj_tdata root;
2425
2426 /* tls_type for each local got entry. */
2427 char *local_got_tls_type;
2428
2429 /* Zero to warn when linking objects with incompatible enum sizes. */
2430 int no_enum_size_warning;
2431
2432 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2433 int no_wchar_size_warning;
2434};
2435
2436#define elf_arm_tdata(bfd) \
2437 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2438
2439#define elf32_arm_local_got_tls_type(bfd) \
2440 (elf_arm_tdata (bfd)->local_got_tls_type)
2441
2442#define is_arm_elf(bfd) \
2443 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2444 && elf_tdata (bfd) != NULL \
2445 && elf_object_id (bfd) == ARM_ELF_DATA)
2446
2447static bfd_boolean
2448elf32_arm_mkobject (bfd *abfd)
2449{
2450 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2451 ARM_ELF_DATA);
2452}
2453
2454/* The ARM linker needs to keep track of the number of relocs that it
2455 decides to copy in check_relocs for each symbol. This is so that
2456 it can discard PC relative relocs if it doesn't need them when
2457 linking with -Bsymbolic. We store the information in a field
2458 extending the regular ELF linker hash table. */
2459
2460/* This structure keeps track of the number of relocs we have copied
2461 for a given symbol. */
2462struct elf32_arm_relocs_copied
2463 {
2464 /* Next section. */
2465 struct elf32_arm_relocs_copied * next;
2466 /* A section in dynobj. */
2467 asection * section;
2468 /* Number of relocs copied in this section. */
2469 bfd_size_type count;
2470 /* Number of PC-relative relocs copied in this section. */
2471 bfd_size_type pc_count;
2472 };
2473
2474#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2475
2476/* Arm ELF linker hash entry. */
2477struct elf32_arm_link_hash_entry
2478 {
2479 struct elf_link_hash_entry root;
2480
2481 /* Number of PC relative relocs copied for this symbol. */
2482 struct elf32_arm_relocs_copied * relocs_copied;
2483
2484 /* We reference count Thumb references to a PLT entry separately,
2485 so that we can emit the Thumb trampoline only if needed. */
2486 bfd_signed_vma plt_thumb_refcount;
2487
2488 /* Some references from Thumb code may be eliminated by BL->BLX
2489 conversion, so record them separately. */
2490 bfd_signed_vma plt_maybe_thumb_refcount;
2491
2492 /* Since PLT entries have variable size if the Thumb prologue is
2493 used, we need to record the index into .got.plt instead of
2494 recomputing it from the PLT offset. */
2495 bfd_signed_vma plt_got_offset;
2496
2497#define GOT_UNKNOWN 0
2498#define GOT_NORMAL 1
2499#define GOT_TLS_GD 2
2500#define GOT_TLS_IE 4
2501 unsigned char tls_type;
2502
2503 /* The symbol marking the real symbol location for exported thumb
2504 symbols with Arm stubs. */
2505 struct elf_link_hash_entry *export_glue;
2506
2507 /* A pointer to the most recently used stub hash entry against this
2508 symbol. */
2509 struct elf32_arm_stub_hash_entry *stub_cache;
2510 };
2511
2512/* Traverse an arm ELF linker hash table. */
2513#define elf32_arm_link_hash_traverse(table, func, info) \
2514 (elf_link_hash_traverse \
2515 (&(table)->root, \
2516 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2517 (info)))
2518
2519/* Get the ARM elf linker hash table from a link_info structure. */
2520#define elf32_arm_hash_table(info) \
2521 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2522 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2523
2524#define arm_stub_hash_lookup(table, string, create, copy) \
2525 ((struct elf32_arm_stub_hash_entry *) \
2526 bfd_hash_lookup ((table), (string), (create), (copy)))
2527
2528/* Array to keep track of which stub sections have been created, and
2529 information on stub grouping. */
2530struct map_stub
2531{
2532 /* This is the section to which stubs in the group will be
2533 attached. */
2534 asection *link_sec;
2535 /* The stub section. */
2536 asection *stub_sec;
2537};
2538
2539/* ARM ELF linker hash table. */
2540struct elf32_arm_link_hash_table
2541{
2542 /* The main hash table. */
2543 struct elf_link_hash_table root;
2544
2545 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2546 bfd_size_type thumb_glue_size;
2547
2548 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2549 bfd_size_type arm_glue_size;
2550
2551 /* The size in bytes of section containing the ARMv4 BX veneers. */
2552 bfd_size_type bx_glue_size;
2553
2554 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2555 veneer has been populated. */
2556 bfd_vma bx_glue_offset[15];
2557
2558 /* The size in bytes of the section containing glue for VFP11 erratum
2559 veneers. */
2560 bfd_size_type vfp11_erratum_glue_size;
2561
2562 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2563 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2564 elf32_arm_write_section(). */
2565 struct a8_erratum_fix *a8_erratum_fixes;
2566 unsigned int num_a8_erratum_fixes;
2567
2568 /* An arbitrary input BFD chosen to hold the glue sections. */
2569 bfd * bfd_of_glue_owner;
2570
2571 /* Nonzero to output a BE8 image. */
2572 int byteswap_code;
2573
2574 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2575 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2576 int target1_is_rel;
2577
2578 /* The relocation to use for R_ARM_TARGET2 relocations. */
2579 int target2_reloc;
2580
2581 /* 0 = Ignore R_ARM_V4BX.
2582 1 = Convert BX to MOV PC.
2583 2 = Generate v4 interworking stubs. */
2584 int fix_v4bx;
2585
2586 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2587 int fix_cortex_a8;
2588
2589 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2590 int use_blx;
2591
2592 /* What sort of code sequences we should look for which may trigger the
2593 VFP11 denorm erratum. */
2594 bfd_arm_vfp11_fix vfp11_fix;
2595
2596 /* Global counter for the number of fixes we have emitted. */
2597 int num_vfp11_fixes;
2598
2599 /* Nonzero to force PIC branch veneers. */
2600 int pic_veneer;
2601
2602 /* The number of bytes in the initial entry in the PLT. */
2603 bfd_size_type plt_header_size;
2604
2605 /* The number of bytes in the subsequent PLT entries. */
2606 bfd_size_type plt_entry_size;
2607
2608 /* True if the target system is VxWorks. */
2609 int vxworks_p;
2610
2611 /* True if the target system is Symbian OS. */
2612 int symbian_p;
2613
2614 /* True if the target uses REL relocations. */
2615 int use_rel;
2616
2617 /* Short-cuts to get to dynamic linker sections. */
2618 asection *sgot;
2619 asection *sgotplt;
2620 asection *srelgot;
2621 asection *splt;
2622 asection *srelplt;
2623 asection *sdynbss;
2624 asection *srelbss;
2625
2626 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2627 asection *srelplt2;
2628
2629 /* Data for R_ARM_TLS_LDM32 relocations. */
2630 union
2631 {
2632 bfd_signed_vma refcount;
2633 bfd_vma offset;
2634 } tls_ldm_got;
2635
2636 /* Small local sym cache. */
2637 struct sym_cache sym_cache;
2638
2639 /* For convenience in allocate_dynrelocs. */
2640 bfd * obfd;
2641
2642 /* The stub hash table. */
2643 struct bfd_hash_table stub_hash_table;
2644
2645 /* Linker stub bfd. */
2646 bfd *stub_bfd;
2647
2648 /* Linker call-backs. */
2649 asection * (*add_stub_section) (const char *, asection *);
2650 void (*layout_sections_again) (void);
2651
2652 /* Array to keep track of which stub sections have been created, and
2653 information on stub grouping. */
2654 struct map_stub *stub_group;
2655
2656 /* Number of elements in stub_group. */
2657 int top_id;
2658
2659 /* Assorted information used by elf32_arm_size_stubs. */
2660 unsigned int bfd_count;
2661 int top_index;
2662 asection **input_list;
2663};
2664
2665/* Create an entry in an ARM ELF linker hash table. */
2666
2667static struct bfd_hash_entry *
2668elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2669 struct bfd_hash_table * table,
2670 const char * string)
2671{
2672 struct elf32_arm_link_hash_entry * ret =
2673 (struct elf32_arm_link_hash_entry *) entry;
2674
2675 /* Allocate the structure if it has not already been allocated by a
2676 subclass. */
2677 if (ret == NULL)
2678 ret = (struct elf32_arm_link_hash_entry *)
2679 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2680 if (ret == NULL)
2681 return (struct bfd_hash_entry *) ret;
2682
2683 /* Call the allocation method of the superclass. */
2684 ret = ((struct elf32_arm_link_hash_entry *)
2685 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2686 table, string));
2687 if (ret != NULL)
2688 {
2689 ret->relocs_copied = NULL;
2690 ret->tls_type = GOT_UNKNOWN;
2691 ret->plt_thumb_refcount = 0;
2692 ret->plt_maybe_thumb_refcount = 0;
2693 ret->plt_got_offset = -1;
2694 ret->export_glue = NULL;
2695
2696 ret->stub_cache = NULL;
2697 }
2698
2699 return (struct bfd_hash_entry *) ret;
2700}
2701
2702/* Initialize an entry in the stub hash table. */
2703
2704static struct bfd_hash_entry *
2705stub_hash_newfunc (struct bfd_hash_entry *entry,
2706 struct bfd_hash_table *table,
2707 const char *string)
2708{
2709 /* Allocate the structure if it has not already been allocated by a
2710 subclass. */
2711 if (entry == NULL)
2712 {
2713 entry = (struct bfd_hash_entry *)
2714 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
2715 if (entry == NULL)
2716 return entry;
2717 }
2718
2719 /* Call the allocation method of the superclass. */
2720 entry = bfd_hash_newfunc (entry, table, string);
2721 if (entry != NULL)
2722 {
2723 struct elf32_arm_stub_hash_entry *eh;
2724
2725 /* Initialize the local fields. */
2726 eh = (struct elf32_arm_stub_hash_entry *) entry;
2727 eh->stub_sec = NULL;
2728 eh->stub_offset = 0;
2729 eh->target_value = 0;
2730 eh->target_section = NULL;
2731 eh->target_addend = 0;
2732 eh->orig_insn = 0;
2733 eh->stub_type = arm_stub_none;
2734 eh->stub_size = 0;
2735 eh->stub_template = NULL;
2736 eh->stub_template_size = 0;
2737 eh->h = NULL;
2738 eh->id_sec = NULL;
2739 eh->output_name = NULL;
2740 }
2741
2742 return entry;
2743}
2744
2745/* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2746 shortcuts to them in our hash table. */
2747
2748static bfd_boolean
2749create_got_section (bfd *dynobj, struct bfd_link_info *info)
2750{
2751 struct elf32_arm_link_hash_table *htab;
2752
2753 htab = elf32_arm_hash_table (info);
2754 if (htab == NULL)
2755 return FALSE;
2756
2757 /* BPABI objects never have a GOT, or associated sections. */
2758 if (htab->symbian_p)
2759 return TRUE;
2760
2761 if (! _bfd_elf_create_got_section (dynobj, info))
2762 return FALSE;
2763
2764 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2765 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2766 if (!htab->sgot || !htab->sgotplt)
2767 abort ();
2768
2769 htab->srelgot = bfd_get_section_by_name (dynobj,
2770 RELOC_SECTION (htab, ".got"));
2771 if (htab->srelgot == NULL)
2772 return FALSE;
2773 return TRUE;
2774}
2775
2776/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2777 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2778 hash table. */
2779
2780static bfd_boolean
2781elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2782{
2783 struct elf32_arm_link_hash_table *htab;
2784
2785 htab = elf32_arm_hash_table (info);
2786 if (htab == NULL)
2787 return FALSE;
2788
2789 if (!htab->sgot && !create_got_section (dynobj, info))
2790 return FALSE;
2791
2792 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2793 return FALSE;
2794
2795 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2796 htab->srelplt = bfd_get_section_by_name (dynobj,
2797 RELOC_SECTION (htab, ".plt"));
2798 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2799 if (!info->shared)
2800 htab->srelbss = bfd_get_section_by_name (dynobj,
2801 RELOC_SECTION (htab, ".bss"));
2802
2803 if (htab->vxworks_p)
2804 {
2805 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2806 return FALSE;
2807
2808 if (info->shared)
2809 {
2810 htab->plt_header_size = 0;
2811 htab->plt_entry_size
2812 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2813 }
2814 else
2815 {
2816 htab->plt_header_size
2817 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2818 htab->plt_entry_size
2819 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2820 }
2821 }
2822
2823 if (!htab->splt
2824 || !htab->srelplt
2825 || !htab->sdynbss
2826 || (!info->shared && !htab->srelbss))
2827 abort ();
2828
2829 return TRUE;
2830}
2831
2832/* Copy the extra info we tack onto an elf_link_hash_entry. */
2833
2834static void
2835elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2836 struct elf_link_hash_entry *dir,
2837 struct elf_link_hash_entry *ind)
2838{
2839 struct elf32_arm_link_hash_entry *edir, *eind;
2840
2841 edir = (struct elf32_arm_link_hash_entry *) dir;
2842 eind = (struct elf32_arm_link_hash_entry *) ind;
2843
2844 if (eind->relocs_copied != NULL)
2845 {
2846 if (edir->relocs_copied != NULL)
2847 {
2848 struct elf32_arm_relocs_copied **pp;
2849 struct elf32_arm_relocs_copied *p;
2850
2851 /* Add reloc counts against the indirect sym to the direct sym
2852 list. Merge any entries against the same section. */
2853 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2854 {
2855 struct elf32_arm_relocs_copied *q;
2856
2857 for (q = edir->relocs_copied; q != NULL; q = q->next)
2858 if (q->section == p->section)
2859 {
2860 q->pc_count += p->pc_count;
2861 q->count += p->count;
2862 *pp = p->next;
2863 break;
2864 }
2865 if (q == NULL)
2866 pp = &p->next;
2867 }
2868 *pp = edir->relocs_copied;
2869 }
2870
2871 edir->relocs_copied = eind->relocs_copied;
2872 eind->relocs_copied = NULL;
2873 }
2874
2875 if (ind->root.type == bfd_link_hash_indirect)
2876 {
2877 /* Copy over PLT info. */
2878 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2879 eind->plt_thumb_refcount = 0;
2880 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2881 eind->plt_maybe_thumb_refcount = 0;
2882
2883 if (dir->got.refcount <= 0)
2884 {
2885 edir->tls_type = eind->tls_type;
2886 eind->tls_type = GOT_UNKNOWN;
2887 }
2888 }
2889
2890 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2891}
2892
2893/* Create an ARM elf linker hash table. */
2894
2895static struct bfd_link_hash_table *
2896elf32_arm_link_hash_table_create (bfd *abfd)
2897{
2898 struct elf32_arm_link_hash_table *ret;
2899 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2900
2901 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
2902 if (ret == NULL)
2903 return NULL;
2904
2905 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2906 elf32_arm_link_hash_newfunc,
2907 sizeof (struct elf32_arm_link_hash_entry),
2908 ARM_ELF_DATA))
2909 {
2910 free (ret);
2911 return NULL;
2912 }
2913
2914 ret->sgot = NULL;
2915 ret->sgotplt = NULL;
2916 ret->srelgot = NULL;
2917 ret->splt = NULL;
2918 ret->srelplt = NULL;
2919 ret->sdynbss = NULL;
2920 ret->srelbss = NULL;
2921 ret->srelplt2 = NULL;
2922 ret->thumb_glue_size = 0;
2923 ret->arm_glue_size = 0;
2924 ret->bx_glue_size = 0;
2925 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2926 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2927 ret->vfp11_erratum_glue_size = 0;
2928 ret->num_vfp11_fixes = 0;
2929 ret->fix_cortex_a8 = 0;
2930 ret->bfd_of_glue_owner = NULL;
2931 ret->byteswap_code = 0;
2932 ret->target1_is_rel = 0;
2933 ret->target2_reloc = R_ARM_NONE;
2934#ifdef FOUR_WORD_PLT
2935 ret->plt_header_size = 16;
2936 ret->plt_entry_size = 16;
2937#else
2938 ret->plt_header_size = 20;
2939 ret->plt_entry_size = 12;
2940#endif
2941 ret->fix_v4bx = 0;
2942 ret->use_blx = 0;
2943 ret->vxworks_p = 0;
2944 ret->symbian_p = 0;
2945 ret->use_rel = 1;
2946 ret->sym_cache.abfd = NULL;
2947 ret->obfd = abfd;
2948 ret->tls_ldm_got.refcount = 0;
2949 ret->stub_bfd = NULL;
2950 ret->add_stub_section = NULL;
2951 ret->layout_sections_again = NULL;
2952 ret->stub_group = NULL;
2953 ret->top_id = 0;
2954 ret->bfd_count = 0;
2955 ret->top_index = 0;
2956 ret->input_list = NULL;
2957
2958 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2959 sizeof (struct elf32_arm_stub_hash_entry)))
2960 {
2961 free (ret);
2962 return NULL;
2963 }
2964
2965 return &ret->root.root;
2966}
2967
2968/* Free the derived linker hash table. */
2969
2970static void
2971elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2972{
2973 struct elf32_arm_link_hash_table *ret
2974 = (struct elf32_arm_link_hash_table *) hash;
2975
2976 bfd_hash_table_free (&ret->stub_hash_table);
2977 _bfd_generic_link_hash_table_free (hash);
2978}
2979
2980/* Determine if we're dealing with a Thumb only architecture. */
2981
2982static bfd_boolean
2983using_thumb_only (struct elf32_arm_link_hash_table *globals)
2984{
2985 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2986 Tag_CPU_arch);
2987 int profile;
2988
2989 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
2990 return TRUE;
2991
2992 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
2993 return FALSE;
2994
2995 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2996 Tag_CPU_arch_profile);
2997
2998 return profile == 'M';
2999}
3000
3001/* Determine if we're dealing with a Thumb-2 object. */
3002
3003static bfd_boolean
3004using_thumb2 (struct elf32_arm_link_hash_table *globals)
3005{
3006 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3007 Tag_CPU_arch);
3008 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3009}
3010
3011/* Determine what kind of NOPs are available. */
3012
3013static bfd_boolean
3014arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3015{
3016 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3017 Tag_CPU_arch);
3018 return arch == TAG_CPU_ARCH_V6T2
3019 || arch == TAG_CPU_ARCH_V6K
3020 || arch == TAG_CPU_ARCH_V7
3021 || arch == TAG_CPU_ARCH_V7E_M;
3022}
3023
3024static bfd_boolean
3025arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3026{
3027 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3028 Tag_CPU_arch);
3029 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3030 || arch == TAG_CPU_ARCH_V7E_M);
3031}
3032
3033static bfd_boolean
3034arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3035{
3036 switch (stub_type)
3037 {
3038 case arm_stub_long_branch_thumb_only:
3039 case arm_stub_long_branch_v4t_thumb_arm:
3040 case arm_stub_short_branch_v4t_thumb_arm:
3041 case arm_stub_long_branch_v4t_thumb_arm_pic:
3042 case arm_stub_long_branch_thumb_only_pic:
3043 return TRUE;
3044 case arm_stub_none:
3045 BFD_FAIL ();
3046 return FALSE;
3048 default:
3049 return FALSE;
3050 }
3051}
3052
3053/* Determine the type of stub needed, if any, for a call. */
3054
3055static enum elf32_arm_stub_type
3056arm_type_of_stub (struct bfd_link_info *info,
3057 asection *input_sec,
3058 const Elf_Internal_Rela *rel,
3059 int *actual_st_type,
3060 struct elf32_arm_link_hash_entry *hash,
3061 bfd_vma destination,
3062 asection *sym_sec,
3063 bfd *input_bfd,
3064 const char *name)
3065{
3066 bfd_vma location;
3067 bfd_signed_vma branch_offset;
3068 unsigned int r_type;
3069 struct elf32_arm_link_hash_table * globals;
3070 int thumb2;
3071 int thumb_only;
3072 enum elf32_arm_stub_type stub_type = arm_stub_none;
3073 int use_plt = 0;
3074 int st_type = *actual_st_type;
3075
3076 /* We don't know the actual type of destination in case it is of
3077 type STT_SECTION: give up. */
3078 if (st_type == STT_SECTION)
3079 return stub_type;
3080
3081 globals = elf32_arm_hash_table (info);
3082 if (globals == NULL)
3083 return stub_type;
3084
3085 thumb_only = using_thumb_only (globals);
3086
3087 thumb2 = using_thumb2 (globals);
3088
3089 /* Determine where the call point is. */
3090 location = (input_sec->output_offset
3091 + input_sec->output_section->vma
3092 + rel->r_offset);
3093
3094 r_type = ELF32_R_TYPE (rel->r_info);
3095
3096 /* Keep a simpler condition, for the sake of clarity. */
3097 if (globals->splt != NULL
3098 && hash != NULL
3099 && hash->root.plt.offset != (bfd_vma) -1)
3100 {
3101 use_plt = 1;
3102
3103 /* Note when dealing with PLT entries: the main PLT stub is in
3104 ARM mode, so if the branch is in Thumb mode, another
3105 Thumb->ARM stub will be inserted later just before the ARM
3106 PLT stub. We don't take this extra distance into account
3107 here, because if a long branch stub is needed, we'll add a
3108 Thumb->Arm one and branch directly to the ARM PLT entry
3109 because it avoids spreading offset corrections in several
3110 places. */
3111
3112 destination = (globals->splt->output_section->vma
3113 + globals->splt->output_offset
3114 + hash->root.plt.offset);
3115 st_type = STT_FUNC;
3116 }
3117
3118 branch_offset = (bfd_signed_vma)(destination - location);
3119
3120 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3121 {
3122 /* Handle cases where:
3123 - this call goes too far (the maximum distance differs
3124 between Thumb and Thumb-2);
3125 - it's a Thumb->Arm call and blx is not available, or it's a
3126 Thumb->Arm branch (not bl). A stub is needed in this case,
3127 but only if this call is not through a PLT entry, since
3128 PLT stubs handle the mode switch already.
3129 */
3130 if ((!thumb2
3131 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3132 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3133 || (thumb2
3134 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3135 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3136 || ((st_type != STT_ARM_TFUNC)
3137 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3138 || (r_type == R_ARM_THM_JUMP24))
3139 && !use_plt))
3140 {
3141 if (st_type == STT_ARM_TFUNC)
3142 {
3143 /* Thumb to thumb. */
3144 if (!thumb_only)
3145 {
3146 stub_type = (info->shared | globals->pic_veneer)
3147 /* PIC stubs. */
3148 ? ((globals->use_blx
3149 && (r_type == R_ARM_THM_CALL))
3150 /* V5T and above. Stub starts with ARM code, so
3151 we must be able to switch mode before
3152 reaching it, which is only possible for 'bl'
3153 (ie R_ARM_THM_CALL relocation). */
3154 ? arm_stub_long_branch_any_thumb_pic
3155 /* On V4T, use Thumb code only. */
3156 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3157
3158 /* non-PIC stubs. */
3159 : ((globals->use_blx
3160 && (r_type == R_ARM_THM_CALL))
3161 /* V5T and above. */
3162 ? arm_stub_long_branch_any_any
3163 /* V4T. */
3164 : arm_stub_long_branch_v4t_thumb_thumb);
3165 }
3166 else
3167 {
3168 stub_type = (info->shared | globals->pic_veneer)
3169 /* PIC stub. */
3170 ? arm_stub_long_branch_thumb_only_pic
3171 /* non-PIC stub. */
3172 : arm_stub_long_branch_thumb_only;
3173 }
3174 }
3175 else
3176 {
3177 /* Thumb to arm. */
3178 if (sym_sec != NULL
3179 && sym_sec->owner != NULL
3180 && !INTERWORK_FLAG (sym_sec->owner))
3181 {
3182 (*_bfd_error_handler)
3183 (_("%B(%s): warning: interworking not enabled.\n"
3184 " first occurrence: %B: Thumb call to ARM"),
3185 sym_sec->owner, input_bfd, name);
3186 }
3187
3188 stub_type = (info->shared | globals->pic_veneer)
3189 /* PIC stubs. */
3190 ? ((globals->use_blx
3191 && (r_type == R_ARM_THM_CALL))
3192 /* V5T and above. */
3193 ? arm_stub_long_branch_any_arm_pic
3194 /* V4T PIC stub. */
3195 : arm_stub_long_branch_v4t_thumb_arm_pic)
3196
3197 /* non-PIC stubs. */
3198 : ((globals->use_blx
3199 && (r_type == R_ARM_THM_CALL))
3200 /* V5T and above. */
3201 ? arm_stub_long_branch_any_any
3202 /* V4T. */
3203 : arm_stub_long_branch_v4t_thumb_arm);
3204
3205 /* Handle v4t short branches. */
3206 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3207 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3208 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3209 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3210 }
3211 }
3212 }
3213 else if (r_type == R_ARM_CALL
3214 || r_type == R_ARM_JUMP24
3215 || r_type == R_ARM_PLT32)
3216 {
3217 if (st_type == STT_ARM_TFUNC)
3218 {
3219 /* Arm to thumb. */
3220
3221 if (sym_sec != NULL
3222 && sym_sec->owner != NULL
3223 && !INTERWORK_FLAG (sym_sec->owner))
3224 {
3225 (*_bfd_error_handler)
3226 (_("%B(%s): warning: interworking not enabled.\n"
3227 " first occurrence: %B: ARM call to Thumb"),
3228 sym_sec->owner, input_bfd, name);
3229 }
3230
3231 /* We have an extra 2 bytes of reach because of
3232 the mode change (bit 24 (H) of the BLX encoding). */
3233 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3234 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3235 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3236 || (r_type == R_ARM_JUMP24)
3237 || (r_type == R_ARM_PLT32))
3238 {
3239 stub_type = (info->shared | globals->pic_veneer)
3240 /* PIC stubs. */
3241 ? ((globals->use_blx)
3242 /* V5T and above. */
3243 ? arm_stub_long_branch_any_thumb_pic
3244 /* V4T stub. */
3245 : arm_stub_long_branch_v4t_arm_thumb_pic)
3246
3247 /* non-PIC stubs. */
3248 : ((globals->use_blx)
3249 /* V5T and above. */
3250 ? arm_stub_long_branch_any_any
3251 /* V4T. */
3252 : arm_stub_long_branch_v4t_arm_thumb);
3253 }
3254 }
3255 else
3256 {
3257 /* Arm to arm. */
3258 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3259 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3260 {
3261 stub_type = (info->shared | globals->pic_veneer)
3262 /* PIC stubs. */
3263 ? arm_stub_long_branch_any_arm_pic
3264 /* non-PIC stubs. */
3265 : arm_stub_long_branch_any_any;
3266 }
3267 }
3268 }
3269
3270 /* If a stub is needed, record the actual destination type. */
3271 if (stub_type != arm_stub_none)
3272 *actual_st_type = st_type;
3273
3274 return stub_type;
3275}
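
/* For illustration (not part of the original source), one path through the
   selection above: a Thumb R_ARM_THM_CALL to an ARM function with BLX
   unavailable (use_blx == 0) needs a stub for the mode switch even when the
   target is in range, unless the call goes through a PLT entry; if the
   target is within Thumb branch range the cheaper
   arm_stub_short_branch_v4t_thumb_arm form is chosen, otherwise
   arm_stub_long_branch_v4t_thumb_arm (or its _pic variant for shared links
   or when PIC veneers are forced).  */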
3276
3277/* Build a name for an entry in the stub hash table. */
3278
3279static char *
3280elf32_arm_stub_name (const asection *input_section,
3281 const asection *sym_sec,
3282 const struct elf32_arm_link_hash_entry *hash,
3283 const Elf_Internal_Rela *rel,
3284 enum elf32_arm_stub_type stub_type)
3285{
3286 char *stub_name;
3287 bfd_size_type len;
3288
3289 if (hash)
3290 {
3291 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3292 stub_name = (char *) bfd_malloc (len);
3293 if (stub_name != NULL)
3294 sprintf (stub_name, "%08x_%s+%x_%d",
3295 input_section->id & 0xffffffff,
3296 hash->root.root.root.string,
3297 (int) rel->r_addend & 0xffffffff,
3298 (int) stub_type);
3299 }
3300 else
3301 {
3302 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3303 stub_name = (char *) bfd_malloc (len);
3304 if (stub_name != NULL)
3305 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3306 input_section->id & 0xffffffff,
3307 sym_sec->id & 0xffffffff,
3308 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3309 (int) rel->r_addend & 0xffffffff,
3310 (int) stub_type);
3311 }
3312
3313 return stub_name;
3314}
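
/* For illustration (not part of the original source): with the DEF_STUBS
   ordering above, arm_stub_long_branch_any_any has the value 1, so a stub
   for a call to a global symbol "printf" from a section whose id is (say)
   0x2a, with a zero addend, is named "0000002a_printf+0_1".  Stubs for local
   symbols use the second format, which substitutes the symbol's section id
   and symbol index for the name.  */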
3315
3316/* Look up an entry in the stub hash. Stub entries are cached because
3317 creating the stub name takes a bit of time. */
3318
3319static struct elf32_arm_stub_hash_entry *
3320elf32_arm_get_stub_entry (const asection *input_section,
3321 const asection *sym_sec,
3322 struct elf_link_hash_entry *hash,
3323 const Elf_Internal_Rela *rel,
3324 struct elf32_arm_link_hash_table *htab,
3325 enum elf32_arm_stub_type stub_type)
3326{
3327 struct elf32_arm_stub_hash_entry *stub_entry;
3328 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3329 const asection *id_sec;
3330
3331 if ((input_section->flags & SEC_CODE) == 0)
3332 return NULL;
3333
3334 /* If this input section is part of a group of sections sharing one
3335 stub section, then use the id of the first section in the group.
3336 Stub names need to include a section id, as there may well be
3337 more than one stub used to reach, say, printf, and we need to
3338 distinguish between them. */
3339 id_sec = htab->stub_group[input_section->id].link_sec;
3340
3341 if (h != NULL && h->stub_cache != NULL
3342 && h->stub_cache->h == h
3343 && h->stub_cache->id_sec == id_sec
3344 && h->stub_cache->stub_type == stub_type)
3345 {
3346 stub_entry = h->stub_cache;
3347 }
3348 else
3349 {
3350 char *stub_name;
3351
3352 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3353 if (stub_name == NULL)
3354 return NULL;
3355
3356 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3357 stub_name, FALSE, FALSE);
3358 if (h != NULL)
3359 h->stub_cache = stub_entry;
3360
3361 free (stub_name);
3362 }
3363
3364 return stub_entry;
3365}
3366
3367/* Find or create a stub section. Returns a pointer to the stub section, and
3368 the section to which the stub section will be attached (in *LINK_SEC_P).
3369 LINK_SEC_P may be NULL. */
3370
3371static asection *
3372elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3373 struct elf32_arm_link_hash_table *htab)
3374{
3375 asection *link_sec;
3376 asection *stub_sec;
3377
3378 link_sec = htab->stub_group[section->id].link_sec;
3379 stub_sec = htab->stub_group[section->id].stub_sec;
3380 if (stub_sec == NULL)
3381 {
3382 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3383 if (stub_sec == NULL)
3384 {
3385 size_t namelen;
3386 bfd_size_type len;
3387 char *s_name;
3388
3389 namelen = strlen (link_sec->name);
3390 len = namelen + sizeof (STUB_SUFFIX);
3391 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3392 if (s_name == NULL)
3393 return NULL;
3394
3395 memcpy (s_name, link_sec->name, namelen);
3396 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3397 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3398 if (stub_sec == NULL)
3399 return NULL;
3400 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3401 }
3402 htab->stub_group[section->id].stub_sec = stub_sec;
3403 }
3404
3405 if (link_sec_p)
3406 *link_sec_p = link_sec;
3407
3408 return stub_sec;
3409}
3410
3411/* Add a new stub entry to the stub hash. Not all fields of the new
3412 stub entry are initialised. */
3413
3414static struct elf32_arm_stub_hash_entry *
3415elf32_arm_add_stub (const char *stub_name,
3416 asection *section,
3417 struct elf32_arm_link_hash_table *htab)
3418{
3419 asection *link_sec;
3420 asection *stub_sec;
3421 struct elf32_arm_stub_hash_entry *stub_entry;
3422
3423 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3424 if (stub_sec == NULL)
3425 return NULL;
3426
3427 /* Enter this entry into the linker stub hash table. */
3428 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3429 TRUE, FALSE);
3430 if (stub_entry == NULL)
3431 {
3432 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
3433 section->owner,
3434 stub_name);
3435 return NULL;
3436 }
3437
3438 stub_entry->stub_sec = stub_sec;
3439 stub_entry->stub_offset = 0;
3440 stub_entry->id_sec = link_sec;
3441
3442 return stub_entry;
3443}
3444
3445/* Store an Arm insn into an output section not processed by
3446 elf32_arm_write_section. */
3447
3448static void
3449put_arm_insn (struct elf32_arm_link_hash_table * htab,
3450 bfd * output_bfd, bfd_vma val, void * ptr)
3451{
3452 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3453 bfd_putl32 (val, ptr);
3454 else
3455 bfd_putb32 (val, ptr);
3456}
3457
3458/* Store a 16-bit Thumb insn into an output section not processed by
3459 elf32_arm_write_section. */
3460
3461static void
3462put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3463 bfd * output_bfd, bfd_vma val, void * ptr)
3464{
3465 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3466 bfd_putl16 (val, ptr);
3467 else
3468 bfd_putb16 (val, ptr);
3469}
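
/* For reference: byteswap_code is nonzero when a BE8 image is being
   produced, i.e. instructions are stored little-endian even though data is
   big-endian, so the two helpers above pick the byte order opposite to the
   output BFD exactly when byteswapping is requested:

     byteswap_code  output endianness  instruction bytes written
          0             little             little-endian
          0             big                big-endian
          1             little             big-endian
          1             big                little-endian  */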
3470
3471static bfd_reloc_status_type elf32_arm_final_link_relocate
3472 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3473 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3474 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3475
3476static unsigned int
3477arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
3478{
3479 switch (stub_type)
3480 {
3481 case arm_stub_a8_veneer_b_cond:
3482 case arm_stub_a8_veneer_b:
3483 case arm_stub_a8_veneer_bl:
3484 return 2;
3485
3486 case arm_stub_long_branch_any_any:
3487 case arm_stub_long_branch_v4t_arm_thumb:
3488 case arm_stub_long_branch_thumb_only:
3489 case arm_stub_long_branch_v4t_thumb_thumb:
3490 case arm_stub_long_branch_v4t_thumb_arm:
3491 case arm_stub_short_branch_v4t_thumb_arm:
3492 case arm_stub_long_branch_any_arm_pic:
3493 case arm_stub_long_branch_any_thumb_pic:
3494 case arm_stub_long_branch_v4t_thumb_thumb_pic:
3495 case arm_stub_long_branch_v4t_arm_thumb_pic:
3496 case arm_stub_long_branch_v4t_thumb_arm_pic:
3497 case arm_stub_long_branch_thumb_only_pic:
3498 case arm_stub_a8_veneer_blx:
3499 return 4;
3500
3501 default:
3502 abort (); /* Should be unreachable. */
3503 }
3504}
3505
3506static bfd_boolean
3507arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3508 void * in_arg)
3509{
3510#define MAXRELOCS 2
3511 struct elf32_arm_stub_hash_entry *stub_entry;
3512 struct elf32_arm_link_hash_table *globals;
3513 struct bfd_link_info *info;
3514 asection *stub_sec;
3515 bfd *stub_bfd;
3516 bfd_byte *loc;
3517 bfd_vma sym_value;
3518 int template_size;
3519 int size;
3520 const insn_sequence *template_sequence;
3521 int i;
3522 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3523 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3524 int nrelocs = 0;
3525
3526 /* Massage our args to the form they really have. */
3527 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3528 info = (struct bfd_link_info *) in_arg;
3529
3530 globals = elf32_arm_hash_table (info);
3531 if (globals == NULL)
3532 return FALSE;
3533
3534 stub_sec = stub_entry->stub_sec;
3535
3536 if ((globals->fix_cortex_a8 < 0)
3537 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
3538 /* We have to do less-strictly-aligned fixes last. */
3539 return TRUE;
3540
3541 /* Make a note of the offset within the stubs for this entry. */
3542 stub_entry->stub_offset = stub_sec->size;
3543 loc = stub_sec->contents + stub_entry->stub_offset;
3544
3545 stub_bfd = stub_sec->owner;
3546
3547 /* This is the address of the stub destination. */
3548 sym_value = (stub_entry->target_value
3549 + stub_entry->target_section->output_offset
3550 + stub_entry->target_section->output_section->vma);
3551
3552 template_sequence = stub_entry->stub_template;
3553 template_size = stub_entry->stub_template_size;
3554
3555 size = 0;
3556 for (i = 0; i < template_size; i++)
3557 {
3558 switch (template_sequence[i].type)
3559 {
3560 case THUMB16_TYPE:
3561 {
3562 bfd_vma data = (bfd_vma) template_sequence[i].data;
3563 if (template_sequence[i].reloc_addend != 0)
3564 {
3565 /* We've borrowed the reloc_addend field to mean we should
3566 insert a condition code into this (Thumb-1 branch)
3567 instruction. See THUMB16_BCOND_INSN. */
3568 BFD_ASSERT ((data & 0xff00) == 0xd000);
3569 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3570 }
3571 bfd_put_16 (stub_bfd, data, loc + size);
3572 size += 2;
3573 }
3574 break;
3575
3576 case THUMB32_TYPE:
3577 bfd_put_16 (stub_bfd,
3578 (template_sequence[i].data >> 16) & 0xffff,
3579 loc + size);
3580 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
3581 loc + size + 2);
3582 if (template_sequence[i].r_type != R_ARM_NONE)
3583 {
3584 stub_reloc_idx[nrelocs] = i;
3585 stub_reloc_offset[nrelocs++] = size;
3586 }
3587 size += 4;
3588 break;
3589
3590 case ARM_TYPE:
3591 bfd_put_32 (stub_bfd, template_sequence[i].data,
3592 loc + size);
3593 /* Handle cases where the target is encoded within the
3594 instruction. */
3595 if (template_sequence[i].r_type == R_ARM_JUMP24)
3596 {
3597 stub_reloc_idx[nrelocs] = i;
3598 stub_reloc_offset[nrelocs++] = size;
3599 }
3600 size += 4;
3601 break;
3602
3603 case DATA_TYPE:
3604 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
3605 stub_reloc_idx[nrelocs] = i;
3606 stub_reloc_offset[nrelocs++] = size;
3607 size += 4;
3608 break;
3609
3610 default:
3611 BFD_FAIL ();
3612 return FALSE;
3613 }
3614 }
3615
3616 stub_sec->size += size;
3617
3618 /* Stub size has already been computed in arm_size_one_stub. Check
3619 consistency. */
3620 BFD_ASSERT (size == stub_entry->stub_size);
3621
3622 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3623 if (stub_entry->st_type == STT_ARM_TFUNC)
3624 sym_value |= 1;
3625
3626 /* Assume there is at least one entry, and at most MAXRELOCS entries,
3627 to relocate in each stub. */
3628 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3629
3630 for (i = 0; i < nrelocs; i++)
3631 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3632 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3633 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3634 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3635 {
3636 Elf_Internal_Rela rel;
3637 bfd_boolean unresolved_reloc;
3638 char *error_message;
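/* For the Thumb branch relocations in the stub template the
destination stays in Thumb state, so flag it as STT_ARM_TFUNC;
R_ARM_THM_XPC22 (Thumb BLX) targets ARM state and is left
unflagged. */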
3639 int sym_flags
3640 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3641 ? STT_ARM_TFUNC : 0;
3642 bfd_vma points_to = sym_value + stub_entry->target_addend;
3643
3644 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3645 rel.r_info = ELF32_R_INFO (0,
3646 template_sequence[stub_reloc_idx[i]].r_type);
3647 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
3648
3649 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3650 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3651 template should refer back to the instruction after the original
3652 branch. */
3653 points_to = sym_value;
3654
3655 /* There may be unintended consequences if this is not true. */
3656 BFD_ASSERT (stub_entry->h == NULL);
3657
3658 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3659 properly. We should probably use this function unconditionally,
3660 rather than only for certain relocations listed in the enclosing
3661 conditional, for the sake of consistency. */
3662 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3663 (template_sequence[stub_reloc_idx[i]].r_type),
3664 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3665 points_to, info, stub_entry->target_section, "", sym_flags,
3666 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3667 &error_message);
3668 }
3669 else
3670 {
3671 Elf_Internal_Rela rel;
3672 bfd_boolean unresolved_reloc;
3673 char *error_message;
3674 bfd_vma points_to = sym_value + stub_entry->target_addend
3675 + template_sequence[stub_reloc_idx[i]].reloc_addend;
3676
3677 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3678 rel.r_info = ELF32_R_INFO (0,
3679 template_sequence[stub_reloc_idx[i]].r_type);
3680 rel.r_addend = 0;
3681
3682 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3683 (template_sequence[stub_reloc_idx[i]].r_type),
3684 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3685 points_to, info, stub_entry->target_section, "", stub_entry->st_type,
3686 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3687 &error_message);
3688 }
3689
3690 return TRUE;
3691#undef MAXRELOCS
3692}
3693
3694 /* Calculate the template, template size and instruction size for a stub.
3695 Return value is the size of the stub's instructions in bytes. */
3696
3697static unsigned int
3698find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3699 const insn_sequence **stub_template,
3700 int *stub_template_size)
3701{
3702 const insn_sequence *template_sequence = NULL;
3703 int template_size = 0, i;
3704 unsigned int size;
3705
3706 template_sequence = stub_definitions[stub_type].template_sequence;
3707 template_size = stub_definitions[stub_type].template_size;
3708
3709 size = 0;
3710 for (i = 0; i < template_size; i++)
3711 {
3712 switch (template_sequence[i].type)
3713 {
3714 case THUMB16_TYPE:
3715 size += 2;
3716 break;
3717
3718 case ARM_TYPE:
3719 case THUMB32_TYPE:
3720 case DATA_TYPE:
3721 size += 4;
3722 break;
3723
3724 default:
3725 BFD_FAIL ();
3726 return 0;
3727 }
3728 }
3729
3730 if (stub_template)
3731 *stub_template = template_sequence;
3732
3733 if (stub_template_size)
3734 *stub_template_size = template_size;
3735
3736 return size;
3737}
3738
3739/* As above, but don't actually build the stub. Just bump offset so
3740 we know stub section sizes. */
3741
3742static bfd_boolean
3743arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3744 void *in_arg ATTRIBUTE_UNUSED)
3745{
3746 struct elf32_arm_stub_hash_entry *stub_entry;
3747 const insn_sequence *template_sequence;
3748 int template_size, size;
3749
3750 /* Massage our args to the form they really have. */
3751 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3752
3753 BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
3754 && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
3755
3756 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
3757 &template_size);
3758
3759 stub_entry->stub_size = size;
3760 stub_entry->stub_template = template_sequence;
3761 stub_entry->stub_template_size = template_size;
3762
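/* Round the stub size up to a multiple of 8 bytes when accounting it
in the stub section, presumably so that each stub starts on an
8-byte aligned boundary. */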
3763 size = (size + 7) & ~7;
3764 stub_entry->stub_sec->size += size;
3765
3766 return TRUE;
3767}
3768
3769/* External entry points for sizing and building linker stubs. */
3770
3771/* Set up various things so that we can make a list of input sections
3772 for each output section included in the link. Returns -1 on error,
3773 0 when no stubs will be needed, and 1 on success. */
3774
3775int
3776elf32_arm_setup_section_lists (bfd *output_bfd,
3777 struct bfd_link_info *info)
3778{
3779 bfd *input_bfd;
3780 unsigned int bfd_count;
3781 int top_id, top_index;
3782 asection *section;
3783 asection **input_list, **list;
3784 bfd_size_type amt;
3785 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3786
3787 if (htab == NULL)
3788 return 0;
3789 if (! is_elf_hash_table (htab))
3790 return 0;
3791
3792 /* Count the number of input BFDs and find the top input section id. */
3793 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3794 input_bfd != NULL;
3795 input_bfd = input_bfd->link_next)
3796 {
3797 bfd_count += 1;
3798 for (section = input_bfd->sections;
3799 section != NULL;
3800 section = section->next)
3801 {
3802 if (top_id < section->id)
3803 top_id = section->id;
3804 }
3805 }
3806 htab->bfd_count = bfd_count;
3807
3808 amt = sizeof (struct map_stub) * (top_id + 1);
3809 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
3810 if (htab->stub_group == NULL)
3811 return -1;
3812 htab->top_id = top_id;
3813
3814 /* We can't use output_bfd->section_count here to find the top output
3815 section index as some sections may have been removed, and
3816 _bfd_strip_section_from_output doesn't renumber the indices. */
3817 for (section = output_bfd->sections, top_index = 0;
3818 section != NULL;
3819 section = section->next)
3820 {
3821 if (top_index < section->index)
3822 top_index = section->index;
3823 }
3824
3825 htab->top_index = top_index;
3826 amt = sizeof (asection *) * (top_index + 1);
3827 input_list = (asection **) bfd_malloc (amt);
3828 htab->input_list = input_list;
3829 if (input_list == NULL)
3830 return -1;
3831
3832 /* For sections we aren't interested in, mark their entries with a
3833 value we can check later. */
3834 list = input_list + top_index;
3835 do
3836 *list = bfd_abs_section_ptr;
3837 while (list-- != input_list);
3838
3839 for (section = output_bfd->sections;
3840 section != NULL;
3841 section = section->next)
3842 {
3843 if ((section->flags & SEC_CODE) != 0)
3844 input_list[section->index] = NULL;
3845 }
3846
3847 return 1;
3848}
3849
3850/* The linker repeatedly calls this function for each input section,
3851 in the order that input sections are linked into output sections.
3852 Build lists of input sections to determine groupings between which
3853 we may insert linker stubs. */
3854
3855void
3856elf32_arm_next_input_section (struct bfd_link_info *info,
3857 asection *isec)
3858{
3859 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3860
3861 if (htab == NULL)
3862 return;
3863
3864 if (isec->output_section->index <= htab->top_index)
3865 {
3866 asection **list = htab->input_list + isec->output_section->index;
3867
3868 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3869 {
3870 /* Steal the link_sec pointer for our list. */
3871#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3872 /* This happens to make the list in reverse order,
3873 which we reverse later. */
3874 PREV_SEC (isec) = *list;
3875 *list = isec;
3876 }
3877 }
3878}
3879
3880/* See whether we can group stub sections together. Grouping stub
3881 sections may result in fewer stubs. More importantly, we need to
3882 put all .init* and .fini* stubs at the end of the .init or
3883 .fini output sections respectively, because glibc splits the
3884 _init and _fini functions into multiple parts. Putting a stub in
3885 the middle of a function is not a good idea. */
3886
3887static void
3888group_sections (struct elf32_arm_link_hash_table *htab,
3889 bfd_size_type stub_group_size,
3890 bfd_boolean stubs_always_after_branch)
3891{
3892 asection **list = htab->input_list;
3893
3894 do
3895 {
3896 asection *tail = *list;
3897 asection *head;
3898
3899 if (tail == bfd_abs_section_ptr)
3900 continue;
3901
3902 /* Reverse the list: we must avoid placing stubs at the
3903 beginning of the section because the beginning of the text
3904 section may be required for an interrupt vector in bare metal
3905 code. */
3906#define NEXT_SEC PREV_SEC
3907 head = NULL;
3908 while (tail != NULL)
3909 {
3910 /* Pop from tail. */
3911 asection *item = tail;
3912 tail = PREV_SEC (item);
3913
3914 /* Push on head. */
3915 NEXT_SEC (item) = head;
3916 head = item;
3917 }
3918
3919 while (head != NULL)
3920 {
3921 asection *curr;
3922 asection *next;
3923 bfd_vma stub_group_start = head->output_offset;
3924 bfd_vma end_of_next;
3925
3926 curr = head;
3927 while (NEXT_SEC (curr) != NULL)
3928 {
3929 next = NEXT_SEC (curr);
3930 end_of_next = next->output_offset + next->size;
3931 if (end_of_next - stub_group_start >= stub_group_size)
3932 /* End of NEXT is too far from start, so stop. */
3933 break;
3934 /* Add NEXT to the group. */
3935 curr = next;
3936 }
3937
3938 /* OK, the size from the start to the start of CURR is less
3939 than stub_group_size and thus can be handled by one stub
3940 section. (Or the head section is itself larger than
3941 stub_group_size, in which case we may be toast.)
3942 We should really be keeping track of the total size of
3943 stubs added here, as stubs contribute to the final output
3944 section size. */
3945 do
3946 {
3947 next = NEXT_SEC (head);
3948 /* Set up this stub group. */
3949 htab->stub_group[head->id].link_sec = curr;
3950 }
3951 while (head != curr && (head = next) != NULL);
3952
3953 /* But wait, there's more! Input sections up to stub_group_size
3954 bytes after the stub section can be handled by it too. */
3955 if (!stubs_always_after_branch)
3956 {
3957 stub_group_start = curr->output_offset + curr->size;
3958
3959 while (next != NULL)
3960 {
3961 end_of_next = next->output_offset + next->size;
3962 if (end_of_next - stub_group_start >= stub_group_size)
3963 /* End of NEXT is too far from stubs, so stop. */
3964 break;
3965 /* Add NEXT to the stub group. */
3966 head = next;
3967 next = NEXT_SEC (head);
3968 htab->stub_group[head->id].link_sec = curr;
3969 }
3970 }
3971 head = next;
3972 }
3973 }
3974 while (list++ != htab->input_list + htab->top_index);
3975
3976 free (htab->input_list);
3977#undef PREV_SEC
3978#undef NEXT_SEC
3979}
3980
3981/* Comparison function for sorting/searching relocations relating to Cortex-A8
3982 erratum fix. */
3983
3984static int
3985a8_reloc_compare (const void *a, const void *b)
3986{
3987 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
3988 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
3989
3990 if (ra->from < rb->from)
3991 return -1;
3992 else if (ra->from > rb->from)
3993 return 1;
3994 else
3995 return 0;
3996}
3997
3998static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3999 const char *, char **);
4000
4001/* Helper function to scan code for sequences which might trigger the Cortex-A8
4002 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4003 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4004 otherwise. */
4005
4006static bfd_boolean
4007cortex_a8_erratum_scan (bfd *input_bfd,
4008 struct bfd_link_info *info,
4009 struct a8_erratum_fix **a8_fixes_p,
4010 unsigned int *num_a8_fixes_p,
4011 unsigned int *a8_fix_table_size_p,
4012 struct a8_erratum_reloc *a8_relocs,
4013 unsigned int num_a8_relocs,
4014 unsigned prev_num_a8_fixes,
4015 bfd_boolean *stub_changed_p)
4016{
4017 asection *section;
4018 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4019 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4020 unsigned int num_a8_fixes = *num_a8_fixes_p;
4021 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4022
4023 if (htab == NULL)
4024 return FALSE;
4025
4026 for (section = input_bfd->sections;
4027 section != NULL;
4028 section = section->next)
4029 {
4030 bfd_byte *contents = NULL;
4031 struct _arm_elf_section_data *sec_data;
4032 unsigned int span;
4033 bfd_vma base_vma;
4034
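/* Only executable PROGBITS sections that actually reach the output
need to be scanned. */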
4035 if (elf_section_type (section) != SHT_PROGBITS
4036 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4037 || (section->flags & SEC_EXCLUDE) != 0
4038 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
4039 || (section->output_section == bfd_abs_section_ptr))
4040 continue;
4041
4042 base_vma = section->output_section->vma + section->output_offset;
4043
4044 if (elf_section_data (section)->this_hdr.contents != NULL)
4045 contents = elf_section_data (section)->this_hdr.contents;
4046 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4047 return TRUE;
4048
4049 sec_data = elf32_arm_section_data (section);
4050
4051 for (span = 0; span < sec_data->mapcount; span++)
4052 {
4053 unsigned int span_start = sec_data->map[span].vma;
4054 unsigned int span_end = (span == sec_data->mapcount - 1)
4055 ? section->size : sec_data->map[span + 1].vma;
4056 unsigned int i;
4057 char span_type = sec_data->map[span].type;
4058 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4059
4060 if (span_type != 't')
4061 continue;
4062
4063 /* Span is entirely within a single 4KB region: skip scanning. */
4064 if (((base_vma + span_start) & ~0xfff)
4065 == ((base_vma + span_end) & ~0xfff))
4066 continue;
4067
4068 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4069
4070 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4071 * The branch target is in the same 4KB region as the
4072 first half of the branch.
4073 * The instruction before the branch is a 32-bit
4074 non-branch instruction. */
4075 for (i = span_start; i < span_end;)
4076 {
4077 unsigned int insn = bfd_getl16 (&contents[i]);
4078 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4079 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4080
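/* The first halfword of a 32-bit Thumb-2 instruction has its top
five bits set to 0b11101, 0b11110 or 0b11111. */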
4081 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4082 insn_32bit = TRUE;
4083
4084 if (insn_32bit)
4085 {
4086 /* Load the rest of the insn (in manual-friendly order). */
4087 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4088
4089 /* Encoding T4: B<c>.W. */
4090 is_b = (insn & 0xf800d000) == 0xf0009000;
4091 /* Encoding T1: BL<c>.W. */
4092 is_bl = (insn & 0xf800d000) == 0xf000d000;
4093 /* Encoding T2: BLX<c>.W. */
4094 is_blx = (insn & 0xf800d000) == 0xf000c000;
4095 /* Encoding T3: B<c>.W (not permitted in IT block). */
4096 is_bcc = (insn & 0xf800d000) == 0xf0008000
4097 && (insn & 0x07f00000) != 0x03800000;
4098 }
4099
4100 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4101
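/* A 32-bit branch whose first halfword sits at offset 0xffe of a
4KB page straddles the page boundary; that is the pattern the
Cortex-A8 erratum workaround is looking for. */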
4102 if (((base_vma + i) & 0xfff) == 0xffe
4103 && insn_32bit
4104 && is_32bit_branch
4105 && last_was_32bit
4106 && ! last_was_branch)
4107 {
4108 bfd_signed_vma offset = 0;
4109 bfd_boolean force_target_arm = FALSE;
4110 bfd_boolean force_target_thumb = FALSE;
4111 bfd_vma target;
4112 enum elf32_arm_stub_type stub_type = arm_stub_none;
4113 struct a8_erratum_reloc key, *found;
4114
4115 key.from = base_vma + i;
4116 found = (struct a8_erratum_reloc *)
4117 bsearch (&key, a8_relocs, num_a8_relocs,
4118 sizeof (struct a8_erratum_reloc),
4119 &a8_reloc_compare);
4120
4121 if (found)
4122 {
4123 char *error_message = NULL;
4124 struct elf_link_hash_entry *entry;
4125 bfd_boolean use_plt = FALSE;
4126
4127 /* We don't care about the error returned from this
4128 function, only whether glue exists or not. */
4129 entry = find_thumb_glue (info, found->sym_name,
4130 &error_message);
4131
4132 if (entry)
4133 found->non_a8_stub = TRUE;
4134
4135 /* Keep a simpler condition, for the sake of clarity. */
4136 if (htab->splt != NULL && found->hash != NULL
4137 && found->hash->root.plt.offset != (bfd_vma) -1)
4138 use_plt = TRUE;
4139
4140 if (found->r_type == R_ARM_THM_CALL)
4141 {
4142 if (found->st_type != STT_ARM_TFUNC || use_plt)
4143 force_target_arm = TRUE;
4144 else
4145 force_target_thumb = TRUE;
4146 }
4147 }
4148
4149 /* Check if we have an offending branch instruction. */
4150
4151 if (found && found->non_a8_stub)
4152 /* We've already made a stub for this instruction, e.g.
4153 it's a long branch or a Thumb->ARM stub. Assume that
4154 stub will suffice to work around the A8 erratum (see
4155 setting of always_after_branch above). */
4156 ;
4157 else if (is_bcc)
4158 {
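/* Encoding T3 (B<c>.W): the branch offset is
S:J2:J1:imm6:imm11:'0', sign-extended from bit 20. */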
4159 offset = (insn & 0x7ff) << 1;
4160 offset |= (insn & 0x3f0000) >> 4;
4161 offset |= (insn & 0x2000) ? 0x40000 : 0;
4162 offset |= (insn & 0x800) ? 0x80000 : 0;
4163 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4164 if (offset & 0x100000)
4165 offset |= ~ ((bfd_signed_vma) 0xfffff);
4166 stub_type = arm_stub_a8_veneer_b_cond;
4167 }
4168 else if (is_b || is_bl || is_blx)
4169 {
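/* Encodings T1 (BL), T2 (BLX) and T4 (B.W): the branch offset
is S:I1:I2:imm10:imm11:'0', where I1 = NOT(J1 EOR S) and
I2 = NOT(J2 EOR S), sign-extended from bit 24. */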
4170 int s = (insn & 0x4000000) != 0;
4171 int j1 = (insn & 0x2000) != 0;
4172 int j2 = (insn & 0x800) != 0;
4173 int i1 = !(j1 ^ s);
4174 int i2 = !(j2 ^ s);
4175
4176 offset = (insn & 0x7ff) << 1;
4177 offset |= (insn & 0x3ff0000) >> 4;
4178 offset |= i2 << 22;
4179 offset |= i1 << 23;
4180 offset |= s << 24;
4181 if (offset & 0x1000000)
4182 offset |= ~ ((bfd_signed_vma) 0xffffff);
4183
4184 if (is_blx)
4185 offset &= ~ ((bfd_signed_vma) 3);
4186
4187 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4188 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4189 }
4190
4191 if (stub_type != arm_stub_none)
4192 {
4193 bfd_vma pc_for_insn = base_vma + i + 4;
4194
4195 /* The original instruction is a BL, but the target is
4196 an ARM instruction. If we were not making a stub,
4197 the BL would have been converted to a BLX. Use the
4198 BLX stub instead in that case. */
4199 if (htab->use_blx && force_target_arm
4200 && stub_type == arm_stub_a8_veneer_bl)
4201 {
4202 stub_type = arm_stub_a8_veneer_blx;
4203 is_blx = TRUE;
4204 is_bl = FALSE;
4205 }
4206 /* Conversely, if the original instruction was
4207 BLX but the target is Thumb mode, use the BL
4208 stub. */
4209 else if (force_target_thumb
4210 && stub_type == arm_stub_a8_veneer_blx)
4211 {
4212 stub_type = arm_stub_a8_veneer_bl;
4213 is_blx = FALSE;
4214 is_bl = TRUE;
4215 }
4216
4217 if (is_blx)
4218 pc_for_insn &= ~ ((bfd_vma) 3);
4219
4220 /* If we found a relocation, use the proper destination,
4221 not the offset in the (unrelocated) instruction.
4222 Note this is always done if we switched the stub type
4223 above. */
4224 if (found)
4225 offset =
4226 (bfd_signed_vma) (found->destination - pc_for_insn);
4227
4228 target = pc_for_insn + offset;
4229
4230 /* The BLX stub is ARM-mode code. Adjust the offset to
4231 take the different PC value (+8 instead of +4) into
4232 account. */
4233 if (stub_type == arm_stub_a8_veneer_blx)
4234 offset += 4;
4235
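/* The erratum can only be triggered when the branch target lies
in the same 4KB page as the first half of the branch itself. */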
4236 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4237 {
4238 char *stub_name = NULL;
4239
4240 if (num_a8_fixes == a8_fix_table_size)
4241 {
4242 a8_fix_table_size *= 2;
4243 a8_fixes = (struct a8_erratum_fix *)
4244 bfd_realloc (a8_fixes,
4245 sizeof (struct a8_erratum_fix)
4246 * a8_fix_table_size);
4247 }
4248
4249 if (num_a8_fixes < prev_num_a8_fixes)
4250 {
4251 /* If we're doing a subsequent scan,
4252 check if we've found the same fix as
4253 before, and try to reuse the stub
4254 name. */
4255 stub_name = a8_fixes[num_a8_fixes].stub_name;
4256 if ((a8_fixes[num_a8_fixes].section != section)
4257 || (a8_fixes[num_a8_fixes].offset != i))
4258 {
4259 free (stub_name);
4260 stub_name = NULL;
4261 *stub_changed_p = TRUE;
4262 }
4263 }
4264
4265 if (!stub_name)
4266 {
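/* Room for two 8-digit hex values (section id and offset),
a ':' separator and a terminating NUL. */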
4267 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4268 if (stub_name != NULL)
4269 sprintf (stub_name, "%x:%x", section->id, i);
4270 }
4271
4272 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4273 a8_fixes[num_a8_fixes].section = section;
4274 a8_fixes[num_a8_fixes].offset = i;
4275 a8_fixes[num_a8_fixes].addend = offset;
4276 a8_fixes[num_a8_fixes].orig_insn = insn;
4277 a8_fixes[num_a8_fixes].stub_name = stub_name;
4278 a8_fixes[num_a8_fixes].stub_type = stub_type;
4279 a8_fixes[num_a8_fixes].st_type =
4280 is_blx ? STT_FUNC : STT_ARM_TFUNC;
4281
4282 num_a8_fixes++;
4283 }
4284 }
4285 }
4286
4287 i += insn_32bit ? 4 : 2;
4288 last_was_32bit = insn_32bit;
4289 last_was_branch = is_32bit_branch;
4290 }
4291 }
4292
4293 if (elf_section_data (section)->this_hdr.contents == NULL)
4294 free (contents);
4295 }
4296
4297 *a8_fixes_p = a8_fixes;
4298 *num_a8_fixes_p = num_a8_fixes;
4299 *a8_fix_table_size_p = a8_fix_table_size;
4300
4301 return FALSE;
4302}
4303
4304/* Determine and set the size of the stub section for a final link.
4305
4306 The basic idea here is to examine all the relocations looking for
4307 PC-relative calls to a target that is unreachable with a "bl"
4308 instruction. */
4309
4310bfd_boolean
4311elf32_arm_size_stubs (bfd *output_bfd,
4312 bfd *stub_bfd,
4313 struct bfd_link_info *info,
4314 bfd_signed_vma group_size,
4315 asection * (*add_stub_section) (const char *, asection *),
4316 void (*layout_sections_again) (void))
4317{
4318 bfd_size_type stub_group_size;
4319 bfd_boolean stubs_always_after_branch;
4320 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4321 struct a8_erratum_fix *a8_fixes = NULL;
4322 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4323 struct a8_erratum_reloc *a8_relocs = NULL;
4324 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4325
4326 if (htab == NULL)
4327 return FALSE;
4328
4329 if (htab->fix_cortex_a8)
4330 {
4331 a8_fixes = (struct a8_erratum_fix *)
4332 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4333 a8_relocs = (struct a8_erratum_reloc *)
4334 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4335 }
4336
4337 /* Propagate mach to stub bfd, because it may not have been
4338 finalized when we created stub_bfd. */
4339 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4340 bfd_get_mach (output_bfd));
4341
4342 /* Stash our params away. */
4343 htab->stub_bfd = stub_bfd;
4344 htab->add_stub_section = add_stub_section;
4345 htab->layout_sections_again = layout_sections_again;
4346 stubs_always_after_branch = group_size < 0;
4347
4348 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4349 as the first half of a 32-bit branch straddling two 4K pages. This is a
4350 crude way of enforcing that. */
4351 if (htab->fix_cortex_a8)
4352 stubs_always_after_branch = 1;
4353
4354 if (group_size < 0)
4355 stub_group_size = -group_size;
4356 else
4357 stub_group_size = group_size;
4358
4359 if (stub_group_size == 1)
4360 {
4361 /* Default values. */
4362 /* The Thumb branch range of +-4MB has to be used as the default
4363 maximum group size (a given section can contain both ARM and Thumb
4364 code, so the worst case has to be taken into account).
4365
4366 This value is 24K less than that, which allows for 2025
4367 12-byte stubs. If we exceed that, then we will fail to link.
4368 The user will have to relink with an explicit group size
4369 option. */
4370 stub_group_size = 4170000;
4371 }
4372
4373 group_sections (htab, stub_group_size, stubs_always_after_branch);
4374
4375 /* If we're applying the cortex A8 fix, we need to determine the
4376 program header size now, because we cannot change it later --
4377 that could alter section placements. Notice the A8 erratum fix
4378 ends up requiring the section addresses to remain unchanged
4379 modulo the page size. That's something we cannot represent
4380 inside BFD, and we don't want to force the section alignment to
4381 be the page size. */
4382 if (htab->fix_cortex_a8)
4383 (*htab->layout_sections_again) ();
4384
4385 while (1)
4386 {
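/* Repeat the scan until a pass adds no new stubs: creating stubs
grows the stub sections, which can in turn move code and so create
the need for further stubs or Cortex-A8 fixes. */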
4387 bfd *input_bfd;
4388 unsigned int bfd_indx;
4389 asection *stub_sec;
4390 bfd_boolean stub_changed = FALSE;
4391 unsigned prev_num_a8_fixes = num_a8_fixes;
4392
4393 num_a8_fixes = 0;
4394 for (input_bfd = info->input_bfds, bfd_indx = 0;
4395 input_bfd != NULL;
4396 input_bfd = input_bfd->link_next, bfd_indx++)
4397 {
4398 Elf_Internal_Shdr *symtab_hdr;
4399 asection *section;
4400 Elf_Internal_Sym *local_syms = NULL;
4401
4402 num_a8_relocs = 0;
4403
4404 /* We'll need the symbol table in a second. */
4405 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4406 if (symtab_hdr->sh_info == 0)
4407 continue;
4408
4409 /* Walk over each section attached to the input bfd. */
4410 for (section = input_bfd->sections;
4411 section != NULL;
4412 section = section->next)
4413 {
4414 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4415
4416 /* If there aren't any relocs, then there's nothing more
4417 to do. */
4418 if ((section->flags & SEC_RELOC) == 0
4419 || section->reloc_count == 0
4420 || (section->flags & SEC_CODE) == 0)
4421 continue;
4422
4423 /* If this section is a link-once section that will be
4424 discarded, then don't create any stubs. */
4425 if (section->output_section == NULL
4426 || section->output_section->owner != output_bfd)
4427 continue;
4428
4429 /* Get the relocs. */
4430 internal_relocs
4431 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4432 NULL, info->keep_memory);
4433 if (internal_relocs == NULL)
4434 goto error_ret_free_local;
4435
4436 /* Now examine each relocation. */
4437 irela = internal_relocs;
4438 irelaend = irela + section->reloc_count;
4439 for (; irela < irelaend; irela++)
4440 {
4441 unsigned int r_type, r_indx;
4442 enum elf32_arm_stub_type stub_type;
4443 struct elf32_arm_stub_hash_entry *stub_entry;
4444 asection *sym_sec;
4445 bfd_vma sym_value;
4446 bfd_vma destination;
4447 struct elf32_arm_link_hash_entry *hash;
4448 const char *sym_name;
4449 char *stub_name;
4450 const asection *id_sec;
4451 int st_type;
4452 bfd_boolean created_stub = FALSE;
4453
4454 r_type = ELF32_R_TYPE (irela->r_info);
4455 r_indx = ELF32_R_SYM (irela->r_info);
4456
4457 if (r_type >= (unsigned int) R_ARM_max)
4458 {
4459 bfd_set_error (bfd_error_bad_value);
4460 error_ret_free_internal:
4461 if (elf_section_data (section)->relocs == NULL)
4462 free (internal_relocs);
4463 goto error_ret_free_local;
4464 }
4465
4466 /* Only look for stubs on branch instructions. */
4467 if ((r_type != (unsigned int) R_ARM_CALL)
4468 && (r_type != (unsigned int) R_ARM_THM_CALL)
4469 && (r_type != (unsigned int) R_ARM_JUMP24)
4470 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4471 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4472 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4473 && (r_type != (unsigned int) R_ARM_PLT32))
4474 continue;
4475
4476 /* Now determine the call target, its name, value,
4477 section. */
4478 sym_sec = NULL;
4479 sym_value = 0;
4480 destination = 0;
4481 hash = NULL;
4482 sym_name = NULL;
4483 if (r_indx < symtab_hdr->sh_info)
4484 {
4485 /* It's a local symbol. */
4486 Elf_Internal_Sym *sym;
4487
4488 if (local_syms == NULL)
4489 {
4490 local_syms
4491 = (Elf_Internal_Sym *) symtab_hdr->contents;
4492 if (local_syms == NULL)
4493 local_syms
4494 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4495 symtab_hdr->sh_info, 0,
4496 NULL, NULL, NULL);
4497 if (local_syms == NULL)
4498 goto error_ret_free_internal;
4499 }
4500
4501 sym = local_syms + r_indx;
4502 if (sym->st_shndx == SHN_UNDEF)
4503 sym_sec = bfd_und_section_ptr;
4504 else if (sym->st_shndx == SHN_ABS)
4505 sym_sec = bfd_abs_section_ptr;
4506 else if (sym->st_shndx == SHN_COMMON)
4507 sym_sec = bfd_com_section_ptr;
4508 else
4509 sym_sec =
4510 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
4511
4512 if (!sym_sec)
4513 /* This is an undefined symbol. It can never
4514 be resolved. */
4515 continue;
4516
4517 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4518 sym_value = sym->st_value;
4519 destination = (sym_value + irela->r_addend
4520 + sym_sec->output_offset
4521 + sym_sec->output_section->vma);
4522 st_type = ELF_ST_TYPE (sym->st_info);
4523 sym_name
4524 = bfd_elf_string_from_elf_section (input_bfd,
4525 symtab_hdr->sh_link,
4526 sym->st_name);
4527 }
4528 else
4529 {
4530 /* It's an external symbol. */
4531 int e_indx;
4532
4533 e_indx = r_indx - symtab_hdr->sh_info;
4534 hash = ((struct elf32_arm_link_hash_entry *)
4535 elf_sym_hashes (input_bfd)[e_indx]);
4536
4537 while (hash->root.root.type == bfd_link_hash_indirect
4538 || hash->root.root.type == bfd_link_hash_warning)
4539 hash = ((struct elf32_arm_link_hash_entry *)
4540 hash->root.root.u.i.link);
4541
4542 if (hash->root.root.type == bfd_link_hash_defined
4543 || hash->root.root.type == bfd_link_hash_defweak)
4544 {
4545 struct elf32_arm_link_hash_table *globals =
4546 elf32_arm_hash_table (info);
4547
4548 sym_sec = hash->root.root.u.def.section;
4549 sym_value = hash->root.root.u.def.value;
4550
4551 /* For a destination in a shared library,
4552 use the PLT stub as target address to
4553 decide whether a branch stub is
4554 needed. */
4555 if (globals != NULL
4556 && globals->splt != NULL
4557 && hash != NULL
4558 && hash->root.plt.offset != (bfd_vma) -1)
4559 {
4560 sym_sec = globals->splt;
4561 sym_value = hash->root.plt.offset;
4562 if (sym_sec->output_section != NULL)
4563 destination = (sym_value
4564 + sym_sec->output_offset
4565 + sym_sec->output_section->vma);
4566 }
4567 else if (sym_sec->output_section != NULL)
4568 destination = (sym_value + irela->r_addend
4569 + sym_sec->output_offset
4570 + sym_sec->output_section->vma);
4571 }
4572 else if ((hash->root.root.type == bfd_link_hash_undefined)
4573 || (hash->root.root.type == bfd_link_hash_undefweak))
4574 {
4575 /* For a shared library, use the PLT stub as
4576 target address to decide whether a long
4577 branch stub is needed.
4578 For absolute code, such branches cannot be handled. */
4579 struct elf32_arm_link_hash_table *globals =
4580 elf32_arm_hash_table (info);
4581
4582 if (globals != NULL
4583 && globals->splt != NULL
4584 && hash != NULL
4585 && hash->root.plt.offset != (bfd_vma) -1)
4586 {
4587 sym_sec = globals->splt;
4588 sym_value = hash->root.plt.offset;
4589 if (sym_sec->output_section != NULL)
4590 destination = (sym_value
4591 + sym_sec->output_offset
4592 + sym_sec->output_section->vma);
4593 }
4594 else
4595 continue;
4596 }
4597 else
4598 {
4599 bfd_set_error (bfd_error_bad_value);
4600 goto error_ret_free_internal;
4601 }
4602 st_type = ELF_ST_TYPE (hash->root.type);
4603 sym_name = hash->root.root.root.string;
4604 }
4605
4606 do
4607 {
4608 /* Determine what (if any) linker stub is needed. */
4609 stub_type = arm_type_of_stub (info, section, irela,
4610 &st_type, hash,
4611 destination, sym_sec,
4612 input_bfd, sym_name);
4613 if (stub_type == arm_stub_none)
4614 break;
4615
4616 /* Support for grouping stub sections. */
4617 id_sec = htab->stub_group[section->id].link_sec;
4618
4619 /* Get the name of this stub. */
4620 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4621 irela, stub_type);
4622 if (!stub_name)
4623 goto error_ret_free_internal;
4624
4625 /* We've either created a stub for this reloc already,
4626 or we are about to. */
4627 created_stub = TRUE;
4628
4629 stub_entry = arm_stub_hash_lookup
4630 (&htab->stub_hash_table, stub_name,
4631 FALSE, FALSE);
4632 if (stub_entry != NULL)
4633 {
4634 /* The proper stub has already been created. */
4635 free (stub_name);
4636 stub_entry->target_value = sym_value;
4637 break;
4638 }
4639
4640 stub_entry = elf32_arm_add_stub (stub_name, section,
4641 htab);
4642 if (stub_entry == NULL)
4643 {
4644 free (stub_name);
4645 goto error_ret_free_internal;
4646 }
4647
4648 stub_entry->target_value = sym_value;
4649 stub_entry->target_section = sym_sec;
4650 stub_entry->stub_type = stub_type;
4651 stub_entry->h = hash;
4652 stub_entry->st_type = st_type;
4653
4654 if (sym_name == NULL)
4655 sym_name = "unnamed";
4656 stub_entry->output_name = (char *)
4657 bfd_alloc (htab->stub_bfd,
4658 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4659 + strlen (sym_name));
4660 if (stub_entry->output_name == NULL)
4661 {
4662 free (stub_name);
4663 goto error_ret_free_internal;
4664 }
4665
4666 /* For historical reasons, use the existing names for
4667 ARM-to-Thumb and Thumb-to-ARM stubs. */
4668 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4669 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4670 && st_type != STT_ARM_TFUNC)
4671 sprintf (stub_entry->output_name,
4672 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4673 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4674 || (r_type == (unsigned int) R_ARM_JUMP24))
4675 && st_type == STT_ARM_TFUNC)
4676 sprintf (stub_entry->output_name,
4677 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4678 else
4679 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4680 sym_name);
4681
4682 stub_changed = TRUE;
4683 }
4684 while (0);
4685
4686 /* Look for relocations which might trigger Cortex-A8
4687 erratum. */
4688 if (htab->fix_cortex_a8
4689 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4690 || r_type == (unsigned int) R_ARM_THM_JUMP19
4691 || r_type == (unsigned int) R_ARM_THM_CALL
4692 || r_type == (unsigned int) R_ARM_THM_XPC22))
4693 {
4694 bfd_vma from = section->output_section->vma
4695 + section->output_offset
4696 + irela->r_offset;
4697
4698 if ((from & 0xfff) == 0xffe)
4699 {
4700 /* Found a candidate. Note we haven't checked the
4701 destination is within 4K here: if we do so (and
4702 don't create an entry in a8_relocs) we can't tell
4703 that a branch should have been relocated when
4704 scanning later. */
4705 if (num_a8_relocs == a8_reloc_table_size)
4706 {
4707 a8_reloc_table_size *= 2;
4708 a8_relocs = (struct a8_erratum_reloc *)
4709 bfd_realloc (a8_relocs,
4710 sizeof (struct a8_erratum_reloc)
4711 * a8_reloc_table_size);
4712 }
4713
4714 a8_relocs[num_a8_relocs].from = from;
4715 a8_relocs[num_a8_relocs].destination = destination;
4716 a8_relocs[num_a8_relocs].r_type = r_type;
4717 a8_relocs[num_a8_relocs].st_type = st_type;
4718 a8_relocs[num_a8_relocs].sym_name = sym_name;
4719 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4720 a8_relocs[num_a8_relocs].hash = hash;
4721
4722 num_a8_relocs++;
4723 }
4724 }
4725 }
4726
4727 /* We're done with the internal relocs, free them. */
4728 if (elf_section_data (section)->relocs == NULL)
4729 free (internal_relocs);
4730 }
4731
4732 if (htab->fix_cortex_a8)
4733 {
4734 /* Sort relocs which might apply to Cortex-A8 erratum. */
4735 qsort (a8_relocs, num_a8_relocs,
4736 sizeof (struct a8_erratum_reloc),
4737 &a8_reloc_compare);
4738
4739 /* Scan for branches which might trigger Cortex-A8 erratum. */
4740 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4741 &num_a8_fixes, &a8_fix_table_size,
4742 a8_relocs, num_a8_relocs,
4743 prev_num_a8_fixes, &stub_changed)
4744 != 0)
4745 goto error_ret_free_local;
4746 }
4747 }
4748
4749 if (prev_num_a8_fixes != num_a8_fixes)
4750 stub_changed = TRUE;
4751
4752 if (!stub_changed)
4753 break;
4754
4755 /* OK, we've added some stubs. Find out the new size of the
4756 stub sections. */
4757 for (stub_sec = htab->stub_bfd->sections;
4758 stub_sec != NULL;
4759 stub_sec = stub_sec->next)
4760 {
4761 /* Ignore non-stub sections. */
4762 if (!strstr (stub_sec->name, STUB_SUFFIX))
4763 continue;
4764
4765 stub_sec->size = 0;
4766 }
4767
4768 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4769
4770 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4771 if (htab->fix_cortex_a8)
4772 for (i = 0; i < num_a8_fixes; i++)
4773 {
4774 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4775 a8_fixes[i].section, htab);
4776
4777 if (stub_sec == NULL)
4778 goto error_ret_free_local;
4779
4780 stub_sec->size
4781 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4782 NULL);
4783 }
4784
4785
4786 /* Ask the linker to do its stuff. */
4787 (*htab->layout_sections_again) ();
4788 }
4789
4790 /* Add stubs for Cortex-A8 erratum fixes now. */
4791 if (htab->fix_cortex_a8)
4792 {
4793 for (i = 0; i < num_a8_fixes; i++)
4794 {
4795 struct elf32_arm_stub_hash_entry *stub_entry;
4796 char *stub_name = a8_fixes[i].stub_name;
4797 asection *section = a8_fixes[i].section;
4798 unsigned int section_id = a8_fixes[i].section->id;
4799 asection *link_sec = htab->stub_group[section_id].link_sec;
4800 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4801 const insn_sequence *template_sequence;
4802 int template_size, size = 0;
4803
4804 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4805 TRUE, FALSE);
4806 if (stub_entry == NULL)
4807 {
4808 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
4809 section->owner,
4810 stub_name);
4811 return FALSE;
4812 }
4813
4814 stub_entry->stub_sec = stub_sec;
4815 stub_entry->stub_offset = 0;
4816 stub_entry->id_sec = link_sec;
4817 stub_entry->stub_type = a8_fixes[i].stub_type;
4818 stub_entry->target_section = a8_fixes[i].section;
4819 stub_entry->target_value = a8_fixes[i].offset;
4820 stub_entry->target_addend = a8_fixes[i].addend;
4821 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4822 stub_entry->st_type = a8_fixes[i].st_type;
4823
4824 size = find_stub_size_and_template (a8_fixes[i].stub_type,
4825 &template_sequence,
4826 &template_size);
4827
4828 stub_entry->stub_size = size;
4829 stub_entry->stub_template = template_sequence;
4830 stub_entry->stub_template_size = template_size;
4831 }
4832
4833 /* Stash the Cortex-A8 erratum fix array for use later in
4834 elf32_arm_write_section(). */
4835 htab->a8_erratum_fixes = a8_fixes;
4836 htab->num_a8_erratum_fixes = num_a8_fixes;
4837 }
4838 else
4839 {
4840 htab->a8_erratum_fixes = NULL;
4841 htab->num_a8_erratum_fixes = 0;
4842 }
4843 return TRUE;
4844
4845 error_ret_free_local:
4846 return FALSE;
4847}
4848
4849/* Build all the stubs associated with the current output file. The
4850 stubs are kept in a hash table attached to the main linker hash
4851 table. We also set up the .plt entries for statically linked PIC
4852 functions here. This function is called via arm_elf_finish in the
4853 linker. */
4854
4855bfd_boolean
4856elf32_arm_build_stubs (struct bfd_link_info *info)
4857{
4858 asection *stub_sec;
4859 struct bfd_hash_table *table;
4860 struct elf32_arm_link_hash_table *htab;
4861
4862 htab = elf32_arm_hash_table (info);
4863 if (htab == NULL)
4864 return FALSE;
4865
4866 for (stub_sec = htab->stub_bfd->sections;
4867 stub_sec != NULL;
4868 stub_sec = stub_sec->next)
4869 {
4870 bfd_size_type size;
4871
4872 /* Ignore non-stub sections. */
4873 if (!strstr (stub_sec->name, STUB_SUFFIX))
4874 continue;
4875
4876 /* Allocate memory to hold the linker stubs. */
4877 size = stub_sec->size;
4878 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
4879 if (stub_sec->contents == NULL && size != 0)
4880 return FALSE;
4881 stub_sec->size = 0;
4882 }
4883
4884 /* Build the stubs as directed by the stub hash table. */
4885 table = &htab->stub_hash_table;
4886 bfd_hash_traverse (table, arm_build_one_stub, info);
4887 if (htab->fix_cortex_a8)
4888 {
4889 /* Place the cortex a8 stubs last. */
4890 htab->fix_cortex_a8 = -1;
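/* arm_build_one_stub skips the 2-byte-aligned Cortex-A8 veneers
while fix_cortex_a8 is positive; flipping it negative makes this
second traversal emit exactly the stubs skipped above. */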
4891 bfd_hash_traverse (table, arm_build_one_stub, info);
4892 }
4893
4894 return TRUE;
4895}
4896
4897/* Locate the Thumb encoded calling stub for NAME. */
4898
4899static struct elf_link_hash_entry *
4900find_thumb_glue (struct bfd_link_info *link_info,
4901 const char *name,
4902 char **error_message)
4903{
4904 char *tmp_name;
4905 struct elf_link_hash_entry *hash;
4906 struct elf32_arm_link_hash_table *hash_table;
4907
4908 /* We need a pointer to the armelf specific hash table. */
4909 hash_table = elf32_arm_hash_table (link_info);
4910 if (hash_table == NULL)
4911 return NULL;
4912
4913 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4914 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4915
4916 BFD_ASSERT (tmp_name);
4917
4918 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4919
4920 hash = elf_link_hash_lookup
4921 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4922
4923 if (hash == NULL
4924 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4925 tmp_name, name) == -1)
4926 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4927
4928 free (tmp_name);
4929
4930 return hash;
4931}
4932
4933/* Locate the ARM encoded calling stub for NAME. */
4934
4935static struct elf_link_hash_entry *
4936find_arm_glue (struct bfd_link_info *link_info,
4937 const char *name,
4938 char **error_message)
4939{
4940 char *tmp_name;
4941 struct elf_link_hash_entry *myh;
4942 struct elf32_arm_link_hash_table *hash_table;
4943
4944 /* We need a pointer to the elfarm specific hash table. */
4945 hash_table = elf32_arm_hash_table (link_info);
4946 if (hash_table == NULL)
4947 return NULL;
4948
4949 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4950 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4951
4952 BFD_ASSERT (tmp_name);
4953
4954 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4955
4956 myh = elf_link_hash_lookup
4957 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4958
4959 if (myh == NULL
4960 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4961 tmp_name, name) == -1)
4962 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4963
4964 free (tmp_name);
4965
4966 return myh;
4967}
4968
4969/* ARM->Thumb glue (static images):
4970
4971 .arm
4972 __func_from_arm:
4973 ldr r12, __func_addr
4974 bx r12
4975 __func_addr:
4976 .word func @ behave as if you saw an ARM_32 reloc.
4977
4978 (v5t static images)
4979 .arm
4980 __func_from_arm:
4981 ldr pc, __func_addr
4982 __func_addr:
4983 .word func @ behave as if you saw an ARM_32 reloc.
4984
4985 (relocatable images)
4986 .arm
4987 __func_from_arm:
4988 ldr r12, __func_offset
4989 add r12, r12, pc
4990 bx r12
4991 __func_offset:
4992 .word func - . */
4993
4994#define ARM2THUMB_STATIC_GLUE_SIZE 12
4995static const insn32 a2t1_ldr_insn = 0xe59fc000;
4996static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4997static const insn32 a2t3_func_addr_insn = 0x00000001;
4998
4999#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
5000static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
5001static const insn32 a2t2v5_func_addr_insn = 0x00000001;
5002
5003#define ARM2THUMB_PIC_GLUE_SIZE 16
5004static const insn32 a2t1p_ldr_insn = 0xe59fc004;
5005static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
5006static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
5007
5008 /* Thumb->ARM:                          Thumb->(non-interworking aware) ARM
5009
5010      .thumb                             .thumb
5011      .align 2                           .align 2
5012  __func_from_thumb:                 __func_from_thumb:
5013      bx pc                              push {r6, lr}
5014      nop                                ldr  r6, __func_addr
5015      .arm                               mov  lr, pc
5016      b func                             bx   r6
5017                                         .arm
5018                                     ;; back_to_thumb
5019                                         ldmia r13! {r6, lr}
5020                                         bx    lr
5021                                    __func_addr:
5022                                         .word func */
5023
5024#define THUMB2ARM_GLUE_SIZE 8
5025static const insn16 t2a1_bx_pc_insn = 0x4778;
5026static const insn16 t2a2_noop_insn = 0x46c0;
5027static const insn32 t2a3_b_insn = 0xea000000;
5028
5029#define VFP11_ERRATUM_VENEER_SIZE 8
5030
5031#define ARM_BX_VENEER_SIZE 12
5032static const insn32 armbx1_tst_insn = 0xe3100001;
5033static const insn32 armbx2_moveq_insn = 0x01a0f000;
5034static const insn32 armbx3_bx_insn = 0xe12fff10;
5035
5036#ifndef ELFARM_NABI_C_INCLUDED
5037static void
5038arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5039{
5040 asection * s;
5041 bfd_byte * contents;
5042
5043 if (size == 0)
5044 {
5045 /* Do not include empty glue sections in the output. */
5046 if (abfd != NULL)
5047 {
5048 s = bfd_get_section_by_name (abfd, name);
5049 if (s != NULL)
5050 s->flags |= SEC_EXCLUDE;
5051 }
5052 return;
5053 }
5054
5055 BFD_ASSERT (abfd != NULL);
5056
5057 s = bfd_get_section_by_name (abfd, name);
5058 BFD_ASSERT (s != NULL);
5059
5060 contents = (bfd_byte *) bfd_alloc (abfd, size);
5061
5062 BFD_ASSERT (s->size == size);
5063 s->contents = contents;
5064}
5065
5066bfd_boolean
5067bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5068{
5069 struct elf32_arm_link_hash_table * globals;
5070
5071 globals = elf32_arm_hash_table (info);
5072 BFD_ASSERT (globals != NULL);
5073
5074 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5075 globals->arm_glue_size,
5076 ARM2THUMB_GLUE_SECTION_NAME);
5077
5078 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5079 globals->thumb_glue_size,
5080 THUMB2ARM_GLUE_SECTION_NAME);
5081
5082 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5083 globals->vfp11_erratum_glue_size,
5084 VFP11_ERRATUM_VENEER_SECTION_NAME);
5085
5086 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5087 globals->bx_glue_size,
5088 ARM_BX_GLUE_SECTION_NAME);
5089
5090 return TRUE;
5091}
5092
5093 /* Allocate space and symbols for calling a Thumb function from ARM mode.
5094 Returns the symbol identifying the stub. */
5095
5096static struct elf_link_hash_entry *
5097record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5098 struct elf_link_hash_entry * h)
5099{
5100 const char * name = h->root.root.string;
5101 asection * s;
5102 char * tmp_name;
5103 struct elf_link_hash_entry * myh;
5104 struct bfd_link_hash_entry * bh;
5105 struct elf32_arm_link_hash_table * globals;
5106 bfd_vma val;
5107 bfd_size_type size;
5108
5109 globals = elf32_arm_hash_table (link_info);
5110 BFD_ASSERT (globals != NULL);
5111 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5112
5113 s = bfd_get_section_by_name
5114 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5115
5116 BFD_ASSERT (s != NULL);
5117
5118 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5119 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5120
5121 BFD_ASSERT (tmp_name);
5122
5123 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5124
5125 myh = elf_link_hash_lookup
5126 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5127
5128 if (myh != NULL)
5129 {
5130 /* We've already seen this guy. */
5131 free (tmp_name);
5132 return myh;
5133 }
5134
5135 /* The only trick here is using globals->arm_glue_size as the value.
5136 Even though the section isn't allocated yet, this is where we will be
5137 putting it. The +1 on the value marks that the stub has not been
5138 output yet - not that it is a Thumb function. */
5139 bh = NULL;
5140 val = globals->arm_glue_size + 1;
5141 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5142 tmp_name, BSF_GLOBAL, s, val,
5143 NULL, TRUE, FALSE, &bh);
5144
5145 myh = (struct elf_link_hash_entry *) bh;
5146 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5147 myh->forced_local = 1;
5148
5149 free (tmp_name);
5150
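/* Pick the glue variant described in the comment block above: the
PC-relative sequence for PIC output, the shorter "ldr pc" form when
BLX (ARMv5+) is available, and the "ldr r12 / bx r12" sequence
otherwise. */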
5151 if (link_info->shared || globals->root.is_relocatable_executable
5152 || globals->pic_veneer)
5153 size = ARM2THUMB_PIC_GLUE_SIZE;
5154 else if (globals->use_blx)
5155 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5156 else
5157 size = ARM2THUMB_STATIC_GLUE_SIZE;
5158
5159 s->size += size;
5160 globals->arm_glue_size += size;
5161
5162 return myh;
5163}
5164
5165/* Allocate space for ARMv4 BX veneers. */
5166
5167static void
5168record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5169{
5170 asection * s;
5171 struct elf32_arm_link_hash_table *globals;
5172 char *tmp_name;
5173 struct elf_link_hash_entry *myh;
5174 struct bfd_link_hash_entry *bh;
5175 bfd_vma val;
5176
5177 /* BX PC does not need a veneer. */
5178 if (reg == 15)
5179 return;
5180
5181 globals = elf32_arm_hash_table (link_info);
5182 BFD_ASSERT (globals != NULL);
5183 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5184
5185 /* Check if this veneer has already been allocated. */
5186 if (globals->bx_glue_offset[reg])
5187 return;
5188
5189 s = bfd_get_section_by_name
5190 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5191
5192 BFD_ASSERT (s != NULL);
5193
5194 /* Add symbol for veneer. */
5195 tmp_name = (char *)
5196 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5197
5198 BFD_ASSERT (tmp_name);
5199
5200 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5201
5202 myh = elf_link_hash_lookup
5203 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5204
5205 BFD_ASSERT (myh == NULL);
5206
5207 bh = NULL;
5208 val = globals->bx_glue_size;
5209 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5210 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5211 NULL, TRUE, FALSE, &bh);
5212
5213 myh = (struct elf_link_hash_entry *) bh;
5214 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5215 myh->forced_local = 1;
5216
5217 s->size += ARM_BX_VENEER_SIZE;
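/* OR in a flag bit so that the recorded offset is non-zero even for
a veneer placed at offset 0: the check at the top of this function
treats any non-zero entry as already allocated. */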
5218 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5219 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5220}
5221
5222
5223/* Add an entry to the code/data map for section SEC. */
5224
5225static void
5226elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5227{
5228 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5229 unsigned int newidx;
5230
5231 if (sec_data->map == NULL)
5232 {
5233 sec_data->map = (elf32_arm_section_map *)
5234 bfd_malloc (sizeof (elf32_arm_section_map));
5235 sec_data->mapcount = 0;
5236 sec_data->mapsize = 1;
5237 }
5238
5239 newidx = sec_data->mapcount++;
5240
5241 if (sec_data->mapcount > sec_data->mapsize)
5242 {
5243 sec_data->mapsize *= 2;
5244 sec_data->map = (elf32_arm_section_map *)
5245 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5246 * sizeof (elf32_arm_section_map));
5247 }
5248
5249 if (sec_data->map)
5250 {
5251 sec_data->map[newidx].vma = vma;
5252 sec_data->map[newidx].type = type;
5253 }
5254}
5255
5256
5257/* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5258 veneers are handled for now. */
5259
5260static bfd_vma
5261record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5262 elf32_vfp11_erratum_list *branch,
5263 bfd *branch_bfd,
5264 asection *branch_sec,
5265 unsigned int offset)
5266{
5267 asection *s;
5268 struct elf32_arm_link_hash_table *hash_table;
5269 char *tmp_name;
5270 struct elf_link_hash_entry *myh;
5271 struct bfd_link_hash_entry *bh;
5272 bfd_vma val;
5273 struct _arm_elf_section_data *sec_data;
5274 elf32_vfp11_erratum_list *newerr;
5275
5276 hash_table = elf32_arm_hash_table (link_info);
5277 BFD_ASSERT (hash_table != NULL);
5278 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5279
5280 s = bfd_get_section_by_name
5281 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5282
5283 BFD_ASSERT (s != NULL);
5284
5285 sec_data = elf32_arm_section_data (s);
5286
5287 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5288 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5289
5290 BFD_ASSERT (tmp_name);
5291
5292 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5293 hash_table->num_vfp11_fixes);
5294
5295 myh = elf_link_hash_lookup
5296 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5297
5298 BFD_ASSERT (myh == NULL);
5299
5300 bh = NULL;
5301 val = hash_table->vfp11_erratum_glue_size;
5302 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5303 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5304 NULL, TRUE, FALSE, &bh);
5305
5306 myh = (struct elf_link_hash_entry *) bh;
5307 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5308 myh->forced_local = 1;
5309
5310 /* Link veneer back to calling location. */
5311 sec_data->erratumcount += 1;
5312 newerr = (elf32_vfp11_erratum_list *)
5313 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5314
5315 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5316 newerr->vma = -1;
5317 newerr->u.v.branch = branch;
5318 newerr->u.v.id = hash_table->num_vfp11_fixes;
5319 branch->u.b.veneer = newerr;
5320
5321 newerr->next = sec_data->erratumlist;
5322 sec_data->erratumlist = newerr;
5323
5324 /* A symbol for the return from the veneer. */
5325 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5326 hash_table->num_vfp11_fixes);
5327
5328 myh = elf_link_hash_lookup
5329 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5330
5331 if (myh != NULL)
5332 abort ();
5333
5334 bh = NULL;
5335 val = offset + 4;
5336 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5337 branch_sec, val, NULL, TRUE, FALSE, &bh);
5338
5339 myh = (struct elf_link_hash_entry *) bh;
5340 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5341 myh->forced_local = 1;
5342
5343 free (tmp_name);
5344
5345 /* Generate a mapping symbol for the veneer section, and explicitly add an
5346 entry for that symbol to the code/data map for the section. */
5347 if (hash_table->vfp11_erratum_glue_size == 0)
5348 {
5349 bh = NULL;
5350 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5351 ever requires this erratum fix. */
5352 _bfd_generic_link_add_one_symbol (link_info,
5353 hash_table->bfd_of_glue_owner, "$a",
5354 BSF_LOCAL, s, 0, NULL,
5355 TRUE, FALSE, &bh);
5356
5357 myh = (struct elf_link_hash_entry *) bh;
5358 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5359 myh->forced_local = 1;
5360
5361 /* The elf32_arm_init_maps function only cares about symbols from input
5362 BFDs. We must make a note of this generated mapping symbol
5363 ourselves so that code byteswapping works properly in
5364 elf32_arm_write_section. */
5365 elf32_arm_section_map_add (s, 'a', 0);
5366 }
5367
5368 s->size += VFP11_ERRATUM_VENEER_SIZE;
5369 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5370 hash_table->num_vfp11_fixes++;
5371
5372 /* The offset of the veneer. */
5373 return val;
5374}
5375
5376#define ARM_GLUE_SECTION_FLAGS \
5377 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5378 | SEC_READONLY | SEC_LINKER_CREATED)
5379
5380/* Create a fake section for use by the ARM backend of the linker. */
5381
5382static bfd_boolean
5383arm_make_glue_section (bfd * abfd, const char * name)
5384{
5385 asection * sec;
5386
5387 sec = bfd_get_section_by_name (abfd, name);
5388 if (sec != NULL)
5389 /* Already made. */
5390 return TRUE;
5391
5392 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5393
5394 if (sec == NULL
5395 || !bfd_set_section_alignment (abfd, sec, 2))
5396 return FALSE;
5397
5398 /* Set the gc mark to prevent the section from being removed by garbage
5399 collection, despite the fact that no relocs refer to this section. */
5400 sec->gc_mark = 1;
5401
5402 return TRUE;
5403}
5404
5405/* Add the glue sections to ABFD. This function is called from the
5406 linker scripts in ld/emultempl/{armelf}.em. */
5407
5408bfd_boolean
5409bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5410 struct bfd_link_info *info)
5411{
5412 /* If we are only performing a partial
5413 link do not bother adding the glue. */
5414 if (info->relocatable)
5415 return TRUE;
5416
5417 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5418 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5419 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5420 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5421}
5422
5423/* Select a BFD to be used to hold the sections used by the glue code.
5424 This function is called from the linker scripts in ld/emultempl/
5425 {armelf/pe}.em. */
5426
5427bfd_boolean
5428bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5429{
5430 struct elf32_arm_link_hash_table *globals;
5431
5432 /* If we are only performing a partial link
5433 do not bother getting a bfd to hold the glue. */
5434 if (info->relocatable)
5435 return TRUE;
5436
5437 /* Make sure we don't attach the glue sections to a dynamic object. */
5438 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5439
5440 globals = elf32_arm_hash_table (info);
5441 BFD_ASSERT (globals != NULL);
5442
5443 if (globals->bfd_of_glue_owner != NULL)
5444 return TRUE;
5445
5446 /* Save the bfd for later use. */
5447 globals->bfd_of_glue_owner = abfd;
5448
5449 return TRUE;
5450}
5451
5452static void
5453check_use_blx (struct elf32_arm_link_hash_table *globals)
5454{
5455 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5456 Tag_CPU_arch) > 2)
5457 globals->use_blx = 1;
5458}
5459
5460bfd_boolean
5461bfd_elf32_arm_process_before_allocation (bfd *abfd,
5462 struct bfd_link_info *link_info)
5463{
5464 Elf_Internal_Shdr *symtab_hdr;
5465 Elf_Internal_Rela *internal_relocs = NULL;
5466 Elf_Internal_Rela *irel, *irelend;
5467 bfd_byte *contents = NULL;
5468
5469 asection *sec;
5470 struct elf32_arm_link_hash_table *globals;
5471
5472 /* If we are only performing a partial link do not bother
5473 to construct any glue. */
5474 if (link_info->relocatable)
5475 return TRUE;
5476
5477 /* Here we have a bfd that is to be included on the link. We have a
5478 hook to do reloc rummaging, before section sizes are nailed down. */
5479 globals = elf32_arm_hash_table (link_info);
5480 BFD_ASSERT (globals != NULL);
5481
5482 check_use_blx (globals);
5483
5484 if (globals->byteswap_code && !bfd_big_endian (abfd))
5485 {
5486 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5487 abfd);
5488 return FALSE;
5489 }
5490
5491 /* PR 5398: If we have not decided to include any loadable sections in
5492 the output then we will not have a glue owner bfd. This is OK, it
5493 just means that there is nothing else for us to do here. */
5494 if (globals->bfd_of_glue_owner == NULL)
5495 return TRUE;
5496
5497 /* Rummage around all the relocs and map the glue vectors. */
5498 sec = abfd->sections;
5499
5500 if (sec == NULL)
5501 return TRUE;
5502
5503 for (; sec != NULL; sec = sec->next)
5504 {
5505 if (sec->reloc_count == 0)
5506 continue;
5507
5508 if ((sec->flags & SEC_EXCLUDE) != 0)
5509 continue;
5510
5511 symtab_hdr = & elf_symtab_hdr (abfd);
5512
5513 /* Load the relocs. */
5514 internal_relocs
5515 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5516
5517 if (internal_relocs == NULL)
5518 goto error_return;
5519
5520 irelend = internal_relocs + sec->reloc_count;
5521 for (irel = internal_relocs; irel < irelend; irel++)
5522 {
5523 long r_type;
5524 unsigned long r_index;
5525
5526 struct elf_link_hash_entry *h;
5527
5528 r_type = ELF32_R_TYPE (irel->r_info);
5529 r_index = ELF32_R_SYM (irel->r_info);
5530
5531 /* These are the only relocation types we care about. */
5532 if ( r_type != R_ARM_PC24
5533 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5534 continue;
5535
5536 /* Get the section contents if we haven't done so already. */
5537 if (contents == NULL)
5538 {
5539 /* Get cached copy if it exists. */
5540 if (elf_section_data (sec)->this_hdr.contents != NULL)
5541 contents = elf_section_data (sec)->this_hdr.contents;
5542 else
5543 {
5544 /* Go get them off disk. */
5545 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5546 goto error_return;
5547 }
5548 }
5549
5550 if (r_type == R_ARM_V4BX)
5551 {
5552 int reg;
5553
5554 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5555 record_arm_bx_glue (link_info, reg);
5556 continue;
5557 }
5558
5559 /* If the relocation is not against a symbol it cannot concern us. */
5560 h = NULL;
5561
5562 /* We don't care about local symbols. */
5563 if (r_index < symtab_hdr->sh_info)
5564 continue;
5565
5566 /* This is an external symbol. */
5567 r_index -= symtab_hdr->sh_info;
5568 h = (struct elf_link_hash_entry *)
5569 elf_sym_hashes (abfd)[r_index];
5570
5571 /* If the relocation is against a static symbol it must be within
5572 the current section and so cannot be a cross ARM/Thumb relocation. */
5573 if (h == NULL)
5574 continue;
5575
5576 /* If the call will go through a PLT entry then we do not need
5577 glue. */
5578 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5579 continue;
5580
5581 switch (r_type)
5582 {
5583 case R_ARM_PC24:
5584 /* This one is a call from arm code. We need to look up
5585 the target of the call. If it is a thumb target, we
5586 insert glue. */
5587 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5588 record_arm_to_thumb_glue (link_info, h);
5589 break;
5590
5591 default:
5592 abort ();
5593 }
5594 }
5595
5596 if (contents != NULL
5597 && elf_section_data (sec)->this_hdr.contents != contents)
5598 free (contents);
5599 contents = NULL;
5600
5601 if (internal_relocs != NULL
5602 && elf_section_data (sec)->relocs != internal_relocs)
5603 free (internal_relocs);
5604 internal_relocs = NULL;
5605 }
5606
5607 return TRUE;
5608
5609error_return:
5610 if (contents != NULL
5611 && elf_section_data (sec)->this_hdr.contents != contents)
5612 free (contents);
5613 if (internal_relocs != NULL
5614 && elf_section_data (sec)->relocs != internal_relocs)
5615 free (internal_relocs);
5616
5617 return FALSE;
5618}
5619#endif
5620
5621
5622/* Initialise maps of ARM/Thumb/data for input BFDs. */
5623
5624void
5625bfd_elf32_arm_init_maps (bfd *abfd)
5626{
5627 Elf_Internal_Sym *isymbuf;
5628 Elf_Internal_Shdr *hdr;
5629 unsigned int i, localsyms;
5630
5631 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5632 if (! is_arm_elf (abfd))
5633 return;
5634
5635 if ((abfd->flags & DYNAMIC) != 0)
5636 return;
5637
5638 hdr = & elf_symtab_hdr (abfd);
5639 localsyms = hdr->sh_info;
5640
5641 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5642 should contain the number of local symbols, which should come before any
5643 global symbols. Mapping symbols are always local. */
5644 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5645 NULL);
5646
5647 /* No internal symbols read? Skip this BFD. */
5648 if (isymbuf == NULL)
5649 return;
5650
5651 for (i = 0; i < localsyms; i++)
5652 {
5653 Elf_Internal_Sym *isym = &isymbuf[i];
5654 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5655 const char *name;
5656
5657 if (sec != NULL
5658 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5659 {
5660 name = bfd_elf_string_from_elf_section (abfd,
5661 hdr->sh_link, isym->st_name);
5662
5663 if (bfd_is_arm_special_symbol_name (name,
5664 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5665 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5666 }
5667 }
5668}
5669
5670
5671/* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5672 say what they wanted. */
5673
5674void
5675bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5676{
5677 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5678 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5679
5680 if (globals == NULL)
5681 return;
5682
5683 if (globals->fix_cortex_a8 == -1)
5684 {
5685 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5686 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5687 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5688 || out_attr[Tag_CPU_arch_profile].i == 0))
5689 globals->fix_cortex_a8 = 1;
5690 else
5691 globals->fix_cortex_a8 = 0;
5692 }
5693}
5694
5695
5696void
5697bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5698{
5699 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5700 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5701
5702 if (globals == NULL)
5703 return;
5704 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5705 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5706 {
5707 switch (globals->vfp11_fix)
5708 {
5709 case BFD_ARM_VFP11_FIX_DEFAULT:
5710 case BFD_ARM_VFP11_FIX_NONE:
5711 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5712 break;
5713
5714 default:
5715 /* Give a warning, but do as the user requests anyway. */
5716 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5717 "workaround is not necessary for target architecture"), obfd);
5718 }
5719 }
5720 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5721 /* For earlier architectures, we might need the workaround, but do not
5722 enable it by default. If the user is running on broken hardware, they
5723 must enable the erratum fix explicitly. */
5724 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5725}
5726
5727
5728enum bfd_arm_vfp11_pipe
5729{
5730 VFP11_FMAC,
5731 VFP11_LS,
5732 VFP11_DS,
5733 VFP11_BAD
5734};
5735
5736/* Return a VFP register number. This is encoded as RX:X for single-precision
5737 registers, or X:RX for double-precision registers, where RX is the group of
5738 four bits in the instruction encoding and X is the single extension bit.
5739 RX and X fields are specified using their lowest (starting) bit. The return
5740 value is:
5741
5742 0...31: single-precision registers s0...s31
5743 32...63: double-precision registers d0...d31.
5744
5745 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5746 encounter VFP3 instructions, so we allow the full range for DP registers. */
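/* For example, with RX taken from bits 0-3 and X from bit 5, field values
   RX = 6 and X = 1 denote s13 (6 * 2 + 1) when IS_DOUBLE is FALSE, and d22
   ((6 | 1 << 4) = 22, returned as 22 + 32 = 54) when IS_DOUBLE is TRUE.  */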
5747
5748static unsigned int
5749bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5750 unsigned int x)
5751{
5752 if (is_double)
5753 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5754 else
5755 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5756}
5757
5758/* Set bits in *WMASK according to a register number REG as encoded by
5759 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5760
5761static void
5762bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5763{
5764 if (reg < 32)
5765 *wmask |= 1 << reg;
5766 else if (reg < 48)
5767 *wmask |= 3 << ((reg - 32) * 2);
5768}
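/* Some concrete cases of the mapping above: s13 (REG == 13) sets bit 13;
   d5 (REG == 37) sets bits 10 and 11, i.e. its two overlapping
   single-precision halves s10 and s11; d22 (REG == 54) is silently ignored,
   since VFP11 only implements d0-d15.  */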
5769
5770/* Return TRUE if WMASK overwrites anything in REGS. */
5771
5772static bfd_boolean
5773bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5774{
5775 int i;
5776
5777 for (i = 0; i < numregs; i++)
5778 {
5779 unsigned int reg = regs[i];
5780
5781 if (reg < 32 && (wmask & (1 << reg)) != 0)
5782 return TRUE;
5783
5784 reg -= 32;
5785
5786 if (reg >= 16)
5787 continue;
5788
5789 if ((wmask & (3 << (reg * 2))) != 0)
5790 return TRUE;
5791 }
5792
5793 return FALSE;
5794}
5795
5796/* In this function, we're interested in two things: finding input registers
5797 for VFP data-processing instructions, and finding the set of registers which
5798 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5799 hold the written set, so FLDM etc. are easy to deal with (we're only
5800 interested in 32 SP registers or 16 DP registers, due to the VFP version
5801 implemented by the chip in question). DP registers are marked by setting
5802 both SP registers in the write mask. */
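/* For instance, an FLDMS loading four registers starting at s8 sets bits
   8-11 of the write mask, while an FLDMD loading two registers starting at
   d3 sets bits 6-9 (both single-precision halves of d3 and d4).  */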
5803
5804static enum bfd_arm_vfp11_pipe
5805bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5806 int *numregs)
5807{
5808 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
5809 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5810
5811 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5812 {
5813 unsigned int pqrs;
5814 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5815 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5816
5817 pqrs = ((insn & 0x00800000) >> 20)
5818 | ((insn & 0x00300000) >> 19)
5819 | ((insn & 0x00000040) >> 6);
5820
5821 switch (pqrs)
5822 {
5823 case 0: /* fmac[sd]. */
5824 case 1: /* fnmac[sd]. */
5825 case 2: /* fmsc[sd]. */
5826 case 3: /* fnmsc[sd]. */
5827 vpipe = VFP11_FMAC;
5828 bfd_arm_vfp11_write_mask (destmask, fd);
5829 regs[0] = fd;
5830 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5831 regs[2] = fm;
5832 *numregs = 3;
5833 break;
5834
5835 case 4: /* fmul[sd]. */
5836 case 5: /* fnmul[sd]. */
5837 case 6: /* fadd[sd]. */
5838 case 7: /* fsub[sd]. */
5839 vpipe = VFP11_FMAC;
5840 goto vfp_binop;
5841
5842 case 8: /* fdiv[sd]. */
5843 vpipe = VFP11_DS;
5844 vfp_binop:
5845 bfd_arm_vfp11_write_mask (destmask, fd);
5846 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5847 regs[1] = fm;
5848 *numregs = 2;
5849 break;
5850
5851 case 15: /* extended opcode. */
5852 {
5853 unsigned int extn = ((insn >> 15) & 0x1e)
5854 | ((insn >> 7) & 1);
5855
5856 switch (extn)
5857 {
5858 case 0: /* fcpy[sd]. */
5859 case 1: /* fabs[sd]. */
5860 case 2: /* fneg[sd]. */
5861 case 8: /* fcmp[sd]. */
5862 case 9: /* fcmpe[sd]. */
5863 case 10: /* fcmpz[sd]. */
5864 case 11: /* fcmpez[sd]. */
5865 case 16: /* fuito[sd]. */
5866 case 17: /* fsito[sd]. */
5867 case 24: /* ftoui[sd]. */
5868 case 25: /* ftouiz[sd]. */
5869 case 26: /* ftosi[sd]. */
5870 case 27: /* ftosiz[sd]. */
5871 /* These instructions will not bounce due to underflow. */
5872 *numregs = 0;
5873 vpipe = VFP11_FMAC;
5874 break;
5875
5876 case 3: /* fsqrt[sd]. */
5877 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5878 registers to cause the erratum in previous instructions. */
5879 bfd_arm_vfp11_write_mask (destmask, fd);
5880 vpipe = VFP11_DS;
5881 break;
5882
5883 case 15: /* fcvt{ds,sd}. */
5884 {
5885 int rnum = 0;
5886
5887 bfd_arm_vfp11_write_mask (destmask, fd);
5888
5889 /* Only FCVTSD can underflow. */
5890 if ((insn & 0x100) != 0)
5891 regs[rnum++] = fm;
5892
5893 *numregs = rnum;
5894
5895 vpipe = VFP11_FMAC;
5896 }
5897 break;
5898
5899 default:
5900 return VFP11_BAD;
5901 }
5902 }
5903 break;
5904
5905 default:
5906 return VFP11_BAD;
5907 }
5908 }
5909 /* Two-register transfer. */
5910 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5911 {
5912 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5913
5914 if ((insn & 0x100000) == 0)
5915 {
5916 if (is_double)
5917 bfd_arm_vfp11_write_mask (destmask, fm);
5918 else
5919 {
5920 bfd_arm_vfp11_write_mask (destmask, fm);
5921 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5922 }
5923 }
5924
5925 vpipe = VFP11_LS;
5926 }
5927 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5928 {
5929 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5930 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5931
5932 switch (puw)
5933 {
5934 case 0: /* Two-reg transfer. We should catch these above. */
5935 abort ();
5936
5937 case 2: /* fldm[sdx]. */
5938 case 3:
5939 case 5:
5940 {
5941 unsigned int i, offset = insn & 0xff;
5942
5943 if (is_double)
5944 offset >>= 1;
5945
5946 for (i = fd; i < fd + offset; i++)
5947 bfd_arm_vfp11_write_mask (destmask, i);
5948 }
5949 break;
5950
5951 case 4: /* fld[sd]. */
5952 case 6:
5953 bfd_arm_vfp11_write_mask (destmask, fd);
5954 break;
5955
5956 default:
5957 return VFP11_BAD;
5958 }
5959
5960 vpipe = VFP11_LS;
5961 }
5962 /* Single-register transfer. Note L==0. */
5963 else if ((insn & 0x0f100e10) == 0x0e000a10)
5964 {
5965 unsigned int opcode = (insn >> 21) & 7;
5966 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5967
5968 switch (opcode)
5969 {
5970 case 0: /* fmsr/fmdlr. */
5971 case 1: /* fmdhr. */
5972 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5973 destination register. I don't know if this is exactly right,
5974 but it is the conservative choice. */
5975 bfd_arm_vfp11_write_mask (destmask, fn);
5976 break;
5977
5978 case 7: /* fmxr. */
5979 break;
5980 }
5981
5982 vpipe = VFP11_LS;
5983 }
5984
5985 return vpipe;
5986}
5987
5988
5989static int elf32_arm_compare_mapping (const void * a, const void * b);
5990
5991
5992/* Look for potentially-troublesome code sequences which might trigger the
5993 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5994 (available from ARM) for details of the erratum. A short version is
5995 described in ld.texinfo. */
5996
5997bfd_boolean
5998bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5999{
6000 asection *sec;
6001 bfd_byte *contents = NULL;
6002 int state = 0;
6003 int regs[3], numregs = 0;
6004 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6005 int use_vector;
6006
6007 if (globals == NULL)
6008 return FALSE;
6009 use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
6010 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6011 The states transition as follows:
6012
6013 0 -> 1 (vector) or 0 -> 2 (scalar)
6014 A VFP FMAC-pipeline instruction has been seen. Fill
6015 regs[0]..regs[numregs-1] with its input operands. Remember this
6016 instruction in 'first_fmac'.
6017
6018 1 -> 2
6019 Any instruction, except for a VFP instruction which overwrites
6020 regs[*].
6021
6022 1 -> 3 [ -> 0 ] or
6023 2 -> 3 [ -> 0 ]
6024 A VFP instruction has been seen which overwrites any of regs[*].
6025 We must make a veneer! Reset state to 0 before examining next
6026 instruction.
6027
6028 2 -> 0
6029 If we fail to match anything in state 2, reset to state 0 and reset
6030 the instruction pointer to the instruction after 'first_fmac'.
6031
6032 If the VFP11 vector mode is in use, there must be at least two unrelated
6033 instructions between anti-dependent VFP11 instructions to properly avoid
6034 triggering the erratum, hence the use of the extra state 1. */
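  /* A minimal example for vector mode: an FMAC-pipeline instruction such as
     fmacs s0, s1, s2 takes us to state 1 with regs[] = {s0, s1, s2}. If the
     next instruction is, say, flds s1, [r0], it overwrites one of those
     registers, state 3 is reached and a veneer is recorded for the fmacs;
     an unrelated intervening instruction would instead move us through
     state 2 first.  */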
6035
6036 /* If we are only performing a partial link do not bother
6037 to construct any glue. */
6038 if (link_info->relocatable)
6039 return TRUE;
6040
6041 /* Skip if this bfd does not correspond to an ELF image. */
6042 if (! is_arm_elf (abfd))
6043 return TRUE;
6044
6045 /* We should have chosen a fix type by the time we get here. */
6046 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6047
6048 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6049 return TRUE;
6050
6051 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6052 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6053 return TRUE;
6054
6055 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6056 {
6057 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6058 struct _arm_elf_section_data *sec_data;
6059
6060 /* If we don't have executable progbits, we're not interested in this
6061 section. Also skip if section is to be excluded. */
6062 if (elf_section_type (sec) != SHT_PROGBITS
6063 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6064 || (sec->flags & SEC_EXCLUDE) != 0
6065 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
6066 || sec->output_section == bfd_abs_section_ptr
6067 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6068 continue;
6069
6070 sec_data = elf32_arm_section_data (sec);
6071
6072 if (sec_data->mapcount == 0)
6073 continue;
6074
6075 if (elf_section_data (sec)->this_hdr.contents != NULL)
6076 contents = elf_section_data (sec)->this_hdr.contents;
6077 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6078 goto error_return;
6079
6080 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6081 elf32_arm_compare_mapping);
6082
6083 for (span = 0; span < sec_data->mapcount; span++)
6084 {
6085 unsigned int span_start = sec_data->map[span].vma;
6086 unsigned int span_end = (span == sec_data->mapcount - 1)
6087 ? sec->size : sec_data->map[span + 1].vma;
6088 char span_type = sec_data->map[span].type;
6089
6090 /* FIXME: Only ARM mode is supported at present. We may need to
6091 support Thumb-2 mode also at some point. */
6092 if (span_type != 'a')
6093 continue;
6094
6095 for (i = span_start; i < span_end;)
6096 {
6097 unsigned int next_i = i + 4;
6098 unsigned int insn = bfd_big_endian (abfd)
6099 ? (contents[i] << 24)
6100 | (contents[i + 1] << 16)
6101 | (contents[i + 2] << 8)
6102 | contents[i + 3]
6103 : (contents[i + 3] << 24)
6104 | (contents[i + 2] << 16)
6105 | (contents[i + 1] << 8)
6106 | contents[i];
6107 unsigned int writemask = 0;
6108 enum bfd_arm_vfp11_pipe vpipe;
6109
6110 switch (state)
6111 {
6112 case 0:
6113 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6114 &numregs);
6115 /* I'm assuming the VFP11 erratum can trigger with denorm
6116 operands on either the FMAC or the DS pipeline. This might
6117 lead to slightly overenthusiastic veneer insertion. */
6118 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6119 {
6120 state = use_vector ? 1 : 2;
6121 first_fmac = i;
6122 veneer_of_insn = insn;
6123 }
6124 break;
6125
6126 case 1:
6127 {
6128 int other_regs[3], other_numregs;
6129 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6130 other_regs,
6131 &other_numregs);
6132 if (vpipe != VFP11_BAD
6133 && bfd_arm_vfp11_antidependency (writemask, regs,
6134 numregs))
6135 state = 3;
6136 else
6137 state = 2;
6138 }
6139 break;
6140
6141 case 2:
6142 {
6143 int other_regs[3], other_numregs;
6144 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6145 other_regs,
6146 &other_numregs);
6147 if (vpipe != VFP11_BAD
6148 && bfd_arm_vfp11_antidependency (writemask, regs,
6149 numregs))
6150 state = 3;
6151 else
6152 {
6153 state = 0;
6154 next_i = first_fmac + 4;
6155 }
6156 }
6157 break;
6158
6159 case 3:
6160 abort (); /* Should be unreachable. */
6161 }
6162
6163 if (state == 3)
6164 {
6165 elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
6166 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6167
6168 elf32_arm_section_data (sec)->erratumcount += 1;
6169
6170 newerr->u.b.vfp_insn = veneer_of_insn;
6171
6172 switch (span_type)
6173 {
6174 case 'a':
6175 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6176 break;
6177
6178 default:
6179 abort ();
6180 }
6181
6182 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6183 first_fmac);
6184
6185 newerr->vma = -1;
6186
6187 newerr->next = sec_data->erratumlist;
6188 sec_data->erratumlist = newerr;
6189
6190 state = 0;
6191 }
6192
6193 i = next_i;
6194 }
6195 }
6196
6197 if (contents != NULL
6198 && elf_section_data (sec)->this_hdr.contents != contents)
6199 free (contents);
6200 contents = NULL;
6201 }
6202
6203 return TRUE;
6204
6205error_return:
6206 if (contents != NULL
6207 && elf_section_data (sec)->this_hdr.contents != contents)
6208 free (contents);
6209
6210 return FALSE;
6211}
6212
6213/* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6214 after sections have been laid out, using specially-named symbols. */
6215
6216void
6217bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6218 struct bfd_link_info *link_info)
6219{
6220 asection *sec;
6221 struct elf32_arm_link_hash_table *globals;
6222 char *tmp_name;
6223
6224 if (link_info->relocatable)
6225 return;
6226
6227 /* Skip if this bfd does not correspond to an ELF image. */
6228 if (! is_arm_elf (abfd))
6229 return;
6230
6231 globals = elf32_arm_hash_table (link_info);
6232 if (globals == NULL)
6233 return;
6234
6235 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6236 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6237
6238 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6239 {
6240 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6241 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6242
6243 for (; errnode != NULL; errnode = errnode->next)
6244 {
6245 struct elf_link_hash_entry *myh;
6246 bfd_vma vma;
6247
6248 switch (errnode->type)
6249 {
6250 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6251 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6252 /* Find veneer symbol. */
6253 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6254 errnode->u.b.veneer->u.v.id);
6255
6256 myh = elf_link_hash_lookup
6257 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6258
6259 if (myh == NULL)
6260 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6261 "`%s'"), abfd, tmp_name);
6262
6263 vma = myh->root.u.def.section->output_section->vma
6264 + myh->root.u.def.section->output_offset
6265 + myh->root.u.def.value;
6266
6267 errnode->u.b.veneer->vma = vma;
6268 break;
6269
6270 case VFP11_ERRATUM_ARM_VENEER:
6271 case VFP11_ERRATUM_THUMB_VENEER:
6272 /* Find return location. */
6273 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6274 errnode->u.v.id);
6275
6276 myh = elf_link_hash_lookup
6277 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6278
6279 if (myh == NULL)
6280 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6281 "`%s'"), abfd, tmp_name);
6282
6283 vma = myh->root.u.def.section->output_section->vma
6284 + myh->root.u.def.section->output_offset
6285 + myh->root.u.def.value;
6286
6287 errnode->u.v.branch->vma = vma;
6288 break;
6289
6290 default:
6291 abort ();
6292 }
6293 }
6294 }
6295
6296 free (tmp_name);
6297}
6298
6299
6300/* Set target relocation values needed during linking. */
6301
6302void
6303bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6304 struct bfd_link_info *link_info,
6305 int target1_is_rel,
6306 char * target2_type,
6307 int fix_v4bx,
6308 int use_blx,
6309 bfd_arm_vfp11_fix vfp11_fix,
6310 int no_enum_warn, int no_wchar_warn,
6311 int pic_veneer, int fix_cortex_a8)
6312{
6313 struct elf32_arm_link_hash_table *globals;
6314
6315 globals = elf32_arm_hash_table (link_info);
6316 if (globals == NULL)
6317 return;
6318
6319 globals->target1_is_rel = target1_is_rel;
6320 if (strcmp (target2_type, "rel") == 0)
6321 globals->target2_reloc = R_ARM_REL32;
6322 else if (strcmp (target2_type, "abs") == 0)
6323 globals->target2_reloc = R_ARM_ABS32;
6324 else if (strcmp (target2_type, "got-rel") == 0)
6325 globals->target2_reloc = R_ARM_GOT_PREL;
6326 else
6327 {
6328 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6329 target2_type);
6330 }
6331 globals->fix_v4bx = fix_v4bx;
6332 globals->use_blx |= use_blx;
6333 globals->vfp11_fix = vfp11_fix;
6334 globals->pic_veneer = pic_veneer;
6335 globals->fix_cortex_a8 = fix_cortex_a8;
6336
6337 BFD_ASSERT (is_arm_elf (output_bfd));
6338 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6339 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6340}
6341
6342/* Replace the target offset of a Thumb bl or b.w instruction. */
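/* The 32-bit Thumb branch encoding splits the offset across two halfwords:
   the first carries the sign bit S and imm10, the second carries J1, J2 and
   imm11, with J1 = NOT (I1) XOR S and J2 = NOT (I2) XOR S for offset bits
   I1 (bit 23) and I2 (bit 22). The XORs with reloc_sign below implement
   exactly that inversion.  */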
6343
6344static void
6345insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6346{
6347 bfd_vma upper;
6348 bfd_vma lower;
6349 int reloc_sign;
6350
6351 BFD_ASSERT ((offset & 1) == 0);
6352
6353 upper = bfd_get_16 (abfd, insn);
6354 lower = bfd_get_16 (abfd, insn + 2);
6355 reloc_sign = (offset < 0) ? 1 : 0;
6356 upper = (upper & ~(bfd_vma) 0x7ff)
6357 | ((offset >> 12) & 0x3ff)
6358 | (reloc_sign << 10);
6359 lower = (lower & ~(bfd_vma) 0x2fff)
6360 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6361 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6362 | ((offset >> 1) & 0x7ff);
6363 bfd_put_16 (abfd, upper, insn);
6364 bfd_put_16 (abfd, lower, insn + 2);
6365}
6366
6367/* Thumb code calling an ARM function. */
6368
6369static int
6370elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6371 const char * name,
6372 bfd * input_bfd,
6373 bfd * output_bfd,
6374 asection * input_section,
6375 bfd_byte * hit_data,
6376 asection * sym_sec,
6377 bfd_vma offset,
6378 bfd_signed_vma addend,
6379 bfd_vma val,
6380 char **error_message)
6381{
6382 asection * s = 0;
6383 bfd_vma my_offset;
6384 long int ret_offset;
6385 struct elf_link_hash_entry * myh;
6386 struct elf32_arm_link_hash_table * globals;
6387
6388 myh = find_thumb_glue (info, name, error_message);
6389 if (myh == NULL)
6390 return FALSE;
6391
6392 globals = elf32_arm_hash_table (info);
6393 BFD_ASSERT (globals != NULL);
6394 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6395
6396 my_offset = myh->root.u.def.value;
6397
6398 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6399 THUMB2ARM_GLUE_SECTION_NAME);
6400
6401 BFD_ASSERT (s != NULL);
6402 BFD_ASSERT (s->contents != NULL);
6403 BFD_ASSERT (s->output_section != NULL);
6404
6405 if ((my_offset & 0x01) == 0x01)
6406 {
6407 if (sym_sec != NULL
6408 && sym_sec->owner != NULL
6409 && !INTERWORK_FLAG (sym_sec->owner))
6410 {
6411 (*_bfd_error_handler)
6412 (_("%B(%s): warning: interworking not enabled.\n"
6413 " first occurrence: %B: thumb call to arm"),
6414 sym_sec->owner, input_bfd, name);
6415
6416 return FALSE;
6417 }
6418
6419 --my_offset;
6420 myh->root.u.def.value = my_offset;
6421
6422 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6423 s->contents + my_offset);
6424
6425 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6426 s->contents + my_offset + 2);
6427
6428 ret_offset =
6429 /* Address of destination of the stub. */
6430 ((bfd_signed_vma) val)
6431 - ((bfd_signed_vma)
6432 /* Offset from the start of the current section
6433 to the start of the stubs. */
6434 (s->output_offset
6435 /* Offset of the start of this stub from the start of the stubs. */
6436 + my_offset
6437 /* Address of the start of the current section. */
6438 + s->output_section->vma)
6439 /* The branch instruction is 4 bytes into the stub. */
6440 + 4
6441 /* ARM branches work from the pc of the instruction + 8. */
6442 + 8);
6443
6444 put_arm_insn (globals, output_bfd,
6445 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6446 s->contents + my_offset + 4);
6447 }
6448
6449 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6450
6451 /* Now go back and fix up the original BL insn to point to here. */
6452 ret_offset =
6453 /* Address of where the stub is located. */
6454 (s->output_section->vma + s->output_offset + my_offset)
6455 /* Address of where the BL is located. */
6456 - (input_section->output_section->vma + input_section->output_offset
6457 + offset)
6458 /* Addend in the relocation. */
6459 - addend
6460 /* Biassing for PC-relative addressing. */
6461 - 8;
6462
6463 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6464
6465 return TRUE;
6466}
6467
6468/* Populate an Arm to Thumb stub. Returns the stub symbol. */
6469
6470static struct elf_link_hash_entry *
6471elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6472 const char * name,
6473 bfd * input_bfd,
6474 bfd * output_bfd,
6475 asection * sym_sec,
6476 bfd_vma val,
6477 asection * s,
6478 char ** error_message)
6479{
6480 bfd_vma my_offset;
6481 long int ret_offset;
6482 struct elf_link_hash_entry * myh;
6483 struct elf32_arm_link_hash_table * globals;
6484
6485 myh = find_arm_glue (info, name, error_message);
6486 if (myh == NULL)
6487 return NULL;
6488
6489 globals = elf32_arm_hash_table (info);
6490 BFD_ASSERT (globals != NULL);
6491 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6492
6493 my_offset = myh->root.u.def.value;
6494
6495 if ((my_offset & 0x01) == 0x01)
6496 {
6497 if (sym_sec != NULL
6498 && sym_sec->owner != NULL
6499 && !INTERWORK_FLAG (sym_sec->owner))
6500 {
6501 (*_bfd_error_handler)
6502 (_("%B(%s): warning: interworking not enabled.\n"
6503 " first occurrence: %B: arm call to thumb"),
6504 sym_sec->owner, input_bfd, name);
6505 }
6506
6507 --my_offset;
6508 myh->root.u.def.value = my_offset;
6509
6510 if (info->shared || globals->root.is_relocatable_executable
6511 || globals->pic_veneer)
6512 {
6513 /* For relocatable objects we can't use absolute addresses,
6514 so construct the address from a relative offset. */
6515 /* TODO: If the offset is small it's probably worth
6516 constructing the address with adds. */
6517 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6518 s->contents + my_offset);
6519 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6520 s->contents + my_offset + 4);
6521 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6522 s->contents + my_offset + 8);
6523 /* Adjust the offset by 4 for the position of the add,
6524 and 8 for the pipeline offset. */
6525 ret_offset = (val - (s->output_offset
6526 + s->output_section->vma
6527 + my_offset + 12))
6528 | 1;
6529 bfd_put_32 (output_bfd, ret_offset,
6530 s->contents + my_offset + 12);
6531 }
6532 else if (globals->use_blx)
6533 {
6534 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6535 s->contents + my_offset);
6536
6537 /* It's a thumb address. Add the low order bit. */
6538 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6539 s->contents + my_offset + 4);
6540 }
6541 else
6542 {
6543 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6544 s->contents + my_offset);
6545
6546 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6547 s->contents + my_offset + 4);
6548
6549 /* It's a thumb address. Add the low order bit. */
6550 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6551 s->contents + my_offset + 8);
6552
6553 my_offset += 12;
6554 }
6555 }
6556
6557 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6558
6559 return myh;
6560}
6561
6562/* Arm code calling a Thumb function. */
6563
6564static int
6565elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6566 const char * name,
6567 bfd * input_bfd,
6568 bfd * output_bfd,
6569 asection * input_section,
6570 bfd_byte * hit_data,
6571 asection * sym_sec,
6572 bfd_vma offset,
6573 bfd_signed_vma addend,
6574 bfd_vma val,
6575 char **error_message)
6576{
6577 unsigned long int tmp;
6578 bfd_vma my_offset;
6579 asection * s;
6580 long int ret_offset;
6581 struct elf_link_hash_entry * myh;
6582 struct elf32_arm_link_hash_table * globals;
6583
6584 globals = elf32_arm_hash_table (info);
6585 BFD_ASSERT (globals != NULL);
6586 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6587
6588 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6589 ARM2THUMB_GLUE_SECTION_NAME);
6590 BFD_ASSERT (s != NULL);
6591 BFD_ASSERT (s->contents != NULL);
6592 BFD_ASSERT (s->output_section != NULL);
6593
6594 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6595 sym_sec, val, s, error_message);
6596 if (!myh)
6597 return FALSE;
6598
6599 my_offset = myh->root.u.def.value;
6600 tmp = bfd_get_32 (input_bfd, hit_data);
6601 tmp = tmp & 0xFF000000;
6602
6603 /* Somehow these are both 4 too far, so subtract 8. */
6604 ret_offset = (s->output_offset
6605 + my_offset
6606 + s->output_section->vma
6607 - (input_section->output_offset
6608 + input_section->output_section->vma
6609 + offset + addend)
6610 - 8);
6611
6612 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6613
6614 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6615
6616 return TRUE;
6617}
6618
6619/* Populate Arm stub for an exported Thumb function. */
6620
6621static bfd_boolean
6622elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6623{
6624 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6625 asection * s;
6626 struct elf_link_hash_entry * myh;
6627 struct elf32_arm_link_hash_entry *eh;
6628 struct elf32_arm_link_hash_table * globals;
6629 asection *sec;
6630 bfd_vma val;
6631 char *error_message;
6632
6633 eh = elf32_arm_hash_entry (h);
6634 /* Allocate stubs for exported Thumb functions on v4t. */
6635 if (eh->export_glue == NULL)
6636 return TRUE;
6637
6638 globals = elf32_arm_hash_table (info);
6639 BFD_ASSERT (globals != NULL);
6640 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6641
6642 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6643 ARM2THUMB_GLUE_SECTION_NAME);
6644 BFD_ASSERT (s != NULL);
6645 BFD_ASSERT (s->contents != NULL);
6646 BFD_ASSERT (s->output_section != NULL);
6647
6648 sec = eh->export_glue->root.u.def.section;
6649
6650 BFD_ASSERT (sec->output_section != NULL);
6651
6652 val = eh->export_glue->root.u.def.value + sec->output_offset
6653 + sec->output_section->vma;
6654
6655 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6656 h->root.u.def.section->owner,
6657 globals->obfd, sec, val, s,
6658 &error_message);
6659 BFD_ASSERT (myh);
6660 return TRUE;
6661}
6662
6663/* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6664
6665static bfd_vma
6666elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6667{
6668 bfd_byte *p;
6669 bfd_vma glue_addr;
6670 asection *s;
6671 struct elf32_arm_link_hash_table *globals;
6672
6673 globals = elf32_arm_hash_table (info);
6674 BFD_ASSERT (globals != NULL);
6675 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6676
6677 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6678 ARM_BX_GLUE_SECTION_NAME);
6679 BFD_ASSERT (s != NULL);
6680 BFD_ASSERT (s->contents != NULL);
6681 BFD_ASSERT (s->output_section != NULL);
6682
6683 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6684
6685 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6686
6687 if ((globals->bx_glue_offset[reg] & 1) == 0)
6688 {
6689 p = s->contents + glue_addr;
6690 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6691 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6692 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6693 globals->bx_glue_offset[reg] |= 1;
6694 }
6695
6696 return glue_addr + s->output_section->vma + s->output_offset;
6697}
6698
6699/* Generate Arm stubs for exported Thumb symbols. */
6700static void
6701elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6702 struct bfd_link_info *link_info)
6703{
6704 struct elf32_arm_link_hash_table * globals;
6705
6706 if (link_info == NULL)
6707 /* Ignore this if we are not called by the ELF backend linker. */
6708 return;
6709
6710 globals = elf32_arm_hash_table (link_info);
6711 if (globals == NULL)
6712 return;
6713
6714 /* If blx is available then exported Thumb symbols are OK and there is
6715 nothing to do. */
6716 if (globals->use_blx)
6717 return;
6718
6719 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6720 link_info);
6721}
6722
6723/* Some relocations map to different relocations depending on the
6724 target. Return the real relocation. */
6725
6726static int
6727arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6728 int r_type)
6729{
6730 switch (r_type)
6731 {
6732 case R_ARM_TARGET1:
6733 if (globals->target1_is_rel)
6734 return R_ARM_REL32;
6735 else
6736 return R_ARM_ABS32;
6737
6738 case R_ARM_TARGET2:
6739 return globals->target2_reloc;
6740
6741 default:
6742 return r_type;
6743 }
6744}
6745
6746/* Return the base VMA address which should be subtracted from real addresses
6747 when resolving @dtpoff relocation.
6748 This is PT_TLS segment p_vaddr. */
6749
6750static bfd_vma
6751dtpoff_base (struct bfd_link_info *info)
6752{
6753 /* If tls_sec is NULL, we should have signalled an error already. */
6754 if (elf_hash_table (info)->tls_sec == NULL)
6755 return 0;
6756 return elf_hash_table (info)->tls_sec->vma;
6757}
6758
6759/* Return the relocation value for @tpoff relocation
6760 if STT_TLS virtual address is ADDRESS. */
6761
6762static bfd_vma
6763tpoff (struct bfd_link_info *info, bfd_vma address)
6764{
6765 struct elf_link_hash_table *htab = elf_hash_table (info);
6766 bfd_vma base;
6767
6768 /* If tls_sec is NULL, we should have signalled an error already. */
6769 if (htab->tls_sec == NULL)
6770 return 0;
6771 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6772 return address - htab->tls_sec->vma + base;
6773}
6774
6775/* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6776 VALUE is the relocation value. */
6777
6778static bfd_reloc_status_type
6779elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6780{
6781 if (value > 0xfff)
6782 return bfd_reloc_overflow;
6783
6784 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6785 bfd_put_32 (abfd, value, data);
6786 return bfd_reloc_ok;
6787}
6788
6789/* For a given value of n, calculate the value of G_n as required to
6790 deal with group relocations. We return it in the form of an
6791 encoded constant-and-rotation, together with the final residual. If n is
6792 specified as less than zero, then final_residual is filled with the
6793 input value and no further action is performed. */
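/* For example, with VALUE == 0x12345678 and N == 0 the most significant
   8-bit group on an even bit boundary is G_0 == 0x12000000, leaving a final
   residual of 0x00345678; the function returns G_0 encoded as 0x548, i.e.
   the constant 0x48 with a rotation field of 5 (rotate right by 10).  */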
6794
6795static bfd_vma
6796calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6797{
6798 int current_n;
6799 bfd_vma g_n;
6800 bfd_vma encoded_g_n = 0;
6801 bfd_vma residual = value; /* Also known as Y_n. */
6802
6803 for (current_n = 0; current_n <= n; current_n++)
6804 {
6805 int shift;
6806
6807 /* Calculate which part of the value to mask. */
6808 if (residual == 0)
6809 shift = 0;
6810 else
6811 {
6812 int msb;
6813
6814 /* Determine the most significant bit in the residual and
6815 align the resulting value to a 2-bit boundary. */
6816 for (msb = 30; msb >= 0; msb -= 2)
6817 if (residual & (3 << msb))
6818 break;
6819
6820 /* The desired shift is now (msb - 6), or zero, whichever
6821 is the greater. */
6822 shift = msb - 6;
6823 if (shift < 0)
6824 shift = 0;
6825 }
6826
6827 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6828 g_n = residual & (0xff << shift);
6829 encoded_g_n = (g_n >> shift)
6830 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6831
6832 /* Calculate the residual for the next time around. */
6833 residual &= ~g_n;
6834 }
6835
6836 *final_residual = residual;
6837
6838 return encoded_g_n;
6839}
6840
6841/* Given an ARM instruction, determine whether it is an ADD or a SUB.
6842 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6843
6844static int
6845identify_add_or_sub (bfd_vma insn)
6846{
6847 int opcode = insn & 0x1e00000;
6848
6849 if (opcode == 1 << 23) /* ADD */
6850 return 1;
6851
6852 if (opcode == 1 << 22) /* SUB */
6853 return -1;
6854
6855 return 0;
6856}
6857
6858/* Perform a relocation as part of a final link. */
6859
6860static bfd_reloc_status_type
6861elf32_arm_final_link_relocate (reloc_howto_type * howto,
6862 bfd * input_bfd,
6863 bfd * output_bfd,
6864 asection * input_section,
6865 bfd_byte * contents,
6866 Elf_Internal_Rela * rel,
6867 bfd_vma value,
6868 struct bfd_link_info * info,
6869 asection * sym_sec,
6870 const char * sym_name,
6871 int sym_flags,
6872 struct elf_link_hash_entry * h,
6873 bfd_boolean * unresolved_reloc_p,
6874 char ** error_message)
6875{
6876 unsigned long r_type = howto->type;
6877 unsigned long r_symndx;
6878 bfd_byte * hit_data = contents + rel->r_offset;
6879 bfd * dynobj = NULL;
6880 bfd_vma * local_got_offsets;
6881 asection * sgot = NULL;
6882 asection * splt = NULL;
6883 asection * sreloc = NULL;
6884 bfd_vma addend;
6885 bfd_signed_vma signed_addend;
6886 struct elf32_arm_link_hash_table * globals;
6887
6888 globals = elf32_arm_hash_table (info);
6889 if (globals == NULL)
6890 return bfd_reloc_notsupported;
6891
6892 BFD_ASSERT (is_arm_elf (input_bfd));
6893
6894 /* Some relocation types map to different relocations depending on the
6895 target. We pick the right one here. */
6896 r_type = arm_real_reloc_type (globals, r_type);
6897 if (r_type != howto->type)
6898 howto = elf32_arm_howto_from_type (r_type);
6899
6900 /* If the start address has been set, then set the EF_ARM_HASENTRY
6901 flag. Setting this more than once is redundant, but the cost is
6902 not too high, and it keeps the code simple.
6903
6904 The test is done here, rather than somewhere else, because the
6905 start address is only set just before the final link commences.
6906
6907 Note - if the user deliberately sets a start address of 0, the
6908 flag will not be set. */
6909 if (bfd_get_start_address (output_bfd) != 0)
6910 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6911
6912 dynobj = elf_hash_table (info)->dynobj;
6913 if (dynobj)
6914 {
6915 sgot = bfd_get_section_by_name (dynobj, ".got");
6916 splt = bfd_get_section_by_name (dynobj, ".plt");
6917 }
6918 local_got_offsets = elf_local_got_offsets (input_bfd);
6919 r_symndx = ELF32_R_SYM (rel->r_info);
6920
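  /* For REL-style targets the addend is stored in the instruction itself,
     so extract it under the howto's src_mask and sign-extend it by hand;
     RELA targets simply provide it in r_addend.  */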
6921 if (globals->use_rel)
6922 {
6923 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6924
6925 if (addend & ((howto->src_mask + 1) >> 1))
6926 {
6927 signed_addend = -1;
6928 signed_addend &= ~ howto->src_mask;
6929 signed_addend |= addend;
6930 }
6931 else
6932 signed_addend = addend;
6933 }
6934 else
6935 addend = signed_addend = rel->r_addend;
6936
6937 switch (r_type)
6938 {
6939 case R_ARM_NONE:
6940 /* We don't need to find a value for this symbol. It's just a
6941 marker. */
6942 *unresolved_reloc_p = FALSE;
6943 return bfd_reloc_ok;
6944
6945 case R_ARM_ABS12:
6946 if (!globals->vxworks_p)
6947 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6948
6949 case R_ARM_PC24:
6950 case R_ARM_ABS32:
6951 case R_ARM_ABS32_NOI:
6952 case R_ARM_REL32:
6953 case R_ARM_REL32_NOI:
6954 case R_ARM_CALL:
6955 case R_ARM_JUMP24:
6956 case R_ARM_XPC25:
6957 case R_ARM_PREL31:
6958 case R_ARM_PLT32:
6959 /* Handle relocations which should use the PLT entry. ABS32/REL32
6960 will use the symbol's value, which may point to a PLT entry, but we
6961 don't need to handle that here. If we created a PLT entry, all
6962 branches in this object should go to it, except if the PLT is too
6963 far away, in which case a long branch stub should be inserted. */
6964 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6965 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6966 && r_type != R_ARM_CALL
6967 && r_type != R_ARM_JUMP24
6968 && r_type != R_ARM_PLT32)
6969 && h != NULL
6970 && splt != NULL
6971 && h->plt.offset != (bfd_vma) -1)
6972 {
6973 /* If we've created a .plt section, and assigned a PLT entry to
6974 this function, it should not be known to bind locally. If
6975 it were, we would have cleared the PLT entry. */
6976 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6977
6978 value = (splt->output_section->vma
6979 + splt->output_offset
6980 + h->plt.offset);
6981 *unresolved_reloc_p = FALSE;
6982 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6983 contents, rel->r_offset, value,
6984 rel->r_addend);
6985 }
6986
6987 /* When generating a shared object or relocatable executable, these
6988 relocations are copied into the output file to be resolved at
6989 run time. */
6990 if ((info->shared || globals->root.is_relocatable_executable)
6991 && (input_section->flags & SEC_ALLOC)
6992 && !(globals->vxworks_p
6993 && strcmp (input_section->output_section->name,
6994 ".tls_vars") == 0)
6995 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6996 || !SYMBOL_CALLS_LOCAL (info, h))
6997 && (!strstr (input_section->name, STUB_SUFFIX))
6998 && (h == NULL
6999 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7000 || h->root.type != bfd_link_hash_undefweak)
7001 && r_type != R_ARM_PC24
7002 && r_type != R_ARM_CALL
7003 && r_type != R_ARM_JUMP24
7004 && r_type != R_ARM_PREL31
7005 && r_type != R_ARM_PLT32)
7006 {
7007 Elf_Internal_Rela outrel;
7008 bfd_byte *loc;
7009 bfd_boolean skip, relocate;
7010
7011 *unresolved_reloc_p = FALSE;
7012
7013 if (sreloc == NULL)
7014 {
7015 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
7016 ! globals->use_rel);
7017
7018 if (sreloc == NULL)
7019 return bfd_reloc_notsupported;
7020 }
7021
7022 skip = FALSE;
7023 relocate = FALSE;
7024
7025 outrel.r_addend = addend;
7026 outrel.r_offset =
7027 _bfd_elf_section_offset (output_bfd, info, input_section,
7028 rel->r_offset);
7029 if (outrel.r_offset == (bfd_vma) -1)
7030 skip = TRUE;
7031 else if (outrel.r_offset == (bfd_vma) -2)
7032 skip = TRUE, relocate = TRUE;
7033 outrel.r_offset += (input_section->output_section->vma
7034 + input_section->output_offset);
7035
7036 if (skip)
7037 memset (&outrel, 0, sizeof outrel);
7038 else if (h != NULL
7039 && h->dynindx != -1
7040 && (!info->shared
7041 || !info->symbolic
7042 || !h->def_regular))
7043 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
7044 else
7045 {
7046 int symbol;
7047
7048 /* This symbol is local, or marked to become local. */
7049 if (sym_flags == STT_ARM_TFUNC)
7050 value |= 1;
7051 if (globals->symbian_p)
7052 {
7053 asection *osec;
7054
7055 /* On Symbian OS, the data segment and text segment
7056 can be relocated independently. Therefore, we
7057 must indicate the segment to which this
7058 relocation is relative. The BPABI allows us to
7059 use any symbol in the right segment; we just use
7060 the section symbol as it is convenient. (We
7061 cannot use the symbol given by "h" directly as it
7062 will not appear in the dynamic symbol table.)
7063
7064 Note that the dynamic linker ignores the section
7065 symbol value, so we don't subtract osec->vma
7066 from the emitted reloc addend. */
7067 if (sym_sec)
7068 osec = sym_sec->output_section;
7069 else
7070 osec = input_section->output_section;
7071 symbol = elf_section_data (osec)->dynindx;
7072 if (symbol == 0)
7073 {
7074 struct elf_link_hash_table *htab = elf_hash_table (info);
7075
7076 if ((osec->flags & SEC_READONLY) == 0
7077 && htab->data_index_section != NULL)
7078 osec = htab->data_index_section;
7079 else
7080 osec = htab->text_index_section;
7081 symbol = elf_section_data (osec)->dynindx;
7082 }
7083 BFD_ASSERT (symbol != 0);
7084 }
7085 else
7086 /* On SVR4-ish systems, the dynamic loader cannot
7087 relocate the text and data segments independently,
7088 so the symbol does not matter. */
7089 symbol = 0;
7090 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
7091 if (globals->use_rel)
7092 relocate = TRUE;
7093 else
7094 outrel.r_addend += value;
7095 }
7096
7097 loc = sreloc->contents;
7098 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
7099 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7100
7101 /* If this reloc is against an external symbol, we do not want to
7102 fiddle with the addend. Otherwise, we need to include the symbol
7103 value so that it becomes an addend for the dynamic reloc. */
7104 if (! relocate)
7105 return bfd_reloc_ok;
7106
7107 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7108 contents, rel->r_offset, value,
7109 (bfd_vma) 0);
7110 }
7111 else switch (r_type)
7112 {
7113 case R_ARM_ABS12:
7114 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
7115
7116 case R_ARM_XPC25: /* Arm BLX instruction. */
7117 case R_ARM_CALL:
7118 case R_ARM_JUMP24:
7119 case R_ARM_PC24: /* Arm B/BL instruction. */
7120 case R_ARM_PLT32:
7121 {
7122 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7123
7124 if (r_type == R_ARM_XPC25)
7125 {
7126 /* Check for Arm calling Arm function. */
7127 /* FIXME: Should we translate the instruction into a BL
7128 instruction instead ? */
7129 if (sym_flags != STT_ARM_TFUNC)
7130 (*_bfd_error_handler)
7131 (_("%B: Warning: Arm BLX instruction targets Arm function '%s'."),
7132 input_bfd,
7133 h ? h->root.root.string : "(local)");
7134 }
7135 else if (r_type == R_ARM_PC24)
7136 {
7137 /* Check for Arm calling Thumb function. */
7138 if (sym_flags == STT_ARM_TFUNC)
7139 {
7140 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
7141 output_bfd, input_section,
7142 hit_data, sym_sec, rel->r_offset,
7143 signed_addend, value,
7144 error_message))
7145 return bfd_reloc_ok;
7146 else
7147 return bfd_reloc_dangerous;
7148 }
7149 }
7150
7151 /* Check if a stub has to be inserted because the
7152 destination is too far or we are changing mode. */
7153 if ( r_type == R_ARM_CALL
7154 || r_type == R_ARM_JUMP24
7155 || r_type == R_ARM_PLT32)
7156 {
7157 enum elf32_arm_stub_type stub_type = arm_stub_none;
7158 struct elf32_arm_link_hash_entry *hash;
7159
7160 hash = (struct elf32_arm_link_hash_entry *) h;
7161 stub_type = arm_type_of_stub (info, input_section, rel,
7162 &sym_flags, hash,
7163 value, sym_sec,
7164 input_bfd, sym_name);
7165
7166 if (stub_type != arm_stub_none)
7167 {
7168 /* The target is out of reach, so redirect the
7169 branch to the local stub for this function. */
7170
7171 stub_entry = elf32_arm_get_stub_entry (input_section,
7172 sym_sec, h,
7173 rel, globals,
7174 stub_type);
7175 if (stub_entry != NULL)
7176 value = (stub_entry->stub_offset
7177 + stub_entry->stub_sec->output_offset
7178 + stub_entry->stub_sec->output_section->vma);
7179 }
7180 else
7181 {
7182 /* If the call goes through a PLT entry, make sure to
7183 check distance to the right destination address. */
7184 if (h != NULL
7185 && splt != NULL
7186 && h->plt.offset != (bfd_vma) -1)
7187 {
7188 value = (splt->output_section->vma
7189 + splt->output_offset
7190 + h->plt.offset);
7191 *unresolved_reloc_p = FALSE;
7192 /* The PLT entry is in ARM mode, regardless of the
7193 target function. */
7194 sym_flags = STT_FUNC;
7195 }
7196 }
7197 }
7198
7199 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7200 where:
7201 S is the address of the symbol in the relocation.
7202 P is address of the instruction being relocated.
7203 A is the addend (extracted from the instruction) in bytes.
7204
7205 S is held in 'value'.
7206 P is the base address of the section containing the
7207 instruction plus the offset of the reloc into that
7208 section, ie:
7209 (input_section->output_section->vma +
7210 input_section->output_offset +
7211 rel->r_offset).
7212 A is the addend, converted into bytes, ie:
7213 (signed_addend * 4)
7214
7215 Note: None of these operations have knowledge of the pipeline
7216 size of the processor, thus it is up to the assembler to
7217 encode this information into the addend. */
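	  /* Note that a REL-format BL normally carries an in-place addend of
	     -2 words (0xfffffe in the 24-bit field), which after the shift
	     below supplies the -8 byte PC bias mentioned above.  */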
7218 value -= (input_section->output_section->vma
7219 + input_section->output_offset);
7220 value -= rel->r_offset;
7221 if (globals->use_rel)
7222 value += (signed_addend << howto->size);
7223 else
7224 /* RELA addends do not have to be adjusted by howto->size. */
7225 value += signed_addend;
7226
7227 signed_addend = value;
7228 signed_addend >>= howto->rightshift;
7229
7230 /* A branch to an undefined weak symbol is turned into a jump to
7231 the next instruction unless a PLT entry will be created.
7232 Do the same for local undefined symbols (but not for STN_UNDEF).
7233 The jump to the next instruction is optimized as a NOP depending
7234 on the architecture. */
7235 if (h ? (h->root.type == bfd_link_hash_undefweak
7236 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7237 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
7238 {
7239 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
7240
7241 if (arch_has_arm_nop (globals))
7242 value |= 0x0320f000;
7243 else
7244 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
7245 }
7246 else
7247 {
7248 /* Perform a signed range check. */
7249 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7250 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7251 return bfd_reloc_overflow;
7252
7253 addend = (value & 2);
7254
7255 value = (signed_addend & howto->dst_mask)
7256 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7257
7258 if (r_type == R_ARM_CALL)
7259 {
7260 /* Set the H bit in the BLX instruction. */
7261 if (sym_flags == STT_ARM_TFUNC)
7262 {
7263 if (addend)
7264 value |= (1 << 24);
7265 else
7266 value &= ~(bfd_vma)(1 << 24);
7267 }
7268
7269 /* Select the correct instruction (BL or BLX). */
7270 /* Only if we are not handling a BL to a stub. In this
7271 case, mode switching is performed by the stub. */
7272 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7273 value |= (1 << 28);
7274 else
7275 {
7276 value &= ~(bfd_vma)(1 << 28);
7277 value |= (1 << 24);
7278 }
7279 }
7280 }
7281 }
7282 break;
7283
7284 case R_ARM_ABS32:
7285 value += addend;
7286 if (sym_flags == STT_ARM_TFUNC)
7287 value |= 1;
7288 break;
7289
7290 case R_ARM_ABS32_NOI:
7291 value += addend;
7292 break;
7293
7294 case R_ARM_REL32:
7295 value += addend;
7296 if (sym_flags == STT_ARM_TFUNC)
7297 value |= 1;
7298 value -= (input_section->output_section->vma
7299 + input_section->output_offset + rel->r_offset);
7300 break;
7301
7302 case R_ARM_REL32_NOI:
7303 value += addend;
7304 value -= (input_section->output_section->vma
7305 + input_section->output_offset + rel->r_offset);
7306 break;
7307
7308 case R_ARM_PREL31:
7309 value -= (input_section->output_section->vma
7310 + input_section->output_offset + rel->r_offset);
7311 value += signed_addend;
7312 if (! h || h->root.type != bfd_link_hash_undefweak)
7313 {
7314 /* Check for overflow. */
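	      /* The test below is non-zero exactly when bits 30 and 31 of
		 VALUE differ, i.e. when the result does not fit in the
		 signed 31-bit field of a PREL31 relocation.  */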
7315 if ((value ^ (value >> 1)) & (1 << 30))
7316 return bfd_reloc_overflow;
7317 }
7318 value &= 0x7fffffff;
7319 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7320 if (sym_flags == STT_ARM_TFUNC)
7321 value |= 1;
7322 break;
7323 }
7324
7325 bfd_put_32 (input_bfd, value, hit_data);
7326 return bfd_reloc_ok;
7327
7328 case R_ARM_ABS8:
7329 value += addend;
7330
7331 /* There is no way to tell whether the user intended to use a signed or
7332 unsigned addend. When checking for overflow we accept either,
7333 as specified by the AAELF. */
7334 if ((long) value > 0xff || (long) value < -0x80)
7335 return bfd_reloc_overflow;
7336
7337 bfd_put_8 (input_bfd, value, hit_data);
7338 return bfd_reloc_ok;
7339
7340 case R_ARM_ABS16:
7341 value += addend;
7342
7343 /* See comment for R_ARM_ABS8. */
7344 if ((long) value > 0xffff || (long) value < -0x8000)
7345 return bfd_reloc_overflow;
7346
7347 bfd_put_16 (input_bfd, value, hit_data);
7348 return bfd_reloc_ok;
7349
7350 case R_ARM_THM_ABS5:
7351 /* Support ldr and str instructions for the thumb. */
7352 if (globals->use_rel)
7353 {
7354 /* Need to refetch addend. */
7355 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7356 /* ??? Need to determine shift amount from operand size. */
7357 addend >>= howto->rightshift;
7358 }
7359 value += addend;
7360
7361 /* ??? Isn't value unsigned? */
7362 if ((long) value > 0x1f || (long) value < -0x10)
7363 return bfd_reloc_overflow;
7364
7365 /* ??? Value needs to be properly shifted into place first. */
7366 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7367 bfd_put_16 (input_bfd, value, hit_data);
7368 return bfd_reloc_ok;
7369
7370 case R_ARM_THM_ALU_PREL_11_0:
7371 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
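     /* The Thumb-2 ADDW/SUBW 12-bit immediate is split across the encoding
	as i:imm3:imm8 (bit 26, bits 14-12 and bits 7-0 of the 32-bit
	instruction image built below); the REL extraction reassembles
	those three fields.  */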
7372 {
7373 bfd_vma insn;
7374 bfd_signed_vma relocation;
7375
7376 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7377 | bfd_get_16 (input_bfd, hit_data + 2);
7378
7379 if (globals->use_rel)
7380 {
7381 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7382 | ((insn & (1 << 26)) >> 15);
7383 if (insn & 0xf00000)
7384 signed_addend = -signed_addend;
7385 }
7386
7387 relocation = value + signed_addend;
7388 relocation -= (input_section->output_section->vma
7389 + input_section->output_offset
7390 + rel->r_offset);
7391
7392 value = abs (relocation);
7393
7394 if (value >= 0x1000)
7395 return bfd_reloc_overflow;
7396
7397 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7398 | ((value & 0x700) << 4)
7399 | ((value & 0x800) << 15);
7400 if (relocation < 0)
7401 insn |= 0xa00000;
7402
7403 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7404 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7405
7406 return bfd_reloc_ok;
7407 }
7408
7409 case R_ARM_THM_PC8:
7410 /* PR 10073: This reloc is not generated by the GNU toolchain,
7411 but it is supported for compatibility with third party libraries
7412 generated by other compilers, specifically the ARM/IAR compiler. */
7413 {
7414 bfd_vma insn;
7415 bfd_signed_vma relocation;
7416
7417 insn = bfd_get_16 (input_bfd, hit_data);
7418
7419 if (globals->use_rel)
7420 addend = (insn & 0x00ff) << 2;
7421
7422 relocation = value + addend;
7423 relocation -= (input_section->output_section->vma
7424 + input_section->output_offset
7425 + rel->r_offset);
7426
7427 value = abs (relocation);
7428
7429 /* We do not check for overflow of this reloc. Although strictly
7430 speaking this is incorrect, it appears to be necessary in order
7431 to work with IAR generated relocs. Since GCC and GAS do not
7432 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7433 a problem for them. */
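	/* The Thumb LDR (literal) immediate is an 8-bit word offset, so only
	   bits 2-9 of the PC-relative value (0 to 1020 bytes) can be
	   represented; the mask below keeps exactly those bits.  */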
7434 value &= 0x3fc;
7435
7436 insn = (insn & 0xff00) | (value >> 2);
7437
7438 bfd_put_16 (input_bfd, insn, hit_data);
7439
7440 return bfd_reloc_ok;
7441 }
7442
7443 case R_ARM_THM_PC12:
7444 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7445 {
7446 bfd_vma insn;
7447 bfd_signed_vma relocation;
7448
7449 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7450 | bfd_get_16 (input_bfd, hit_data + 2);
7451
7452 if (globals->use_rel)
7453 {
7454 signed_addend = insn & 0xfff;
7455 if (!(insn & (1 << 23)))
7456 signed_addend = -signed_addend;
7457 }
7458
7459 relocation = value + signed_addend;
7460 relocation -= (input_section->output_section->vma
7461 + input_section->output_offset
7462 + rel->r_offset);
7463
7464 value = abs (relocation);
7465
7466 if (value >= 0x1000)
7467 return bfd_reloc_overflow;
7468
7469 insn = (insn & 0xff7ff000) | value;
7470 if (relocation >= 0)
7471 insn |= (1 << 23);
7472
7473 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7474 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7475
7476 return bfd_reloc_ok;
7477 }
7478
7479 case R_ARM_THM_XPC22:
7480 case R_ARM_THM_CALL:
7481 case R_ARM_THM_JUMP24:
7482 /* Thumb BL (branch long instruction). */
7483 {
7484 bfd_vma relocation;
7485 bfd_vma reloc_sign;
7486 bfd_boolean overflow = FALSE;
7487 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7488 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7489 bfd_signed_vma reloc_signed_max;
7490 bfd_signed_vma reloc_signed_min;
7491 bfd_vma check;
7492 bfd_signed_vma signed_check;
7493 int bitsize;
7494 const int thumb2 = using_thumb2 (globals);
7495
7496 /* A branch to an undefined weak symbol is turned into a jump to
7497 the next instruction unless a PLT entry will be created.
7498 The jump to the next instruction is optimized as a NOP.W for
7499 Thumb-2 enabled architectures. */
7500 if (h && h->root.type == bfd_link_hash_undefweak
7501 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7502 {
7503 if (arch_has_thumb2_nop (globals))
7504 {
7505 bfd_put_16 (input_bfd, 0xf3af, hit_data);
7506 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
7507 }
7508 else
7509 {
7510 bfd_put_16 (input_bfd, 0xe000, hit_data);
7511 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7512 }
7513 return bfd_reloc_ok;
7514 }
7515
7516 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7517 with Thumb-1) involving the J1 and J2 bits. */
7518 if (globals->use_rel)
7519 {
7520 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7521 bfd_vma upper = upper_insn & 0x3ff;
7522 bfd_vma lower = lower_insn & 0x7ff;
7523 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7524 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7525 bfd_vma i1 = j1 ^ s ? 0 : 1;
7526 bfd_vma i2 = j2 ^ s ? 0 : 1;
7527
7528 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7529 /* Sign extend. */
7530 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
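	    /* The expression above is a branch-free sign extension of the
	       25-bit offset S:I1:I2:imm10:imm11:'0': when S is set, 2^24 is
	       subtracted from the low 24 bits; when it is clear, bit 24 is
	       first set and then cancelled by the same subtraction.  */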
7531
7532 signed_addend = addend;
7533 }
7534
7535 if (r_type == R_ARM_THM_XPC22)
7536 {
7537 /* Check for Thumb to Thumb call. */
7538 /* FIXME: Should we translate the instruction into a BL
7539 instruction instead ? */
7540 if (sym_flags == STT_ARM_TFUNC)
7541 (*_bfd_error_handler)
7542 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7543 input_bfd,
7544 h ? h->root.root.string : "(local)");
7545 }
7546 else
7547 {
7548 /* If it is not a call to Thumb, assume call to Arm.
7549 If it is a call relative to a section name, then it is not a
7550 function call at all, but rather a long jump. Calls through
7551 the PLT do not require stubs. */
7552 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7553 && (h == NULL || splt == NULL
7554 || h->plt.offset == (bfd_vma) -1))
7555 {
7556 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7557 {
7558 /* Convert BL to BLX. */
7559 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7560 }
7561 else if (( r_type != R_ARM_THM_CALL)
7562 && (r_type != R_ARM_THM_JUMP24))
7563 {
7564 if (elf32_thumb_to_arm_stub
7565 (info, sym_name, input_bfd, output_bfd, input_section,
7566 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7567 error_message))
7568 return bfd_reloc_ok;
7569 else
7570 return bfd_reloc_dangerous;
7571 }
7572 }
7573 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7574 && r_type == R_ARM_THM_CALL)
7575 {
7576 /* Make sure this is a BL. */
7577 lower_insn |= 0x1800;
7578 }
7579 }
7580
7581 enum elf32_arm_stub_type stub_type = arm_stub_none;
7582 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7583 {
7584 /* Check if a stub has to be inserted because the destination
7585 is too far. */
7586 struct elf32_arm_stub_hash_entry *stub_entry;
7587 struct elf32_arm_link_hash_entry *hash;
7588
7589 hash = (struct elf32_arm_link_hash_entry *) h;
7590
7591 stub_type = arm_type_of_stub (info, input_section, rel,
7592 &sym_flags, hash, value, sym_sec,
7593 input_bfd, sym_name);
7594
7595 if (stub_type != arm_stub_none)
7596 {
7597 /* The target is out of reach or we are changing modes, so
7598 redirect the branch to the local stub for this
7599 function. */
7600 stub_entry = elf32_arm_get_stub_entry (input_section,
7601 sym_sec, h,
7602 rel, globals,
7603 stub_type);
7604 if (stub_entry != NULL)
7605 value = (stub_entry->stub_offset
7606 + stub_entry->stub_sec->output_offset
7607 + stub_entry->stub_sec->output_section->vma);
7608
7609 /* If this call becomes a call to Arm, force BLX. */
7610 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7611 {
7612 if ((stub_entry
7613 && !arm_stub_is_thumb (stub_entry->stub_type))
7614 || (sym_flags != STT_ARM_TFUNC))
7615 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7616 }
7617 }
7618 }
7619
7620 /* Handle calls via the PLT. */
7621 if (stub_type == arm_stub_none
7622 && h != NULL
7623 && splt != NULL
7624 && h->plt.offset != (bfd_vma) -1)
7625 {
7626 value = (splt->output_section->vma
7627 + splt->output_offset
7628 + h->plt.offset);
7629
7630 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7631 {
7632 /* If the Thumb BLX instruction is available, convert
7633 the BL to a BLX instruction to call the ARM-mode
7634 PLT entry. */
7635 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7636 sym_flags = STT_FUNC;
7637 }
7638 else
7639 {
7640 /* Target the Thumb stub before the ARM PLT entry. */
7641 value -= PLT_THUMB_STUB_SIZE;
7642 sym_flags = STT_ARM_TFUNC;
7643 }
7644 *unresolved_reloc_p = FALSE;
7645 }
7646
7647 relocation = value + signed_addend;
7648
7649 relocation -= (input_section->output_section->vma
7650 + input_section->output_offset
7651 + rel->r_offset);
7652
7653 check = relocation >> howto->rightshift;
7654
7655 /* If this is a signed value, the rightshift just dropped
7656 leading 1 bits (assuming twos complement). */
7657 if ((bfd_signed_vma) relocation >= 0)
7658 signed_check = check;
7659 else
7660 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7661
7662 /* Calculate the permissible maximum and minimum values for
7663 this relocation according to whether we're relocating for
7664 Thumb-2 or not. */
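	/* A Thumb-2 BL/B.W can reach roughly +/-16MB, whereas the original
	   Thumb-1 encoding only reaches +/-4MB; dropping two bits from the
	   howto bitsize below models the smaller range.  */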
7665 bitsize = howto->bitsize;
7666 if (!thumb2)
7667 bitsize -= 2;
7668 reloc_signed_max = (1 << (bitsize - 1)) - 1;
7669 reloc_signed_min = ~reloc_signed_max;
7670
7671 /* Assumes two's complement. */
7672 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7673 overflow = TRUE;
7674
7675 if ((lower_insn & 0x5000) == 0x4000)
7676 /* For a BLX instruction, make sure that the relocation is rounded up
7677 to a word boundary. This follows the semantics of the instruction
7678 which specifies that bit 1 of the target address will come from bit
7679 1 of the base address. */
7680 relocation = (relocation + 2) & ~ 3;
7681
7682 /* Put RELOCATION back into the insn. Assumes two's complement.
7683 We use the Thumb-2 encoding, which is safe even if dealing with
7684 a Thumb-1 instruction by virtue of our overflow check above. */
7685 reloc_sign = (signed_check < 0) ? 1 : 0;
7686 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7687 | ((relocation >> 12) & 0x3ff)
7688 | (reloc_sign << 10);
7689 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7690 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7691 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7692 | ((relocation >> 1) & 0x7ff);
7693
7694 /* Put the relocated value back in the object file: */
7695 bfd_put_16 (input_bfd, upper_insn, hit_data);
7696 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7697
7698 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7699 }
7700 break;
7701
7702 case R_ARM_THM_JUMP19:
7703 /* Thumb32 conditional branch instruction. */
7704 {
7705 bfd_vma relocation;
7706 bfd_boolean overflow = FALSE;
7707 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7708 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7709 bfd_signed_vma reloc_signed_max = 0xffffe;
7710 bfd_signed_vma reloc_signed_min = -0x100000;
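	/* These limits correspond to the +/-1MB reach of the Thumb-2
	   conditional branch (B<cond>.W), checked on the byte offset.  */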
7711 bfd_signed_vma signed_check;
7712
7713 /* Need to refetch the addend, reconstruct the top three bits,
7714 and squish the two 11 bit pieces together. */
7715 if (globals->use_rel)
7716 {
7717 bfd_vma S = (upper_insn & 0x0400) >> 10;
7718 bfd_vma upper = (upper_insn & 0x003f);
7719 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7720 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7721 bfd_vma lower = (lower_insn & 0x07ff);
7722
7723 upper |= J1 << 6;
7724 upper |= J2 << 7;
7725 upper |= (!S) << 8;
7726 upper -= 0x0100; /* Sign extend. */
7727
7728 addend = (upper << 12) | (lower << 1);
7729 signed_addend = addend;
7730 }
7731
7732 /* Handle calls via the PLT. */
7733 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7734 {
7735 value = (splt->output_section->vma
7736 + splt->output_offset
7737 + h->plt.offset);
7738 /* Target the Thumb stub before the ARM PLT entry. */
7739 value -= PLT_THUMB_STUB_SIZE;
7740 *unresolved_reloc_p = FALSE;
7741 }
7742
7743 /* ??? Should handle interworking? GCC might someday try to
7744 use this for tail calls. */
7745
7746 relocation = value + signed_addend;
7747 relocation -= (input_section->output_section->vma
7748 + input_section->output_offset
7749 + rel->r_offset);
7750 signed_check = (bfd_signed_vma) relocation;
7751
7752 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7753 overflow = TRUE;
7754
7755 /* Put RELOCATION back into the insn. */
7756 {
7757 bfd_vma S = (relocation & 0x00100000) >> 20;
7758 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7759 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7760 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7761 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7762
7763 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7764 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7765 }
7766
7767 /* Put the relocated value back in the object file: */
7768 bfd_put_16 (input_bfd, upper_insn, hit_data);
7769 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7770
7771 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7772 }
7773
7774 case R_ARM_THM_JUMP11:
7775 case R_ARM_THM_JUMP8:
7776 case R_ARM_THM_JUMP6:
7777 /* Thumb B (branch) instruction. */
7778 {
7779 bfd_signed_vma relocation;
7780 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7781 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7782 bfd_signed_vma signed_check;
7783
7784 /* CZB cannot jump backward. */
7785 if (r_type == R_ARM_THM_JUMP6)
7786 reloc_signed_min = 0;
7787
7788 if (globals->use_rel)
7789 {
7790 /* Need to refetch addend. */
7791 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7792 if (addend & ((howto->src_mask + 1) >> 1))
7793 {
7794 signed_addend = -1;
7795 signed_addend &= ~ howto->src_mask;
7796 signed_addend |= addend;
7797 }
7798 else
7799 signed_addend = addend;
7800 /* The value in the insn has been right shifted. We need to
7801 undo this, so that we can perform the address calculation
7802 in terms of bytes. */
7803 signed_addend <<= howto->rightshift;
7804 }
7805 relocation = value + signed_addend;
7806
7807 relocation -= (input_section->output_section->vma
7808 + input_section->output_offset
7809 + rel->r_offset);
7810
7811 relocation >>= howto->rightshift;
7812 signed_check = relocation;
7813
7814 if (r_type == R_ARM_THM_JUMP6)
7815 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7816 else
7817 relocation &= howto->dst_mask;
7818 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7819
7820 bfd_put_16 (input_bfd, relocation, hit_data);
7821
7822 /* Assumes two's complement. */
7823 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7824 return bfd_reloc_overflow;
7825
7826 return bfd_reloc_ok;
7827 }
7828
7829 case R_ARM_ALU_PCREL7_0:
7830 case R_ARM_ALU_PCREL15_8:
7831 case R_ARM_ALU_PCREL23_15:
7832 {
7833 bfd_vma insn;
7834 bfd_vma relocation;
7835
7836 insn = bfd_get_32 (input_bfd, hit_data);
7837 if (globals->use_rel)
7838 {
7839 /* Extract the addend. */
7840 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7841 signed_addend = addend;
7842 }
7843 relocation = value + signed_addend;
7844
7845 relocation -= (input_section->output_section->vma
7846 + input_section->output_offset
7847 + rel->r_offset);
7848 insn = (insn & ~0xfff)
7849 | ((howto->bitpos << 7) & 0xf00)
7850 | ((relocation >> howto->bitpos) & 0xff);
7851 bfd_put_32 (input_bfd, insn, hit_data);
7852 }
7853 return bfd_reloc_ok;
7854
7855 case R_ARM_GNU_VTINHERIT:
7856 case R_ARM_GNU_VTENTRY:
7857 return bfd_reloc_ok;
7858
7859 case R_ARM_GOTOFF32:
7860 /* Relocation is relative to the start of the
7861 global offset table. */
7862
7863 BFD_ASSERT (sgot != NULL);
7864 if (sgot == NULL)
7865 return bfd_reloc_notsupported;
7866
7867 /* If we are addressing a Thumb function, we need to adjust the
7868 address by one, so that attempts to call the function pointer will
7869 correctly interpret it as Thumb code. */
7870 if (sym_flags == STT_ARM_TFUNC)
7871 value += 1;
7872
7873 /* Note that sgot->output_offset is not involved in this
7874 calculation. We always want the start of .got. If we
7875 define _GLOBAL_OFFSET_TABLE in a different way, as is
7876 permitted by the ABI, we might have to change this
7877 calculation. */
7878 value -= sgot->output_section->vma;
7879 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7880 contents, rel->r_offset, value,
7881 rel->r_addend);
7882
7883 case R_ARM_GOTPC:
7884 /* Use global offset table as symbol value. */
7885 BFD_ASSERT (sgot != NULL);
7886
7887 if (sgot == NULL)
7888 return bfd_reloc_notsupported;
7889
7890 *unresolved_reloc_p = FALSE;
7891 value = sgot->output_section->vma;
7892 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7893 contents, rel->r_offset, value,
7894 rel->r_addend);
7895
7896 case R_ARM_GOT32:
7897 case R_ARM_GOT_PREL:
7898 /* Relocation is to the entry for this symbol in the
7899 global offset table. */
7900 if (sgot == NULL)
7901 return bfd_reloc_notsupported;
7902
7903 if (h != NULL)
7904 {
7905 bfd_vma off;
7906 bfd_boolean dyn;
7907
7908 off = h->got.offset;
7909 BFD_ASSERT (off != (bfd_vma) -1);
7910 dyn = globals->root.dynamic_sections_created;
7911
7912 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7913 || (info->shared
7914 && SYMBOL_REFERENCES_LOCAL (info, h))
7915 || (ELF_ST_VISIBILITY (h->other)
7916 && h->root.type == bfd_link_hash_undefweak))
7917 {
7918 /* This is actually a static link, or it is a -Bsymbolic link
7919 and the symbol is defined locally. We must initialize this
7920 entry in the global offset table. Since the offset must
7921 always be a multiple of 4, we use the least significant bit
7922 to record whether we have initialized it already.
7923
7924 When doing a dynamic link, we create a .rel(a).got relocation
7925 entry to initialize the value. This is done in the
7926 finish_dynamic_symbol routine. */
7927 if ((off & 1) != 0)
7928 off &= ~1;
7929 else
7930 {
7931 /* If we are addressing a Thumb function, we need to
7932 adjust the address by one, so that attempts to
7933 call the function pointer will correctly
7934 interpret it as Thumb code. */
7935 if (sym_flags == STT_ARM_TFUNC)
7936 value |= 1;
7937
7938 bfd_put_32 (output_bfd, value, sgot->contents + off);
7939 h->got.offset |= 1;
7940 }
7941 }
7942 else
7943 *unresolved_reloc_p = FALSE;
7944
7945 value = sgot->output_offset + off;
7946 }
7947 else
7948 {
7949 bfd_vma off;
7950
7951 BFD_ASSERT (local_got_offsets != NULL &&
7952 local_got_offsets[r_symndx] != (bfd_vma) -1);
7953
7954 off = local_got_offsets[r_symndx];
7955
7956 /* The offset must always be a multiple of 4. We use the
7957 least significant bit to record whether we have already
7958 generated the necessary reloc. */
7959 if ((off & 1) != 0)
7960 off &= ~1;
7961 else
7962 {
7963 /* If we are addressing a Thumb function, we need to
7964 adjust the address by one, so that attempts to
7965 call the function pointer will correctly
7966 interpret it as Thumb code. */
7967 if (sym_flags == STT_ARM_TFUNC)
7968 value |= 1;
7969
7970 if (globals->use_rel)
7971 bfd_put_32 (output_bfd, value, sgot->contents + off);
7972
7973 if (info->shared)
7974 {
7975 asection * srelgot;
7976 Elf_Internal_Rela outrel;
7977 bfd_byte *loc;
7978
7979 srelgot = (bfd_get_section_by_name
7980 (dynobj, RELOC_SECTION (globals, ".got")));
7981 BFD_ASSERT (srelgot != NULL);
7982
7983 outrel.r_addend = addend + value;
7984 outrel.r_offset = (sgot->output_section->vma
7985 + sgot->output_offset
7986 + off);
7987 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7988 loc = srelgot->contents;
7989 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7990 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7991 }
7992
7993 local_got_offsets[r_symndx] |= 1;
7994 }
7995
7996 value = sgot->output_offset + off;
7997 }
7998 if (r_type != R_ARM_GOT32)
7999 value += sgot->output_section->vma;
8000
8001 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8002 contents, rel->r_offset, value,
8003 rel->r_addend);
8004
8005 case R_ARM_TLS_LDO32:
8006 value = value - dtpoff_base (info);
8007
8008 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8009 contents, rel->r_offset, value,
8010 rel->r_addend);
8011
8012 case R_ARM_TLS_LDM32:
8013 {
8014 bfd_vma off;
8015
8016 if (globals->sgot == NULL)
8017 abort ();
8018
8019 off = globals->tls_ldm_got.offset;
8020
8021 if ((off & 1) != 0)
8022 off &= ~1;
8023 else
8024 {
8025 /* If we don't know the module number, create a relocation
8026 for it. */
8027 if (info->shared)
8028 {
8029 Elf_Internal_Rela outrel;
8030 bfd_byte *loc;
8031
8032 if (globals->srelgot == NULL)
8033 abort ();
8034
8035 outrel.r_addend = 0;
8036 outrel.r_offset = (globals->sgot->output_section->vma
8037 + globals->sgot->output_offset + off);
8038 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
8039
8040 if (globals->use_rel)
8041 bfd_put_32 (output_bfd, outrel.r_addend,
8042 globals->sgot->contents + off);
8043
8044 loc = globals->srelgot->contents;
8045 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
8046 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8047 }
8048 else
8049 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
8050
8051 globals->tls_ldm_got.offset |= 1;
8052 }
8053
8054 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8055 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8056
8057 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8058 contents, rel->r_offset, value,
8059 rel->r_addend);
8060 }
8061
8062 case R_ARM_TLS_GD32:
8063 case R_ARM_TLS_IE32:
8064 {
8065 bfd_vma off;
8066 int indx;
8067 char tls_type;
8068
8069 if (globals->sgot == NULL)
8070 abort ();
8071
8072 indx = 0;
8073 if (h != NULL)
8074 {
8075 bfd_boolean dyn;
8076 dyn = globals->root.dynamic_sections_created;
8077 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
8078 && (!info->shared
8079 || !SYMBOL_REFERENCES_LOCAL (info, h)))
8080 {
8081 *unresolved_reloc_p = FALSE;
8082 indx = h->dynindx;
8083 }
8084 off = h->got.offset;
8085 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
8086 }
8087 else
8088 {
8089 if (local_got_offsets == NULL)
8090 abort ();
8091 off = local_got_offsets[r_symndx];
8092 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
8093 }
8094
8095 if (tls_type == GOT_UNKNOWN)
8096 abort ();
8097
8098 if ((off & 1) != 0)
8099 off &= ~1;
8100 else
8101 {
8102 bfd_boolean need_relocs = FALSE;
8103 Elf_Internal_Rela outrel;
8104 bfd_byte *loc = NULL;
8105 int cur_off = off;
8106
8107 /* The GOT entries have not been initialized yet. Do it
8108 now, and emit any relocations. If both an IE GOT and a
8109 GD GOT are necessary, we emit the GD first. */
8110
8111 if ((info->shared || indx != 0)
8112 && (h == NULL
8113 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8114 || h->root.type != bfd_link_hash_undefweak))
8115 {
8116 need_relocs = TRUE;
8117 if (globals->srelgot == NULL)
8118 abort ();
8119 loc = globals->srelgot->contents;
8120 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
8121 }
8122
8123 if (tls_type & GOT_TLS_GD)
8124 {
8125 if (need_relocs)
8126 {
8127 outrel.r_addend = 0;
8128 outrel.r_offset = (globals->sgot->output_section->vma
8129 + globals->sgot->output_offset
8130 + cur_off);
8131 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
8132
8133 if (globals->use_rel)
8134 bfd_put_32 (output_bfd, outrel.r_addend,
8135 globals->sgot->contents + cur_off);
8136
8137 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8138 globals->srelgot->reloc_count++;
8139 loc += RELOC_SIZE (globals);
8140
8141 if (indx == 0)
8142 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8143 globals->sgot->contents + cur_off + 4);
8144 else
8145 {
8146 outrel.r_addend = 0;
8147 outrel.r_info = ELF32_R_INFO (indx,
8148 R_ARM_TLS_DTPOFF32);
8149 outrel.r_offset += 4;
8150
8151 if (globals->use_rel)
8152 bfd_put_32 (output_bfd, outrel.r_addend,
8153 globals->sgot->contents + cur_off + 4);
8154
8155
8156 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8157 globals->srelgot->reloc_count++;
8158 loc += RELOC_SIZE (globals);
8159 }
8160 }
8161 else
8162 {
8163 /* If we are not emitting relocations for a
8164 general dynamic reference, then we must be in a
8165 static link or an executable link with the
8166 symbol binding locally. Mark it as belonging
8167 to module 1, the executable. */
8168 bfd_put_32 (output_bfd, 1,
8169 globals->sgot->contents + cur_off);
8170 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8171 globals->sgot->contents + cur_off + 4);
8172 }
8173
8174 cur_off += 8;
8175 }
8176
8177 if (tls_type & GOT_TLS_IE)
8178 {
8179 if (need_relocs)
8180 {
8181 if (indx == 0)
8182 outrel.r_addend = value - dtpoff_base (info);
8183 else
8184 outrel.r_addend = 0;
8185 outrel.r_offset = (globals->sgot->output_section->vma
8186 + globals->sgot->output_offset
8187 + cur_off);
8188 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
8189
8190 if (globals->use_rel)
8191 bfd_put_32 (output_bfd, outrel.r_addend,
8192 globals->sgot->contents + cur_off);
8193
8194 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8195 globals->srelgot->reloc_count++;
8196 loc += RELOC_SIZE (globals);
8197 }
8198 else
8199 bfd_put_32 (output_bfd, tpoff (info, value),
8200 globals->sgot->contents + cur_off);
8201 cur_off += 4;
8202 }
8203
8204 if (h != NULL)
8205 h->got.offset |= 1;
8206 else
8207 local_got_offsets[r_symndx] |= 1;
8208 }
8209
8210 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8211 off += 8;
8212 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8213 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8214
8215 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8216 contents, rel->r_offset, value,
8217 rel->r_addend);
8218 }
8219
8220 case R_ARM_TLS_LE32:
8221 if (info->shared)
8222 {
8223 (*_bfd_error_handler)
8224 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
8225 input_bfd, input_section,
8226 (long) rel->r_offset, howto->name);
8227 return (bfd_reloc_status_type) FALSE;
8228 }
8229 else
8230 value = tpoff (info, value);
8231
8232 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8233 contents, rel->r_offset, value,
8234 rel->r_addend);
8235
8236 case R_ARM_V4BX:
8237 if (globals->fix_v4bx)
8238 {
8239 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8240
8241 /* Ensure that we have a BX instruction. */
8242 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8243
8244 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8245 {
8246 /* Branch to veneer. */
8247 bfd_vma glue_addr;
8248 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8249 glue_addr -= input_section->output_section->vma
8250 + input_section->output_offset
8251 + rel->r_offset + 8;
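	    /* The extra 8 above accounts for the ARM PC reading as the
	       current instruction address plus 8; the rewrite below keeps
	       the original condition bits and substitutes the B opcode
	       (0x0a000000) with a word offset to the veneer.  */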
8252 insn = (insn & 0xf0000000) | 0x0a000000
8253 | ((glue_addr >> 2) & 0x00ffffff);
8254 }
8255 else
8256 {
8257 /* Preserve Rm (lowest four bits) and the condition code
8258 (highest four bits). Other bits encode MOV PC,Rm. */
8259 insn = (insn & 0xf000000f) | 0x01a0f000;
8260 }
8261
8262 bfd_put_32 (input_bfd, insn, hit_data);
8263 }
8264 return bfd_reloc_ok;
8265
8266 case R_ARM_MOVW_ABS_NC:
8267 case R_ARM_MOVT_ABS:
8268 case R_ARM_MOVW_PREL_NC:
8269 case R_ARM_MOVT_PREL:
8270 /* Until we properly support segment-base-relative addressing, we
8271 assume the segment base to be zero, as for the group relocations.
8272 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8273 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8274 case R_ARM_MOVW_BREL_NC:
8275 case R_ARM_MOVW_BREL:
8276 case R_ARM_MOVT_BREL:
8277 {
8278 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8279
8280 if (globals->use_rel)
8281 {
8282 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8283 signed_addend = (addend ^ 0x8000) - 0x8000;
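	    /* The MOVW/MOVT imm16 is stored as imm4:imm12 (bits 19-16 and
	       bits 11-0); the XOR/subtract pair above sign-extends the
	       16-bit value, e.g. 0xfffe becomes -2.  */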
8284 }
8285
8286 value += signed_addend;
8287
8288 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8289 value -= (input_section->output_section->vma
8290 + input_section->output_offset + rel->r_offset);
8291
8292 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8293 return bfd_reloc_overflow;
8294
8295 if (sym_flags == STT_ARM_TFUNC)
8296 value |= 1;
8297
8298 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8299 || r_type == R_ARM_MOVT_BREL)
8300 value >>= 16;
8301
8302 insn &= 0xfff0f000;
8303 insn |= value & 0xfff;
8304 insn |= (value & 0xf000) << 4;
8305 bfd_put_32 (input_bfd, insn, hit_data);
8306 }
8307 return bfd_reloc_ok;
8308
8309 case R_ARM_THM_MOVW_ABS_NC:
8310 case R_ARM_THM_MOVT_ABS:
8311 case R_ARM_THM_MOVW_PREL_NC:
8312 case R_ARM_THM_MOVT_PREL:
8313 /* Until we properly support segment-base-relative addressing, we
8314 assume the segment base to be zero, as for the above relocations.
8315 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8316 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8317 as R_ARM_THM_MOVT_ABS. */
8318 case R_ARM_THM_MOVW_BREL_NC:
8319 case R_ARM_THM_MOVW_BREL:
8320 case R_ARM_THM_MOVT_BREL:
8321 {
8322 bfd_vma insn;
8323
8324 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8325 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8326
8327 if (globals->use_rel)
8328 {
8329 addend = ((insn >> 4) & 0xf000)
8330 | ((insn >> 15) & 0x0800)
8331 | ((insn >> 4) & 0x0700)
8332 | (insn & 0x00ff);
8333 signed_addend = (addend ^ 0x8000) - 0x8000;
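	    /* The Thumb-2 MOVW/MOVT imm16 is split as imm4:i:imm3:imm8
	       (bits 19-16, bit 26, bits 14-12 and bits 7-0 of the combined
	       instruction image); the shifts above gather those fields
	       before the same sign-extension trick as the ARM case.  */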
8334 }
8335
8336 value += signed_addend;
8337
8338 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8339 value -= (input_section->output_section->vma
8340 + input_section->output_offset + rel->r_offset);
8341
8342 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8343 return bfd_reloc_overflow;
8344
8345 if (sym_flags == STT_ARM_TFUNC)
8346 value |= 1;
8347
8348 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8349 || r_type == R_ARM_THM_MOVT_BREL)
8350 value >>= 16;
8351
8352 insn &= 0xfbf08f00;
8353 insn |= (value & 0xf000) << 4;
8354 insn |= (value & 0x0800) << 15;
8355 insn |= (value & 0x0700) << 4;
8356 insn |= (value & 0x00ff);
8357
8358 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8359 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8360 }
8361 return bfd_reloc_ok;
8362
8363 case R_ARM_ALU_PC_G0_NC:
8364 case R_ARM_ALU_PC_G1_NC:
8365 case R_ARM_ALU_PC_G0:
8366 case R_ARM_ALU_PC_G1:
8367 case R_ARM_ALU_PC_G2:
8368 case R_ARM_ALU_SB_G0_NC:
8369 case R_ARM_ALU_SB_G1_NC:
8370 case R_ARM_ALU_SB_G0:
8371 case R_ARM_ALU_SB_G1:
8372 case R_ARM_ALU_SB_G2:
8373 {
8374 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8375 bfd_vma pc = input_section->output_section->vma
8376 + input_section->output_offset + rel->r_offset;
8377 /* sb should be the origin of the *segment* containing the symbol.
8378 It is not clear how to obtain this OS-dependent value, so we
8379 make an arbitrary choice of zero. */
8380 bfd_vma sb = 0;
8381 bfd_vma residual;
8382 bfd_vma g_n;
8383 bfd_signed_vma signed_value;
8384 int group = 0;
8385
8386 /* Determine which group of bits to select. */
8387 switch (r_type)
8388 {
8389 case R_ARM_ALU_PC_G0_NC:
8390 case R_ARM_ALU_PC_G0:
8391 case R_ARM_ALU_SB_G0_NC:
8392 case R_ARM_ALU_SB_G0:
8393 group = 0;
8394 break;
8395
8396 case R_ARM_ALU_PC_G1_NC:
8397 case R_ARM_ALU_PC_G1:
8398 case R_ARM_ALU_SB_G1_NC:
8399 case R_ARM_ALU_SB_G1:
8400 group = 1;
8401 break;
8402
8403 case R_ARM_ALU_PC_G2:
8404 case R_ARM_ALU_SB_G2:
8405 group = 2;
8406 break;
8407
8408 default:
8409 abort ();
8410 }
8411
8412 /* If REL, extract the addend from the insn. If RELA, it will
8413 have already been fetched for us. */
8414 if (globals->use_rel)
8415 {
8416 int negative;
8417 bfd_vma constant = insn & 0xff;
8418 bfd_vma rotation = (insn & 0xf00) >> 8;
8419
8420 if (rotation == 0)
8421 signed_addend = constant;
8422 else
8423 {
8424 /* Compensate for the fact that in the instruction, the
8425 rotation is stored in multiples of 2 bits. */
8426 rotation *= 2;
8427
8428 /* Rotate "constant" right by "rotation" bits. */
8429 signed_addend = (constant >> rotation) |
8430 (constant << (8 * sizeof (bfd_vma) - rotation));
8431 }
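	    /* For illustration: an encoded constant of 0xff with a rotation
	       field of 12 denotes the immediate 0xff rotated right by 24
	       bits, i.e. 0x0000ff00.  */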
8432
8433 /* Determine if the instruction is an ADD or a SUB.
8434 (For REL, this determines the sign of the addend.) */
8435 negative = identify_add_or_sub (insn);
8436 if (negative == 0)
8437 {
8438 (*_bfd_error_handler)
8439 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8440 input_bfd, input_section,
8441 (long) rel->r_offset, howto->name);
8442 return bfd_reloc_overflow;
8443 }
8444
8445 signed_addend *= negative;
8446 }
8447
8448 /* Compute the value (X) to go in the place. */
8449 if (r_type == R_ARM_ALU_PC_G0_NC
8450 || r_type == R_ARM_ALU_PC_G1_NC
8451 || r_type == R_ARM_ALU_PC_G0
8452 || r_type == R_ARM_ALU_PC_G1
8453 || r_type == R_ARM_ALU_PC_G2)
8454 /* PC relative. */
8455 signed_value = value - pc + signed_addend;
8456 else
8457 /* Section base relative. */
8458 signed_value = value - sb + signed_addend;
8459
8460 /* If the target symbol is a Thumb function, then set the
8461 Thumb bit in the address. */
8462 if (sym_flags == STT_ARM_TFUNC)
8463 signed_value |= 1;
8464
8465 /* Calculate the value of the relevant G_n, in encoded
8466 constant-with-rotation format. */
8467 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8468 &residual);
8469
8470 /* Check for overflow if required. */
8471 if ((r_type == R_ARM_ALU_PC_G0
8472 || r_type == R_ARM_ALU_PC_G1
8473 || r_type == R_ARM_ALU_PC_G2
8474 || r_type == R_ARM_ALU_SB_G0
8475 || r_type == R_ARM_ALU_SB_G1
8476 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8477 {
8478 (*_bfd_error_handler)
8479 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8480 input_bfd, input_section,
8481 (long) rel->r_offset, abs (signed_value), howto->name);
8482 return bfd_reloc_overflow;
8483 }
8484
8485 /* Mask out the value and the ADD/SUB part of the opcode; take care
8486 not to destroy the S bit. */
8487 insn &= 0xff1ff000;
8488
8489 /* Set the opcode according to whether the value to go in the
8490 place is negative. */
8491 if (signed_value < 0)
8492 insn |= 1 << 22;
8493 else
8494 insn |= 1 << 23;
8495
8496 /* Encode the offset. */
8497 insn |= g_n;
8498
8499 bfd_put_32 (input_bfd, insn, hit_data);
8500 }
8501 return bfd_reloc_ok;
8502
8503 case R_ARM_LDR_PC_G0:
8504 case R_ARM_LDR_PC_G1:
8505 case R_ARM_LDR_PC_G2:
8506 case R_ARM_LDR_SB_G0:
8507 case R_ARM_LDR_SB_G1:
8508 case R_ARM_LDR_SB_G2:
8509 {
8510 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8511 bfd_vma pc = input_section->output_section->vma
8512 + input_section->output_offset + rel->r_offset;
8513 bfd_vma sb = 0; /* See note above. */
8514 bfd_vma residual;
8515 bfd_signed_vma signed_value;
8516 int group = 0;
8517
8518 /* Determine which groups of bits to calculate. */
8519 switch (r_type)
8520 {
8521 case R_ARM_LDR_PC_G0:
8522 case R_ARM_LDR_SB_G0:
8523 group = 0;
8524 break;
8525
8526 case R_ARM_LDR_PC_G1:
8527 case R_ARM_LDR_SB_G1:
8528 group = 1;
8529 break;
8530
8531 case R_ARM_LDR_PC_G2:
8532 case R_ARM_LDR_SB_G2:
8533 group = 2;
8534 break;
8535
8536 default:
8537 abort ();
8538 }
8539
8540 /* If REL, extract the addend from the insn. If RELA, it will
8541 have already been fetched for us. */
8542 if (globals->use_rel)
8543 {
8544 int negative = (insn & (1 << 23)) ? 1 : -1;
8545 signed_addend = negative * (insn & 0xfff);
8546 }
8547
8548 /* Compute the value (X) to go in the place. */
8549 if (r_type == R_ARM_LDR_PC_G0
8550 || r_type == R_ARM_LDR_PC_G1
8551 || r_type == R_ARM_LDR_PC_G2)
8552 /* PC relative. */
8553 signed_value = value - pc + signed_addend;
8554 else
8555 /* Section base relative. */
8556 signed_value = value - sb + signed_addend;
8557
8558 /* Calculate the value of the relevant G_{n-1} to obtain
8559 the residual at that stage. */
8560 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8561
8562 /* Check for overflow. */
8563 if (residual >= 0x1000)
8564 {
8565 (*_bfd_error_handler)
8566 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8567 input_bfd, input_section,
8568 (long) rel->r_offset, abs (signed_value), howto->name);
8569 return bfd_reloc_overflow;
8570 }
8571
8572 /* Mask out the value and U bit. */
8573 insn &= 0xff7ff000;
8574
8575 /* Set the U bit if the value to go in the place is non-negative. */
8576 if (signed_value >= 0)
8577 insn |= 1 << 23;
8578
8579 /* Encode the offset. */
8580 insn |= residual;
8581
8582 bfd_put_32 (input_bfd, insn, hit_data);
8583 }
8584 return bfd_reloc_ok;
8585
8586 case R_ARM_LDRS_PC_G0:
8587 case R_ARM_LDRS_PC_G1:
8588 case R_ARM_LDRS_PC_G2:
8589 case R_ARM_LDRS_SB_G0:
8590 case R_ARM_LDRS_SB_G1:
8591 case R_ARM_LDRS_SB_G2:
8592 {
8593 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8594 bfd_vma pc = input_section->output_section->vma
8595 + input_section->output_offset + rel->r_offset;
8596 bfd_vma sb = 0; /* See note above. */
8597 bfd_vma residual;
8598 bfd_signed_vma signed_value;
8599 int group = 0;
8600
8601 /* Determine which groups of bits to calculate. */
8602 switch (r_type)
8603 {
8604 case R_ARM_LDRS_PC_G0:
8605 case R_ARM_LDRS_SB_G0:
8606 group = 0;
8607 break;
8608
8609 case R_ARM_LDRS_PC_G1:
8610 case R_ARM_LDRS_SB_G1:
8611 group = 1;
8612 break;
8613
8614 case R_ARM_LDRS_PC_G2:
8615 case R_ARM_LDRS_SB_G2:
8616 group = 2;
8617 break;
8618
8619 default:
8620 abort ();
8621 }
8622
8623 /* If REL, extract the addend from the insn. If RELA, it will
8624 have already been fetched for us. */
8625 if (globals->use_rel)
8626 {
8627 int negative = (insn & (1 << 23)) ? 1 : -1;
8628 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8629 }
8630
8631 /* Compute the value (X) to go in the place. */
8632 if (r_type == R_ARM_LDRS_PC_G0
8633 || r_type == R_ARM_LDRS_PC_G1
8634 || r_type == R_ARM_LDRS_PC_G2)
8635 /* PC relative. */
8636 signed_value = value - pc + signed_addend;
8637 else
8638 /* Section base relative. */
8639 signed_value = value - sb + signed_addend;
8640
8641 /* Calculate the value of the relevant G_{n-1} to obtain
8642 the residual at that stage. */
8643 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8644
8645 /* Check for overflow. */
8646 if (residual >= 0x100)
8647 {
8648 (*_bfd_error_handler)
8649 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8650 input_bfd, input_section,
8651 (long) rel->r_offset, abs (signed_value), howto->name);
8652 return bfd_reloc_overflow;
8653 }
8654
8655 /* Mask out the value and U bit. */
8656 insn &= 0xff7ff0f0;
8657
8658 /* Set the U bit if the value to go in the place is non-negative. */
8659 if (signed_value >= 0)
8660 insn |= 1 << 23;
8661
8662 /* Encode the offset. */
8663 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8664
8665 bfd_put_32 (input_bfd, insn, hit_data);
8666 }
8667 return bfd_reloc_ok;
8668
8669 case R_ARM_LDC_PC_G0:
8670 case R_ARM_LDC_PC_G1:
8671 case R_ARM_LDC_PC_G2:
8672 case R_ARM_LDC_SB_G0:
8673 case R_ARM_LDC_SB_G1:
8674 case R_ARM_LDC_SB_G2:
8675 {
8676 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8677 bfd_vma pc = input_section->output_section->vma
8678 + input_section->output_offset + rel->r_offset;
8679 bfd_vma sb = 0; /* See note above. */
8680 bfd_vma residual;
8681 bfd_signed_vma signed_value;
8682 int group = 0;
8683
8684 /* Determine which groups of bits to calculate. */
8685 switch (r_type)
8686 {
8687 case R_ARM_LDC_PC_G0:
8688 case R_ARM_LDC_SB_G0:
8689 group = 0;
8690 break;
8691
8692 case R_ARM_LDC_PC_G1:
8693 case R_ARM_LDC_SB_G1:
8694 group = 1;
8695 break;
8696
8697 case R_ARM_LDC_PC_G2:
8698 case R_ARM_LDC_SB_G2:
8699 group = 2;
8700 break;
8701
8702 default:
8703 abort ();
8704 }
8705
8706 /* If REL, extract the addend from the insn. If RELA, it will
8707 have already been fetched for us. */
8708 if (globals->use_rel)
8709 {
8710 int negative = (insn & (1 << 23)) ? 1 : -1;
8711 signed_addend = negative * ((insn & 0xff) << 2);
8712 }
8713
8714 /* Compute the value (X) to go in the place. */
8715 if (r_type == R_ARM_LDC_PC_G0
8716 || r_type == R_ARM_LDC_PC_G1
8717 || r_type == R_ARM_LDC_PC_G2)
8718 /* PC relative. */
8719 signed_value = value - pc + signed_addend;
8720 else
8721 /* Section base relative. */
8722 signed_value = value - sb + signed_addend;
8723
8724 /* Calculate the value of the relevant G_{n-1} to obtain
8725 the residual at that stage. */
8726 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8727
8728 /* Check for overflow. (The absolute value to go in the place must be
8729 divisible by four and, after having been divided by four, must
8730 fit in eight bits.) */
8731 if ((residual & 0x3) != 0 || residual >= 0x400)
8732 {
8733 (*_bfd_error_handler)
8734 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8735 input_bfd, input_section,
8736 (long) rel->r_offset, abs (signed_value), howto->name);
8737 return bfd_reloc_overflow;
8738 }
8739
8740 /* Mask out the value and U bit. */
8741 insn &= 0xff7fff00;
8742
8743 /* Set the U bit if the value to go in the place is non-negative. */
8744 if (signed_value >= 0)
8745 insn |= 1 << 23;
8746
8747 /* Encode the offset. */
8748 insn |= residual >> 2;
8749
8750 bfd_put_32 (input_bfd, insn, hit_data);
8751 }
8752 return bfd_reloc_ok;
8753
8754 default:
8755 return bfd_reloc_notsupported;
8756 }
8757}
8758
8759/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8760static void
8761arm_add_to_rel (bfd * abfd,
8762 bfd_byte * address,
8763 reloc_howto_type * howto,
8764 bfd_signed_vma increment)
8765{
8766 bfd_signed_vma addend;
8767
8768 if (howto->type == R_ARM_THM_CALL
8769 || howto->type == R_ARM_THM_JUMP24)
8770 {
8771 int upper_insn, lower_insn;
8772 int upper, lower;
8773
8774 upper_insn = bfd_get_16 (abfd, address);
8775 lower_insn = bfd_get_16 (abfd, address + 2);
8776 upper = upper_insn & 0x7ff;
8777 lower = lower_insn & 0x7ff;
8778
8779 addend = (upper << 12) | (lower << 1);
8780 addend += increment;
8781 addend >>= 1;
8782
8783 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8784 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8785
8786 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8787 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8788 }
8789 else
8790 {
8791 bfd_vma contents;
8792
8793 contents = bfd_get_32 (abfd, address);
8794
8795 /* Get the (signed) value from the instruction. */
8796 addend = contents & howto->src_mask;
8797 if (addend & ((howto->src_mask + 1) >> 1))
8798 {
8799 bfd_signed_vma mask;
8800
8801 mask = -1;
8802 mask &= ~ howto->src_mask;
8803 addend |= mask;
8804 }
8805
8806 /* Add in the increment (which is a byte value). */
8807 switch (howto->type)
8808 {
8809 default:
8810 addend += increment;
8811 break;
8812
8813 case R_ARM_PC24:
8814 case R_ARM_PLT32:
8815 case R_ARM_CALL:
8816 case R_ARM_JUMP24:
8817 addend <<= howto->size;
8818 addend += increment;
8819
8820 /* Should we check for overflow here ? */
8821
8822 /* Drop any undesired bits. */
8823 addend >>= howto->rightshift;
8824 break;
8825 }
8826
8827 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8828
8829 bfd_put_32 (abfd, contents, address);
8830 }
8831}
8832
8833#define IS_ARM_TLS_RELOC(R_TYPE) \
8834 ((R_TYPE) == R_ARM_TLS_GD32 \
8835 || (R_TYPE) == R_ARM_TLS_LDO32 \
8836 || (R_TYPE) == R_ARM_TLS_LDM32 \
8837 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8838 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8839 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8840 || (R_TYPE) == R_ARM_TLS_LE32 \
8841 || (R_TYPE) == R_ARM_TLS_IE32)
8842
8843/* Relocate an ARM ELF section. */
8844
8845static bfd_boolean
8846elf32_arm_relocate_section (bfd * output_bfd,
8847 struct bfd_link_info * info,
8848 bfd * input_bfd,
8849 asection * input_section,
8850 bfd_byte * contents,
8851 Elf_Internal_Rela * relocs,
8852 Elf_Internal_Sym * local_syms,
8853 asection ** local_sections)
8854{
8855 Elf_Internal_Shdr *symtab_hdr;
8856 struct elf_link_hash_entry **sym_hashes;
8857 Elf_Internal_Rela *rel;
8858 Elf_Internal_Rela *relend;
8859 const char *name;
8860 struct elf32_arm_link_hash_table * globals;
8861
8862 globals = elf32_arm_hash_table (info);
8863 if (globals == NULL)
8864 return FALSE;
8865
8866 symtab_hdr = & elf_symtab_hdr (input_bfd);
8867 sym_hashes = elf_sym_hashes (input_bfd);
8868
8869 rel = relocs;
8870 relend = relocs + input_section->reloc_count;
8871 for (; rel < relend; rel++)
8872 {
8873 int r_type;
8874 reloc_howto_type * howto;
8875 unsigned long r_symndx;
8876 Elf_Internal_Sym * sym;
8877 asection * sec;
8878 struct elf_link_hash_entry * h;
8879 bfd_vma relocation;
8880 bfd_reloc_status_type r;
8881 arelent bfd_reloc;
8882 char sym_type;
8883 bfd_boolean unresolved_reloc = FALSE;
8884 char *error_message = NULL;
8885
8886 r_symndx = ELF32_R_SYM (rel->r_info);
8887 r_type = ELF32_R_TYPE (rel->r_info);
8888 r_type = arm_real_reloc_type (globals, r_type);
8889
8890 if ( r_type == R_ARM_GNU_VTENTRY
8891 || r_type == R_ARM_GNU_VTINHERIT)
8892 continue;
8893
8894 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8895 howto = bfd_reloc.howto;
8896
8897 h = NULL;
8898 sym = NULL;
8899 sec = NULL;
8900
8901 if (r_symndx < symtab_hdr->sh_info)
8902 {
8903 sym = local_syms + r_symndx;
8904 sym_type = ELF32_ST_TYPE (sym->st_info);
8905 sec = local_sections[r_symndx];
8906
8907 /* An object file might have a reference to a local
8908 undefined symbol. This is a daft object file, but we
8909 should at least do something about it. V4BX & NONE
8910 relocations do not use the symbol and are explicitly
8911 allowed to use the undefined symbol, so allow those.
8912 Likewise for relocations against STN_UNDEF. */
8913 if (r_type != R_ARM_V4BX
8914 && r_type != R_ARM_NONE
8915 && r_symndx != STN_UNDEF
8916 && bfd_is_und_section (sec)
8917 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
8918 {
8919 if (!info->callbacks->undefined_symbol
8920 (info, bfd_elf_string_from_elf_section
8921 (input_bfd, symtab_hdr->sh_link, sym->st_name),
8922 input_bfd, input_section,
8923 rel->r_offset, TRUE))
8924 return FALSE;
8925 }
8926
8927 if (globals->use_rel)
8928 {
8929 relocation = (sec->output_section->vma
8930 + sec->output_offset
8931 + sym->st_value);
8932 if (!info->relocatable
8933 && (sec->flags & SEC_MERGE)
8934 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8935 {
8936 asection *msec;
8937 bfd_vma addend, value;
8938
8939 switch (r_type)
8940 {
8941 case R_ARM_MOVW_ABS_NC:
8942 case R_ARM_MOVT_ABS:
8943 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8944 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8945 addend = (addend ^ 0x8000) - 0x8000;
8946 break;
8947
8948 case R_ARM_THM_MOVW_ABS_NC:
8949 case R_ARM_THM_MOVT_ABS:
8950 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8951 << 16;
8952 value |= bfd_get_16 (input_bfd,
8953 contents + rel->r_offset + 2);
8954 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8955 | ((value & 0x04000000) >> 15);
8956 addend = (addend ^ 0x8000) - 0x8000;
8957 break;
8958
8959 default:
8960 if (howto->rightshift
8961 || (howto->src_mask & (howto->src_mask + 1)))
8962 {
8963 (*_bfd_error_handler)
8964 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8965 input_bfd, input_section,
8966 (long) rel->r_offset, howto->name);
8967 return FALSE;
8968 }
8969
8970 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8971
8972 /* Get the (signed) value from the instruction. */
8973 addend = value & howto->src_mask;
8974 if (addend & ((howto->src_mask + 1) >> 1))
8975 {
8976 bfd_signed_vma mask;
8977
8978 mask = -1;
8979 mask &= ~ howto->src_mask;
8980 addend |= mask;
8981 }
8982 break;
8983 }
8984
8985 msec = sec;
8986 addend =
8987 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8988 - relocation;
8989 addend += msec->output_section->vma + msec->output_offset;
8990
8991 /* Cases here must match those in the preceding
8992 switch statement. */
8993 switch (r_type)
8994 {
8995 case R_ARM_MOVW_ABS_NC:
8996 case R_ARM_MOVT_ABS:
8997 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8998 | (addend & 0xfff);
8999 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
9000 break;
9001
9002 case R_ARM_THM_MOVW_ABS_NC:
9003 case R_ARM_THM_MOVT_ABS:
9004 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
9005 | (addend & 0xff) | ((addend & 0x0800) << 15);
9006 bfd_put_16 (input_bfd, value >> 16,
9007 contents + rel->r_offset);
9008 bfd_put_16 (input_bfd, value,
9009 contents + rel->r_offset + 2);
9010 break;
9011
9012 default:
9013 value = (value & ~ howto->dst_mask)
9014 | (addend & howto->dst_mask);
9015 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
9016 break;
9017 }
9018 }
9019 }
9020 else
9021 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
9022 }
9023 else
9024 {
9025 bfd_boolean warned;
9026
9027 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
9028 r_symndx, symtab_hdr, sym_hashes,
9029 h, sec, relocation,
9030 unresolved_reloc, warned);
9031
9032 sym_type = h->type;
9033 }
9034
9035 if (sec != NULL && elf_discarded_section (sec))
9036 {
9037 /* For relocs against symbols from removed linkonce sections,
9038 or sections discarded by a linker script, we just want the
9039 section contents zeroed. Avoid any special processing. */
9040 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
9041 rel->r_info = 0;
9042 rel->r_addend = 0;
9043 continue;
9044 }
9045
9046 if (info->relocatable)
9047 {
9048 /* This is a relocatable link. We don't have to change
9049 anything, unless the reloc is against a section symbol,
9050 in which case we have to adjust according to where the
9051 section symbol winds up in the output section. */
9052 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
9053 {
9054 if (globals->use_rel)
9055 arm_add_to_rel (input_bfd, contents + rel->r_offset,
9056 howto, (bfd_signed_vma) sec->output_offset);
9057 else
9058 rel->r_addend += sec->output_offset;
9059 }
9060 continue;
9061 }
9062
9063 if (h != NULL)
9064 name = h->root.root.string;
9065 else
9066 {
9067 name = (bfd_elf_string_from_elf_section
9068 (input_bfd, symtab_hdr->sh_link, sym->st_name));
9069 if (name == NULL || *name == '\0')
9070 name = bfd_section_name (input_bfd, sec);
9071 }
9072
9073 if (r_symndx != STN_UNDEF
9074 && r_type != R_ARM_NONE
9075 && (h == NULL
9076 || h->root.type == bfd_link_hash_defined
9077 || h->root.type == bfd_link_hash_defweak)
9078 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
9079 {
9080 (*_bfd_error_handler)
9081 ((sym_type == STT_TLS
9082 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
9083 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
9084 input_bfd,
9085 input_section,
9086 (long) rel->r_offset,
9087 howto->name,
9088 name);
9089 }
9090
9091 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
9092 input_section, contents, rel,
9093 relocation, info, sec, name,
9094 (h ? ELF_ST_TYPE (h->type) :
9095 ELF_ST_TYPE (sym->st_info)), h,
9096 &unresolved_reloc, &error_message);
9097
9098 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
9099 because such sections are not SEC_ALLOC and thus ld.so will
9100 not process them. */
9101 if (unresolved_reloc
9102 && !((input_section->flags & SEC_DEBUGGING) != 0
9103 && h->def_dynamic))
9104 {
9105 (*_bfd_error_handler)
9106 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
9107 input_bfd,
9108 input_section,
9109 (long) rel->r_offset,
9110 howto->name,
9111 h->root.root.string);
9112 return FALSE;
9113 }
9114
9115 if (r != bfd_reloc_ok)
9116 {
9117 switch (r)
9118 {
9119 case bfd_reloc_overflow:
9120 /* If the overflowing reloc was to an undefined symbol,
9121 we have already printed one error message and there
9122 is no point complaining again. */
9123 if ((! h ||
9124 h->root.type != bfd_link_hash_undefined)
9125 && (!((*info->callbacks->reloc_overflow)
9126 (info, (h ? &h->root : NULL), name, howto->name,
9127 (bfd_vma) 0, input_bfd, input_section,
9128 rel->r_offset))))
9129 return FALSE;
9130 break;
9131
9132 case bfd_reloc_undefined:
9133 if (!((*info->callbacks->undefined_symbol)
9134 (info, name, input_bfd, input_section,
9135 rel->r_offset, TRUE)))
9136 return FALSE;
9137 break;
9138
9139 case bfd_reloc_outofrange:
9140 error_message = _("out of range");
9141 goto common_error;
9142
9143 case bfd_reloc_notsupported:
9144 error_message = _("unsupported relocation");
9145 goto common_error;
9146
9147 case bfd_reloc_dangerous:
9148 /* error_message should already be set. */
9149 goto common_error;
9150
9151 default:
9152 error_message = _("unknown error");
9153 /* Fall through. */
9154
9155 common_error:
9156 BFD_ASSERT (error_message != NULL);
9157 if (!((*info->callbacks->reloc_dangerous)
9158 (info, error_message, input_bfd, input_section,
9159 rel->r_offset)))
9160 return FALSE;
9161 break;
9162 }
9163 }
9164 }
9165
9166 return TRUE;
9167}
9168
9169/* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
9170 adds the edit to the start of the list. (The list must be built in order of
9171 ascending TINDEX: the function's callers are primarily responsible for
9172 maintaining that condition). */
9173
9174static void
9175add_unwind_table_edit (arm_unwind_table_edit **head,
9176 arm_unwind_table_edit **tail,
9177 arm_unwind_edit_type type,
9178 asection *linked_section,
9179 unsigned int tindex)
9180{
9181 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
9182 xmalloc (sizeof (arm_unwind_table_edit));
9183
9184 new_edit->type = type;
9185 new_edit->linked_section = linked_section;
9186 new_edit->index = tindex;
9187
9188 if (tindex > 0)
9189 {
9190 new_edit->next = NULL;
9191
9192 if (*tail)
9193 (*tail)->next = new_edit;
9194
9195 (*tail) = new_edit;
9196
9197 if (!*head)
9198 (*head) = new_edit;
9199 }
9200 else
9201 {
9202 new_edit->next = *head;
9203
9204 if (!*tail)
9205 *tail = new_edit;
9206
9207 *head = new_edit;
9208 }
9209}
9210
9211static _arm_elf_section_data *get_arm_elf_section_data (asection *);
9212
9213 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
9214static void
9215 adjust_exidx_size (asection *exidx_sec, int adjust)
9216{
9217 asection *out_sec;
9218
9219 if (!exidx_sec->rawsize)
9220 exidx_sec->rawsize = exidx_sec->size;
9221
9222 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9223 out_sec = exidx_sec->output_section;
9224 /* Adjust size of output section. */
9225  bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
9226}
9227
9228/* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9229static void
9230insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
9231{
9232 struct _arm_elf_section_data *exidx_arm_data;
9233
9234 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9235 add_unwind_table_edit (
9236 &exidx_arm_data->u.exidx.unwind_edit_list,
9237 &exidx_arm_data->u.exidx.unwind_edit_tail,
9238 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
9239
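  /* The marker added above becomes one extra 8-byte table entry (a prel31
     text offset in the first word and the value 1, EXIDX_CANTUNWIND, in the
     second), hence the fixed size adjustment of 8 below.  */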
9240  adjust_exidx_size (exidx_sec, 8);
9241}
9242
9243/* Scan .ARM.exidx tables, and create a list describing edits which should be
9244 made to those tables, such that:
9245
9246 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9247 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9248 codes which have been inlined into the index).
9249
9250   If MERGE_EXIDX_ENTRIES is false, duplicate inlined unwind entries are
   not merged; adjacent EXIDX_CANTUNWIND duplicates always are.
9251
9252 The edits are applied when the tables are written
9253 (in elf32_arm_write_section).
9254*/
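
/* For instance, if three consecutive .ARM.exidx entries are all
   EXIDX_CANTUNWIND, only the first is kept: the second and third become
   DELETE_EXIDX_ENTRY edits and 16 bytes are trimmed from the table.
   Identical inlined unwind words (bit 31 set) are collapsed in the same way
   when MERGE_EXIDX_ENTRIES is true.  */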
9255
9256bfd_boolean
9257elf32_arm_fix_exidx_coverage (asection **text_section_order,
9258 unsigned int num_text_sections,
9259 struct bfd_link_info *info,
9260 bfd_boolean merge_exidx_entries)
9261{
9262 bfd *inp;
9263 unsigned int last_second_word = 0, i;
9264 asection *last_exidx_sec = NULL;
9265 asection *last_text_sec = NULL;
9266 int last_unwind_type = -1;
9267
9268  /* Walk over all EXIDX sections, and create backlinks from the corresponding
9269 text sections. */
9270 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9271 {
9272 asection *sec;
9273
9274 for (sec = inp->sections; sec != NULL; sec = sec->next)
9275 {
9276 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9277 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9278
9279 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9280 continue;
9281
9282 if (elf_sec->linked_to)
9283 {
9284 Elf_Internal_Shdr *linked_hdr
9285 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9286 struct _arm_elf_section_data *linked_sec_arm_data
9287 = get_arm_elf_section_data (linked_hdr->bfd_section);
9288
9289 if (linked_sec_arm_data == NULL)
9290 continue;
9291
9292 /* Link this .ARM.exidx section back from the text section it
9293 describes. */
9294 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9295 }
9296 }
9297 }
9298
9299  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
9300 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9301 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
9302
9303 for (i = 0; i < num_text_sections; i++)
9304 {
9305 asection *sec = text_section_order[i];
9306 asection *exidx_sec;
9307 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9308 struct _arm_elf_section_data *exidx_arm_data;
9309 bfd_byte *contents = NULL;
9310 int deleted_exidx_bytes = 0;
9311 bfd_vma j;
9312 arm_unwind_table_edit *unwind_edit_head = NULL;
9313 arm_unwind_table_edit *unwind_edit_tail = NULL;
9314 Elf_Internal_Shdr *hdr;
9315 bfd *ibfd;
9316
9317 if (arm_data == NULL)
9318 continue;
9319
9320 exidx_sec = arm_data->u.text.arm_exidx_sec;
9321 if (exidx_sec == NULL)
9322 {
9323 /* Section has no unwind data. */
9324 if (last_unwind_type == 0 || !last_exidx_sec)
9325 continue;
9326
9327 /* Ignore zero sized sections. */
9328 if (sec->size == 0)
9329 continue;
9330
9331	  insert_cantunwind_after (last_text_sec, last_exidx_sec);
9332 last_unwind_type = 0;
9333 continue;
9334 }
9335
9336 /* Skip /DISCARD/ sections. */
9337 if (bfd_is_abs_section (exidx_sec->output_section))
9338 continue;
9339
9340 hdr = &elf_section_data (exidx_sec)->this_hdr;
9341 if (hdr->sh_type != SHT_ARM_EXIDX)
9342 continue;
9343
9344 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9345 if (exidx_arm_data == NULL)
9346 continue;
9347
9348 ibfd = exidx_sec->owner;
9349
9350 if (hdr->contents != NULL)
9351 contents = hdr->contents;
9352 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9353	/* Unable to read the section contents, so skip this table.  */
9354 continue;
9355
9356 for (j = 0; j < hdr->sh_size; j += 8)
9357 {
9358 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9359 int unwind_type;
9360 int elide = 0;
9361
9362 /* An EXIDX_CANTUNWIND entry. */
9363 if (second_word == 1)
9364 {
9365 if (last_unwind_type == 0)
9366 elide = 1;
9367 unwind_type = 0;
9368 }
9369 /* Inlined unwinding data. Merge if equal to previous. */
9370 else if ((second_word & 0x80000000) != 0)
9371 {
9372 if (merge_exidx_entries
9373 && last_second_word == second_word && last_unwind_type == 1)
9374 elide = 1;
9375 unwind_type = 1;
9376 last_second_word = second_word;
9377 }
9378 /* Normal table entry. In theory we could merge these too,
9379 but duplicate entries are likely to be much less common. */
9380 else
9381 unwind_type = 2;
9382
9383 if (elide)
9384 {
9385 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9386 DELETE_EXIDX_ENTRY, NULL, j / 8);
9387
9388 deleted_exidx_bytes += 8;
9389 }
9390
9391 last_unwind_type = unwind_type;
9392 }
9393
9394 /* Free contents if we allocated it ourselves. */
9395 if (contents != hdr->contents)
9396 free (contents);
9397
9398 /* Record edits to be applied later (in elf32_arm_write_section). */
9399 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9400 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9401
9402 if (deleted_exidx_bytes > 0)
9403	adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
9404
9405 last_exidx_sec = exidx_sec;
9406 last_text_sec = sec;
9407 }
9408
9409 /* Add terminating CANTUNWIND entry. */
9410 if (last_exidx_sec && last_unwind_type != 0)
9411    insert_cantunwind_after (last_text_sec, last_exidx_sec);
9412
9413 return TRUE;
9414}
9415
9416static bfd_boolean
9417elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9418 bfd *ibfd, const char *name)
9419{
9420 asection *sec, *osec;
9421
9422 sec = bfd_get_section_by_name (ibfd, name);
9423 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9424 return TRUE;
9425
9426 osec = sec->output_section;
9427 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9428 return TRUE;
9429
9430 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9431 sec->output_offset, sec->size))
9432 return FALSE;
9433
9434 return TRUE;
9435}
9436
9437static bfd_boolean
9438elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9439{
9440 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9441 asection *sec, *osec;
9442
9443 if (globals == NULL)
9444 return FALSE;
9445
9446 /* Invoke the regular ELF backend linker to do all the work. */
9447 if (!bfd_elf_final_link (abfd, info))
9448 return FALSE;
9449
9450  /* Process stub sections (e.g. BE8 encoding, ...).  */
9451  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
9452  int i;
9453  for (i = 0; i < htab->top_id; i++)
9454 {
9455 sec = htab->stub_group[i].stub_sec;
9456 /* Only process it once, in its link_sec slot. */
9457 if (sec && i == htab->stub_group[i].link_sec->id)
9458 {
9459 osec = sec->output_section;
9460 elf32_arm_write_section (abfd, info, sec, sec->contents);
9461 if (! bfd_set_section_contents (abfd, osec, sec->contents,
9462 sec->output_offset, sec->size))
9463 return FALSE;
9464 }
9465 }
9466
9467 /* Write out any glue sections now that we have created all the
9468 stubs. */
9469 if (globals->bfd_of_glue_owner != NULL)
9470 {
9471 if (! elf32_arm_output_glue_section (info, abfd,
9472 globals->bfd_of_glue_owner,
9473 ARM2THUMB_GLUE_SECTION_NAME))
9474 return FALSE;
9475
9476 if (! elf32_arm_output_glue_section (info, abfd,
9477 globals->bfd_of_glue_owner,
9478 THUMB2ARM_GLUE_SECTION_NAME))
9479 return FALSE;
9480
9481 if (! elf32_arm_output_glue_section (info, abfd,
9482 globals->bfd_of_glue_owner,
9483 VFP11_ERRATUM_VENEER_SECTION_NAME))
9484 return FALSE;
9485
9486 if (! elf32_arm_output_glue_section (info, abfd,
9487 globals->bfd_of_glue_owner,
9488 ARM_BX_GLUE_SECTION_NAME))
9489 return FALSE;
9490 }
9491
9492 return TRUE;
9493}
9494
9495/* Set the right machine number. */
9496
9497static bfd_boolean
9498elf32_arm_object_p (bfd *abfd)
9499{
9500 unsigned int mach;
9501
9502 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9503
9504 if (mach != bfd_mach_arm_unknown)
9505 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9506
9507 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9508 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9509
9510 else
9511 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9512
9513 return TRUE;
9514}
9515
9516/* Function to keep ARM specific flags in the ELF header. */
9517
9518static bfd_boolean
9519elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9520{
9521 if (elf_flags_init (abfd)
9522 && elf_elfheader (abfd)->e_flags != flags)
9523 {
9524 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9525 {
9526 if (flags & EF_ARM_INTERWORK)
9527 (*_bfd_error_handler)
9528 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9529 abfd);
9530 else
9531 _bfd_error_handler
9532 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9533 abfd);
9534 }
9535 }
9536 else
9537 {
9538 elf_elfheader (abfd)->e_flags = flags;
9539 elf_flags_init (abfd) = TRUE;
9540 }
9541
9542 return TRUE;
9543}
9544
9545/* Copy backend specific data from one object module to another. */
9546
9547static bfd_boolean
9548elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9549{
9550 flagword in_flags;
9551 flagword out_flags;
9552
9553 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9554 return TRUE;
9555
9556 in_flags = elf_elfheader (ibfd)->e_flags;
9557 out_flags = elf_elfheader (obfd)->e_flags;
9558
9559 if (elf_flags_init (obfd)
9560 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9561 && in_flags != out_flags)
9562 {
9563 /* Cannot mix APCS26 and APCS32 code. */
9564 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9565 return FALSE;
9566
9567 /* Cannot mix float APCS and non-float APCS code. */
9568 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9569 return FALSE;
9570
9571 /* If the src and dest have different interworking flags
9572 then turn off the interworking bit. */
9573 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9574 {
9575 if (out_flags & EF_ARM_INTERWORK)
9576 _bfd_error_handler
9577 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9578 obfd, ibfd);
9579
9580 in_flags &= ~EF_ARM_INTERWORK;
9581 }
9582
9583 /* Likewise for PIC, though don't warn for this case. */
9584 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9585 in_flags &= ~EF_ARM_PIC;
9586 }
9587
9588 elf_elfheader (obfd)->e_flags = in_flags;
9589 elf_flags_init (obfd) = TRUE;
9590
9591 /* Also copy the EI_OSABI field. */
9592 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9593 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9594
9595 /* Copy object attributes. */
9596 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9597
9598 return TRUE;
9599}
9600
9601/* Values for Tag_ABI_PCS_R9_use. */
9602enum
9603{
9604 AEABI_R9_V6,
9605 AEABI_R9_SB,
9606 AEABI_R9_TLS,
9607 AEABI_R9_unused
9608};
9609
9610/* Values for Tag_ABI_PCS_RW_data. */
9611enum
9612{
9613 AEABI_PCS_RW_data_absolute,
9614 AEABI_PCS_RW_data_PCrel,
9615 AEABI_PCS_RW_data_SBrel,
9616 AEABI_PCS_RW_data_unused
9617};
9618
9619/* Values for Tag_ABI_enum_size. */
9620enum
9621{
9622 AEABI_enum_unused,
9623 AEABI_enum_short,
9624 AEABI_enum_wide,
9625 AEABI_enum_forced_wide
9626};
9627
9628/* Determine whether an object attribute tag takes an integer, a
9629 string or both. */
9630
9631static int
9632elf32_arm_obj_attrs_arg_type (int tag)
9633{
9634 if (tag == Tag_compatibility)
9635 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9636 else if (tag == Tag_nodefaults)
9637 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9638 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9639 return ATTR_TYPE_FLAG_STR_VAL;
9640 else if (tag < 32)
9641 return ATTR_TYPE_FLAG_INT_VAL;
9642 else
9643 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9644}
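
/* For example: Tag_CPU_raw_name and Tag_CPU_name take strings,
   Tag_compatibility takes both an integer and a string, any other tag below
   32 takes an integer, and for tags of 32 and above odd-numbered tags take
   strings while even-numbered tags take integers.  */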
9645
9646/* The ABI defines that Tag_conformance should be emitted first, and that
9647 Tag_nodefaults should be second (if either is defined). This sets those
9648 two positions, and bumps up the position of all the remaining tags to
9649 compensate. */
9650static int
9651elf32_arm_obj_attrs_order (int num)
9652{
9653 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
9654 return Tag_conformance;
9655 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
9656 return Tag_nodefaults;
9657 if ((num - 2) < Tag_nodefaults)
9658 return num - 2;
9659 if ((num - 1) < Tag_conformance)
9660 return num - 1;
9661 return num;
9662}
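
/* For instance, assuming the AEABI tag numbers Tag_nodefaults == 64 and
   Tag_conformance == 67, the tags are written out as 67, then 64, then the
   remaining known tags below 64 in ascending order, then 65 and 66, with
   tags from 68 upwards keeping their natural positions.  */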
9663
9664/* Read the architecture from the Tag_also_compatible_with attribute, if any.
9665 Returns -1 if no architecture could be read. */
9666
9667static int
9668get_secondary_compatible_arch (bfd *abfd)
9669{
9670 obj_attribute *attr =
9671 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9672
9673 /* Note: the tag and its argument below are uleb128 values, though
9674 currently-defined values fit in one byte for each. */
9675 if (attr->s
9676 && attr->s[0] == Tag_CPU_arch
9677 && (attr->s[1] & 128) != 128
9678 && attr->s[2] == 0)
9679 return attr->s[1];
9680
9681 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9682 return -1;
9683}
9684
9685/* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9686 The tag is removed if ARCH is -1. */
9687
9688static void
9689set_secondary_compatible_arch (bfd *abfd, int arch)
9690{
9691 obj_attribute *attr =
9692 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9693
9694 if (arch == -1)
9695 {
9696 attr->s = NULL;
9697 return;
9698 }
9699
9700 /* Note: the tag and its argument below are uleb128 values, though
9701 currently-defined values fit in one byte for each. */
9702 if (!attr->s)
9703 attr->s = (char *) bfd_alloc (abfd, 3);
9704 attr->s[0] = Tag_CPU_arch;
9705 attr->s[1] = arch;
9706 attr->s[2] = '\0';
9707}
9708
9709/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9710 into account. */
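/* For example, combining an output of v6T2 with an input of v6K looks up
   comb[V6K - V6T2][V6T2], i.e. v6k[V6T2] in the tables below, giving v7:
   the earliest architecture that is a superset of both.  */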
9711
9712static int
9713tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9714 int newtag, int secondary_compat)
9715{
9716#define T(X) TAG_CPU_ARCH_##X
9717 int tagl, tagh, result;
9718 const int v6t2[] =
9719 {
9720 T(V6T2), /* PRE_V4. */
9721 T(V6T2), /* V4. */
9722 T(V6T2), /* V4T. */
9723 T(V6T2), /* V5T. */
9724 T(V6T2), /* V5TE. */
9725 T(V6T2), /* V5TEJ. */
9726 T(V6T2), /* V6. */
9727 T(V7), /* V6KZ. */
9728 T(V6T2) /* V6T2. */
9729 };
9730 const int v6k[] =
9731 {
9732 T(V6K), /* PRE_V4. */
9733 T(V6K), /* V4. */
9734 T(V6K), /* V4T. */
9735 T(V6K), /* V5T. */
9736 T(V6K), /* V5TE. */
9737 T(V6K), /* V5TEJ. */
9738 T(V6K), /* V6. */
9739 T(V6KZ), /* V6KZ. */
9740 T(V7), /* V6T2. */
9741 T(V6K) /* V6K. */
9742 };
9743 const int v7[] =
9744 {
9745 T(V7), /* PRE_V4. */
9746 T(V7), /* V4. */
9747 T(V7), /* V4T. */
9748 T(V7), /* V5T. */
9749 T(V7), /* V5TE. */
9750 T(V7), /* V5TEJ. */
9751 T(V7), /* V6. */
9752 T(V7), /* V6KZ. */
9753 T(V7), /* V6T2. */
9754 T(V7), /* V6K. */
9755 T(V7) /* V7. */
9756 };
9757 const int v6_m[] =
9758 {
9759 -1, /* PRE_V4. */
9760 -1, /* V4. */
9761 T(V6K), /* V4T. */
9762 T(V6K), /* V5T. */
9763 T(V6K), /* V5TE. */
9764 T(V6K), /* V5TEJ. */
9765 T(V6K), /* V6. */
9766 T(V6KZ), /* V6KZ. */
9767 T(V7), /* V6T2. */
9768 T(V6K), /* V6K. */
9769 T(V7), /* V7. */
9770 T(V6_M) /* V6_M. */
9771 };
9772 const int v6s_m[] =
9773 {
9774 -1, /* PRE_V4. */
9775 -1, /* V4. */
9776 T(V6K), /* V4T. */
9777 T(V6K), /* V5T. */
9778 T(V6K), /* V5TE. */
9779 T(V6K), /* V5TEJ. */
9780 T(V6K), /* V6. */
9781 T(V6KZ), /* V6KZ. */
9782 T(V7), /* V6T2. */
9783 T(V6K), /* V6K. */
9784 T(V7), /* V7. */
9785 T(V6S_M), /* V6_M. */
9786 T(V6S_M) /* V6S_M. */
9787 };
9788 const int v7e_m[] =
9789 {
9790 -1, /* PRE_V4. */
9791 -1, /* V4. */
9792 T(V7E_M), /* V4T. */
9793 T(V7E_M), /* V5T. */
9794 T(V7E_M), /* V5TE. */
9795 T(V7E_M), /* V5TEJ. */
9796 T(V7E_M), /* V6. */
9797 T(V7E_M), /* V6KZ. */
9798 T(V7E_M), /* V6T2. */
9799 T(V7E_M), /* V6K. */
9800 T(V7E_M), /* V7. */
9801 T(V7E_M), /* V6_M. */
9802 T(V7E_M), /* V6S_M. */
9803 T(V7E_M) /* V7E_M. */
9804 };
9805 const int v4t_plus_v6_m[] =
9806 {
9807 -1, /* PRE_V4. */
9808 -1, /* V4. */
9809 T(V4T), /* V4T. */
9810 T(V5T), /* V5T. */
9811 T(V5TE), /* V5TE. */
9812 T(V5TEJ), /* V5TEJ. */
9813 T(V6), /* V6. */
9814 T(V6KZ), /* V6KZ. */
9815 T(V6T2), /* V6T2. */
9816 T(V6K), /* V6K. */
9817 T(V7), /* V7. */
9818 T(V6_M), /* V6_M. */
9819 T(V6S_M), /* V6S_M. */
9820 T(V7E_M), /* V7E_M. */
9821 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9822 };
9823 const int *comb[] =
9824 {
9825 v6t2,
9826 v6k,
9827 v7,
9828 v6_m,
9829 v6s_m,
9830 v7e_m,
9831 /* Pseudo-architecture. */
9832 v4t_plus_v6_m
9833 };
9834
9835 /* Check we've not got a higher architecture than we know about. */
9836
9837 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
9838 {
9839 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9840 return -1;
9841 }
9842
9843 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9844
9845 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9846 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9847 oldtag = T(V4T_PLUS_V6_M);
9848
9849 /* And override the new tag if we have a Tag_also_compatible_with on the
9850 input. */
9851
9852 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9853 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9854 newtag = T(V4T_PLUS_V6_M);
9855
9856 tagl = (oldtag < newtag) ? oldtag : newtag;
9857 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9858
9859 /* Architectures before V6KZ add features monotonically. */
9860 if (tagh <= TAG_CPU_ARCH_V6KZ)
9861 return result;
9862
9863 result = comb[tagh - T(V6T2)][tagl];
9864
9865 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9866 as the canonical version. */
9867 if (result == T(V4T_PLUS_V6_M))
9868 {
9869 result = T(V4T);
9870 *secondary_compat_out = T(V6_M);
9871 }
9872 else
9873 *secondary_compat_out = -1;
9874
9875 if (result == -1)
9876 {
9877 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9878 ibfd, oldtag, newtag);
9879 return -1;
9880 }
9881
9882 return result;
9883#undef T
9884}
9885
9886/* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9887 are conflicting attributes. */
9888
9889static bfd_boolean
9890elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9891{
9892 obj_attribute *in_attr;
9893 obj_attribute *out_attr;
9894 obj_attribute_list *in_list;
9895 obj_attribute_list *out_list;
9896 obj_attribute_list **out_listp;
9897 /* Some tags have 0 = don't care, 1 = strong requirement,
9898 2 = weak requirement. */
9899 static const int order_021[3] = {0, 2, 1};
9900 int i;
9901 bfd_boolean result = TRUE;
9902
9903 /* Skip the linker stubs file. This preserves previous behavior
9904 of accepting unknown attributes in the first input file - but
9905 is that a bug? */
9906 if (ibfd->flags & BFD_LINKER_CREATED)
9907 return TRUE;
9908
9909 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9910 {
9911 /* This is the first object. Copy the attributes. */
9912 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9913
9914 out_attr = elf_known_obj_attributes_proc (obfd);
9915
9916 /* Use the Tag_null value to indicate the attributes have been
9917 initialized. */
9918 out_attr[0].i = 1;
9919
9920 /* We do not output objects with Tag_MPextension_use_legacy - we move
9921 the attribute's value to Tag_MPextension_use. */
9922 if (out_attr[Tag_MPextension_use_legacy].i != 0)
9923 {
9924 if (out_attr[Tag_MPextension_use].i != 0
9925 && out_attr[Tag_MPextension_use_legacy].i
9926 != out_attr[Tag_MPextension_use].i)
9927 {
9928 _bfd_error_handler
9929 (_("Error: %B has both the current and legacy "
9930 "Tag_MPextension_use attributes"), ibfd);
9931 result = FALSE;
9932 }
9933
9934 out_attr[Tag_MPextension_use] =
9935 out_attr[Tag_MPextension_use_legacy];
9936 out_attr[Tag_MPextension_use_legacy].type = 0;
9937 out_attr[Tag_MPextension_use_legacy].i = 0;
9938 }
9939
9940 return result;
9941 }
9942
9943 in_attr = elf_known_obj_attributes_proc (ibfd);
9944 out_attr = elf_known_obj_attributes_proc (obfd);
9945 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9946 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9947 {
9948 /* Ignore mismatches if the object doesn't use floating point. */
9949 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9950 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9951 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9952 {
9953 _bfd_error_handler
9954 (_("error: %B uses VFP register arguments, %B does not"),
9955 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
9956 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
9957 result = FALSE;
9958 }
9959 }
9960
9961 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9962 {
9963 /* Merge this attribute with existing attributes. */
9964 switch (i)
9965 {
9966 case Tag_CPU_raw_name:
9967 case Tag_CPU_name:
9968 /* These are merged after Tag_CPU_arch. */
9969 break;
9970
9971 case Tag_ABI_optimization_goals:
9972 case Tag_ABI_FP_optimization_goals:
9973 /* Use the first value seen. */
9974 break;
9975
9976 case Tag_CPU_arch:
9977 {
9978 int secondary_compat = -1, secondary_compat_out = -1;
9979 unsigned int saved_out_attr = out_attr[i].i;
9980 static const char *name_table[] = {
9981 /* These aren't real CPU names, but we can't guess
9982 that from the architecture version alone. */
9983 "Pre v4",
9984 "ARM v4",
9985 "ARM v4T",
9986 "ARM v5T",
9987 "ARM v5TE",
9988 "ARM v5TEJ",
9989 "ARM v6",
9990 "ARM v6KZ",
9991 "ARM v6T2",
9992 "ARM v6K",
9993 "ARM v7",
9994 "ARM v6-M",
9995 "ARM v6S-M"
9996 };
9997
9998 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9999 secondary_compat = get_secondary_compatible_arch (ibfd);
10000 secondary_compat_out = get_secondary_compatible_arch (obfd);
10001 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
10002 &secondary_compat_out,
10003 in_attr[i].i,
10004 secondary_compat);
10005 set_secondary_compatible_arch (obfd, secondary_compat_out);
10006
10007 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
10008 if (out_attr[i].i == saved_out_attr)
10009 ; /* Leave the names alone. */
10010 else if (out_attr[i].i == in_attr[i].i)
10011 {
10012 /* The output architecture has been changed to match the
10013 input architecture. Use the input names. */
10014 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
10015 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
10016 : NULL;
10017 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
10018 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
10019 : NULL;
10020 }
10021 else
10022 {
10023 out_attr[Tag_CPU_name].s = NULL;
10024 out_attr[Tag_CPU_raw_name].s = NULL;
10025 }
10026
10027 /* If we still don't have a value for Tag_CPU_name,
10028 make one up now. Tag_CPU_raw_name remains blank. */
10029 if (out_attr[Tag_CPU_name].s == NULL
10030 && out_attr[i].i < ARRAY_SIZE (name_table))
10031 out_attr[Tag_CPU_name].s =
10032 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
10033 }
10034 break;
10035
10036 case Tag_ARM_ISA_use:
10037 case Tag_THUMB_ISA_use:
10038 case Tag_WMMX_arch:
10039 case Tag_Advanced_SIMD_arch:
10040 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
10041 case Tag_ABI_FP_rounding:
10042 case Tag_ABI_FP_exceptions:
10043 case Tag_ABI_FP_user_exceptions:
10044 case Tag_ABI_FP_number_model:
10045 case Tag_FP_HP_extension:
10046 case Tag_CPU_unaligned_access:
10047 case Tag_T2EE_use:
10048 case Tag_MPextension_use:
10049 /* Use the largest value specified. */
10050 if (in_attr[i].i > out_attr[i].i)
10051 out_attr[i].i = in_attr[i].i;
10052 break;
10053
10054 case Tag_ABI_align_preserved:
10055 case Tag_ABI_PCS_RO_data:
10056 /* Use the smallest value specified. */
10057 if (in_attr[i].i < out_attr[i].i)
10058 out_attr[i].i = in_attr[i].i;
10059 break;
10060
10061 case Tag_ABI_align_needed:
10062 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
10063 && (in_attr[Tag_ABI_align_preserved].i == 0
10064 || out_attr[Tag_ABI_align_preserved].i == 0))
10065 {
10066 /* This error message should be enabled once all non-conformant
10067 binaries in the toolchain have had the attributes set
10068 properly.
10069 _bfd_error_handler
10070 (_("error: %B: 8-byte data alignment conflicts with %B"),
10071 obfd, ibfd);
10072 result = FALSE; */
10073 }
10074 /* Fall through. */
10075 case Tag_ABI_FP_denormal:
10076 case Tag_ABI_PCS_GOT_use:
10077 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
10078 value if greater than 2 (for future-proofing). */
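	  /* For example, merging a weak setting (2) from the input with a
	     strong setting (1) already in the output leaves the output at 1,
	     since 1 ranks highest in the 0 < 2 < 1 ordering used here.  */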
10079 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
10080 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
10081 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
10082 out_attr[i].i = in_attr[i].i;
10083 break;
10084
10085 case Tag_Virtualization_use:
10086 /* The virtualization tag effectively stores two bits of
10087 information: the intended use of TrustZone (in bit 0), and the
10088 intended use of Virtualization (in bit 1). */
10089 if (out_attr[i].i == 0)
10090 out_attr[i].i = in_attr[i].i;
10091 else if (in_attr[i].i != 0
10092 && in_attr[i].i != out_attr[i].i)
10093 {
10094 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
10095 out_attr[i].i = 3;
10096 else
10097 {
10098 _bfd_error_handler
10099 (_("error: %B: unable to merge virtualization attributes "
10100 "with %B"),
10101 obfd, ibfd);
10102 result = FALSE;
10103 }
10104 }
10105 break;
10106
10107 case Tag_CPU_arch_profile:
10108 if (out_attr[i].i != in_attr[i].i)
10109 {
10110 /* 0 will merge with anything.
10111 'A' and 'S' merge to 'A'.
10112 'R' and 'S' merge to 'R'.
10113 'M' and 'A|R|S' is an error. */
10114 if (out_attr[i].i == 0
10115 || (out_attr[i].i == 'S'
10116 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
10117 out_attr[i].i = in_attr[i].i;
10118 else if (in_attr[i].i == 0
10119 || (in_attr[i].i == 'S'
10120 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
10121 ; /* Do nothing. */
10122 else
10123 {
10124 _bfd_error_handler
10125 (_("error: %B: Conflicting architecture profiles %c/%c"),
10126 ibfd,
10127 in_attr[i].i ? in_attr[i].i : '0',
10128 out_attr[i].i ? out_attr[i].i : '0');
10129 result = FALSE;
10130 }
10131 }
10132 break;
10133 case Tag_FP_arch:
10134 {
10135 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
10136 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
10137 when it's 0. It might mean absence of FP hardware if
10138 Tag_FP_arch is zero, otherwise it is effectively SP + DP. */
10139
10140 static const struct
10141 {
10142 int ver;
10143 int regs;
10144 } vfp_versions[7] =
10145 {
10146 {0, 0},
10147 {1, 16},
10148 {2, 16},
10149 {3, 32},
10150 {3, 16},
10151 {4, 32},
10152 {4, 16}
10153 };
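	  /* For example, merging Tag_FP_arch values 3 ({3, 32}) and 6
	     ({4, 16}) takes ver = 4 and regs = 32, which the search below
	     maps back to the value 5 ({4, 32}).  */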
10154 int ver;
10155 int regs;
10156 int newval;
10157
10158 /* If the output has no requirement about FP hardware,
10159 follow the requirement of the input. */
10160 if (out_attr[i].i == 0)
10161 {
10162 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
10163 out_attr[i].i = in_attr[i].i;
10164 out_attr[Tag_ABI_HardFP_use].i
10165 = in_attr[Tag_ABI_HardFP_use].i;
10166 break;
10167 }
10168 /* If the input has no requirement about FP hardware, do
10169 nothing. */
10170 else if (in_attr[i].i == 0)
10171 {
10172 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
10173 break;
10174 }
10175
10176 /* Both the input and the output have nonzero Tag_FP_arch.
10177 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
10178
10179 /* If both the input and the output have zero Tag_ABI_HardFP_use,
10180 do nothing. */
10181 if (in_attr[Tag_ABI_HardFP_use].i == 0
10182 && out_attr[Tag_ABI_HardFP_use].i == 0)
10183 ;
10184 /* If the input and the output have different Tag_ABI_HardFP_use,
10185 the combination of them is 3 (SP & DP). */
10186 else if (in_attr[Tag_ABI_HardFP_use].i
10187 != out_attr[Tag_ABI_HardFP_use].i)
10188 out_attr[Tag_ABI_HardFP_use].i = 3;
10189
10190 /* Now we can handle Tag_FP_arch. */
10191
10192 /* Values greater than 6 aren't defined, so just pick the
10193		 biggest.  */
10194 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
10195 {
10196 out_attr[i] = in_attr[i];
10197 break;
10198 }
10199 /* The output uses the superset of input features
10200 (ISA version) and registers. */
10201 ver = vfp_versions[in_attr[i].i].ver;
10202 if (ver < vfp_versions[out_attr[i].i].ver)
10203 ver = vfp_versions[out_attr[i].i].ver;
10204 regs = vfp_versions[in_attr[i].i].regs;
10205 if (regs < vfp_versions[out_attr[i].i].regs)
10206 regs = vfp_versions[out_attr[i].i].regs;
10207	      /* This assumes all possible supersets are also valid
10208		 options.  */
10209 for (newval = 6; newval > 0; newval--)
10210 {
10211 if (regs == vfp_versions[newval].regs
10212 && ver == vfp_versions[newval].ver)
10213 break;
10214 }
10215 out_attr[i].i = newval;
10216 }
10217 break;
10218 case Tag_PCS_config:
10219 if (out_attr[i].i == 0)
10220 out_attr[i].i = in_attr[i].i;
10221 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
10222 {
10223 /* It's sometimes ok to mix different configs, so this is only
10224 a warning. */
10225 _bfd_error_handler
10226 (_("Warning: %B: Conflicting platform configuration"), ibfd);
10227 }
10228 break;
10229 case Tag_ABI_PCS_R9_use:
10230 if (in_attr[i].i != out_attr[i].i
10231 && out_attr[i].i != AEABI_R9_unused
10232 && in_attr[i].i != AEABI_R9_unused)
10233 {
10234 _bfd_error_handler
10235 (_("error: %B: Conflicting use of R9"), ibfd);
10236 result = FALSE;
10237 }
10238 if (out_attr[i].i == AEABI_R9_unused)
10239 out_attr[i].i = in_attr[i].i;
10240 break;
10241 case Tag_ABI_PCS_RW_data:
10242 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
10243 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
10244 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
10245 {
10246 _bfd_error_handler
10247 (_("error: %B: SB relative addressing conflicts with use of R9"),
10248 ibfd);
10249 result = FALSE;
10250 }
10251 /* Use the smallest value specified. */
10252 if (in_attr[i].i < out_attr[i].i)
10253 out_attr[i].i = in_attr[i].i;
10254 break;
10255 case Tag_ABI_PCS_wchar_t:
10256 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
10257 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
10258 {
10259 _bfd_error_handler
10260 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
10261 ibfd, in_attr[i].i, out_attr[i].i);
10262 }
10263 else if (in_attr[i].i && !out_attr[i].i)
10264 out_attr[i].i = in_attr[i].i;
10265 break;
10266 case Tag_ABI_enum_size:
10267 if (in_attr[i].i != AEABI_enum_unused)
10268 {
10269 if (out_attr[i].i == AEABI_enum_unused
10270 || out_attr[i].i == AEABI_enum_forced_wide)
10271 {
10272 /* The existing object is compatible with anything.
10273 Use whatever requirements the new object has. */
10274 out_attr[i].i = in_attr[i].i;
10275 }
10276 else if (in_attr[i].i != AEABI_enum_forced_wide
10277 && out_attr[i].i != in_attr[i].i
10278 && !elf_arm_tdata (obfd)->no_enum_size_warning)
10279 {
10280 static const char *aeabi_enum_names[] =
10281 { "", "variable-size", "32-bit", "" };
10282 const char *in_name =
10283		      in_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
10284 ? aeabi_enum_names[in_attr[i].i]
10285 : "<unknown>";
10286 const char *out_name =
10287		      out_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
10288 ? aeabi_enum_names[out_attr[i].i]
10289 : "<unknown>";
10290 _bfd_error_handler
10291 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
10292 ibfd, in_name, out_name);
10293 }
10294 }
10295 break;
10296 case Tag_ABI_VFP_args:
10297	  /* Already done.  */
10298 break;
10299 case Tag_ABI_WMMX_args:
10300 if (in_attr[i].i != out_attr[i].i)
10301 {
10302 _bfd_error_handler
10303 (_("error: %B uses iWMMXt register arguments, %B does not"),
10304 ibfd, obfd);
10305 result = FALSE;
10306 }
10307 break;
10308 case Tag_compatibility:
10309 /* Merged in target-independent code. */
10310 break;
10311 case Tag_ABI_HardFP_use:
10312 /* This is handled along with Tag_FP_arch. */
10313 break;
10314 case Tag_ABI_FP_16bit_format:
10315 if (in_attr[i].i != 0 && out_attr[i].i != 0)
10316 {
10317 if (in_attr[i].i != out_attr[i].i)
10318 {
10319 _bfd_error_handler
10320 (_("error: fp16 format mismatch between %B and %B"),
10321 ibfd, obfd);
10322 result = FALSE;
10323 }
10324 }
10325 if (in_attr[i].i != 0)
10326 out_attr[i].i = in_attr[i].i;
10327 break;
10328
10329 case Tag_DIV_use:
10330 /* This tag is set to zero if we can use UDIV and SDIV in Thumb
10331 mode on a v7-M or v7-R CPU; to one if we can not use UDIV or
10332 SDIV at all; and to two if we can use UDIV or SDIV on a v7-A
10333 CPU. We will merge as follows: If the input attribute's value
10334 is one then the output attribute's value remains unchanged. If
10335 the input attribute's value is zero or two then if the output
10336 attribute's value is one the output value is set to the input
10337		 value, otherwise the output value must be the same as the
10338		 input.  */
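	  /* In other words: an input of 1 leaves the output unchanged; an
	     output of 1 is simply overwritten by the input; otherwise the
	     two values must already agree or we report a mismatch below.  */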
10339 if (in_attr[i].i != 1 && out_attr[i].i != 1)
10340 {
10341 if (in_attr[i].i != out_attr[i].i)
10342 {
10343 _bfd_error_handler
10344 (_("DIV usage mismatch between %B and %B"),
10345 ibfd, obfd);
10346 result = FALSE;
10347 }
10348 }
10349
10350 if (in_attr[i].i != 1)
10351 out_attr[i].i = in_attr[i].i;
10352
10353 break;
10354
10355 case Tag_MPextension_use_legacy:
10356 /* We don't output objects with Tag_MPextension_use_legacy - we
10357 move the value to Tag_MPextension_use. */
10358 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
10359 {
10360 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
10361 {
10362 _bfd_error_handler
10363		    (_("%B has both the current and legacy "
10364 "Tag_MPextension_use attributes"),
10365 ibfd);
10366 result = FALSE;
10367 }
10368 }
10369
10370 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
10371 out_attr[Tag_MPextension_use] = in_attr[i];
10372
10373 break;
10374
10375 case Tag_nodefaults:
10376 /* This tag is set if it exists, but the value is unused (and is
10377 typically zero). We don't actually need to do anything here -
10378 the merge happens automatically when the type flags are merged
10379 below. */
10380 break;
10381 case Tag_also_compatible_with:
10382 /* Already done in Tag_CPU_arch. */
10383 break;
10384 case Tag_conformance:
10385 /* Keep the attribute if it matches. Throw it away otherwise.
10386 No attribute means no claim to conform. */
10387 if (!in_attr[i].s || !out_attr[i].s
10388 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
10389 out_attr[i].s = NULL;
10390 break;
10391
10392 default:
10393 {
10394 bfd *err_bfd = NULL;
10395
10396 /* The "known_obj_attributes" table does contain some undefined
10397	       attributes.  Ensure that they are unused.  */
10398 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
10399 err_bfd = obfd;
10400 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
10401 err_bfd = ibfd;
10402
10403 if (err_bfd != NULL)
10404 {
10405 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10406 if ((i & 127) < 64)
10407 {
10408 _bfd_error_handler
10409 (_("%B: Unknown mandatory EABI object attribute %d"),
10410 err_bfd, i);
10411 bfd_set_error (bfd_error_bad_value);
10412 result = FALSE;
10413 }
10414 else
10415 {
10416 _bfd_error_handler
10417 (_("Warning: %B: Unknown EABI object attribute %d"),
10418 err_bfd, i);
10419 }
10420 }
10421
10422 /* Only pass on attributes that match in both inputs. */
10423 if (in_attr[i].i != out_attr[i].i
10424 || in_attr[i].s != out_attr[i].s
10425 || (in_attr[i].s != NULL && out_attr[i].s != NULL
10426 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
10427 {
10428 out_attr[i].i = 0;
10429 out_attr[i].s = NULL;
10430 }
10431 }
10432 }
10433
10434 /* If out_attr was copied from in_attr then it won't have a type yet. */
10435 if (in_attr[i].type && !out_attr[i].type)
10436 out_attr[i].type = in_attr[i].type;
10437 }
10438
10439 /* Merge Tag_compatibility attributes and any common GNU ones. */
10440 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
10441 return FALSE;
10442
10443 /* Check for any attributes not known on ARM. */
10444 in_list = elf_other_obj_attributes_proc (ibfd);
10445 out_listp = &elf_other_obj_attributes_proc (obfd);
10446 out_list = *out_listp;
10447
10448 for (; in_list || out_list; )
10449 {
10450 bfd *err_bfd = NULL;
10451 int err_tag = 0;
10452
10453 /* The tags for each list are in numerical order. */
10454 /* If the tags are equal, then merge. */
10455 if (out_list && (!in_list || in_list->tag > out_list->tag))
10456 {
10457 /* This attribute only exists in obfd. We can't merge, and we don't
10458 know what the tag means, so delete it. */
10459 err_bfd = obfd;
10460 err_tag = out_list->tag;
10461 *out_listp = out_list->next;
10462 out_list = *out_listp;
10463 }
10464 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10465 {
10466 /* This attribute only exists in ibfd. We can't merge, and we don't
10467 know what the tag means, so ignore it. */
10468 err_bfd = ibfd;
10469 err_tag = in_list->tag;
10470 in_list = in_list->next;
10471 }
10472 else /* The tags are equal. */
10473 {
10474	  /* At present, all attributes in the list are unknown, and
10475 therefore can't be merged meaningfully. */
10476 err_bfd = obfd;
10477 err_tag = out_list->tag;
10478
10479 /* Only pass on attributes that match in both inputs. */
10480 if (in_list->attr.i != out_list->attr.i
10481 || in_list->attr.s != out_list->attr.s
10482 || (in_list->attr.s && out_list->attr.s
10483 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10484 {
10485 /* No match. Delete the attribute. */
10486 *out_listp = out_list->next;
10487 out_list = *out_listp;
10488 }
10489 else
10490 {
10491 /* Matched. Keep the attribute and move to the next. */
10492 out_list = out_list->next;
10493 in_list = in_list->next;
10494 }
10495 }
10496
10497 if (err_bfd)
10498 {
10499 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10500 if ((err_tag & 127) < 64)
10501 {
10502 _bfd_error_handler
10503 (_("%B: Unknown mandatory EABI object attribute %d"),
10504 err_bfd, err_tag);
10505 bfd_set_error (bfd_error_bad_value);
10506 result = FALSE;
10507 }
10508 else
10509 {
10510 _bfd_error_handler
10511 (_("Warning: %B: Unknown EABI object attribute %d"),
10512 err_bfd, err_tag);
10513 }
10514 }
10515 }
10516 return result;
10517}
10518
10519
10520/* Return TRUE if the two EABI versions are incompatible. */
10521
10522static bfd_boolean
10523elf32_arm_versions_compatible (unsigned iver, unsigned over)
10524{
10525 /* v4 and v5 are the same spec before and after it was released,
10526 so allow mixing them. */
10527 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10528 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10529 return TRUE;
10530
10531 return (iver == over);
10532}
10533
10534/* Merge backend specific data from an object file to the output
10535 object file when linking. */
10536
10537static bfd_boolean
10538elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
10539
10540/* Display the flags field. */
10541
10542static bfd_boolean
10543elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10544{
10545 FILE * file = (FILE *) ptr;
10546 unsigned long flags;
10547
10548 BFD_ASSERT (abfd != NULL && ptr != NULL);
10549
10550 /* Print normal ELF private data. */
10551 _bfd_elf_print_private_bfd_data (abfd, ptr);
10552
10553 flags = elf_elfheader (abfd)->e_flags;
10554 /* Ignore init flag - it may not be set, despite the flags field
10555 containing valid data. */
10556
10557 /* xgettext:c-format */
10558 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10559
10560 switch (EF_ARM_EABI_VERSION (flags))
10561 {
10562 case EF_ARM_EABI_UNKNOWN:
10563 /* The following flag bits are GNU extensions and not part of the
10564 official ARM ELF extended ABI. Hence they are only decoded if
10565 the EABI version is not set. */
10566 if (flags & EF_ARM_INTERWORK)
10567 fprintf (file, _(" [interworking enabled]"));
10568
10569 if (flags & EF_ARM_APCS_26)
10570 fprintf (file, " [APCS-26]");
10571 else
10572 fprintf (file, " [APCS-32]");
10573
10574 if (flags & EF_ARM_VFP_FLOAT)
10575 fprintf (file, _(" [VFP float format]"));
10576 else if (flags & EF_ARM_MAVERICK_FLOAT)
10577 fprintf (file, _(" [Maverick float format]"));
10578 else
10579 fprintf (file, _(" [FPA float format]"));
10580
10581 if (flags & EF_ARM_APCS_FLOAT)
10582 fprintf (file, _(" [floats passed in float registers]"));
10583
10584 if (flags & EF_ARM_PIC)
10585 fprintf (file, _(" [position independent]"));
10586
10587 if (flags & EF_ARM_NEW_ABI)
10588 fprintf (file, _(" [new ABI]"));
10589
10590 if (flags & EF_ARM_OLD_ABI)
10591 fprintf (file, _(" [old ABI]"));
10592
10593 if (flags & EF_ARM_SOFT_FLOAT)
10594 fprintf (file, _(" [software FP]"));
10595
10596 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10597 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10598 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10599 | EF_ARM_MAVERICK_FLOAT);
10600 break;
10601
10602 case EF_ARM_EABI_VER1:
10603 fprintf (file, _(" [Version1 EABI]"));
10604
10605 if (flags & EF_ARM_SYMSARESORTED)
10606 fprintf (file, _(" [sorted symbol table]"));
10607 else
10608 fprintf (file, _(" [unsorted symbol table]"));
10609
10610 flags &= ~ EF_ARM_SYMSARESORTED;
10611 break;
10612
10613 case EF_ARM_EABI_VER2:
10614 fprintf (file, _(" [Version2 EABI]"));
10615
10616 if (flags & EF_ARM_SYMSARESORTED)
10617 fprintf (file, _(" [sorted symbol table]"));
10618 else
10619 fprintf (file, _(" [unsorted symbol table]"));
10620
10621 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10622 fprintf (file, _(" [dynamic symbols use segment index]"));
10623
10624 if (flags & EF_ARM_MAPSYMSFIRST)
10625 fprintf (file, _(" [mapping symbols precede others]"));
10626
10627 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10628 | EF_ARM_MAPSYMSFIRST);
10629 break;
10630
10631 case EF_ARM_EABI_VER3:
10632 fprintf (file, _(" [Version3 EABI]"));
10633 break;
10634
10635 case EF_ARM_EABI_VER4:
10636 fprintf (file, _(" [Version4 EABI]"));
10637 goto eabi;
10638
10639 case EF_ARM_EABI_VER5:
10640 fprintf (file, _(" [Version5 EABI]"));
10641 eabi:
10642 if (flags & EF_ARM_BE8)
10643 fprintf (file, _(" [BE8]"));
10644
10645 if (flags & EF_ARM_LE8)
10646 fprintf (file, _(" [LE8]"));
10647
10648 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10649 break;
10650
10651 default:
10652 fprintf (file, _(" <EABI version unrecognised>"));
10653 break;
10654 }
10655
10656 flags &= ~ EF_ARM_EABIMASK;
10657
10658 if (flags & EF_ARM_RELEXEC)
10659 fprintf (file, _(" [relocatable executable]"));
10660
10661 if (flags & EF_ARM_HASENTRY)
10662 fprintf (file, _(" [has entry point]"));
10663
10664 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10665
10666 if (flags)
10667 fprintf (file, _("<Unrecognised flag bits set>"));
10668
10669 fputc ('\n', file);
10670
10671 return TRUE;
10672}
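
/* For example, assuming the usual bit assignments from include/elf/arm.h
   (EABI version 5 in the top byte of e_flags and EF_ARM_HASENTRY == 0x02),
   an object whose e_flags is 0x05000002 prints:

     private flags = 5000002: [Version5 EABI] [has entry point]  */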
10673
10674static int
10675elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10676{
10677 switch (ELF_ST_TYPE (elf_sym->st_info))
10678 {
10679 case STT_ARM_TFUNC:
10680 return ELF_ST_TYPE (elf_sym->st_info);
10681
10682 case STT_ARM_16BIT:
10683 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10684 This allows us to distinguish between data used by Thumb instructions
10685 and non-data (which is probably code) inside Thumb regions of an
10686 executable. */
10687 if (type != STT_OBJECT && type != STT_TLS)
10688 return ELF_ST_TYPE (elf_sym->st_info);
10689 break;
10690
10691 default:
10692 break;
10693 }
10694
10695 return type;
10696}
10697
10698static asection *
10699elf32_arm_gc_mark_hook (asection *sec,
10700 struct bfd_link_info *info,
10701 Elf_Internal_Rela *rel,
10702 struct elf_link_hash_entry *h,
10703 Elf_Internal_Sym *sym)
10704{
10705 if (h != NULL)
10706 switch (ELF32_R_TYPE (rel->r_info))
10707 {
10708 case R_ARM_GNU_VTINHERIT:
10709 case R_ARM_GNU_VTENTRY:
10710 return NULL;
10711 }
10712
10713 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10714}
10715
10716/* Update the got entry reference counts for the section being removed. */
10717
10718static bfd_boolean
10719elf32_arm_gc_sweep_hook (bfd * abfd,
10720 struct bfd_link_info * info,
10721 asection * sec,
10722 const Elf_Internal_Rela * relocs)
10723{
10724 Elf_Internal_Shdr *symtab_hdr;
10725 struct elf_link_hash_entry **sym_hashes;
10726 bfd_signed_vma *local_got_refcounts;
10727 const Elf_Internal_Rela *rel, *relend;
10728 struct elf32_arm_link_hash_table * globals;
10729
10730 if (info->relocatable)
10731 return TRUE;
10732
10733 globals = elf32_arm_hash_table (info);
10734 if (globals == NULL)
10735 return FALSE;
10736
10737 elf_section_data (sec)->local_dynrel = NULL;
10738
10739 symtab_hdr = & elf_symtab_hdr (abfd);
10740 sym_hashes = elf_sym_hashes (abfd);
10741 local_got_refcounts = elf_local_got_refcounts (abfd);
10742
10743 check_use_blx (globals);
10744
10745 relend = relocs + sec->reloc_count;
10746 for (rel = relocs; rel < relend; rel++)
10747 {
10748 unsigned long r_symndx;
10749 struct elf_link_hash_entry *h = NULL;
10750 int r_type;
10751
10752 r_symndx = ELF32_R_SYM (rel->r_info);
10753 if (r_symndx >= symtab_hdr->sh_info)
10754 {
10755 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10756 while (h->root.type == bfd_link_hash_indirect
10757 || h->root.type == bfd_link_hash_warning)
10758 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10759 }
10760
10761 r_type = ELF32_R_TYPE (rel->r_info);
10762 r_type = arm_real_reloc_type (globals, r_type);
10763 switch (r_type)
10764 {
10765 case R_ARM_GOT32:
10766 case R_ARM_GOT_PREL:
10767 case R_ARM_TLS_GD32:
10768 case R_ARM_TLS_IE32:
10769 if (h != NULL)
10770 {
10771 if (h->got.refcount > 0)
10772 h->got.refcount -= 1;
10773 }
10774 else if (local_got_refcounts != NULL)
10775 {
10776 if (local_got_refcounts[r_symndx] > 0)
10777 local_got_refcounts[r_symndx] -= 1;
10778 }
10779 break;
10780
10781 case R_ARM_TLS_LDM32:
10782 globals->tls_ldm_got.refcount -= 1;
10783 break;
10784
10785 case R_ARM_ABS32:
10786 case R_ARM_ABS32_NOI:
10787 case R_ARM_REL32:
10788 case R_ARM_REL32_NOI:
10789 case R_ARM_PC24:
10790 case R_ARM_PLT32:
10791 case R_ARM_CALL:
10792 case R_ARM_JUMP24:
10793 case R_ARM_PREL31:
10794 case R_ARM_THM_CALL:
10795 case R_ARM_THM_JUMP24:
10796 case R_ARM_THM_JUMP19:
10797 case R_ARM_MOVW_ABS_NC:
10798 case R_ARM_MOVT_ABS:
10799 case R_ARM_MOVW_PREL_NC:
10800 case R_ARM_MOVT_PREL:
10801 case R_ARM_THM_MOVW_ABS_NC:
10802 case R_ARM_THM_MOVT_ABS:
10803 case R_ARM_THM_MOVW_PREL_NC:
10804 case R_ARM_THM_MOVT_PREL:
10805 /* Should the interworking branches be here also? */
10806
10807 if (h != NULL)
10808 {
10809 struct elf32_arm_link_hash_entry *eh;
10810 struct elf32_arm_relocs_copied **pp;
10811 struct elf32_arm_relocs_copied *p;
10812
10813 eh = (struct elf32_arm_link_hash_entry *) h;
10814
10815 if (h->plt.refcount > 0)
10816 {
10817 h->plt.refcount -= 1;
10818 if (r_type == R_ARM_THM_CALL)
10819 eh->plt_maybe_thumb_refcount--;
10820
10821 if (r_type == R_ARM_THM_JUMP24
10822 || r_type == R_ARM_THM_JUMP19)
10823 eh->plt_thumb_refcount--;
10824 }
10825
10826 if (r_type == R_ARM_ABS32
10827 || r_type == R_ARM_REL32
10828 || r_type == R_ARM_ABS32_NOI
10829 || r_type == R_ARM_REL32_NOI)
10830 {
10831 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10832 pp = &p->next)
10833 if (p->section == sec)
10834 {
10835 p->count -= 1;
10836 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10837 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10838 p->pc_count -= 1;
10839 if (p->count == 0)
10840 *pp = p->next;
10841 break;
10842 }
10843 }
10844 }
10845 break;
10846
10847 default:
10848 break;
10849 }
10850 }
10851
10852 return TRUE;
10853}
10854
10855/* Look through the relocs for a section during the first phase. */
10856
10857static bfd_boolean
10858elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10859 asection *sec, const Elf_Internal_Rela *relocs)
10860{
10861 Elf_Internal_Shdr *symtab_hdr;
10862 struct elf_link_hash_entry **sym_hashes;
10863 const Elf_Internal_Rela *rel;
10864 const Elf_Internal_Rela *rel_end;
10865 bfd *dynobj;
10866 asection *sreloc;
10867 struct elf32_arm_link_hash_table *htab;
10868 bfd_boolean needs_plt;
10869 unsigned long nsyms;
10870
10871 if (info->relocatable)
10872 return TRUE;
10873
10874 BFD_ASSERT (is_arm_elf (abfd));
10875
10876 htab = elf32_arm_hash_table (info);
10877 if (htab == NULL)
10878 return FALSE;
10879
10880 sreloc = NULL;
10881
10882 /* Create dynamic sections for relocatable executables so that we can
10883 copy relocations. */
10884 if (htab->root.is_relocatable_executable
10885 && ! htab->root.dynamic_sections_created)
10886 {
10887 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10888 return FALSE;
10889 }
10890
10891 dynobj = elf_hash_table (info)->dynobj;
10892 symtab_hdr = & elf_symtab_hdr (abfd);
10893 sym_hashes = elf_sym_hashes (abfd);
10894 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10895
10896 rel_end = relocs + sec->reloc_count;
10897 for (rel = relocs; rel < rel_end; rel++)
10898 {
10899 struct elf_link_hash_entry *h;
10900 struct elf32_arm_link_hash_entry *eh;
10901 unsigned long r_symndx;
10902 int r_type;
10903
10904 r_symndx = ELF32_R_SYM (rel->r_info);
10905 r_type = ELF32_R_TYPE (rel->r_info);
10906 r_type = arm_real_reloc_type (htab, r_type);
10907
10908 if (r_symndx >= nsyms
10909 /* PR 9934: It is possible to have relocations that do not
10910 refer to symbols, thus it is also possible to have an
10911 object file containing relocations but no symbol table. */
10912 && (r_symndx > STN_UNDEF || nsyms > 0))
10913 {
10914 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10915 r_symndx);
10916 return FALSE;
10917 }
10918
10919 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10920 h = NULL;
10921 else
10922 {
10923 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10924 while (h->root.type == bfd_link_hash_indirect
10925 || h->root.type == bfd_link_hash_warning)
10926 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10927 }
10928
10929 eh = (struct elf32_arm_link_hash_entry *) h;
10930
10931 switch (r_type)
10932 {
10933 case R_ARM_GOT32:
10934 case R_ARM_GOT_PREL:
10935 case R_ARM_TLS_GD32:
10936 case R_ARM_TLS_IE32:
10937 /* This symbol requires a global offset table entry. */
10938 {
10939 int tls_type, old_tls_type;
10940
10941 switch (r_type)
10942 {
10943 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10944 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10945 default: tls_type = GOT_NORMAL; break;
10946 }
10947
10948 if (h != NULL)
10949 {
10950 h->got.refcount++;
10951 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10952 }
10953 else
10954 {
10955 bfd_signed_vma *local_got_refcounts;
10956
10957 /* This is a global offset table entry for a local symbol. */
10958 local_got_refcounts = elf_local_got_refcounts (abfd);
10959 if (local_got_refcounts == NULL)
10960 {
10961 bfd_size_type size;
10962
10963 size = symtab_hdr->sh_info;
10964 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10965 local_got_refcounts = (bfd_signed_vma *)
10966 bfd_zalloc (abfd, size);
10967 if (local_got_refcounts == NULL)
10968 return FALSE;
10969 elf_local_got_refcounts (abfd) = local_got_refcounts;
10970 elf32_arm_local_got_tls_type (abfd)
10971 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10972 }
10973 local_got_refcounts[r_symndx] += 1;
10974 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10975 }
10976
10977 /* We will already have issued an error message if there is a
10978 TLS / non-TLS mismatch, based on the symbol type. We don't
10979 support any linker relaxations. So just combine any TLS
10980 types needed. */
10981 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10982 && tls_type != GOT_NORMAL)
10983 tls_type |= old_tls_type;
10984
10985 if (old_tls_type != tls_type)
10986 {
10987 if (h != NULL)
10988 elf32_arm_hash_entry (h)->tls_type = tls_type;
10989 else
10990 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10991 }
10992 }
10993 /* Fall through. */
10994
10995 case R_ARM_TLS_LDM32:
10996 if (r_type == R_ARM_TLS_LDM32)
10997 htab->tls_ldm_got.refcount++;
10998 /* Fall through. */
10999
11000 case R_ARM_GOTOFF32:
11001 case R_ARM_GOTPC:
11002 if (htab->sgot == NULL)
11003 {
11004 if (htab->root.dynobj == NULL)
11005 htab->root.dynobj = abfd;
11006 if (!create_got_section (htab->root.dynobj, info))
11007 return FALSE;
11008 }
11009 break;
11010
11011 case R_ARM_ABS12:
11012 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
11013 ldr __GOTT_INDEX__ offsets. */
11014 if (!htab->vxworks_p)
11015 break;
11016 /* Fall through. */
11017
11018 case R_ARM_PC24:
11019 case R_ARM_PLT32:
11020 case R_ARM_CALL:
11021 case R_ARM_JUMP24:
11022 case R_ARM_PREL31:
11023 case R_ARM_THM_CALL:
11024 case R_ARM_THM_JUMP24:
11025 case R_ARM_THM_JUMP19:
11026 needs_plt = 1;
11027 goto normal_reloc;
11028
11029 case R_ARM_MOVW_ABS_NC:
11030 case R_ARM_MOVT_ABS:
11031 case R_ARM_THM_MOVW_ABS_NC:
11032 case R_ARM_THM_MOVT_ABS:
11033 if (info->shared)
11034 {
11035 (*_bfd_error_handler)
11036 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
11037 abfd, elf32_arm_howto_table_1[r_type].name,
11038 (h) ? h->root.root.string : "a local symbol");
11039 bfd_set_error (bfd_error_bad_value);
11040 return FALSE;
11041 }
11042
11043 /* Fall through. */
11044 case R_ARM_ABS32:
11045 case R_ARM_ABS32_NOI:
11046 case R_ARM_REL32:
11047 case R_ARM_REL32_NOI:
11048 case R_ARM_MOVW_PREL_NC:
11049 case R_ARM_MOVT_PREL:
11050 case R_ARM_THM_MOVW_PREL_NC:
11051 case R_ARM_THM_MOVT_PREL:
11052 needs_plt = 0;
11053 normal_reloc:
11054
11055 /* Should the interworking branches be listed here? */
11056 if (h != NULL)
11057 {
11058 /* If this reloc is in a read-only section, we might
11059 need a copy reloc. We can't check reliably at this
11060 stage whether the section is read-only, as input
11061 sections have not yet been mapped to output sections.
11062 Tentatively set the flag for now, and correct in
11063 adjust_dynamic_symbol. */
11064 if (!info->shared)
11065 h->non_got_ref = 1;
11066
11067 /* We may need a .plt entry if the function this reloc
11068 refers to is in a different object. We can't tell for
11069 sure yet, because something later might force the
11070 symbol local. */
11071 if (needs_plt)
11072 h->needs_plt = 1;
11073
11074 /* If we create a PLT entry, this relocation will reference
11075 it, even if it's an ABS32 relocation. */
11076 h->plt.refcount += 1;
11077
11078 /* It's too early to use htab->use_blx here, so we have to
11079 record possible blx references separately from
11080 relocs that definitely need a thumb stub. */
11081
11082 if (r_type == R_ARM_THM_CALL)
11083 eh->plt_maybe_thumb_refcount += 1;
11084
11085 if (r_type == R_ARM_THM_JUMP24
11086 || r_type == R_ARM_THM_JUMP19)
11087 eh->plt_thumb_refcount += 1;
11088 }
11089
11090 /* If we are creating a shared library or relocatable executable,
11091 and this is a reloc against a global symbol, or a non PC
11092 relative reloc against a local symbol, then we need to copy
11093 the reloc into the shared library. However, if we are linking
11094 with -Bsymbolic, we do not need to copy a reloc against a
11095 global symbol which is defined in an object we are
11096 including in the link (i.e., DEF_REGULAR is set). At
11097 this point we have not seen all the input files, so it is
11098 possible that DEF_REGULAR is not set now but will be set
11099 later (it is never cleared). We account for that
11100 possibility below by storing information in the
11101 relocs_copied field of the hash table entry. */
11102 if ((info->shared || htab->root.is_relocatable_executable)
11103 && (sec->flags & SEC_ALLOC) != 0
11104 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
11105 || (h != NULL && ! h->needs_plt
11106 && (! info->symbolic || ! h->def_regular))))
11107 {
11108 struct elf32_arm_relocs_copied *p, **head;
11109
11110 /* When creating a shared object, we must copy these
11111 reloc types into the output file. We create a reloc
11112 section in dynobj and make room for this reloc. */
11113 if (sreloc == NULL)
11114 {
11115 sreloc = _bfd_elf_make_dynamic_reloc_section
11116 (sec, dynobj, 2, abfd, ! htab->use_rel);
11117
11118 if (sreloc == NULL)
11119 return FALSE;
11120
11121 /* BPABI objects never have dynamic relocations mapped. */
11122 if (htab->symbian_p)
11123 {
11124 flagword flags;
11125
11126 flags = bfd_get_section_flags (dynobj, sreloc);
11127 flags &= ~(SEC_LOAD | SEC_ALLOC);
11128 bfd_set_section_flags (dynobj, sreloc, flags);
11129 }
11130 }
11131
11132 /* If this is a global symbol, we count the number of
11133 relocations we need for this symbol. */
11134 if (h != NULL)
11135 {
11136 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
11137 }
11138 else
11139 {
11140 /* Track dynamic relocs needed for local syms too.
11141 We really need local syms available to do this
11142 easily. Oh well. */
11143 asection *s;
11144 void *vpp;
11145 Elf_Internal_Sym *isym;
11146
11147 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
11148 abfd, r_symndx);
11149 if (isym == NULL)
11150 return FALSE;
11151
11152 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
11153 if (s == NULL)
11154 s = sec;
11155
11156 vpp = &elf_section_data (s)->local_dynrel;
11157 head = (struct elf32_arm_relocs_copied **) vpp;
11158 }
11159
11160 p = *head;
11161 if (p == NULL || p->section != sec)
11162 {
11163 bfd_size_type amt = sizeof *p;
11164
11165 p = (struct elf32_arm_relocs_copied *)
11166 bfd_alloc (htab->root.dynobj, amt);
11167 if (p == NULL)
11168 return FALSE;
11169 p->next = *head;
11170 *head = p;
11171 p->section = sec;
11172 p->count = 0;
11173 p->pc_count = 0;
11174 }
11175
11176 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
11177 p->pc_count += 1;
11178 p->count += 1;
11179 }
11180 break;
11181
11182 /* This relocation describes the C++ object vtable hierarchy.
11183 Reconstruct it for later use during GC. */
11184 case R_ARM_GNU_VTINHERIT:
11185 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
11186 return FALSE;
11187 break;
11188
11189 /* This relocation describes which C++ vtable entries are actually
11190 used. Record for later use during GC. */
11191 case R_ARM_GNU_VTENTRY:
11192 BFD_ASSERT (h != NULL);
11193 if (h != NULL
11194 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
11195 return FALSE;
11196 break;
11197 }
11198 }
11199
11200 return TRUE;
11201}
11202
11203/* Unwinding tables are not referenced directly. This pass marks them as
11204 required if the corresponding code section is marked. */
11205
11206static bfd_boolean
11207elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
11208 elf_gc_mark_hook_fn gc_mark_hook)
11209{
11210 bfd *sub;
11211 Elf_Internal_Shdr **elf_shdrp;
11212 bfd_boolean again;
11213
11214 /* Marking EH data may cause additional code sections to be marked,
11215 requiring multiple passes. */
11216 again = TRUE;
11217 while (again)
11218 {
11219 again = FALSE;
11220 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
11221 {
11222 asection *o;
11223
11224 if (! is_arm_elf (sub))
11225 continue;
11226
11227 elf_shdrp = elf_elfsections (sub);
11228 for (o = sub->sections; o != NULL; o = o->next)
11229 {
11230 Elf_Internal_Shdr *hdr;
11231
11232 hdr = &elf_section_data (o)->this_hdr;
11233 if (hdr->sh_type == SHT_ARM_EXIDX
11234 && hdr->sh_link
11235 && hdr->sh_link < elf_numsections (sub)
11236 && !o->gc_mark
11237 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
11238 {
11239 again = TRUE;
11240 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
11241 return FALSE;
11242 }
11243 }
11244 }
11245 }
11246
11247 return TRUE;
11248}
11249
11250/* Treat mapping symbols as special target symbols. */
11251
11252static bfd_boolean
11253elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11254{
11255 return bfd_is_arm_special_symbol_name (sym->name,
11256 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11257}
11258
11259/* This is a copy of elf_find_function() from elf.c except that
11260 ARM mapping symbols are ignored when looking for function names
11261 and STT_ARM_TFUNC is considered to be a function type. */
11262
11263static bfd_boolean
11264arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11265 asection * section,
11266 asymbol ** symbols,
11267 bfd_vma offset,
11268 const char ** filename_ptr,
11269 const char ** functionname_ptr)
11270{
11271 const char * filename = NULL;
11272 asymbol * func = NULL;
11273 bfd_vma low_func = 0;
11274 asymbol ** p;
11275
11276 for (p = symbols; *p != NULL; p++)
11277 {
11278 elf_symbol_type *q;
11279
11280 q = (elf_symbol_type *) *p;
11281
11282 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11283 {
11284 default:
11285 break;
11286 case STT_FILE:
11287 filename = bfd_asymbol_name (&q->symbol);
11288 break;
11289 case STT_FUNC:
11290 case STT_ARM_TFUNC:
11291 case STT_NOTYPE:
11292 /* Skip mapping symbols. */
11293 if ((q->symbol.flags & BSF_LOCAL)
11294 && bfd_is_arm_special_symbol_name (q->symbol.name,
11295 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11296 continue;
11297 /* Fall through. */
11298 if (bfd_get_section (&q->symbol) == section
11299 && q->symbol.value >= low_func
11300 && q->symbol.value <= offset)
11301 {
11302 func = (asymbol *) q;
11303 low_func = q->symbol.value;
11304 }
11305 break;
11306 }
11307 }
11308
11309 if (func == NULL)
11310 return FALSE;
11311
11312 if (filename_ptr)
11313 *filename_ptr = filename;
11314 if (functionname_ptr)
11315 *functionname_ptr = bfd_asymbol_name (func);
11316
11317 return TRUE;
11318}
11319
11320
11321/* Find the nearest line to a particular section and offset, for error
11322 reporting. This code is a duplicate of the code in elf.c, except
11323 that it uses arm_elf_find_function. */
11324
11325static bfd_boolean
11326elf32_arm_find_nearest_line (bfd * abfd,
11327 asection * section,
11328 asymbol ** symbols,
11329 bfd_vma offset,
11330 const char ** filename_ptr,
11331 const char ** functionname_ptr,
11332 unsigned int * line_ptr)
11333{
11334 bfd_boolean found = FALSE;
11335
11336 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11337
11338 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11339 filename_ptr, functionname_ptr,
11340 line_ptr, 0,
11341 & elf_tdata (abfd)->dwarf2_find_line_info))
11342 {
11343 if (!*functionname_ptr)
11344 arm_elf_find_function (abfd, section, symbols, offset,
11345 *filename_ptr ? NULL : filename_ptr,
11346 functionname_ptr);
11347
11348 return TRUE;
11349 }
11350
11351 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11352 & found, filename_ptr,
11353 functionname_ptr, line_ptr,
11354 & elf_tdata (abfd)->line_info))
11355 return FALSE;
11356
11357 if (found && (*functionname_ptr || *line_ptr))
11358 return TRUE;
11359
11360 if (symbols == NULL)
11361 return FALSE;
11362
11363 if (! arm_elf_find_function (abfd, section, symbols, offset,
11364 filename_ptr, functionname_ptr))
11365 return FALSE;
11366
11367 *line_ptr = 0;
11368 return TRUE;
11369}
11370
11371static bfd_boolean
11372elf32_arm_find_inliner_info (bfd * abfd,
11373 const char ** filename_ptr,
11374 const char ** functionname_ptr,
11375 unsigned int * line_ptr)
11376{
11377 bfd_boolean found;
11378 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11379 functionname_ptr, line_ptr,
11380 & elf_tdata (abfd)->dwarf2_find_line_info);
11381 return found;
11382}
11383
11384/* Adjust a symbol defined by a dynamic object and referenced by a
11385 regular object. The current definition is in some section of the
11386 dynamic object, but we're not including those sections. We have to
11387 change the definition to something the rest of the link can
11388 understand. */
11389
11390static bfd_boolean
11391elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11392 struct elf_link_hash_entry * h)
11393{
11394 bfd * dynobj;
11395 asection * s;
11396 struct elf32_arm_link_hash_entry * eh;
11397 struct elf32_arm_link_hash_table *globals;
11398
11399 globals = elf32_arm_hash_table (info);
11400 if (globals == NULL)
11401 return FALSE;
11402
11403 dynobj = elf_hash_table (info)->dynobj;
11404
11405 /* Make sure we know what is going on here. */
11406 BFD_ASSERT (dynobj != NULL
11407 && (h->needs_plt
11408 || h->u.weakdef != NULL
11409 || (h->def_dynamic
11410 && h->ref_regular
11411 && !h->def_regular)));
11412
11413 eh = (struct elf32_arm_link_hash_entry *) h;
11414
11415 /* If this is a function, put it in the procedure linkage table. We
11416 will fill in the contents of the procedure linkage table later,
11417 when we know the address of the .got section. */
11418 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11419 || h->needs_plt)
11420 {
11421 if (h->plt.refcount <= 0
11422 || SYMBOL_CALLS_LOCAL (info, h)
11423 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11424 && h->root.type == bfd_link_hash_undefweak))
11425 {
11426 /* This case can occur if we saw a PLT32 reloc in an input
11427 file, but the symbol was never referred to by a dynamic
11428 object, or if all references were garbage collected. In
11429 such a case, we don't actually need to build a procedure
11430 linkage table, and we can just do a PC24 reloc instead. */
11431 h->plt.offset = (bfd_vma) -1;
11432 eh->plt_thumb_refcount = 0;
11433 eh->plt_maybe_thumb_refcount = 0;
11434 h->needs_plt = 0;
11435 }
11436
11437 return TRUE;
11438 }
11439 else
11440 {
11441 /* It's possible that we incorrectly decided a .plt reloc was
11442 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11443 in check_relocs. We can't decide accurately between function
11444 and non-function syms in check_relocs; objects loaded later in
11445 the link may change h->type. So fix it now. */
11446 h->plt.offset = (bfd_vma) -1;
11447 eh->plt_thumb_refcount = 0;
11448 eh->plt_maybe_thumb_refcount = 0;
11449 }
11450
11451 /* If this is a weak symbol, and there is a real definition, the
11452 processor independent code will have arranged for us to see the
11453 real definition first, and we can just use the same value. */
11454 if (h->u.weakdef != NULL)
11455 {
11456 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11457 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11458 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11459 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11460 return TRUE;
11461 }
11462
11463 /* If there are no non-GOT references, we do not need a copy
11464 relocation. */
11465 if (!h->non_got_ref)
11466 return TRUE;
11467
11468 /* This is a reference to a symbol defined by a dynamic object which
11469 is not a function. */
11470
11471 /* If we are creating a shared library, we must presume that the
11472 only references to the symbol are via the global offset table.
11473 For such cases we need not do anything here; the relocations will
11474 be handled correctly by relocate_section. Relocatable executables
11475 can reference data in shared objects directly, so we don't need to
11476 do anything here. */
11477 if (info->shared || globals->root.is_relocatable_executable)
11478 return TRUE;
11479
11480 if (h->size == 0)
11481 {
11482 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11483 h->root.root.string);
11484 return TRUE;
11485 }
11486
11487 /* We must allocate the symbol in our .dynbss section, which will
11488 become part of the .bss section of the executable. There will be
11489 an entry for this symbol in the .dynsym section. The dynamic
11490 object will contain position independent code, so all references
11491 from the dynamic object to this symbol will go through the global
11492 offset table. The dynamic linker will use the .dynsym entry to
11493 determine the address it must put in the global offset table, so
11494 both the dynamic object and the regular object will refer to the
11495 same memory location for the variable. */
11496 s = bfd_get_section_by_name (dynobj, ".dynbss");
11497 BFD_ASSERT (s != NULL);
11498
11499 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11500 copy the initial value out of the dynamic object and into the
11501 runtime process image. We need to remember the offset into the
11502 .rel(a).bss section we are going to use. */
11503 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11504 {
11505 asection *srel;
11506
11507 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11508 BFD_ASSERT (srel != NULL);
11509 srel->size += RELOC_SIZE (globals);
11510 h->needs_copy = 1;
11511 }
11512
11513 return _bfd_elf_adjust_dynamic_copy (h, s);
11514}
11515
11516/* Allocate space in .plt, .got and associated reloc sections for
11517 dynamic relocs. */
11518
11519static bfd_boolean
11520allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11521{
11522 struct bfd_link_info *info;
11523 struct elf32_arm_link_hash_table *htab;
11524 struct elf32_arm_link_hash_entry *eh;
11525 struct elf32_arm_relocs_copied *p;
11526 bfd_signed_vma thumb_refs;
11527
11528 eh = (struct elf32_arm_link_hash_entry *) h;
11529
11530 if (h->root.type == bfd_link_hash_indirect)
11531 return TRUE;
11532
11533 if (h->root.type == bfd_link_hash_warning)
11534 /* When warning symbols are created, they **replace** the "real"
11535 entry in the hash table, thus we never get to see the real
11536 symbol in a hash traversal. So look at it now. */
11537 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11538
11539 info = (struct bfd_link_info *) inf;
11540 htab = elf32_arm_hash_table (info);
11541 if (htab == NULL)
11542 return FALSE;
11543
11544 if (htab->root.dynamic_sections_created
11545 && h->plt.refcount > 0)
11546 {
11547 /* Make sure this symbol is output as a dynamic symbol.
11548 Undefined weak syms won't yet be marked as dynamic. */
11549 if (h->dynindx == -1
11550 && !h->forced_local)
11551 {
11552 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11553 return FALSE;
11554 }
11555
11556 if (info->shared
11557 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11558 {
11559 asection *s = htab->splt;
11560
11561 /* If this is the first .plt entry, make room for the special
11562 first entry. */
11563 if (s->size == 0)
11564 s->size += htab->plt_header_size;
11565
11566 h->plt.offset = s->size;
11567
11568 /* If we will insert a Thumb trampoline before this PLT, leave room
11569 for it. */
11570 thumb_refs = eh->plt_thumb_refcount;
11571 if (!htab->use_blx)
11572 thumb_refs += eh->plt_maybe_thumb_refcount;
11573
11574 if (thumb_refs > 0)
11575 {
11576 h->plt.offset += PLT_THUMB_STUB_SIZE;
11577 s->size += PLT_THUMB_STUB_SIZE;
11578 }
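/* The stub reserved here is the 4-byte Thumb-to-ARM sequence
 ("bx pc; nop") placed immediately before the ARM PLT entry,
 which is why h->plt.offset is advanced past it. */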
11579
11580 /* If this symbol is not defined in a regular file, and we are
11581 not generating a shared library, then set the symbol to this
11582 location in the .plt. This is required to make function
11583 pointers compare as equal between the normal executable and
11584 the shared library. */
11585 if (! info->shared
11586 && !h->def_regular)
11587 {
11588 h->root.u.def.section = s;
11589 h->root.u.def.value = h->plt.offset;
11590
11591 /* Make sure the function is not marked as Thumb, in case
11592 it is the target of an ABS32 relocation, which will
11593 point to the PLT entry. */
11594 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11595 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11596 }
11597
11598 /* Make room for this entry. */
11599 s->size += htab->plt_entry_size;
11600
11601 if (!htab->symbian_p)
11602 {
11603 /* We also need to make an entry in the .got.plt section, which
11604 will be placed in the .got section by the linker script. */
11605 eh->plt_got_offset = htab->sgotplt->size;
11606 htab->sgotplt->size += 4;
11607 }
11608
11609 /* We also need to make an entry in the .rel(a).plt section. */
11610 htab->srelplt->size += RELOC_SIZE (htab);
11611
11612 /* VxWorks executables have a second set of relocations for
11613 each PLT entry. They go in a separate relocation section,
11614 which is processed by the kernel loader. */
11615 if (htab->vxworks_p && !info->shared)
11616 {
11617 /* There is a relocation for the initial PLT entry:
11618 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11619 if (h->plt.offset == htab->plt_header_size)
11620 htab->srelplt2->size += RELOC_SIZE (htab);
11621
11622 /* There are two extra relocations for each subsequent
11623 PLT entry: an R_ARM_32 relocation for the GOT entry,
11624 and an R_ARM_32 relocation for the PLT entry. */
11625 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11626 }
11627 }
11628 else
11629 {
11630 h->plt.offset = (bfd_vma) -1;
11631 h->needs_plt = 0;
11632 }
11633 }
11634 else
11635 {
11636 h->plt.offset = (bfd_vma) -1;
11637 h->needs_plt = 0;
11638 }
11639
11640 if (h->got.refcount > 0)
11641 {
11642 asection *s;
11643 bfd_boolean dyn;
11644 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11645 int indx;
11646
11647 /* Make sure this symbol is output as a dynamic symbol.
11648 Undefined weak syms won't yet be marked as dynamic. */
11649 if (h->dynindx == -1
11650 && !h->forced_local)
11651 {
11652 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11653 return FALSE;
11654 }
11655
11656 if (!htab->symbian_p)
11657 {
11658 s = htab->sgot;
11659 h->got.offset = s->size;
11660
11661 if (tls_type == GOT_UNKNOWN)
11662 abort ();
11663
11664 if (tls_type == GOT_NORMAL)
11665 /* Non-TLS symbols need one GOT slot. */
11666 s->size += 4;
11667 else
11668 {
11669 if (tls_type & GOT_TLS_GD)
11670 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11671 s->size += 8;
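/* The pair of GD slots holds the module ID and the offset of the
 symbol within that module's TLS block. */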
11672 if (tls_type & GOT_TLS_IE)
11673 /* R_ARM_TLS_IE32 needs one GOT slot. */
11674 s->size += 4;
11675 }
11676
11677 dyn = htab->root.dynamic_sections_created;
11678
11679 indx = 0;
11680 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11681 && (!info->shared
11682 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11683 indx = h->dynindx;
11684
11685 if (tls_type != GOT_NORMAL
11686 && (info->shared || indx != 0)
11687 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11688 || h->root.type != bfd_link_hash_undefweak))
11689 {
11690 if (tls_type & GOT_TLS_IE)
11691 htab->srelgot->size += RELOC_SIZE (htab);
11692
11693 if (tls_type & GOT_TLS_GD)
11694 htab->srelgot->size += RELOC_SIZE (htab);
11695
11696 if ((tls_type & GOT_TLS_GD) && indx != 0)
11697 htab->srelgot->size += RELOC_SIZE (htab);
11698 }
11699 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11700 || h->root.type != bfd_link_hash_undefweak)
11701 && (info->shared
11702 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11703 htab->srelgot->size += RELOC_SIZE (htab);
11704 }
11705 }
11706 else
11707 h->got.offset = (bfd_vma) -1;
11708
11709 /* Allocate stubs for exported Thumb functions on v4t. */
11710 if (!htab->use_blx && h->dynindx != -1
11711 && h->def_regular
11712 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11713 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11714 {
11715 struct elf_link_hash_entry * th;
11716 struct bfd_link_hash_entry * bh;
11717 struct elf_link_hash_entry * myh;
11718 char name[1024];
11719 asection *s;
11720 bh = NULL;
11721 /* Create a new symbol to register the real location of the function. */
11722 s = h->root.u.def.section;
11723 sprintf (name, "__real_%s", h->root.root.string);
11724 _bfd_generic_link_add_one_symbol (info, s->owner,
11725 name, BSF_GLOBAL, s,
11726 h->root.u.def.value,
11727 NULL, TRUE, FALSE, &bh);
11728
11729 myh = (struct elf_link_hash_entry *) bh;
11730 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11731 myh->forced_local = 1;
11732 eh->export_glue = myh;
11733 th = record_arm_to_thumb_glue (info, h);
11734 /* Point the symbol at the stub. */
11735 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11736 h->root.u.def.section = th->root.u.def.section;
11737 h->root.u.def.value = th->root.u.def.value & ~1;
11738 }
11739
11740 if (eh->relocs_copied == NULL)
11741 return TRUE;
11742
11743 /* In the shared -Bsymbolic case, discard space allocated for
11744 dynamic pc-relative relocs against symbols which turn out to be
11745 defined in regular objects. For the normal shared case, discard
11746 space for pc-relative relocs that have become local due to symbol
11747 visibility changes. */
11748
11749 if (info->shared || htab->root.is_relocatable_executable)
11750 {
11751 /* The only relocs that use pc_count are R_ARM_REL32 and
11752 R_ARM_REL32_NOI, which will appear on something like
11753 ".long foo - .". We want calls to protected symbols to resolve
11754 directly to the function rather than going via the plt. If people
11755 want function pointer comparisons to work as expected then they
11756 should avoid writing assembly like ".long foo - .". */
11757 if (SYMBOL_CALLS_LOCAL (info, h))
11758 {
11759 struct elf32_arm_relocs_copied **pp;
11760
11761 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11762 {
11763 p->count -= p->pc_count;
11764 p->pc_count = 0;
11765 if (p->count == 0)
11766 *pp = p->next;
11767 else
11768 pp = &p->next;
11769 }
11770 }
11771
11772 if (htab->vxworks_p)
11773 {
11774 struct elf32_arm_relocs_copied **pp;
11775
11776 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11777 {
11778 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11779 *pp = p->next;
11780 else
11781 pp = &p->next;
11782 }
11783 }
11784
11785 /* Also discard relocs on undefined weak syms with non-default
11786 visibility. */
11787 if (eh->relocs_copied != NULL
11788 && h->root.type == bfd_link_hash_undefweak)
11789 {
11790 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11791 eh->relocs_copied = NULL;
11792
11793 /* Make sure undefined weak symbols are output as a dynamic
11794 symbol in PIEs. */
11795 else if (h->dynindx == -1
11796 && !h->forced_local)
11797 {
11798 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11799 return FALSE;
11800 }
11801 }
11802
11803 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11804 && h->root.type == bfd_link_hash_new)
11805 {
11806 /* Output absolute symbols so that we can create relocations
11807 against them. For normal symbols we output a relocation
11808 against the section that contains them. */
11809 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11810 return FALSE;
11811 }
11812
11813 }
11814 else
11815 {
11816 /* For the non-shared case, discard space for relocs against
11817 symbols which turn out to need copy relocs or are not
11818 dynamic. */
11819
11820 if (!h->non_got_ref
11821 && ((h->def_dynamic
11822 && !h->def_regular)
11823 || (htab->root.dynamic_sections_created
11824 && (h->root.type == bfd_link_hash_undefweak
11825 || h->root.type == bfd_link_hash_undefined))))
11826 {
11827 /* Make sure this symbol is output as a dynamic symbol.
11828 Undefined weak syms won't yet be marked as dynamic. */
11829 if (h->dynindx == -1
11830 && !h->forced_local)
11831 {
11832 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11833 return FALSE;
11834 }
11835
11836 /* If that succeeded, we know we'll be keeping all the
11837 relocs. */
11838 if (h->dynindx != -1)
11839 goto keep;
11840 }
11841
11842 eh->relocs_copied = NULL;
11843
11844 keep: ;
11845 }
11846
11847 /* Finally, allocate space. */
11848 for (p = eh->relocs_copied; p != NULL; p = p->next)
11849 {
11850 asection *sreloc = elf_section_data (p->section)->sreloc;
11851 sreloc->size += p->count * RELOC_SIZE (htab);
11852 }
11853
11854 return TRUE;
11855}
11856
11857/* Find any dynamic relocs that apply to read-only sections. */
11858
11859static bfd_boolean
11860elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11861{
11862 struct elf32_arm_link_hash_entry * eh;
11863 struct elf32_arm_relocs_copied * p;
11864
11865 if (h->root.type == bfd_link_hash_warning)
11866 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11867
11868 eh = (struct elf32_arm_link_hash_entry *) h;
11869 for (p = eh->relocs_copied; p != NULL; p = p->next)
11870 {
11871 asection *s = p->section;
11872
11873 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11874 {
11875 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11876
11877 info->flags |= DF_TEXTREL;
11878
11879 /* Not an error, just cut short the traversal. */
11880 return FALSE;
11881 }
11882 }
11883 return TRUE;
11884}
11885
11886void
11887bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11888 int byteswap_code)
11889{
11890 struct elf32_arm_link_hash_table *globals;
11891
11892 globals = elf32_arm_hash_table (info);
11893 if (globals == NULL)
11894 return;
11895
11896 globals->byteswap_code = byteswap_code;
11897}
11898
11899/* Set the sizes of the dynamic sections. */
11900
11901static bfd_boolean
11902elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11903 struct bfd_link_info * info)
11904{
11905 bfd * dynobj;
11906 asection * s;
11907 bfd_boolean plt;
11908 bfd_boolean relocs;
11909 bfd *ibfd;
11910 struct elf32_arm_link_hash_table *htab;
11911
11912 htab = elf32_arm_hash_table (info);
11913 if (htab == NULL)
11914 return FALSE;
11915
11916 dynobj = elf_hash_table (info)->dynobj;
11917 BFD_ASSERT (dynobj != NULL);
11918 check_use_blx (htab);
11919
11920 if (elf_hash_table (info)->dynamic_sections_created)
11921 {
11922 /* Set the contents of the .interp section to the interpreter. */
11923 if (info->executable)
11924 {
11925 s = bfd_get_section_by_name (dynobj, ".interp");
11926 BFD_ASSERT (s != NULL);
11927 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11928 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11929 }
11930 }
11931
11932 /* Set up .got offsets for local syms, and space for local dynamic
11933 relocs. */
11934 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11935 {
11936 bfd_signed_vma *local_got;
11937 bfd_signed_vma *end_local_got;
11938 char *local_tls_type;
11939 bfd_size_type locsymcount;
11940 Elf_Internal_Shdr *symtab_hdr;
11941 asection *srel;
11942 bfd_boolean is_vxworks = htab->vxworks_p;
11943
11944 if (! is_arm_elf (ibfd))
11945 continue;
11946
11947 for (s = ibfd->sections; s != NULL; s = s->next)
11948 {
11949 struct elf32_arm_relocs_copied *p;
11950
11951 for (p = (struct elf32_arm_relocs_copied *)
11952 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11953 {
11954 if (!bfd_is_abs_section (p->section)
11955 && bfd_is_abs_section (p->section->output_section))
11956 {
11957 /* Input section has been discarded, either because
11958 it is a copy of a linkonce section or due to
11959 linker script /DISCARD/, so we'll be discarding
11960 the relocs too. */
11961 }
11962 else if (is_vxworks
11963 && strcmp (p->section->output_section->name,
11964 ".tls_vars") == 0)
11965 {
11966 /* Relocations in vxworks .tls_vars sections are
11967 handled specially by the loader. */
11968 }
11969 else if (p->count != 0)
11970 {
11971 srel = elf_section_data (p->section)->sreloc;
11972 srel->size += p->count * RELOC_SIZE (htab);
11973 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11974 info->flags |= DF_TEXTREL;
11975 }
11976 }
11977 }
11978
11979 local_got = elf_local_got_refcounts (ibfd);
11980 if (!local_got)
11981 continue;
11982
11983 symtab_hdr = & elf_symtab_hdr (ibfd);
11984 locsymcount = symtab_hdr->sh_info;
11985 end_local_got = local_got + locsymcount;
11986 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11987 s = htab->sgot;
11988 srel = htab->srelgot;
11989 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11990 {
11991 if (*local_got > 0)
11992 {
11993 *local_got = s->size;
11994 if (*local_tls_type & GOT_TLS_GD)
11995 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11996 s->size += 8;
11997 if (*local_tls_type & GOT_TLS_IE)
11998 s->size += 4;
11999 if (*local_tls_type == GOT_NORMAL)
12000 s->size += 4;
12001
12002 if (info->shared || *local_tls_type == GOT_TLS_GD)
12003 srel->size += RELOC_SIZE (htab);
12004 }
12005 else
12006 *local_got = (bfd_vma) -1;
12007 }
12008 }
12009
12010 if (htab->tls_ldm_got.refcount > 0)
12011 {
12012 /* Allocate two GOT entries and one dynamic relocation (if necessary)
12013 for R_ARM_TLS_LDM32 relocations. */
12014 htab->tls_ldm_got.offset = htab->sgot->size;
12015 htab->sgot->size += 8;
12016 if (info->shared)
12017 htab->srelgot->size += RELOC_SIZE (htab);
12018 }
12019 else
12020 htab->tls_ldm_got.offset = -1;
12021
12022 /* Allocate global sym .plt and .got entries, and space for global
12023 sym dynamic relocs. */
12024 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
12025
12026 /* Here we rummage through the found bfds to collect glue information. */
12027 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
12028 {
12029 if (! is_arm_elf (ibfd))
12030 continue;
12031
12032 /* Initialise mapping tables for code/data. */
12033 bfd_elf32_arm_init_maps (ibfd);
12034
12035 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
12036 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
12037 /* xgettext:c-format */
12038 _bfd_error_handler (_("Errors encountered processing file %s"),
12039 ibfd->filename);
12040 }
12041
12042 /* Allocate space for the glue sections now that we've sized them. */
12043 bfd_elf32_arm_allocate_interworking_sections (info);
12044
12045 /* The check_relocs and adjust_dynamic_symbol entry points have
12046 determined the sizes of the various dynamic sections. Allocate
12047 memory for them. */
12048 plt = FALSE;
12049 relocs = FALSE;
12050 for (s = dynobj->sections; s != NULL; s = s->next)
12051 {
12052 const char * name;
12053
12054 if ((s->flags & SEC_LINKER_CREATED) == 0)
12055 continue;
12056
12057 /* It's OK to base decisions on the section name, because none
12058 of the dynobj section names depend upon the input files. */
12059 name = bfd_get_section_name (dynobj, s);
12060
12061 if (strcmp (name, ".plt") == 0)
12062 {
12063 /* Remember whether there is a PLT. */
12064 plt = s->size != 0;
12065 }
12066 else if (CONST_STRNEQ (name, ".rel"))
12067 {
12068 if (s->size != 0)
12069 {
12070 /* Remember whether there are any reloc sections other
12071 than .rel(a).plt and .rela.plt.unloaded. */
12072 if (s != htab->srelplt && s != htab->srelplt2)
12073 relocs = TRUE;
12074
12075 /* We use the reloc_count field as a counter if we need
12076 to copy relocs into the output file. */
12077 s->reloc_count = 0;
12078 }
12079 }
12080 else if (! CONST_STRNEQ (name, ".got")
12081 && strcmp (name, ".dynbss") != 0)
12082 {
12083 /* It's not one of our sections, so don't allocate space. */
12084 continue;
12085 }
12086
12087 if (s->size == 0)
12088 {
12089 /* If we don't need this section, strip it from the
12090 output file. This is mostly to handle .rel(a).bss and
12091 .rel(a).plt. We must create both sections in
12092 create_dynamic_sections, because they must be created
12093 before the linker maps input sections to output
12094 sections. The linker does that before
12095 adjust_dynamic_symbol is called, and it is that
12096 function which decides whether anything needs to go
12097 into these sections. */
12098 s->flags |= SEC_EXCLUDE;
12099 continue;
12100 }
12101
12102 if ((s->flags & SEC_HAS_CONTENTS) == 0)
12103 continue;
12104
12105 /* Allocate memory for the section contents. */
12106 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
12107 if (s->contents == NULL)
12108 return FALSE;
12109 }
12110
12111 if (elf_hash_table (info)->dynamic_sections_created)
12112 {
12113 /* Add some entries to the .dynamic section. We fill in the
12114 values later, in elf32_arm_finish_dynamic_sections, but we
12115 must add the entries now so that we get the correct size for
12116 the .dynamic section. The DT_DEBUG entry is filled in by the
12117 dynamic linker and used by the debugger. */
12118#define add_dynamic_entry(TAG, VAL) \
12119 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
12120
12121 if (info->executable)
12122 {
12123 if (!add_dynamic_entry (DT_DEBUG, 0))
12124 return FALSE;
12125 }
12126
12127 if (plt)
12128 {
12129 if ( !add_dynamic_entry (DT_PLTGOT, 0)
12130 || !add_dynamic_entry (DT_PLTRELSZ, 0)
12131 || !add_dynamic_entry (DT_PLTREL,
12132 htab->use_rel ? DT_REL : DT_RELA)
12133 || !add_dynamic_entry (DT_JMPREL, 0))
12134 return FALSE;
12135 }
12136
12137 if (relocs)
12138 {
12139 if (htab->use_rel)
12140 {
12141 if (!add_dynamic_entry (DT_REL, 0)
12142 || !add_dynamic_entry (DT_RELSZ, 0)
12143 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
12144 return FALSE;
12145 }
12146 else
12147 {
12148 if (!add_dynamic_entry (DT_RELA, 0)
12149 || !add_dynamic_entry (DT_RELASZ, 0)
12150 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
12151 return FALSE;
12152 }
12153 }
12154
12155 /* If any dynamic relocs apply to a read-only section,
12156 then we need a DT_TEXTREL entry. */
12157 if ((info->flags & DF_TEXTREL) == 0)
12158 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
12159 info);
12160
12161 if ((info->flags & DF_TEXTREL) != 0)
12162 {
12163 if (!add_dynamic_entry (DT_TEXTREL, 0))
12164 return FALSE;
12165 }
12166 if (htab->vxworks_p
12167 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
12168 return FALSE;
12169 }
12170#undef add_dynamic_entry
12171
12172 return TRUE;
12173}
12174
12175/* Finish up dynamic symbol handling. We set the contents of various
12176 dynamic sections here. */
12177
12178static bfd_boolean
12179elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
12180 struct bfd_link_info * info,
12181 struct elf_link_hash_entry * h,
12182 Elf_Internal_Sym * sym)
12183{
12184 bfd * dynobj;
12185 struct elf32_arm_link_hash_table *htab;
12186 struct elf32_arm_link_hash_entry *eh;
12187
12188 dynobj = elf_hash_table (info)->dynobj;
12189 htab = elf32_arm_hash_table (info);
12190 if (htab == NULL)
12191 return FALSE;
12192
12193 eh = (struct elf32_arm_link_hash_entry *) h;
12194
12195 if (h->plt.offset != (bfd_vma) -1)
12196 {
12197 asection * splt;
12198 asection * srel;
12199 bfd_byte *loc;
12200 bfd_vma plt_index;
12201 Elf_Internal_Rela rel;
12202
12203 /* This symbol has an entry in the procedure linkage table. Set
12204 it up. */
12205
12206 BFD_ASSERT (h->dynindx != -1);
12207
12208 splt = bfd_get_section_by_name (dynobj, ".plt");
12209 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
12210 BFD_ASSERT (splt != NULL && srel != NULL);
12211
12212 /* Fill in the entry in the procedure linkage table. */
12213 if (htab->symbian_p)
12214 {
12215 put_arm_insn (htab, output_bfd,
12216 elf32_arm_symbian_plt_entry[0],
12217 splt->contents + h->plt.offset);
12218 bfd_put_32 (output_bfd,
12219 elf32_arm_symbian_plt_entry[1],
12220 splt->contents + h->plt.offset + 4);
12221
12222 /* Fill in the entry in the .rel.plt section. */
12223 rel.r_offset = (splt->output_section->vma
12224 + splt->output_offset
12225 + h->plt.offset + 4);
12226 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12227
12228 /* Get the index in the procedure linkage table which
12229 corresponds to this symbol. This is the index of this symbol
12230 in all the symbols for which we are making plt entries. The
12231 first entry in the procedure linkage table is reserved. */
12232 plt_index = ((h->plt.offset - htab->plt_header_size)
12233 / htab->plt_entry_size);
12234 }
12235 else
12236 {
12237 bfd_vma got_offset, got_address, plt_address;
12238 bfd_vma got_displacement;
12239 asection * sgot;
12240 bfd_byte * ptr;
12241
12242 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12243 BFD_ASSERT (sgot != NULL);
12244
12245 /* Get the offset into the .got.plt table of the entry that
12246 corresponds to this function. */
12247 got_offset = eh->plt_got_offset;
12248
12249 /* Get the index in the procedure linkage table which
12250 corresponds to this symbol. This is the index of this symbol
12251 in all the symbols for which we are making plt entries. The
12252 first three entries in .got.plt are reserved; after that
12253 symbols appear in the same order as in .plt. */
12254 plt_index = (got_offset - 12) / 4;
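/* For example, the first non-reserved slot sits at got_offset 12,
 immediately after the three reserved words, and so maps to
 plt_index 0. */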
12255
12256 /* Calculate the address of the GOT entry. */
12257 got_address = (sgot->output_section->vma
12258 + sgot->output_offset
12259 + got_offset);
12260
12261 /* ...and the address of the PLT entry. */
12262 plt_address = (splt->output_section->vma
12263 + splt->output_offset
12264 + h->plt.offset);
12265
12266 ptr = htab->splt->contents + h->plt.offset;
12267 if (htab->vxworks_p && info->shared)
12268 {
12269 unsigned int i;
12270 bfd_vma val;
12271
12272 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12273 {
12274 val = elf32_arm_vxworks_shared_plt_entry[i];
12275 if (i == 2)
12276 val |= got_address - sgot->output_section->vma;
12277 if (i == 5)
12278 val |= plt_index * RELOC_SIZE (htab);
12279 if (i == 2 || i == 5)
12280 bfd_put_32 (output_bfd, val, ptr);
12281 else
12282 put_arm_insn (htab, output_bfd, val, ptr);
12283 }
12284 }
12285 else if (htab->vxworks_p)
12286 {
12287 unsigned int i;
12288 bfd_vma val;
12289
12290 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12291 {
12292 val = elf32_arm_vxworks_exec_plt_entry[i];
12293 if (i == 2)
12294 val |= got_address;
12295 if (i == 4)
12296 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
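/* The masked value is the 24-bit immediate of a branch back to the
 start of .plt: the distance from this word's pc (its address
 plus 8), negated and converted from bytes to words. */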
12297 if (i == 5)
12298 val |= plt_index * RELOC_SIZE (htab);
12299 if (i == 2 || i == 5)
12300 bfd_put_32 (output_bfd, val, ptr);
12301 else
12302 put_arm_insn (htab, output_bfd, val, ptr);
12303 }
12304
12305 loc = (htab->srelplt2->contents
12306 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12307
12308 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12309 referencing the GOT for this PLT entry. */
12310 rel.r_offset = plt_address + 8;
12311 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12312 rel.r_addend = got_offset;
12313 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12314 loc += RELOC_SIZE (htab);
12315
12316 /* Create the R_ARM_ABS32 relocation referencing the
12317 beginning of the PLT for this GOT entry. */
12318 rel.r_offset = got_address;
12319 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12320 rel.r_addend = 0;
12321 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12322 }
12323 else
12324 {
12325 bfd_signed_vma thumb_refs;
12326 /* Calculate the displacement between the PLT slot and the
12327 entry in the GOT. The eight-byte offset accounts for the
12328 value produced by adding to pc in the first instruction
12329 of the PLT stub. */
12330 got_displacement = got_address - (plt_address + 8);
12331
12332 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
12333
12334 thumb_refs = eh->plt_thumb_refcount;
12335 if (!htab->use_blx)
12336 thumb_refs += eh->plt_maybe_thumb_refcount;
12337
12338 if (thumb_refs > 0)
12339 {
12340 put_thumb_insn (htab, output_bfd,
12341 elf32_arm_plt_thumb_stub[0], ptr - 4);
12342 put_thumb_insn (htab, output_bfd,
12343 elf32_arm_plt_thumb_stub[1], ptr - 2);
12344 }
12345
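/* In the default three-word layout the entry written below is roughly:
 add ip, pc, #(G & 0x0ff00000)
 add ip, ip, #(G & 0x000ff000)
 ldr pc, [ip, #(G & 0x00000fff)]!
 where G is got_displacement, split across the three immediate
 fields (FOUR_WORD_PLT appends a fourth word). */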
12346 put_arm_insn (htab, output_bfd,
12347 elf32_arm_plt_entry[0]
12348 | ((got_displacement & 0x0ff00000) >> 20),
12349 ptr + 0);
12350 put_arm_insn (htab, output_bfd,
12351 elf32_arm_plt_entry[1]
12352 | ((got_displacement & 0x000ff000) >> 12),
12353 ptr + 4);
12354 put_arm_insn (htab, output_bfd,
12355 elf32_arm_plt_entry[2]
12356 | (got_displacement & 0x00000fff),
12357 ptr + 8);
12358#ifdef FOUR_WORD_PLT
12359 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12360#endif
12361 }
12362
12363 /* Fill in the entry in the global offset table. */
12364 bfd_put_32 (output_bfd,
12365 (splt->output_section->vma
12366 + splt->output_offset),
12367 sgot->contents + got_offset);
12368
12369 /* Fill in the entry in the .rel(a).plt section. */
12370 rel.r_addend = 0;
12371 rel.r_offset = got_address;
12372 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12373 }
12374
12375 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12376 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12377
12378 if (!h->def_regular)
12379 {
12380 /* Mark the symbol as undefined, rather than as defined in
12381 the .plt section. Leave the value alone. */
12382 sym->st_shndx = SHN_UNDEF;
12383 /* If the symbol is weak, we do need to clear the value.
12384 Otherwise, the PLT entry would provide a definition for
12385 the symbol even if the symbol wasn't defined anywhere,
12386 and so the symbol would never be NULL. */
12387 if (!h->ref_regular_nonweak)
12388 sym->st_value = 0;
12389 }
12390 }
12391
12392 if (h->got.offset != (bfd_vma) -1
12393 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12394 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12395 {
12396 asection * sgot;
12397 asection * srel;
12398 Elf_Internal_Rela rel;
12399 bfd_byte *loc;
12400 bfd_vma offset;
12401
12402 /* This symbol has an entry in the global offset table. Set it
12403 up. */
12404 sgot = bfd_get_section_by_name (dynobj, ".got");
12405 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12406 BFD_ASSERT (sgot != NULL && srel != NULL);
12407
12408 offset = (h->got.offset & ~(bfd_vma) 1);
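/* Bit 0 of got.offset is used by relocate_section to record that the
 GOT entry has already been initialized, so mask it off to recover
 the real offset. */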
12409 rel.r_addend = 0;
12410 rel.r_offset = (sgot->output_section->vma
12411 + sgot->output_offset
12412 + offset);
12413
12414 /* If this is a static link, or it is a -Bsymbolic link and the
12415 symbol is defined locally or was forced to be local because
12416 of a version file, we just want to emit a RELATIVE reloc.
12417 The entry in the global offset table will already have been
12418 initialized in the relocate_section function. */
12419 if (info->shared
12420 && SYMBOL_REFERENCES_LOCAL (info, h))
12421 {
12422 BFD_ASSERT ((h->got.offset & 1) != 0);
12423 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12424 if (!htab->use_rel)
12425 {
12426 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12427 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12428 }
12429 }
12430 else
12431 {
12432 BFD_ASSERT ((h->got.offset & 1) == 0);
12433 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12434 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12435 }
12436
12437 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12438 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12439 }
12440
12441 if (h->needs_copy)
12442 {
12443 asection * s;
12444 Elf_Internal_Rela rel;
12445 bfd_byte *loc;
12446
12447 /* This symbol needs a copy reloc. Set it up. */
12448 BFD_ASSERT (h->dynindx != -1
12449 && (h->root.type == bfd_link_hash_defined
12450 || h->root.type == bfd_link_hash_defweak));
12451
12452 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12453 RELOC_SECTION (htab, ".bss"));
12454 BFD_ASSERT (s != NULL);
12455
12456 rel.r_addend = 0;
12457 rel.r_offset = (h->root.u.def.value
12458 + h->root.u.def.section->output_section->vma
12459 + h->root.u.def.section->output_offset);
12460 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12461 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12462 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12463 }
12464
12465 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12466 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12467 to the ".got" section. */
12468 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12469 || (!htab->vxworks_p && h == htab->root.hgot))
12470 sym->st_shndx = SHN_ABS;
12471
12472 return TRUE;
12473}
12474
12475/* Finish up the dynamic sections. */
12476
12477static bfd_boolean
12478elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12479{
12480 bfd * dynobj;
12481 asection * sgot;
12482 asection * sdyn;
12483 struct elf32_arm_link_hash_table *htab;
12484
12485 htab = elf32_arm_hash_table (info);
12486 if (htab == NULL)
12487 return FALSE;
12488
12489 dynobj = elf_hash_table (info)->dynobj;
12490
12491 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12492 BFD_ASSERT (htab->symbian_p || sgot != NULL);
12493 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12494
12495 if (elf_hash_table (info)->dynamic_sections_created)
12496 {
12497 asection *splt;
12498 Elf32_External_Dyn *dyncon, *dynconend;
12499
12500 splt = bfd_get_section_by_name (dynobj, ".plt");
12501 BFD_ASSERT (splt != NULL && sdyn != NULL);
12502
12503 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12504 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12505
12506 for (; dyncon < dynconend; dyncon++)
12507 {
12508 Elf_Internal_Dyn dyn;
12509 const char * name;
12510 asection * s;
12511
12512 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12513
12514 switch (dyn.d_tag)
12515 {
12516 unsigned int type;
12517
12518 default:
12519 if (htab->vxworks_p
12520 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12521 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12522 break;
12523
12524 case DT_HASH:
12525 name = ".hash";
12526 goto get_vma_if_bpabi;
12527 case DT_STRTAB:
12528 name = ".dynstr";
12529 goto get_vma_if_bpabi;
12530 case DT_SYMTAB:
12531 name = ".dynsym";
12532 goto get_vma_if_bpabi;
12533 case DT_VERSYM:
12534 name = ".gnu.version";
12535 goto get_vma_if_bpabi;
12536 case DT_VERDEF:
12537 name = ".gnu.version_d";
12538 goto get_vma_if_bpabi;
12539 case DT_VERNEED:
12540 name = ".gnu.version_r";
12541 goto get_vma_if_bpabi;
12542
12543 case DT_PLTGOT:
12544 name = ".got";
12545 goto get_vma;
12546 case DT_JMPREL:
12547 name = RELOC_SECTION (htab, ".plt");
12548 get_vma:
12549 s = bfd_get_section_by_name (output_bfd, name);
12550 BFD_ASSERT (s != NULL);
12551 if (!htab->symbian_p)
12552 dyn.d_un.d_ptr = s->vma;
12553 else
12554 /* In the BPABI, tags in the PT_DYNAMIC section point
12555 at the file offset, not the memory address, for the
12556 convenience of the post linker. */
12557 dyn.d_un.d_ptr = s->filepos;
12558 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12559 break;
12560
12561 get_vma_if_bpabi:
12562 if (htab->symbian_p)
12563 goto get_vma;
12564 break;
12565
12566 case DT_PLTRELSZ:
12567 s = bfd_get_section_by_name (output_bfd,
12568 RELOC_SECTION (htab, ".plt"));
12569 BFD_ASSERT (s != NULL);
12570 dyn.d_un.d_val = s->size;
12571 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12572 break;
12573
12574 case DT_RELSZ:
12575 case DT_RELASZ:
12576 if (!htab->symbian_p)
12577 {
12578 /* My reading of the SVR4 ABI indicates that the
12579 procedure linkage table relocs (DT_JMPREL) should be
12580 included in the overall relocs (DT_REL). This is
12581 what Solaris does. However, UnixWare can not handle
12582 that case. Therefore, we override the DT_RELSZ entry
12583 here to make it not include the JMPREL relocs. Since
12584 the linker script arranges for .rel(a).plt to follow all
12585 other relocation sections, we don't have to worry
12586 about changing the DT_REL entry. */
12587 s = bfd_get_section_by_name (output_bfd,
12588 RELOC_SECTION (htab, ".plt"));
12589 if (s != NULL)
12590 dyn.d_un.d_val -= s->size;
12591 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12592 break;
12593 }
12594 /* Fall through. */
12595
12596 case DT_REL:
12597 case DT_RELA:
12598 /* In the BPABI, the DT_REL tag must point at the file
12599 offset, not the VMA, of the first relocation
12600 section. So, we use code similar to that in
12601 elflink.c, but do not check for SHF_ALLOC on the
12602 relocation section, since relocation sections are
12603 never allocated under the BPABI. The comments above
12604 about UnixWare notwithstanding, we include all of the
12605 relocations here. */
12606 if (htab->symbian_p)
12607 {
12608 unsigned int i;
12609 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12610 ? SHT_REL : SHT_RELA);
12611 dyn.d_un.d_val = 0;
12612 for (i = 1; i < elf_numsections (output_bfd); i++)
12613 {
12614 Elf_Internal_Shdr *hdr
12615 = elf_elfsections (output_bfd)[i];
12616 if (hdr->sh_type == type)
12617 {
12618 if (dyn.d_tag == DT_RELSZ
12619 || dyn.d_tag == DT_RELASZ)
12620 dyn.d_un.d_val += hdr->sh_size;
12621 else if ((ufile_ptr) hdr->sh_offset
12622 <= dyn.d_un.d_val - 1)
12623 dyn.d_un.d_val = hdr->sh_offset;
12624 }
12625 }
12626 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12627 }
12628 break;
12629
12630 /* Set the bottom bit of DT_INIT/FINI if the
12631 corresponding function is Thumb. */
12632 case DT_INIT:
12633 name = info->init_function;
12634 goto get_sym;
12635 case DT_FINI:
12636 name = info->fini_function;
12637 get_sym:
12638 /* If it wasn't set by elf_bfd_final_link
12639 then there is nothing to adjust. */
12640 if (dyn.d_un.d_val != 0)
12641 {
12642 struct elf_link_hash_entry * eh;
12643
12644 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12645 FALSE, FALSE, TRUE);
12646 if (eh != NULL
12647 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12648 {
12649 dyn.d_un.d_val |= 1;
12650 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12651 }
12652 }
12653 break;
12654 }
12655 }
12656
12657 /* Fill in the first entry in the procedure linkage table. */
12658 if (splt->size > 0 && htab->plt_header_size)
12659 {
12660 const bfd_vma *plt0_entry;
12661 bfd_vma got_address, plt_address, got_displacement;
12662
12663 /* Calculate the addresses of the GOT and PLT. */
12664 got_address = sgot->output_section->vma + sgot->output_offset;
12665 plt_address = splt->output_section->vma + splt->output_offset;
12666
12667 if (htab->vxworks_p)
12668 {
12669 /* The VxWorks GOT is relocated by the dynamic linker.
12670 Therefore, we must emit relocations rather than simply
12671 computing the values now. */
12672 Elf_Internal_Rela rel;
12673
12674 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12675 put_arm_insn (htab, output_bfd, plt0_entry[0],
12676 splt->contents + 0);
12677 put_arm_insn (htab, output_bfd, plt0_entry[1],
12678 splt->contents + 4);
12679 put_arm_insn (htab, output_bfd, plt0_entry[2],
12680 splt->contents + 8);
12681 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12682
12683 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12684 rel.r_offset = plt_address + 12;
12685 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12686 rel.r_addend = 0;
12687 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12688 htab->srelplt2->contents);
12689 }
12690 else
12691 {
12692 got_displacement = got_address - (plt_address + 16);
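/* In the default PLT0 layout, the 16-byte bias matches the pc value
 seen by the pc-relative add at offset 8 of PLT0 (its address plus
 8), which turns the displacement word stored below into the
 absolute GOT address. */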
12693
12694 plt0_entry = elf32_arm_plt0_entry;
12695 put_arm_insn (htab, output_bfd, plt0_entry[0],
12696 splt->contents + 0);
12697 put_arm_insn (htab, output_bfd, plt0_entry[1],
12698 splt->contents + 4);
12699 put_arm_insn (htab, output_bfd, plt0_entry[2],
12700 splt->contents + 8);
12701 put_arm_insn (htab, output_bfd, plt0_entry[3],
12702 splt->contents + 12);
12703
12704#ifdef FOUR_WORD_PLT
12705 /* The displacement value goes in the otherwise-unused
12706 last word of the second entry. */
12707 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12708#else
12709 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12710#endif
12711 }
12712 }
12713
12714 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12715 really seem like the right value. */
12716 if (splt->output_section->owner == output_bfd)
12717 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12718
12719 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12720 {
12721 /* Correct the .rel(a).plt.unloaded relocations. They will have
12722 incorrect symbol indexes. */
12723 int num_plts;
12724 unsigned char *p;
12725
12726 num_plts = ((htab->splt->size - htab->plt_header_size)
12727 / htab->plt_entry_size);
12728 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12729
12730 for (; num_plts; num_plts--)
12731 {
12732 Elf_Internal_Rela rel;
12733
12734 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12735 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12736 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12737 p += RELOC_SIZE (htab);
12738
12739 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12740 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12741 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12742 p += RELOC_SIZE (htab);
12743 }
12744 }
12745 }
12746
12747 /* Fill in the first three entries in the global offset table. */
12748 if (sgot)
12749 {
12750 if (sgot->size > 0)
12751 {
12752 if (sdyn == NULL)
12753 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12754 else
12755 bfd_put_32 (output_bfd,
12756 sdyn->output_section->vma + sdyn->output_offset,
12757 sgot->contents);
12758 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12759 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12760 }
12761
12762 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12763 }
12764
12765 return TRUE;
12766}
12767
12768static void
12769 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info)
12770{
12771 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12772 struct elf32_arm_link_hash_table *globals;
12773
12774 i_ehdrp = elf_elfheader (abfd);
12775
12776 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12777 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12778 else
12779 i_ehdrp->e_ident[EI_OSABI] = 0;
12780 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12781
12782 if (link_info)
12783 {
12784 globals = elf32_arm_hash_table (link_info);
12785 if (globals != NULL && globals->byteswap_code)
12786 i_ehdrp->e_flags |= EF_ARM_BE8;
12787 }
12788}
12789
12790static enum elf_reloc_type_class
12791elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12792{
12793 switch ((int) ELF32_R_TYPE (rela->r_info))
12794 {
12795 case R_ARM_RELATIVE:
12796 return reloc_class_relative;
12797 case R_ARM_JUMP_SLOT:
12798 return reloc_class_plt;
12799 case R_ARM_COPY:
12800 return reloc_class_copy;
12801 default:
12802 return reloc_class_normal;
12803 }
12804}
12805
12806 /* Set extra section flags for an ARM ELF section: note sections are made link-once so that duplicate notes are merged. */
12807
12808static bfd_boolean
12809elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12810{
12811 if (hdr->sh_type == SHT_NOTE)
12812 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12813
12814 return TRUE;
12815}
12816
12817static void
12818elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12819{
12820 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12821}
12822
12823/* Return TRUE if this is an unwinding table entry. */
12824
12825static bfd_boolean
12826is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12827{
12828 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12829 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12830}
12831
12832
12833/* Set the type and flags for an ARM section. We do this by
12834 the section name, which is a hack, but ought to work. */
12835
12836static bfd_boolean
12837elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12838{
12839 const char * name;
12840
12841 name = bfd_get_section_name (abfd, sec);
12842
12843 if (is_arm_elf_unwind_section_name (abfd, name))
12844 {
12845 hdr->sh_type = SHT_ARM_EXIDX;
12846 hdr->sh_flags |= SHF_LINK_ORDER;
12847 }
12848 return TRUE;
12849}
12850
12851/* Handle an ARM specific section when reading an object file. This is
12852 called when bfd_section_from_shdr finds a section with an unknown
12853 type. */
12854
12855static bfd_boolean
12856elf32_arm_section_from_shdr (bfd *abfd,
12857 Elf_Internal_Shdr * hdr,
12858 const char *name,
12859 int shindex)
12860{
12861 /* There ought to be a place to keep ELF backend specific flags, but
12862 at the moment there isn't one. We just keep track of the
12863 sections by their name, instead. Fortunately, the ABI gives
12864 names for all the ARM specific sections, so we will probably get
12865 away with this. */
12866 switch (hdr->sh_type)
12867 {
12868 case SHT_ARM_EXIDX:
12869 case SHT_ARM_PREEMPTMAP:
12870 case SHT_ARM_ATTRIBUTES:
12871 break;
12872
12873 default:
12874 return FALSE;
12875 }
12876
12877 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12878 return FALSE;
12879
12880 return TRUE;
12881}
12882
12883static _arm_elf_section_data *
12884get_arm_elf_section_data (asection * sec)
12885{
12886 if (sec && sec->owner && is_arm_elf (sec->owner))
12887 return elf32_arm_section_data (sec);
12888 else
12889 return NULL;
12890}
12891
12892typedef struct
12893{
12894 void *finfo;
12895 struct bfd_link_info *info;
12896 asection *sec;
12897 int sec_shndx;
12898 int (*func) (void *, const char *, Elf_Internal_Sym *,
12899 asection *, struct elf_link_hash_entry *);
12900} output_arch_syminfo;
12901
12902enum map_symbol_type
12903{
12904 ARM_MAP_ARM,
12905 ARM_MAP_THUMB,
12906 ARM_MAP_DATA
12907};
12908
12909
12910/* Output a single mapping symbol. */
12911
12912static bfd_boolean
12913elf32_arm_output_map_sym (output_arch_syminfo *osi,
12914 enum map_symbol_type type,
12915 bfd_vma offset)
12916{
12917 static const char *names[3] = {"$a", "$t", "$d"};
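/* names[type][1], the character after the '$', is what gets recorded
 in the section map: 'a' for ARM code, 't' for Thumb code, 'd' for
 data. */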
12918 Elf_Internal_Sym sym;
12919
12920 sym.st_value = osi->sec->output_section->vma
12921 + osi->sec->output_offset
12922 + offset;
12923 sym.st_size = 0;
12924 sym.st_other = 0;
12925 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12926 sym.st_shndx = osi->sec_shndx;
12927 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
12928 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12929}
12930
12931
12932/* Output mapping symbols for PLT entries associated with H. */
12933
12934static bfd_boolean
12935elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12936{
12937 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12938 struct elf32_arm_link_hash_table *htab;
12939 struct elf32_arm_link_hash_entry *eh;
12940 bfd_vma addr;
12941
12942 if (h->root.type == bfd_link_hash_indirect)
12943 return TRUE;
12944
12945 if (h->root.type == bfd_link_hash_warning)
12946 /* When warning symbols are created, they **replace** the "real"
12947 entry in the hash table, thus we never get to see the real
12948 symbol in a hash traversal. So look at it now. */
12949 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12950
12951 if (h->plt.offset == (bfd_vma) -1)
12952 return TRUE;
12953
12954 htab = elf32_arm_hash_table (osi->info);
12955 if (htab == NULL)
12956 return FALSE;
12957
12958 eh = (struct elf32_arm_link_hash_entry *) h;
12959 addr = h->plt.offset;
12960 if (htab->symbian_p)
12961 {
12962 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12963 return FALSE;
12964 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12965 return FALSE;
12966 }
12967 else if (htab->vxworks_p)
12968 {
12969 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12970 return FALSE;
12971 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12972 return FALSE;
12973 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12974 return FALSE;
12975 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12976 return FALSE;
12977 }
12978 else
12979 {
12980 bfd_signed_vma thumb_refs;
12981
12982 thumb_refs = eh->plt_thumb_refcount;
12983 if (!htab->use_blx)
12984 thumb_refs += eh->plt_maybe_thumb_refcount;
12985
12986 if (thumb_refs > 0)
12987 {
12988 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12989 return FALSE;
12990 }
12991#ifdef FOUR_WORD_PLT
12992 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12993 return FALSE;
12994 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12995 return FALSE;
12996#else
12997 /* A three-word PLT with no Thumb thunk contains only ARM code, so we
12998 only need to output a mapping symbol for the first PLT entry and for
12999 entries with Thumb thunks. */
13000 if (thumb_refs > 0 || addr == 20)
13001 {
13002 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
13003 return FALSE;
13004 }
13005#endif
13006 }
13007
13008 return TRUE;
13009}
13010
13011/* Output a single local symbol for a generated stub. */
13012
13013static bfd_boolean
13014elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
13015 bfd_vma offset, bfd_vma size)
13016{
13017 Elf_Internal_Sym sym;
13018
13019 sym.st_value = osi->sec->output_section->vma
13020 + osi->sec->output_offset
13021 + offset;
13022 sym.st_size = size;
13023 sym.st_other = 0;
13024 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13025 sym.st_shndx = osi->sec_shndx;
13026 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
13027}
13028
13029static bfd_boolean
13030arm_map_one_stub (struct bfd_hash_entry * gen_entry,
13031 void * in_arg)
13032{
13033 struct elf32_arm_stub_hash_entry *stub_entry;
13034 asection *stub_sec;
13035 bfd_vma addr;
13036 char *stub_name;
13037 output_arch_syminfo *osi;
13038 const insn_sequence *template_sequence;
13039 enum stub_insn_type prev_type;
13040 int size;
13041 int i;
13042 enum map_symbol_type sym_type;
13043
13044 /* Massage our args to the form they really have. */
13045 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13046 osi = (output_arch_syminfo *) in_arg;
13047
13048 stub_sec = stub_entry->stub_sec;
13049
13050 /* Ensure this stub is attached to the current section being
13051 processed. */
13052 if (stub_sec != osi->sec)
13053 return TRUE;
13054
13055 addr = (bfd_vma) stub_entry->stub_offset;
13056 stub_name = stub_entry->output_name;
13057
13058 template_sequence = stub_entry->stub_template;
13059 switch (template_sequence[0].type)
13060 {
13061 case ARM_TYPE:
13062 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
13063 return FALSE;
13064 break;
13065 case THUMB16_TYPE:
13066 case THUMB32_TYPE:
13067 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
13068 stub_entry->stub_size))
13069 return FALSE;
13070 break;
13071 default:
13072 BFD_FAIL ();
13073 return 0;
13074 }
13075
13076 prev_type = DATA_TYPE;
13077 size = 0;
13078 for (i = 0; i < stub_entry->stub_template_size; i++)
13079 {
13080 switch (template_sequence[i].type)
13081 {
13082 case ARM_TYPE:
13083 sym_type = ARM_MAP_ARM;
13084 break;
13085
13086 case THUMB16_TYPE:
13087 case THUMB32_TYPE:
13088 sym_type = ARM_MAP_THUMB;
13089 break;
13090
13091 case DATA_TYPE:
13092 sym_type = ARM_MAP_DATA;
13093 break;
13094
13095 default:
13096 BFD_FAIL ();
13097 return FALSE;
13098 }
13099
13100 if (template_sequence[i].type != prev_type)
13101 {
13102 prev_type = template_sequence[i].type;
13103 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
13104 return FALSE;
13105 }
13106
13107 switch (template_sequence[i].type)
13108 {
13109 case ARM_TYPE:
13110 case THUMB32_TYPE:
13111 size += 4;
13112 break;
13113
13114 case THUMB16_TYPE:
13115 size += 2;
13116 break;
13117
13118 case DATA_TYPE:
13119 size += 4;
13120 break;
13121
13122 default:
13123 BFD_FAIL ();
13124 return FALSE;
13125 }
13126 }
13127
13128 return TRUE;
13129}
13130
13131 /* Output mapping symbols for linker generated sections,
13132 and for those data-only sections that do not have a $d
13133 mapping symbol. */
13134
13135static bfd_boolean
13136elf32_arm_output_arch_local_syms (bfd *output_bfd,
13137 struct bfd_link_info *info,
13138 void *finfo,
13139 int (*func) (void *, const char *,
13140 Elf_Internal_Sym *,
13141 asection *,
13142 struct elf_link_hash_entry *))
13143{
13144 output_arch_syminfo osi;
13145 struct elf32_arm_link_hash_table *htab;
13146 bfd_vma offset;
13147 bfd_size_type size;
13148 bfd *input_bfd;
13149
13150 htab = elf32_arm_hash_table (info);
13151 if (htab == NULL)
13152 return FALSE;
13153
13154 check_use_blx (htab);
13155
13156 osi.finfo = finfo;
13157 osi.info = info;
13158 osi.func = func;
13159
13160 /* Add a $d mapping symbol to data-only sections that
13161 don't have any mapping symbol. This may result in (harmless) redundant
13162 mapping symbols. */
13163 for (input_bfd = info->input_bfds;
13164 input_bfd != NULL;
13165 input_bfd = input_bfd->link_next)
13166 {
13167 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
13168 for (osi.sec = input_bfd->sections;
13169 osi.sec != NULL;
13170 osi.sec = osi.sec->next)
13171 {
13172 if (osi.sec->output_section != NULL
13173 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
13174 != 0)
13175 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
13176 == SEC_HAS_CONTENTS
13177 && get_arm_elf_section_data (osi.sec) != NULL
13178 && get_arm_elf_section_data (osi.sec)->mapcount == 0
13179 && osi.sec->size > 0)
13180 {
13181 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13182 (output_bfd, osi.sec->output_section);
13183 if (osi.sec_shndx != (int)SHN_BAD)
13184 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
13185 }
13186 }
13187 }
13188
13189 /* ARM->Thumb glue. */
13190 if (htab->arm_glue_size > 0)
13191 {
13192 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13193 ARM2THUMB_GLUE_SECTION_NAME);
13194
13195 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13196 (output_bfd, osi.sec->output_section);
13197 if (info->shared || htab->root.is_relocatable_executable
13198 || htab->pic_veneer)
13199 size = ARM2THUMB_PIC_GLUE_SIZE;
13200 else if (htab->use_blx)
13201 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
13202 else
13203 size = ARM2THUMB_STATIC_GLUE_SIZE;
13204
13205 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13206 {
13207 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13208 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13209 }
13210 }
13211
13212 /* Thumb->ARM glue. */
13213 if (htab->thumb_glue_size > 0)
13214 {
13215 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13216 THUMB2ARM_GLUE_SECTION_NAME);
13217
13218 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13219 (output_bfd, osi.sec->output_section);
13220 size = THUMB2ARM_GLUE_SIZE;
13221
13222 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13223 {
13224 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13225 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13226 }
13227 }
13228
13229 /* ARMv4 BX veneers. */
13230 if (htab->bx_glue_size > 0)
13231 {
13232 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13233 ARM_BX_GLUE_SECTION_NAME);
13234
13235 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13236 (output_bfd, osi.sec->output_section);
13237
13238 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13239 }
13240
13241 /* Long call stubs. */
13242 if (htab->stub_bfd && htab->stub_bfd->sections)
13243 {
13244 asection* stub_sec;
13245
13246 for (stub_sec = htab->stub_bfd->sections;
13247 stub_sec != NULL;
13248 stub_sec = stub_sec->next)
13249 {
13250 /* Ignore non-stub sections. */
13251 if (!strstr (stub_sec->name, STUB_SUFFIX))
13252 continue;
13253
13254 osi.sec = stub_sec;
13255
13256 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13257 (output_bfd, osi.sec->output_section);
13258
13259 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13260 }
13261 }
13262
13263 /* Finally, output mapping symbols for the PLT. */
13264 if (!htab->splt || htab->splt->size == 0)
13265 return TRUE;
13266
13267 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13268 htab->splt->output_section);
13269 osi.sec = htab->splt;
13270 /* Output mapping symbols for the PLT header. SymbianOS does not have a
13271 PLT header. */
13272 if (htab->vxworks_p)
13273 {
13274 /* VxWorks shared libraries have no PLT header. */
13275 if (!info->shared)
13276 {
13277 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13278 return FALSE;
13279 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13280 return FALSE;
13281 }
13282 }
13283 else if (!htab->symbian_p)
13284 {
13285 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13286 return FALSE;
13287#ifndef FOUR_WORD_PLT
13288 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13289 return FALSE;
13290#endif
13291 }
13292
13293 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13294 return TRUE;
13295}
13296
13297/* Allocate target specific section data. */
13298
13299static bfd_boolean
13300elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13301{
13302 if (!sec->used_by_bfd)
13303 {
13304 _arm_elf_section_data *sdata;
13305 bfd_size_type amt = sizeof (*sdata);
13306
13307 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
13308 if (sdata == NULL)
13309 return FALSE;
13310 sec->used_by_bfd = sdata;
13311 }
13312
13313 return _bfd_elf_new_section_hook (abfd, sec);
13314}
13315
13316
13317/* Used to order a list of mapping symbols by address. */
13318
13319static int
13320elf32_arm_compare_mapping (const void * a, const void * b)
13321{
13322 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13323 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13324
13325 if (amap->vma > bmap->vma)
13326 return 1;
13327 else if (amap->vma < bmap->vma)
13328 return -1;
13329 else if (amap->type > bmap->type)
13330 /* Ensure results do not depend on the host qsort for objects with
13331 multiple mapping symbols at the same address by sorting on type
13332 after vma. */
13333 return 1;
13334 else if (amap->type < bmap->type)
13335 return -1;
13336 else
13337 return 0;
13338}
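
/* Editorial sketch (not part of the original source): a minimal,
   hypothetical illustration of how a sorted mapping-symbol array is
   consumed.  The names example_map_entry, example_compare_map and
   example_walk_map_spans are invented for this sketch; the real code uses
   elf32_arm_section_map and elf32_arm_compare_mapping above.  After the
   qsort, entry I covers the half-open range [map[I].vma, map[I+1].vma):
   ARM code for 'a', Thumb code for 't', data for 'd', which is how
   elf32_arm_write_section walks the map when byteswapping code below.  */

struct example_map_entry
{
  bfd_vma vma;
  char type;
};

static int
example_compare_map (const void *a, const void *b)
{
  const struct example_map_entry *am = (const struct example_map_entry *) a;
  const struct example_map_entry *bm = (const struct example_map_entry *) b;

  if (am->vma != bm->vma)
    return am->vma > bm->vma ? 1 : -1;
  /* Sort on type after vma so the result does not depend on the host
     qsort when two mapping symbols share an address.  */
  if (am->type != bm->type)
    return am->type > bm->type ? 1 : -1;
  return 0;
}

static void ATTRIBUTE_UNUSED
example_walk_map_spans (struct example_map_entry *map, size_t count,
			bfd_vma sec_size)
{
  size_t i;

  qsort (map, count, sizeof (*map), example_compare_map);
  for (i = 0; i < count; i++)
    {
      bfd_vma start = map[i].vma;
      bfd_vma end = (i + 1 < count) ? map[i + 1].vma : sec_size;

      /* [start, end) is one homogeneous span of type map[i].type.  */
      (void) start;
      (void) end;
    }
}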
13339
13340/* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13341
13342static unsigned long
13343offset_prel31 (unsigned long addr, bfd_vma offset)
13344{
13345 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13346}
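
/* Editorial sketch (not part of the original source): a worked example of
   the prel31 adjustment performed by offset_prel31 above.  The helper name
   example_offset_prel31_demo is invented for this sketch.  A prel31 field
   keeps a signed 31-bit offset in bits 0-30, while bit 31 carries unrelated
   information and must be preserved.  With addr = 0x80000010 and
   offset = 0x20, the result is 0x80000030: the low 31 bits advance by 0x20
   and bit 31 is untouched.  */

static unsigned long ATTRIBUTE_UNUSED
example_offset_prel31_demo (void)
{
  unsigned long addr = 0x80000010ul;
  unsigned long offset = 0x20ul;
  unsigned long result;

  result = (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
  /* result == 0x80000030ul here.  */
  return result;
}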
13347
13348/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13349 relocations. */
13350
13351static void
13352copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13353{
13354 unsigned long first_word = bfd_get_32 (output_bfd, from);
13355 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13356
13357 /* High bit of first word is supposed to be zero. */
13358 if ((first_word & 0x80000000ul) == 0)
13359 first_word = offset_prel31 (first_word, offset);
13360
13361 /* If the high bit of the second word is clear and its bit pattern is not
13362 0x1 (EXIDX_CANTUNWIND), it is an offset to an .ARM.extab entry. */
13363 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13364 second_word = offset_prel31 (second_word, offset);
13365
13366 bfd_put_32 (output_bfd, first_word, to);
13367 bfd_put_32 (output_bfd, second_word, to + 4);
13368}
13369
13370/* Data for make_branch_to_a8_stub(). */
13371
13372struct a8_branch_to_stub_data {
13373 asection *writing_section;
13374 bfd_byte *contents;
13375};
13376
13377
13378/* Helper to insert branches to Cortex-A8 erratum stubs in the right
13379 places for a particular section. */
13380
13381static bfd_boolean
13382make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13383 void *in_arg)
13384{
13385 struct elf32_arm_stub_hash_entry *stub_entry;
13386 struct a8_branch_to_stub_data *data;
13387 bfd_byte *contents;
13388 unsigned long branch_insn;
13389 bfd_vma veneered_insn_loc, veneer_entry_loc;
13390 bfd_signed_vma branch_offset;
13391 bfd *abfd;
13392 unsigned int target;
13393
13394 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13395 data = (struct a8_branch_to_stub_data *) in_arg;
13396
13397 if (stub_entry->target_section != data->writing_section
13398 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
13399 return TRUE;
13400
13401 contents = data->contents;
13402
13403 veneered_insn_loc = stub_entry->target_section->output_section->vma
13404 + stub_entry->target_section->output_offset
13405 + stub_entry->target_value;
13406
13407 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13408 + stub_entry->stub_sec->output_offset
13409 + stub_entry->stub_offset;
13410
13411 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13412 veneered_insn_loc &= ~3u;
13413
13414 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13415
13416 abfd = stub_entry->target_section->owner;
13417 target = stub_entry->target_value;
13418
13419 /* We attempt to avoid this condition by setting stubs_always_after_branch
13420 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13421 This check is just to be on the safe side... */
13422 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13423 {
13424 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13425 "allocated in unsafe location"), abfd);
13426 return FALSE;
13427 }
13428
13429 switch (stub_entry->stub_type)
13430 {
13431 case arm_stub_a8_veneer_b:
13432 case arm_stub_a8_veneer_b_cond:
13433 branch_insn = 0xf0009000;
13434 goto jump24;
13435
13436 case arm_stub_a8_veneer_blx:
13437 branch_insn = 0xf000e800;
13438 goto jump24;
13439
13440 case arm_stub_a8_veneer_bl:
13441 {
13442 unsigned int i1, j1, i2, j2, s;
13443
13444 branch_insn = 0xf000d000;
13445
13446 jump24:
13447 if (branch_offset < -16777216 || branch_offset > 16777214)
13448 {
13449 /* There's not much we can do apart from complain if this
13450 happens. */
13451 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13452 "of range (input file too large)"), abfd);
13453 return FALSE;
13454 }
13455
13456 /* i1 = not(j1 eor s), so:
13457 not i1 = j1 eor s
13458 j1 = (not i1) eor s. */
13459
13460 branch_insn |= (branch_offset >> 1) & 0x7ff;
13461 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13462 i2 = (branch_offset >> 22) & 1;
13463 i1 = (branch_offset >> 23) & 1;
13464 s = (branch_offset >> 24) & 1;
13465 j1 = (!i1) ^ s;
13466 j2 = (!i2) ^ s;
13467 branch_insn |= j2 << 11;
13468 branch_insn |= j1 << 13;
13469 branch_insn |= s << 26;
13470 }
13471 break;
13472
13473 default:
13474 BFD_FAIL ();
13475 return FALSE;
13476 }
13477
13478 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
13479 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
13480
13481 return TRUE;
13482}
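
/* Editorial sketch (not part of the original source): a self-contained
   mirror of the "jump24" field scattering used by make_branch_to_a8_stub
   above.  The function name example_encode_thumb2_branch24 is invented for
   this sketch.  BASE is one of the opcode skeletons used above (0xf0009000
   for B.W, 0xf000e800 for BLX, 0xf000d000 for BL); the first halfword of
   the instruction lives in bits 16-31 of the returned value and the second
   halfword in bits 0-15, matching how branch_insn is split by the two
   bfd_put_16 calls.  For illustration only, 0 is returned when the offset
   is outside the roughly +/- 16 MB Thumb-2 branch range.  */

static unsigned long ATTRIBUTE_UNUSED
example_encode_thumb2_branch24 (unsigned long base, bfd_signed_vma offset)
{
  unsigned long insn = base;
  unsigned int i1, i2, j1, j2, s;

  if (offset < -16777216 || offset > 16777214)
    return 0;

  insn |= (offset >> 1) & 0x7ff;		/* imm11, second halfword.  */
  insn |= ((offset >> 12) & 0x3ff) << 16;	/* imm10, first halfword.  */
  i2 = (offset >> 22) & 1;
  i1 = (offset >> 23) & 1;
  s = (offset >> 24) & 1;
  /* I1 = NOT (J1 EOR S), hence J1 = (NOT I1) EOR S; likewise for J2.  */
  j1 = (!i1) ^ s;
  j2 = (!i2) ^ s;
  insn |= j2 << 11;				/* J2, second halfword.  */
  insn |= j1 << 13;				/* J1, second halfword.  */
  insn |= s << 26;				/* S, first halfword.  */

  return insn;
}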
13483
13484/* Do code byteswapping. Return FALSE afterwards so that the section is
13485 written out as normal. */
13486
13487static bfd_boolean
13488elf32_arm_write_section (bfd *output_bfd,
13489 struct bfd_link_info *link_info,
13490 asection *sec,
13491 bfd_byte *contents)
13492{
13493 unsigned int mapcount, errcount;
13494 _arm_elf_section_data *arm_data;
13495 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13496 elf32_arm_section_map *map;
13497 elf32_vfp11_erratum_list *errnode;
13498 bfd_vma ptr;
13499 bfd_vma end;
13500 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13501 bfd_byte tmp;
13502 unsigned int i;
13503
13504 if (globals == NULL)
13505 return FALSE;
13506
13507 /* If this section has not been allocated an _arm_elf_section_data
13508 structure then we cannot record anything. */
13509 arm_data = get_arm_elf_section_data (sec);
13510 if (arm_data == NULL)
13511 return FALSE;
13512
13513 mapcount = arm_data->mapcount;
13514 map = arm_data->map;
13515 errcount = arm_data->erratumcount;
13516
13517 if (errcount != 0)
13518 {
13519 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13520
13521 for (errnode = arm_data->erratumlist; errnode != 0;
13522 errnode = errnode->next)
13523 {
13524 bfd_vma target = errnode->vma - offset;
13525
13526 switch (errnode->type)
13527 {
13528 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13529 {
13530 bfd_vma branch_to_veneer;
13531 /* Original condition code of instruction, plus bit mask for
13532 ARM B instruction. */
13533 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13534 | 0x0a000000;
13535
13536 /* The instruction is before the label. */
13537 target -= 4;
13538
13539 /* Above offset included in -4 below. */
13540 branch_to_veneer = errnode->u.b.veneer->vma
13541 - errnode->vma - 4;
13542
13543 if ((signed) branch_to_veneer < -(1 << 25)
13544 || (signed) branch_to_veneer >= (1 << 25))
13545 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13546 "range"), output_bfd);
13547
13548 insn |= (branch_to_veneer >> 2) & 0xffffff;
13549 contents[endianflip ^ target] = insn & 0xff;
13550 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13551 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13552 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13553 }
13554 break;
13555
13556 case VFP11_ERRATUM_ARM_VENEER:
13557 {
13558 bfd_vma branch_from_veneer;
13559 unsigned int insn;
13560
13561 /* Take size of veneer into account. */
13562 branch_from_veneer = errnode->u.v.branch->vma
13563 - errnode->vma - 12;
13564
13565 if ((signed) branch_from_veneer < -(1 << 25)
13566 || (signed) branch_from_veneer >= (1 << 25))
13567 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13568 "range"), output_bfd);
13569
13570 /* Original instruction. */
13571 insn = errnode->u.v.branch->u.b.vfp_insn;
13572 contents[endianflip ^ target] = insn & 0xff;
13573 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13574 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13575 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13576
13577 /* Branch back to insn after original insn. */
13578 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13579 contents[endianflip ^ (target + 4)] = insn & 0xff;
13580 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
13581 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
13582 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
13583 }
13584 break;
13585
13586 default:
13587 abort ();
13588 }
13589 }
13590 }
13591
13592 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13593 {
13594 arm_unwind_table_edit *edit_node
13595 = arm_data->u.exidx.unwind_edit_list;
13596 /* Now, sec->size is the size of the section we will write. The original
13597 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13598 markers) was sec->rawsize. (If we performed no edits, rawsize will
13599 be zero and we should use size instead.) */
13600 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
13601 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13602 unsigned int in_index, out_index;
13603 bfd_vma add_to_offsets = 0;
13604
13605 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13606 {
13607 if (edit_node)
13608 {
13609 unsigned int edit_index = edit_node->index;
13610
13611 if (in_index < edit_index && in_index * 8 < input_size)
13612 {
13613 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13614 contents + in_index * 8, add_to_offsets);
13615 out_index++;
13616 in_index++;
13617 }
13618 else if (in_index == edit_index
13619 || (in_index * 8 >= input_size
13620 && edit_index == UINT_MAX))
13621 {
13622 switch (edit_node->type)
13623 {
13624 case DELETE_EXIDX_ENTRY:
13625 in_index++;
13626 add_to_offsets += 8;
13627 break;
13628
13629 case INSERT_EXIDX_CANTUNWIND_AT_END:
13630 {
13631 asection *text_sec = edit_node->linked_section;
13632 bfd_vma text_offset = text_sec->output_section->vma
13633 + text_sec->output_offset
13634 + text_sec->size;
13635 bfd_vma exidx_offset = offset + out_index * 8;
13636 unsigned long prel31_offset;
13637
13638 /* Note: this is meant to be equivalent to an
13639 R_ARM_PREL31 relocation. These synthetic
13640 EXIDX_CANTUNWIND markers are not relocated by the
13641 usual BFD method. */
13642 prel31_offset = (text_offset - exidx_offset)
13643 & 0x7ffffffful;
13644
13645 /* First address we can't unwind. */
13646 bfd_put_32 (output_bfd, prel31_offset,
13647 &edited_contents[out_index * 8]);
13648
13649 /* Code for EXIDX_CANTUNWIND. */
13650 bfd_put_32 (output_bfd, 0x1,
13651 &edited_contents[out_index * 8 + 4]);
13652
13653 out_index++;
13654 add_to_offsets -= 8;
13655 }
13656 break;
13657 }
13658
13659 edit_node = edit_node->next;
13660 }
13661 }
13662 else
13663 {
13664 /* No more edits, copy remaining entries verbatim. */
13665 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13666 contents + in_index * 8, add_to_offsets);
13667 out_index++;
13668 in_index++;
13669 }
13670 }
13671
13672 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13673 bfd_set_section_contents (output_bfd, sec->output_section,
13674 edited_contents,
13675 (file_ptr) sec->output_offset, sec->size);
13676
13677 return TRUE;
13678 }
13679
13680 /* Fix code to point to Cortex-A8 erratum stubs. */
13681 if (globals->fix_cortex_a8)
13682 {
13683 struct a8_branch_to_stub_data data;
13684
13685 data.writing_section = sec;
13686 data.contents = contents;
13687
13688 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13689 &data);
13690 }
13691
13692 if (mapcount == 0)
13693 return FALSE;
13694
13695 if (globals->byteswap_code)
13696 {
13697 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13698
13699 ptr = map[0].vma;
13700 for (i = 0; i < mapcount; i++)
13701 {
13702 if (i == mapcount - 1)
13703 end = sec->size;
13704 else
13705 end = map[i + 1].vma;
13706
13707 switch (map[i].type)
13708 {
13709 case 'a':
13710 /* Byte swap code words. */
13711 while (ptr + 3 < end)
13712 {
13713 tmp = contents[ptr];
13714 contents[ptr] = contents[ptr + 3];
13715 contents[ptr + 3] = tmp;
13716 tmp = contents[ptr + 1];
13717 contents[ptr + 1] = contents[ptr + 2];
13718 contents[ptr + 2] = tmp;
13719 ptr += 4;
13720 }
13721 break;
13722
13723 case 't':
13724 /* Byte swap code halfwords. */
13725 while (ptr + 1 < end)
13726 {
13727 tmp = contents[ptr];
13728 contents[ptr] = contents[ptr + 1];
13729 contents[ptr + 1] = tmp;
13730 ptr += 2;
13731 }
13732 break;
13733
13734 case 'd':
13735 /* Leave data alone. */
13736 break;
13737 }
13738 ptr = end;
13739 }
13740 }
13741
13742 free (map);
13743 arm_data->mapcount = -1;
13744 arm_data->mapsize = 0;
13745 arm_data->map = NULL;
13746
13747 return FALSE;
13748}
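
/* Editorial sketch (not part of the original source): the two byteswapping
   patterns applied by the 'a' and 't' cases of elf32_arm_write_section
   above, pulled out as stand-alone helpers.  The names
   example_swap_arm_words and example_swap_thumb_halfwords are invented for
   this sketch.  ARM code spans are swapped as 32-bit units, Thumb code
   spans as 16-bit units, and data ('$d') spans are left untouched.  */

static void ATTRIBUTE_UNUSED
example_swap_arm_words (bfd_byte *contents, bfd_vma start, bfd_vma end)
{
  bfd_vma ptr;
  bfd_byte tmp;

  /* Swap each 32-bit ARM code word as four bytes.  */
  for (ptr = start; ptr + 3 < end; ptr += 4)
    {
      tmp = contents[ptr];
      contents[ptr] = contents[ptr + 3];
      contents[ptr + 3] = tmp;
      tmp = contents[ptr + 1];
      contents[ptr + 1] = contents[ptr + 2];
      contents[ptr + 2] = tmp;
    }
}

static void ATTRIBUTE_UNUSED
example_swap_thumb_halfwords (bfd_byte *contents, bfd_vma start, bfd_vma end)
{
  bfd_vma ptr;
  bfd_byte tmp;

  /* Swap each 16-bit Thumb code unit as two bytes.  */
  for (ptr = start; ptr + 1 < end; ptr += 2)
    {
      tmp = contents[ptr];
      contents[ptr] = contents[ptr + 1];
      contents[ptr + 1] = tmp;
    }
}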
13749
13750/* Display STT_ARM_TFUNC symbols as functions. */
13751
13752static void
13753elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13754 asymbol *asym)
13755{
13756 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13757
13758 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13759 elfsym->symbol.flags |= BSF_FUNCTION;
13760}
13761
13762
13763/* Mangle thumb function symbols as we read them in. */
13764
13765static bfd_boolean
13766elf32_arm_swap_symbol_in (bfd * abfd,
13767 const void *psrc,
13768 const void *pshn,
13769 Elf_Internal_Sym *dst)
13770{
13771 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13772 return FALSE;
13773
13774 /* New EABI objects mark thumb function symbols by setting the low bit of
13775 the address. Turn these into STT_ARM_TFUNC. */
13776 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13777 && (dst->st_value & 1))
13778 {
13779 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13780 dst->st_value &= ~(bfd_vma) 1;
13781 }
13782 return TRUE;
13783}
13784
13785
13786/* Mangle thumb function symbols as we write them out. */
13787
13788static void
13789elf32_arm_swap_symbol_out (bfd *abfd,
13790 const Elf_Internal_Sym *src,
13791 void *cdst,
13792 void *shndx)
13793{
13794 Elf_Internal_Sym newsym;
13795
13796 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13797 of the address set, as per the new EABI. We do this unconditionally
13798 because objcopy does not set the elf header flags until after
13799 it writes out the symbol table. */
13800 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13801 {
13802 newsym = *src;
13803 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13804 if (newsym.st_shndx != SHN_UNDEF)
13805 {
13806 /* Do this only for defined symbols. At link time, the static
13807 linker will simulate the work of the dynamic linker by
13808 resolving symbols and will carry over the thumbness of the
13809 symbols it finds to the output symbol table. It's not clear
13810 how this happens, but the thumbness of undefined symbols can
13811 well be different at runtime, and writing '1' for them would
13812 be confusing for users and possibly for the dynamic linker
13813 itself. */
13814 newsym.st_value |= 1;
13815 }
13816
13817 src = &newsym;
13818 }
13819 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13820}
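
/* Editorial sketch (not part of the original source): the two directions of
   the Thumb-bit handling performed by elf32_arm_swap_symbol_in and
   elf32_arm_swap_symbol_out above, shown as a pair of helpers.  The names
   example_thumb_sym_in and example_thumb_sym_out are invented for this
   sketch.  */

static void ATTRIBUTE_UNUSED
example_thumb_sym_in (Elf_Internal_Sym *sym)
{
  /* A Thumb function arrives as STT_FUNC with the low address bit set;
     record it internally as STT_ARM_TFUNC with the bit cleared.  */
  if (ELF_ST_TYPE (sym->st_info) == STT_FUNC && (sym->st_value & 1))
    {
      sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_ARM_TFUNC);
      sym->st_value &= ~(bfd_vma) 1;
    }
}

static void ATTRIBUTE_UNUSED
example_thumb_sym_out (Elf_Internal_Sym *sym)
{
  /* Write STT_ARM_TFUNC back as STT_FUNC; for defined symbols the low
     address bit is set again so consumers see a Thumb address.  */
  if (ELF_ST_TYPE (sym->st_info) == STT_ARM_TFUNC)
    {
      sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
      if (sym->st_shndx != SHN_UNDEF)
	sym->st_value |= 1;
    }
}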
13821
13822/* Add the PT_ARM_EXIDX program header. */
13823
13824static bfd_boolean
13825elf32_arm_modify_segment_map (bfd *abfd,
13826 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13827{
13828 struct elf_segment_map *m;
13829 asection *sec;
13830
13831 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13832 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13833 {
13834 /* If there is already a PT_ARM_EXIDX header, then we do not
13835 want to add another one. This situation arises when running
13836 "strip"; the input binary already has the header. */
13837 m = elf_tdata (abfd)->segment_map;
13838 while (m && m->p_type != PT_ARM_EXIDX)
13839 m = m->next;
13840 if (!m)
13841 {
13842 m = (struct elf_segment_map *)
13843 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13844 if (m == NULL)
13845 return FALSE;
13846 m->p_type = PT_ARM_EXIDX;
13847 m->count = 1;
13848 m->sections[0] = sec;
13849
13850 m->next = elf_tdata (abfd)->segment_map;
13851 elf_tdata (abfd)->segment_map = m;
13852 }
13853 }
13854
13855 return TRUE;
13856}
13857
13858/* We may add a PT_ARM_EXIDX program header. */
13859
13860static int
13861elf32_arm_additional_program_headers (bfd *abfd,
13862 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13863{
13864 asection *sec;
13865
13866 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13867 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13868 return 1;
13869 else
13870 return 0;
13871}
13872
13873/* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13874
13875static bfd_boolean
13876elf32_arm_is_function_type (unsigned int type)
13877{
13878 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13879}
13880
13881/* We use this to override swap_symbol_in and swap_symbol_out. */
13882const struct elf_size_info elf32_arm_size_info =
13883{
13884 sizeof (Elf32_External_Ehdr),
13885 sizeof (Elf32_External_Phdr),
13886 sizeof (Elf32_External_Shdr),
13887 sizeof (Elf32_External_Rel),
13888 sizeof (Elf32_External_Rela),
13889 sizeof (Elf32_External_Sym),
13890 sizeof (Elf32_External_Dyn),
13891 sizeof (Elf_External_Note),
13892 4,
13893 1,
13894 32, 2,
13895 ELFCLASS32, EV_CURRENT,
13896 bfd_elf32_write_out_phdrs,
13897 bfd_elf32_write_shdrs_and_ehdr,
13898 bfd_elf32_checksum_contents,
13899 bfd_elf32_write_relocs,
13900 elf32_arm_swap_symbol_in,
13901 elf32_arm_swap_symbol_out,
13902 bfd_elf32_slurp_reloc_table,
13903 bfd_elf32_slurp_symbol_table,
13904 bfd_elf32_swap_dyn_in,
13905 bfd_elf32_swap_dyn_out,
13906 bfd_elf32_swap_reloc_in,
13907 bfd_elf32_swap_reloc_out,
13908 bfd_elf32_swap_reloca_in,
13909 bfd_elf32_swap_reloca_out
13910};
13911
13912#define ELF_ARCH bfd_arch_arm
13913#define ELF_TARGET_ID ARM_ELF_DATA
13914#define ELF_MACHINE_CODE EM_ARM
13915#ifdef __QNXTARGET__
13916#define ELF_MAXPAGESIZE 0x1000
13917#else
13918#define ELF_MAXPAGESIZE 0x8000
13919#endif
13920#define ELF_MINPAGESIZE 0x1000
13921#define ELF_COMMONPAGESIZE 0x1000
13922
13923#define bfd_elf32_mkobject elf32_arm_mkobject
13924
13925#define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13926#define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13927#define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13928#define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13929#define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13930#define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13931#define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13932#define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13933#define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13934#define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13935#define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13936#define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13937#define bfd_elf32_bfd_final_link elf32_arm_final_link
13938
13939#define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13940#define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13941#define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13942#define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13943#define elf_backend_check_relocs elf32_arm_check_relocs
13944#define elf_backend_relocate_section elf32_arm_relocate_section
13945#define elf_backend_write_section elf32_arm_write_section
13946#define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13947#define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13948#define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13949#define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13950#define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13951#define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13952#define elf_backend_post_process_headers elf32_arm_post_process_headers
13953#define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13954#define elf_backend_object_p elf32_arm_object_p
13955#define elf_backend_section_flags elf32_arm_section_flags
13956#define elf_backend_fake_sections elf32_arm_fake_sections
13957#define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13958#define elf_backend_final_write_processing elf32_arm_final_write_processing
13959#define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13960#define elf_backend_symbol_processing elf32_arm_symbol_processing
13961#define elf_backend_size_info elf32_arm_size_info
13962#define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13963#define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13964#define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13965#define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13966#define elf_backend_is_function_type elf32_arm_is_function_type
13967
13968#define elf_backend_can_refcount 1
13969#define elf_backend_can_gc_sections 1
13970#define elf_backend_plt_readonly 1
13971#define elf_backend_want_got_plt 1
13972#define elf_backend_want_plt_sym 0
13973#define elf_backend_may_use_rel_p 1
13974#define elf_backend_may_use_rela_p 0
13975#define elf_backend_default_use_rela_p 0
13976
13977#define elf_backend_got_header_size 12
13978
13979#undef elf_backend_obj_attrs_vendor
13980#define elf_backend_obj_attrs_vendor "aeabi"
13981#undef elf_backend_obj_attrs_section
13982#define elf_backend_obj_attrs_section ".ARM.attributes"
13983#undef elf_backend_obj_attrs_arg_type
13984#define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13985#undef elf_backend_obj_attrs_section_type
13986#define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13987#define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13988
13989#include "elf32-target.h"
13990
13991/* VxWorks Targets. */
13992
13993#undef TARGET_LITTLE_SYM
13994#define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13995#undef TARGET_LITTLE_NAME
13996#define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13997#undef TARGET_BIG_SYM
13998#define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13999#undef TARGET_BIG_NAME
14000#define TARGET_BIG_NAME "elf32-bigarm-vxworks"
14001
14002/* Like elf32_arm_link_hash_table_create -- but overrides
14003 appropriately for VxWorks. */
14004
14005static struct bfd_link_hash_table *
14006elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
14007{
14008 struct bfd_link_hash_table *ret;
14009
14010 ret = elf32_arm_link_hash_table_create (abfd);
14011 if (ret)
14012 {
14013 struct elf32_arm_link_hash_table *htab
14014 = (struct elf32_arm_link_hash_table *) ret;
14015 htab->use_rel = 0;
14016 htab->vxworks_p = 1;
14017 }
14018 return ret;
14019}
14020
14021static void
14022elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
14023{
14024 elf32_arm_final_write_processing (abfd, linker);
14025 elf_vxworks_final_write_processing (abfd, linker);
14026}
14027
14028#undef elf32_bed
14029#define elf32_bed elf32_arm_vxworks_bed
14030
14031#undef bfd_elf32_bfd_link_hash_table_create
14032#define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
14033#undef elf_backend_add_symbol_hook
14034#define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
14035#undef elf_backend_final_write_processing
14036#define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
14037#undef elf_backend_emit_relocs
14038#define elf_backend_emit_relocs elf_vxworks_emit_relocs
14039
14040#undef elf_backend_may_use_rel_p
14041#define elf_backend_may_use_rel_p 0
14042#undef elf_backend_may_use_rela_p
14043#define elf_backend_may_use_rela_p 1
14044#undef elf_backend_default_use_rela_p
14045#define elf_backend_default_use_rela_p 1
14046#undef elf_backend_want_plt_sym
14047#define elf_backend_want_plt_sym 1
14048#undef ELF_MAXPAGESIZE
14049#define ELF_MAXPAGESIZE 0x1000
14050
14051#include "elf32-target.h"
14052
14053
14054/* Merge backend specific data from an object file to the output
14055 object file when linking. */
14056
14057static bfd_boolean
14058elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
14059{
14060 flagword out_flags;
14061 flagword in_flags;
14062 bfd_boolean flags_compatible = TRUE;
14063 asection *sec;
14064
14065 /* Check if we have the same endianness. */
14066 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
14067 return FALSE;
14068
14069 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
14070 return TRUE;
14071
14072 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
14073 return FALSE;
14074
14075 /* The input BFD must have had its flags initialised. */
14076 /* The following seems bogus to me -- The flags are initialized in
14077 the assembler but I don't think an elf_flags_init field is
14078 written into the object. */
14079 /* BFD_ASSERT (elf_flags_init (ibfd)); */
14080
14081 in_flags = elf_elfheader (ibfd)->e_flags;
14082 out_flags = elf_elfheader (obfd)->e_flags;
14083
14084 /* In theory there is no reason why we couldn't handle this. However
14085 in practice it isn't even close to working and there is no real
14086 reason to want it. */
14087 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
14088 && !(ibfd->flags & DYNAMIC)
14089 && (in_flags & EF_ARM_BE8))
14090 {
14091 _bfd_error_handler (_("error: %B is already in final BE8 format"),
14092 ibfd);
14093 return FALSE;
14094 }
14095
14096 if (!elf_flags_init (obfd))
14097 {
14098 /* If the input is the default architecture and had the default
14099 flags then do not bother setting the flags for the output
14100 architecture; instead allow future merges to do this. If no
14101 future merges ever set these flags then they will retain their
14102 uninitialised values which, surprise surprise, correspond
14103 to the default values. */
14104 if (bfd_get_arch_info (ibfd)->the_default
14105 && elf_elfheader (ibfd)->e_flags == 0)
14106 return TRUE;
14107
14108 elf_flags_init (obfd) = TRUE;
14109 elf_elfheader (obfd)->e_flags = in_flags;
14110
14111 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
14112 && bfd_get_arch_info (obfd)->the_default)
14113 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
14114
14115 return TRUE;
14116 }
14117
14118 /* Determine what should happen if the input ARM architecture
14119 does not match the output ARM architecture. */
14120 if (! bfd_arm_merge_machines (ibfd, obfd))
14121 return FALSE;
14122
14123 /* Identical flags must be compatible. */
14124 if (in_flags == out_flags)
14125 return TRUE;
14126
14127 /* Check to see if the input BFD actually contains any sections. If
14128 not, its flags may not have been initialised either, but it
14129 cannot actually cause any incompatibility. Do not short-circuit
14130 dynamic objects; their section list may be emptied by
14131 elf_link_add_object_symbols.
14132
14133 Also check to see if there are no code sections in the input.
14134 In this case there is no need to check for code specific flags.
14135 XXX - do we need to worry about floating-point format compatibility
14136 in data sections? */
14137 if (!(ibfd->flags & DYNAMIC))
14138 {
14139 bfd_boolean null_input_bfd = TRUE;
14140 bfd_boolean only_data_sections = TRUE;
14141
14142 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
14143 {
14144 /* Ignore synthetic glue sections. */
14145 if (strcmp (sec->name, ".glue_7")
14146 && strcmp (sec->name, ".glue_7t"))
14147 {
14148 if ((bfd_get_section_flags (ibfd, sec)
14149 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14150 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14151 only_data_sections = FALSE;
14152
14153 null_input_bfd = FALSE;
14154 break;
14155 }
14156 }
14157
14158 if (null_input_bfd || only_data_sections)
14159 return TRUE;
14160 }
14161
14162 /* Complain about various flag mismatches. */
14163 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
14164 EF_ARM_EABI_VERSION (out_flags)))
14165 {
14166 _bfd_error_handler
14167 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
14168 ibfd, obfd,
14169 (in_flags & EF_ARM_EABIMASK) >> 24,
14170 (out_flags & EF_ARM_EABIMASK) >> 24);
14171 return FALSE;
14172 }
14173
14174 /* Not sure what needs to be checked for EABI versions >= 1. */
14175 /* VxWorks libraries do not use these flags. */
14176 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
14177 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
14178 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
14179 {
14180 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
14181 {
14182 _bfd_error_handler
14183 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
14184 ibfd, obfd,
14185 in_flags & EF_ARM_APCS_26 ? 26 : 32,
14186 out_flags & EF_ARM_APCS_26 ? 26 : 32);
14187 flags_compatible = FALSE;
14188 }
14189
14190 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
14191 {
14192 if (in_flags & EF_ARM_APCS_FLOAT)
14193 _bfd_error_handler
14194 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
14195 ibfd, obfd);
14196 else
14197 _bfd_error_handler
14198 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
14199 ibfd, obfd);
14200
14201 flags_compatible = FALSE;
14202 }
14203
14204 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
14205 {
14206 if (in_flags & EF_ARM_VFP_FLOAT)
14207 _bfd_error_handler
14208 (_("error: %B uses VFP instructions, whereas %B does not"),
14209 ibfd, obfd);
14210 else
14211 _bfd_error_handler
14212 (_("error: %B uses FPA instructions, whereas %B does not"),
14213 ibfd, obfd);
14214
14215 flags_compatible = FALSE;
14216 }
14217
14218 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
14219 {
14220 if (in_flags & EF_ARM_MAVERICK_FLOAT)
14221 _bfd_error_handler
14222 (_("error: %B uses Maverick instructions, whereas %B does not"),
14223 ibfd, obfd);
14224 else
14225 _bfd_error_handler
14226 (_("error: %B does not use Maverick instructions, whereas %B does"),
14227 ibfd, obfd);
14228
14229 flags_compatible = FALSE;
14230 }
14231
14232#ifdef EF_ARM_SOFT_FLOAT
14233 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
14234 {
14235 /* We can allow interworking between code that is VFP format
14236 layout, and uses either soft float or integer regs for
14237 passing floating point arguments and results. We already
14238 know that the APCS_FLOAT flags match; similarly for VFP
14239 flags. */
14240 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
14241 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
14242 {
14243 if (in_flags & EF_ARM_SOFT_FLOAT)
14244 _bfd_error_handler
14245 (_("error: %B uses software FP, whereas %B uses hardware FP"),
14246 ibfd, obfd);
14247 else
14248 _bfd_error_handler
14249 (_("error: %B uses hardware FP, whereas %B uses software FP"),
14250 ibfd, obfd);
14251
14252 flags_compatible = FALSE;
14253 }
14254 }
14255#endif
14256
14257 /* Interworking mismatch is only a warning. */
14258 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
14259 {
14260 if (in_flags & EF_ARM_INTERWORK)
14261 {
14262 _bfd_error_handler
14263 (_("Warning: %B supports interworking, whereas %B does not"),
14264 ibfd, obfd);
14265 }
14266 else
14267 {
14268 _bfd_error_handler
14269 (_("Warning: %B does not support interworking, whereas %B does"),
14270 ibfd, obfd);
14271 }
14272 }
14273 }
14274
14275 return flags_compatible;
14276}
14277
14278
14279/* Symbian OS Targets. */
14280
14281#undef TARGET_LITTLE_SYM
14282#define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
14283#undef TARGET_LITTLE_NAME
14284#define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
14285#undef TARGET_BIG_SYM
14286#define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
14287#undef TARGET_BIG_NAME
14288#define TARGET_BIG_NAME "elf32-bigarm-symbian"
14289
14290/* Like elf32_arm_link_hash_table_create -- but overrides
14291 appropriately for Symbian OS. */
14292
14293static struct bfd_link_hash_table *
14294elf32_arm_symbian_link_hash_table_create (bfd *abfd)
14295{
14296 struct bfd_link_hash_table *ret;
14297
14298 ret = elf32_arm_link_hash_table_create (abfd);
14299 if (ret)
14300 {
14301 struct elf32_arm_link_hash_table *htab
14302 = (struct elf32_arm_link_hash_table *)ret;
14303 /* There is no PLT header for Symbian OS. */
14304 htab->plt_header_size = 0;
14305 /* The PLT entries are each one instruction and one word. */
14306 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
14307 htab->symbian_p = 1;
14308 /* Symbian uses armv5t or above, so use_blx is always true. */
14309 htab->use_blx = 1;
14310 htab->root.is_relocatable_executable = 1;
14311 }
14312 return ret;
14313}
14314
14315static const struct bfd_elf_special_section
14316elf32_arm_symbian_special_sections[] =
14317{
14318 /* In a BPABI executable, the dynamic linking sections do not go in
14319 the loadable read-only segment. The post-linker may wish to
14320 refer to these sections, but they are not part of the final
14321 program image. */
14322 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
14323 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
14324 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
14325 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
14326 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
14327 /* These sections do not need to be writable as the SymbianOS
14328 postlinker will arrange things so that no dynamic relocation is
14329 required. */
14330 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
14331 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
14332 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
14333 { NULL, 0, 0, 0, 0 }
14334};
14335
14336static void
14337elf32_arm_symbian_begin_write_processing (bfd *abfd,
14338 struct bfd_link_info *link_info)
14339{
14340 /* BPABI objects are never loaded directly by an OS kernel; they are
14341 processed by a postlinker first, into an OS-specific format. If
14342 the D_PAGED bit is set on the file, BFD will align segments on
14343 page boundaries, so that an OS can directly map the file. With
14344 BPABI objects, that just results in wasted space. In addition,
14345 because we clear the D_PAGED bit, map_sections_to_segments will
14346 recognize that the program headers should not be mapped into any
14347 loadable segment. */
14348 abfd->flags &= ~D_PAGED;
14349 elf32_arm_begin_write_processing (abfd, link_info);
14350}
14351
14352static bfd_boolean
14353elf32_arm_symbian_modify_segment_map (bfd *abfd,
14354 struct bfd_link_info *info)
14355{
14356 struct elf_segment_map *m;
14357 asection *dynsec;
14358
14359 /* BPABI shared libraries and executables should have a PT_DYNAMIC
14360 segment. However, because the .dynamic section is not marked
14361 with SEC_LOAD, the generic ELF code will not create such a
14362 segment. */
14363 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
14364 if (dynsec)
14365 {
14366 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
14367 if (m->p_type == PT_DYNAMIC)
14368 break;
14369
14370 if (m == NULL)
14371 {
14372 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
14373 m->next = elf_tdata (abfd)->segment_map;
14374 elf_tdata (abfd)->segment_map = m;
14375 }
14376 }
14377
14378 /* Also call the generic arm routine. */
14379 return elf32_arm_modify_segment_map (abfd, info);
14380}
14381
14382/* Return address for Ith PLT stub in section PLT, for relocation REL
14383 or (bfd_vma) -1 if it should not be included. */
14384
14385static bfd_vma
14386elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
14387 const arelent *rel ATTRIBUTE_UNUSED)
14388{
14389 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
14390}
14391
14392
14393#undef elf32_bed
14394#define elf32_bed elf32_arm_symbian_bed
14395
14396/* The dynamic sections are not allocated on SymbianOS; the postlinker
14397 will process them and then discard them. */
14398#undef ELF_DYNAMIC_SEC_FLAGS
14399#define ELF_DYNAMIC_SEC_FLAGS \
14400 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14401
14402#undef elf_backend_add_symbol_hook
14403#undef elf_backend_emit_relocs
14404
14405#undef bfd_elf32_bfd_link_hash_table_create
14406#define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14407#undef elf_backend_special_sections
14408#define elf_backend_special_sections elf32_arm_symbian_special_sections
14409#undef elf_backend_begin_write_processing
14410#define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14411#undef elf_backend_final_write_processing
14412#define elf_backend_final_write_processing elf32_arm_final_write_processing
14413
14414#undef elf_backend_modify_segment_map
14415#define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14416
14417/* There is no .got section for BPABI objects, and hence no header. */
14418#undef elf_backend_got_header_size
14419#define elf_backend_got_header_size 0
14420
14421/* Similarly, there is no .got.plt section. */
14422#undef elf_backend_want_got_plt
14423#define elf_backend_want_got_plt 0
14424
14425#undef elf_backend_plt_sym_val
14426#define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14427
14428#undef elf_backend_may_use_rel_p
14429#define elf_backend_may_use_rel_p 1
14430#undef elf_backend_may_use_rela_p
14431#define elf_backend_may_use_rela_p 0
14432#undef elf_backend_default_use_rela_p
14433#define elf_backend_default_use_rela_p 0
14434#undef elf_backend_want_plt_sym
14435#define elf_backend_want_plt_sym 0
14436#undef ELF_MAXPAGESIZE
14437#define ELF_MAXPAGESIZE 0x8000
14438
14439#include "elf32-target.h"