1/* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22#include "sysdep.h"
23#include <limits.h>
24
25#include "bfd.h"
26#include "libiberty.h"
27#include "libbfd.h"
28#include "elf-bfd.h"
29#include "elf-vxworks.h"
30#include "elf/arm.h"
31
32/* Return the relocation section associated with NAME. HTAB is the
 33 bfd's elf32_arm_link_hash_table. */
34#define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
36
37/* Return size of a relocation entry. HTAB is the bfd's
 38 elf32_arm_link_hash_table. */
39#define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44/* Return function to swap relocations in. HTAB is the bfd's
 45 elf32_arm_link_hash_table. */
46#define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51/* Return function to swap relocations out. HTAB is the bfd's
 52 elf32_arm_link_hash_table. */
53#define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
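
/* Editorial sketch (not part of the original source): a hedged example of
   how the REL/RELA helpers above are typically combined when emitting a
   dynamic relocation.  The names `htab', `output_bfd', `sreloc', `outrel'
   and `loc' are assumptions made for this illustration only.

     Elf_Internal_Rela outrel;
     asection *sreloc
       = bfd_get_section_by_name (output_bfd, RELOC_SECTION (htab, ".text"));
     bfd_byte *loc
       = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (htab);
     SWAP_RELOC_OUT (htab) (output_bfd, &outrel, loc);

   On REL targets (use_rel set) the addend stays in the section contents, so
   only r_offset and r_info are swapped out; on RELA targets the swap routine
   also writes r_addend.  */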
57
58#define elf_info_to_howto 0
59#define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61#define ARM_ELF_ABI_VERSION 0
62#define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
65 struct bfd_link_info *link_info,
66 asection *sec,
67 bfd_byte *contents);
68
 69/* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
70 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
71 in that slot. */
72
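/* Editorial note (illustrative, not in the original source): since the table
   is indexed directly by relocation type, a lookup such as

     reloc_howto_type *h = &elf32_arm_howto_table_1[R_ARM_PC24];

   returns the entry whose name field is "R_ARM_PC24".  Its fields then drive
   relocation processing: for R_ARM_PC24 the computed PC-relative value is
   shifted right by `rightshift' (2) and inserted under `dst_mask'
   (0x00ffffff) when the branch is patched.  */
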
73static reloc_howto_type elf32_arm_howto_table_1[] =
74{
75 /* No relocation. */
76 HOWTO (R_ARM_NONE, /* type */
77 0, /* rightshift */
78 0, /* size (0 = byte, 1 = short, 2 = long) */
79 0, /* bitsize */
80 FALSE, /* pc_relative */
81 0, /* bitpos */
82 complain_overflow_dont,/* complain_on_overflow */
83 bfd_elf_generic_reloc, /* special_function */
84 "R_ARM_NONE", /* name */
85 FALSE, /* partial_inplace */
86 0, /* src_mask */
87 0, /* dst_mask */
88 FALSE), /* pcrel_offset */
89
90 HOWTO (R_ARM_PC24, /* type */
91 2, /* rightshift */
92 2, /* size (0 = byte, 1 = short, 2 = long) */
93 24, /* bitsize */
94 TRUE, /* pc_relative */
95 0, /* bitpos */
96 complain_overflow_signed,/* complain_on_overflow */
97 bfd_elf_generic_reloc, /* special_function */
98 "R_ARM_PC24", /* name */
99 FALSE, /* partial_inplace */
100 0x00ffffff, /* src_mask */
101 0x00ffffff, /* dst_mask */
102 TRUE), /* pcrel_offset */
103
104 /* 32 bit absolute */
105 HOWTO (R_ARM_ABS32, /* type */
106 0, /* rightshift */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
108 32, /* bitsize */
109 FALSE, /* pc_relative */
110 0, /* bitpos */
111 complain_overflow_bitfield,/* complain_on_overflow */
112 bfd_elf_generic_reloc, /* special_function */
113 "R_ARM_ABS32", /* name */
114 FALSE, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 FALSE), /* pcrel_offset */
118
119 /* standard 32bit pc-relative reloc */
120 HOWTO (R_ARM_REL32, /* type */
121 0, /* rightshift */
122 2, /* size (0 = byte, 1 = short, 2 = long) */
123 32, /* bitsize */
124 TRUE, /* pc_relative */
125 0, /* bitpos */
126 complain_overflow_bitfield,/* complain_on_overflow */
127 bfd_elf_generic_reloc, /* special_function */
128 "R_ARM_REL32", /* name */
129 FALSE, /* partial_inplace */
130 0xffffffff, /* src_mask */
131 0xffffffff, /* dst_mask */
132 TRUE), /* pcrel_offset */
133
 134 /* PC-relative 12-bit LDR/STR offset - R_ARM_LDR_PC_G0 in AAELF */
135 HOWTO (R_ARM_LDR_PC_G0, /* type */
136 0, /* rightshift */
137 0, /* size (0 = byte, 1 = short, 2 = long) */
138 32, /* bitsize */
139 TRUE, /* pc_relative */
140 0, /* bitpos */
141 complain_overflow_dont,/* complain_on_overflow */
142 bfd_elf_generic_reloc, /* special_function */
143 "R_ARM_LDR_PC_G0", /* name */
144 FALSE, /* partial_inplace */
145 0xffffffff, /* src_mask */
146 0xffffffff, /* dst_mask */
147 TRUE), /* pcrel_offset */
148
149 /* 16 bit absolute */
150 HOWTO (R_ARM_ABS16, /* type */
151 0, /* rightshift */
152 1, /* size (0 = byte, 1 = short, 2 = long) */
153 16, /* bitsize */
154 FALSE, /* pc_relative */
155 0, /* bitpos */
156 complain_overflow_bitfield,/* complain_on_overflow */
157 bfd_elf_generic_reloc, /* special_function */
158 "R_ARM_ABS16", /* name */
159 FALSE, /* partial_inplace */
160 0x0000ffff, /* src_mask */
161 0x0000ffff, /* dst_mask */
162 FALSE), /* pcrel_offset */
163
164 /* 12 bit absolute */
165 HOWTO (R_ARM_ABS12, /* type */
166 0, /* rightshift */
167 2, /* size (0 = byte, 1 = short, 2 = long) */
168 12, /* bitsize */
169 FALSE, /* pc_relative */
170 0, /* bitpos */
171 complain_overflow_bitfield,/* complain_on_overflow */
172 bfd_elf_generic_reloc, /* special_function */
173 "R_ARM_ABS12", /* name */
174 FALSE, /* partial_inplace */
175 0x00000fff, /* src_mask */
176 0x00000fff, /* dst_mask */
177 FALSE), /* pcrel_offset */
178
179 HOWTO (R_ARM_THM_ABS5, /* type */
180 6, /* rightshift */
181 1, /* size (0 = byte, 1 = short, 2 = long) */
182 5, /* bitsize */
183 FALSE, /* pc_relative */
184 0, /* bitpos */
185 complain_overflow_bitfield,/* complain_on_overflow */
186 bfd_elf_generic_reloc, /* special_function */
187 "R_ARM_THM_ABS5", /* name */
188 FALSE, /* partial_inplace */
189 0x000007e0, /* src_mask */
190 0x000007e0, /* dst_mask */
191 FALSE), /* pcrel_offset */
192
193 /* 8 bit absolute */
194 HOWTO (R_ARM_ABS8, /* type */
195 0, /* rightshift */
196 0, /* size (0 = byte, 1 = short, 2 = long) */
197 8, /* bitsize */
198 FALSE, /* pc_relative */
199 0, /* bitpos */
200 complain_overflow_bitfield,/* complain_on_overflow */
201 bfd_elf_generic_reloc, /* special_function */
202 "R_ARM_ABS8", /* name */
203 FALSE, /* partial_inplace */
204 0x000000ff, /* src_mask */
205 0x000000ff, /* dst_mask */
206 FALSE), /* pcrel_offset */
207
208 HOWTO (R_ARM_SBREL32, /* type */
209 0, /* rightshift */
210 2, /* size (0 = byte, 1 = short, 2 = long) */
211 32, /* bitsize */
212 FALSE, /* pc_relative */
213 0, /* bitpos */
214 complain_overflow_dont,/* complain_on_overflow */
215 bfd_elf_generic_reloc, /* special_function */
216 "R_ARM_SBREL32", /* name */
217 FALSE, /* partial_inplace */
218 0xffffffff, /* src_mask */
219 0xffffffff, /* dst_mask */
220 FALSE), /* pcrel_offset */
221
222 HOWTO (R_ARM_THM_CALL, /* type */
223 1, /* rightshift */
224 2, /* size (0 = byte, 1 = short, 2 = long) */
225 24, /* bitsize */
226 TRUE, /* pc_relative */
227 0, /* bitpos */
228 complain_overflow_signed,/* complain_on_overflow */
229 bfd_elf_generic_reloc, /* special_function */
230 "R_ARM_THM_CALL", /* name */
231 FALSE, /* partial_inplace */
232 0x07ff07ff, /* src_mask */
233 0x07ff07ff, /* dst_mask */
234 TRUE), /* pcrel_offset */
235
236 HOWTO (R_ARM_THM_PC8, /* type */
237 1, /* rightshift */
238 1, /* size (0 = byte, 1 = short, 2 = long) */
239 8, /* bitsize */
240 TRUE, /* pc_relative */
241 0, /* bitpos */
242 complain_overflow_signed,/* complain_on_overflow */
243 bfd_elf_generic_reloc, /* special_function */
244 "R_ARM_THM_PC8", /* name */
245 FALSE, /* partial_inplace */
246 0x000000ff, /* src_mask */
247 0x000000ff, /* dst_mask */
248 TRUE), /* pcrel_offset */
249
250 HOWTO (R_ARM_BREL_ADJ, /* type */
251 1, /* rightshift */
252 1, /* size (0 = byte, 1 = short, 2 = long) */
253 32, /* bitsize */
254 FALSE, /* pc_relative */
255 0, /* bitpos */
256 complain_overflow_signed,/* complain_on_overflow */
257 bfd_elf_generic_reloc, /* special_function */
258 "R_ARM_BREL_ADJ", /* name */
259 FALSE, /* partial_inplace */
260 0xffffffff, /* src_mask */
261 0xffffffff, /* dst_mask */
262 FALSE), /* pcrel_offset */
263
264 HOWTO (R_ARM_SWI24, /* type */
265 0, /* rightshift */
266 0, /* size (0 = byte, 1 = short, 2 = long) */
267 0, /* bitsize */
268 FALSE, /* pc_relative */
269 0, /* bitpos */
270 complain_overflow_signed,/* complain_on_overflow */
271 bfd_elf_generic_reloc, /* special_function */
272 "R_ARM_SWI24", /* name */
273 FALSE, /* partial_inplace */
274 0x00000000, /* src_mask */
275 0x00000000, /* dst_mask */
276 FALSE), /* pcrel_offset */
277
278 HOWTO (R_ARM_THM_SWI8, /* type */
279 0, /* rightshift */
280 0, /* size (0 = byte, 1 = short, 2 = long) */
281 0, /* bitsize */
282 FALSE, /* pc_relative */
283 0, /* bitpos */
284 complain_overflow_signed,/* complain_on_overflow */
285 bfd_elf_generic_reloc, /* special_function */
286 "R_ARM_SWI8", /* name */
287 FALSE, /* partial_inplace */
288 0x00000000, /* src_mask */
289 0x00000000, /* dst_mask */
290 FALSE), /* pcrel_offset */
291
292 /* BLX instruction for the ARM. */
293 HOWTO (R_ARM_XPC25, /* type */
294 2, /* rightshift */
295 2, /* size (0 = byte, 1 = short, 2 = long) */
296 25, /* bitsize */
297 TRUE, /* pc_relative */
298 0, /* bitpos */
299 complain_overflow_signed,/* complain_on_overflow */
300 bfd_elf_generic_reloc, /* special_function */
301 "R_ARM_XPC25", /* name */
302 FALSE, /* partial_inplace */
303 0x00ffffff, /* src_mask */
304 0x00ffffff, /* dst_mask */
305 TRUE), /* pcrel_offset */
306
307 /* BLX instruction for the Thumb. */
308 HOWTO (R_ARM_THM_XPC22, /* type */
309 2, /* rightshift */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
311 22, /* bitsize */
312 TRUE, /* pc_relative */
313 0, /* bitpos */
314 complain_overflow_signed,/* complain_on_overflow */
315 bfd_elf_generic_reloc, /* special_function */
316 "R_ARM_THM_XPC22", /* name */
317 FALSE, /* partial_inplace */
318 0x07ff07ff, /* src_mask */
319 0x07ff07ff, /* dst_mask */
320 TRUE), /* pcrel_offset */
321
322 /* Dynamic TLS relocations. */
323
324 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
325 0, /* rightshift */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
327 32, /* bitsize */
328 FALSE, /* pc_relative */
329 0, /* bitpos */
330 complain_overflow_bitfield,/* complain_on_overflow */
331 bfd_elf_generic_reloc, /* special_function */
332 "R_ARM_TLS_DTPMOD32", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
337
338 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
339 0, /* rightshift */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
341 32, /* bitsize */
342 FALSE, /* pc_relative */
343 0, /* bitpos */
344 complain_overflow_bitfield,/* complain_on_overflow */
345 bfd_elf_generic_reloc, /* special_function */
346 "R_ARM_TLS_DTPOFF32", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
351
352 HOWTO (R_ARM_TLS_TPOFF32, /* type */
353 0, /* rightshift */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
355 32, /* bitsize */
356 FALSE, /* pc_relative */
357 0, /* bitpos */
358 complain_overflow_bitfield,/* complain_on_overflow */
359 bfd_elf_generic_reloc, /* special_function */
360 "R_ARM_TLS_TPOFF32", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
365
366 /* Relocs used in ARM Linux */
367
368 HOWTO (R_ARM_COPY, /* type */
369 0, /* rightshift */
370 2, /* size (0 = byte, 1 = short, 2 = long) */
371 32, /* bitsize */
372 FALSE, /* pc_relative */
373 0, /* bitpos */
374 complain_overflow_bitfield,/* complain_on_overflow */
375 bfd_elf_generic_reloc, /* special_function */
376 "R_ARM_COPY", /* name */
377 TRUE, /* partial_inplace */
378 0xffffffff, /* src_mask */
379 0xffffffff, /* dst_mask */
380 FALSE), /* pcrel_offset */
381
382 HOWTO (R_ARM_GLOB_DAT, /* type */
383 0, /* rightshift */
384 2, /* size (0 = byte, 1 = short, 2 = long) */
385 32, /* bitsize */
386 FALSE, /* pc_relative */
387 0, /* bitpos */
388 complain_overflow_bitfield,/* complain_on_overflow */
389 bfd_elf_generic_reloc, /* special_function */
390 "R_ARM_GLOB_DAT", /* name */
391 TRUE, /* partial_inplace */
392 0xffffffff, /* src_mask */
393 0xffffffff, /* dst_mask */
394 FALSE), /* pcrel_offset */
395
396 HOWTO (R_ARM_JUMP_SLOT, /* type */
397 0, /* rightshift */
398 2, /* size (0 = byte, 1 = short, 2 = long) */
399 32, /* bitsize */
400 FALSE, /* pc_relative */
401 0, /* bitpos */
402 complain_overflow_bitfield,/* complain_on_overflow */
403 bfd_elf_generic_reloc, /* special_function */
404 "R_ARM_JUMP_SLOT", /* name */
405 TRUE, /* partial_inplace */
406 0xffffffff, /* src_mask */
407 0xffffffff, /* dst_mask */
408 FALSE), /* pcrel_offset */
409
410 HOWTO (R_ARM_RELATIVE, /* type */
411 0, /* rightshift */
412 2, /* size (0 = byte, 1 = short, 2 = long) */
413 32, /* bitsize */
414 FALSE, /* pc_relative */
415 0, /* bitpos */
416 complain_overflow_bitfield,/* complain_on_overflow */
417 bfd_elf_generic_reloc, /* special_function */
418 "R_ARM_RELATIVE", /* name */
419 TRUE, /* partial_inplace */
420 0xffffffff, /* src_mask */
421 0xffffffff, /* dst_mask */
422 FALSE), /* pcrel_offset */
423
424 HOWTO (R_ARM_GOTOFF32, /* type */
425 0, /* rightshift */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
427 32, /* bitsize */
428 FALSE, /* pc_relative */
429 0, /* bitpos */
430 complain_overflow_bitfield,/* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 "R_ARM_GOTOFF32", /* name */
433 TRUE, /* partial_inplace */
434 0xffffffff, /* src_mask */
435 0xffffffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
437
438 HOWTO (R_ARM_GOTPC, /* type */
439 0, /* rightshift */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
441 32, /* bitsize */
442 TRUE, /* pc_relative */
443 0, /* bitpos */
444 complain_overflow_bitfield,/* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 "R_ARM_GOTPC", /* name */
447 TRUE, /* partial_inplace */
448 0xffffffff, /* src_mask */
449 0xffffffff, /* dst_mask */
450 TRUE), /* pcrel_offset */
451
452 HOWTO (R_ARM_GOT32, /* type */
453 0, /* rightshift */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
455 32, /* bitsize */
456 FALSE, /* pc_relative */
457 0, /* bitpos */
458 complain_overflow_bitfield,/* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 "R_ARM_GOT32", /* name */
461 TRUE, /* partial_inplace */
462 0xffffffff, /* src_mask */
463 0xffffffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
465
466 HOWTO (R_ARM_PLT32, /* type */
467 2, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 24, /* bitsize */
470 TRUE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_bitfield,/* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 "R_ARM_PLT32", /* name */
475 FALSE, /* partial_inplace */
476 0x00ffffff, /* src_mask */
477 0x00ffffff, /* dst_mask */
478 TRUE), /* pcrel_offset */
479
480 HOWTO (R_ARM_CALL, /* type */
481 2, /* rightshift */
482 2, /* size (0 = byte, 1 = short, 2 = long) */
483 24, /* bitsize */
484 TRUE, /* pc_relative */
485 0, /* bitpos */
486 complain_overflow_signed,/* complain_on_overflow */
487 bfd_elf_generic_reloc, /* special_function */
488 "R_ARM_CALL", /* name */
489 FALSE, /* partial_inplace */
490 0x00ffffff, /* src_mask */
491 0x00ffffff, /* dst_mask */
492 TRUE), /* pcrel_offset */
493
494 HOWTO (R_ARM_JUMP24, /* type */
495 2, /* rightshift */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
497 24, /* bitsize */
498 TRUE, /* pc_relative */
499 0, /* bitpos */
500 complain_overflow_signed,/* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 "R_ARM_JUMP24", /* name */
503 FALSE, /* partial_inplace */
504 0x00ffffff, /* src_mask */
505 0x00ffffff, /* dst_mask */
506 TRUE), /* pcrel_offset */
507
508 HOWTO (R_ARM_THM_JUMP24, /* type */
509 1, /* rightshift */
510 2, /* size (0 = byte, 1 = short, 2 = long) */
511 24, /* bitsize */
512 TRUE, /* pc_relative */
513 0, /* bitpos */
514 complain_overflow_signed,/* complain_on_overflow */
515 bfd_elf_generic_reloc, /* special_function */
516 "R_ARM_THM_JUMP24", /* name */
517 FALSE, /* partial_inplace */
518 0x07ff2fff, /* src_mask */
519 0x07ff2fff, /* dst_mask */
520 TRUE), /* pcrel_offset */
521
522 HOWTO (R_ARM_BASE_ABS, /* type */
523 0, /* rightshift */
524 2, /* size (0 = byte, 1 = short, 2 = long) */
525 32, /* bitsize */
526 FALSE, /* pc_relative */
527 0, /* bitpos */
528 complain_overflow_dont,/* complain_on_overflow */
529 bfd_elf_generic_reloc, /* special_function */
530 "R_ARM_BASE_ABS", /* name */
531 FALSE, /* partial_inplace */
532 0xffffffff, /* src_mask */
533 0xffffffff, /* dst_mask */
534 FALSE), /* pcrel_offset */
535
536 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
537 0, /* rightshift */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
539 12, /* bitsize */
540 TRUE, /* pc_relative */
541 0, /* bitpos */
542 complain_overflow_dont,/* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 "R_ARM_ALU_PCREL_7_0", /* name */
545 FALSE, /* partial_inplace */
546 0x00000fff, /* src_mask */
547 0x00000fff, /* dst_mask */
548 TRUE), /* pcrel_offset */
549
550 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
551 0, /* rightshift */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
553 12, /* bitsize */
554 TRUE, /* pc_relative */
555 8, /* bitpos */
556 complain_overflow_dont,/* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_ARM_ALU_PCREL_15_8",/* name */
559 FALSE, /* partial_inplace */
560 0x00000fff, /* src_mask */
561 0x00000fff, /* dst_mask */
562 TRUE), /* pcrel_offset */
563
564 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
565 0, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 12, /* bitsize */
568 TRUE, /* pc_relative */
569 16, /* bitpos */
570 complain_overflow_dont,/* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_ARM_ALU_PCREL_23_15",/* name */
573 FALSE, /* partial_inplace */
574 0x00000fff, /* src_mask */
575 0x00000fff, /* dst_mask */
576 TRUE), /* pcrel_offset */
577
578 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
579 0, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 12, /* bitsize */
582 FALSE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_dont,/* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 "R_ARM_LDR_SBREL_11_0",/* name */
587 FALSE, /* partial_inplace */
588 0x00000fff, /* src_mask */
589 0x00000fff, /* dst_mask */
590 FALSE), /* pcrel_offset */
591
592 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
593 0, /* rightshift */
594 2, /* size (0 = byte, 1 = short, 2 = long) */
595 8, /* bitsize */
596 FALSE, /* pc_relative */
597 12, /* bitpos */
598 complain_overflow_dont,/* complain_on_overflow */
599 bfd_elf_generic_reloc, /* special_function */
600 "R_ARM_ALU_SBREL_19_12",/* name */
601 FALSE, /* partial_inplace */
602 0x000ff000, /* src_mask */
603 0x000ff000, /* dst_mask */
604 FALSE), /* pcrel_offset */
605
606 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
607 0, /* rightshift */
608 2, /* size (0 = byte, 1 = short, 2 = long) */
609 8, /* bitsize */
610 FALSE, /* pc_relative */
611 20, /* bitpos */
612 complain_overflow_dont,/* complain_on_overflow */
613 bfd_elf_generic_reloc, /* special_function */
614 "R_ARM_ALU_SBREL_27_20",/* name */
615 FALSE, /* partial_inplace */
616 0x0ff00000, /* src_mask */
617 0x0ff00000, /* dst_mask */
618 FALSE), /* pcrel_offset */
619
620 HOWTO (R_ARM_TARGET1, /* type */
621 0, /* rightshift */
622 2, /* size (0 = byte, 1 = short, 2 = long) */
623 32, /* bitsize */
624 FALSE, /* pc_relative */
625 0, /* bitpos */
626 complain_overflow_dont,/* complain_on_overflow */
627 bfd_elf_generic_reloc, /* special_function */
628 "R_ARM_TARGET1", /* name */
629 FALSE, /* partial_inplace */
630 0xffffffff, /* src_mask */
631 0xffffffff, /* dst_mask */
632 FALSE), /* pcrel_offset */
633
634 HOWTO (R_ARM_ROSEGREL32, /* type */
635 0, /* rightshift */
636 2, /* size (0 = byte, 1 = short, 2 = long) */
637 32, /* bitsize */
638 FALSE, /* pc_relative */
639 0, /* bitpos */
640 complain_overflow_dont,/* complain_on_overflow */
641 bfd_elf_generic_reloc, /* special_function */
642 "R_ARM_ROSEGREL32", /* name */
643 FALSE, /* partial_inplace */
644 0xffffffff, /* src_mask */
645 0xffffffff, /* dst_mask */
646 FALSE), /* pcrel_offset */
647
648 HOWTO (R_ARM_V4BX, /* type */
649 0, /* rightshift */
650 2, /* size (0 = byte, 1 = short, 2 = long) */
651 32, /* bitsize */
652 FALSE, /* pc_relative */
653 0, /* bitpos */
654 complain_overflow_dont,/* complain_on_overflow */
655 bfd_elf_generic_reloc, /* special_function */
656 "R_ARM_V4BX", /* name */
657 FALSE, /* partial_inplace */
658 0xffffffff, /* src_mask */
659 0xffffffff, /* dst_mask */
660 FALSE), /* pcrel_offset */
661
662 HOWTO (R_ARM_TARGET2, /* type */
663 0, /* rightshift */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
665 32, /* bitsize */
666 FALSE, /* pc_relative */
667 0, /* bitpos */
668 complain_overflow_signed,/* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_ARM_TARGET2", /* name */
671 FALSE, /* partial_inplace */
672 0xffffffff, /* src_mask */
673 0xffffffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
675
676 HOWTO (R_ARM_PREL31, /* type */
677 0, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 31, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed,/* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_ARM_PREL31", /* name */
685 FALSE, /* partial_inplace */
686 0x7fffffff, /* src_mask */
687 0x7fffffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
691 0, /* rightshift */
692 2, /* size (0 = byte, 1 = short, 2 = long) */
693 16, /* bitsize */
694 FALSE, /* pc_relative */
695 0, /* bitpos */
696 complain_overflow_dont,/* complain_on_overflow */
697 bfd_elf_generic_reloc, /* special_function */
698 "R_ARM_MOVW_ABS_NC", /* name */
699 FALSE, /* partial_inplace */
700 0x000f0fff, /* src_mask */
701 0x000f0fff, /* dst_mask */
702 FALSE), /* pcrel_offset */
703
704 HOWTO (R_ARM_MOVT_ABS, /* type */
705 0, /* rightshift */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
707 16, /* bitsize */
708 FALSE, /* pc_relative */
709 0, /* bitpos */
710 complain_overflow_bitfield,/* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 "R_ARM_MOVT_ABS", /* name */
713 FALSE, /* partial_inplace */
714 0x000f0fff, /* src_mask */
715 0x000f0fff, /* dst_mask */
716 FALSE), /* pcrel_offset */
717
718 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
719 0, /* rightshift */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
721 16, /* bitsize */
722 TRUE, /* pc_relative */
723 0, /* bitpos */
724 complain_overflow_dont,/* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 "R_ARM_MOVW_PREL_NC", /* name */
727 FALSE, /* partial_inplace */
728 0x000f0fff, /* src_mask */
729 0x000f0fff, /* dst_mask */
730 TRUE), /* pcrel_offset */
731
732 HOWTO (R_ARM_MOVT_PREL, /* type */
733 0, /* rightshift */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
735 16, /* bitsize */
736 TRUE, /* pc_relative */
737 0, /* bitpos */
738 complain_overflow_bitfield,/* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 "R_ARM_MOVT_PREL", /* name */
741 FALSE, /* partial_inplace */
742 0x000f0fff, /* src_mask */
743 0x000f0fff, /* dst_mask */
744 TRUE), /* pcrel_offset */
745
746 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
747 0, /* rightshift */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
749 16, /* bitsize */
750 FALSE, /* pc_relative */
751 0, /* bitpos */
752 complain_overflow_dont,/* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 "R_ARM_THM_MOVW_ABS_NC",/* name */
755 FALSE, /* partial_inplace */
756 0x040f70ff, /* src_mask */
757 0x040f70ff, /* dst_mask */
758 FALSE), /* pcrel_offset */
759
760 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
761 0, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 16, /* bitsize */
764 FALSE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_bitfield,/* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 "R_ARM_THM_MOVT_ABS", /* name */
769 FALSE, /* partial_inplace */
770 0x040f70ff, /* src_mask */
771 0x040f70ff, /* dst_mask */
772 FALSE), /* pcrel_offset */
773
774 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
775 0, /* rightshift */
776 2, /* size (0 = byte, 1 = short, 2 = long) */
777 16, /* bitsize */
778 TRUE, /* pc_relative */
779 0, /* bitpos */
780 complain_overflow_dont,/* complain_on_overflow */
781 bfd_elf_generic_reloc, /* special_function */
782 "R_ARM_THM_MOVW_PREL_NC",/* name */
783 FALSE, /* partial_inplace */
784 0x040f70ff, /* src_mask */
785 0x040f70ff, /* dst_mask */
786 TRUE), /* pcrel_offset */
787
788 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
789 0, /* rightshift */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
791 16, /* bitsize */
792 TRUE, /* pc_relative */
793 0, /* bitpos */
794 complain_overflow_bitfield,/* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 "R_ARM_THM_MOVT_PREL", /* name */
797 FALSE, /* partial_inplace */
798 0x040f70ff, /* src_mask */
799 0x040f70ff, /* dst_mask */
800 TRUE), /* pcrel_offset */
801
802 HOWTO (R_ARM_THM_JUMP19, /* type */
803 1, /* rightshift */
804 2, /* size (0 = byte, 1 = short, 2 = long) */
805 19, /* bitsize */
806 TRUE, /* pc_relative */
807 0, /* bitpos */
808 complain_overflow_signed,/* complain_on_overflow */
809 bfd_elf_generic_reloc, /* special_function */
810 "R_ARM_THM_JUMP19", /* name */
811 FALSE, /* partial_inplace */
812 0x043f2fff, /* src_mask */
813 0x043f2fff, /* dst_mask */
814 TRUE), /* pcrel_offset */
815
816 HOWTO (R_ARM_THM_JUMP6, /* type */
817 1, /* rightshift */
818 1, /* size (0 = byte, 1 = short, 2 = long) */
819 6, /* bitsize */
820 TRUE, /* pc_relative */
821 0, /* bitpos */
822 complain_overflow_unsigned,/* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_ARM_THM_JUMP6", /* name */
825 FALSE, /* partial_inplace */
826 0x02f8, /* src_mask */
827 0x02f8, /* dst_mask */
828 TRUE), /* pcrel_offset */
829
830 /* These are declared as 13-bit signed relocations because we can
 831 address -4095 .. 4095 (base) by altering ADDW to SUBW or vice
832 versa. */
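 /* Editorial illustration (not in the original source): an offset of -8
    from the base would be encoded by rewriting ADDW as SUBW with the
    12-bit magnitude 8, which is how the nominal 12-bit immediate covers
    the signed range -4095 .. +4095 described above.  */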
833 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
834 0, /* rightshift */
835 2, /* size (0 = byte, 1 = short, 2 = long) */
836 13, /* bitsize */
837 TRUE, /* pc_relative */
838 0, /* bitpos */
839 complain_overflow_dont,/* complain_on_overflow */
840 bfd_elf_generic_reloc, /* special_function */
841 "R_ARM_THM_ALU_PREL_11_0",/* name */
842 FALSE, /* partial_inplace */
843 0xffffffff, /* src_mask */
844 0xffffffff, /* dst_mask */
845 TRUE), /* pcrel_offset */
846
847 HOWTO (R_ARM_THM_PC12, /* type */
848 0, /* rightshift */
849 2, /* size (0 = byte, 1 = short, 2 = long) */
850 13, /* bitsize */
851 TRUE, /* pc_relative */
852 0, /* bitpos */
853 complain_overflow_dont,/* complain_on_overflow */
854 bfd_elf_generic_reloc, /* special_function */
855 "R_ARM_THM_PC12", /* name */
856 FALSE, /* partial_inplace */
857 0xffffffff, /* src_mask */
858 0xffffffff, /* dst_mask */
859 TRUE), /* pcrel_offset */
860
861 HOWTO (R_ARM_ABS32_NOI, /* type */
862 0, /* rightshift */
863 2, /* size (0 = byte, 1 = short, 2 = long) */
864 32, /* bitsize */
865 FALSE, /* pc_relative */
866 0, /* bitpos */
867 complain_overflow_dont,/* complain_on_overflow */
868 bfd_elf_generic_reloc, /* special_function */
869 "R_ARM_ABS32_NOI", /* name */
870 FALSE, /* partial_inplace */
871 0xffffffff, /* src_mask */
872 0xffffffff, /* dst_mask */
873 FALSE), /* pcrel_offset */
874
875 HOWTO (R_ARM_REL32_NOI, /* type */
876 0, /* rightshift */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
878 32, /* bitsize */
879 TRUE, /* pc_relative */
880 0, /* bitpos */
881 complain_overflow_dont,/* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_ARM_REL32_NOI", /* name */
884 FALSE, /* partial_inplace */
885 0xffffffff, /* src_mask */
886 0xffffffff, /* dst_mask */
887 FALSE), /* pcrel_offset */
888
889 /* Group relocations. */
890
891 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
892 0, /* rightshift */
893 2, /* size (0 = byte, 1 = short, 2 = long) */
894 32, /* bitsize */
895 TRUE, /* pc_relative */
896 0, /* bitpos */
897 complain_overflow_dont,/* complain_on_overflow */
898 bfd_elf_generic_reloc, /* special_function */
899 "R_ARM_ALU_PC_G0_NC", /* name */
900 FALSE, /* partial_inplace */
901 0xffffffff, /* src_mask */
902 0xffffffff, /* dst_mask */
903 TRUE), /* pcrel_offset */
904
905 HOWTO (R_ARM_ALU_PC_G0, /* type */
906 0, /* rightshift */
907 2, /* size (0 = byte, 1 = short, 2 = long) */
908 32, /* bitsize */
909 TRUE, /* pc_relative */
910 0, /* bitpos */
911 complain_overflow_dont,/* complain_on_overflow */
912 bfd_elf_generic_reloc, /* special_function */
913 "R_ARM_ALU_PC_G0", /* name */
914 FALSE, /* partial_inplace */
915 0xffffffff, /* src_mask */
916 0xffffffff, /* dst_mask */
917 TRUE), /* pcrel_offset */
918
919 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
920 0, /* rightshift */
921 2, /* size (0 = byte, 1 = short, 2 = long) */
922 32, /* bitsize */
923 TRUE, /* pc_relative */
924 0, /* bitpos */
925 complain_overflow_dont,/* complain_on_overflow */
926 bfd_elf_generic_reloc, /* special_function */
927 "R_ARM_ALU_PC_G1_NC", /* name */
928 FALSE, /* partial_inplace */
929 0xffffffff, /* src_mask */
930 0xffffffff, /* dst_mask */
931 TRUE), /* pcrel_offset */
932
933 HOWTO (R_ARM_ALU_PC_G1, /* type */
934 0, /* rightshift */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
936 32, /* bitsize */
937 TRUE, /* pc_relative */
938 0, /* bitpos */
939 complain_overflow_dont,/* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_ARM_ALU_PC_G1", /* name */
942 FALSE, /* partial_inplace */
943 0xffffffff, /* src_mask */
944 0xffffffff, /* dst_mask */
945 TRUE), /* pcrel_offset */
946
947 HOWTO (R_ARM_ALU_PC_G2, /* type */
948 0, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 32, /* bitsize */
951 TRUE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont,/* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_ARM_ALU_PC_G2", /* name */
956 FALSE, /* partial_inplace */
957 0xffffffff, /* src_mask */
958 0xffffffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
960
961 HOWTO (R_ARM_LDR_PC_G1, /* type */
962 0, /* rightshift */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
964 32, /* bitsize */
965 TRUE, /* pc_relative */
966 0, /* bitpos */
967 complain_overflow_dont,/* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 "R_ARM_LDR_PC_G1", /* name */
970 FALSE, /* partial_inplace */
971 0xffffffff, /* src_mask */
972 0xffffffff, /* dst_mask */
973 TRUE), /* pcrel_offset */
974
975 HOWTO (R_ARM_LDR_PC_G2, /* type */
976 0, /* rightshift */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
978 32, /* bitsize */
979 TRUE, /* pc_relative */
980 0, /* bitpos */
981 complain_overflow_dont,/* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 "R_ARM_LDR_PC_G2", /* name */
984 FALSE, /* partial_inplace */
985 0xffffffff, /* src_mask */
986 0xffffffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
988
989 HOWTO (R_ARM_LDRS_PC_G0, /* type */
990 0, /* rightshift */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
992 32, /* bitsize */
993 TRUE, /* pc_relative */
994 0, /* bitpos */
995 complain_overflow_dont,/* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 "R_ARM_LDRS_PC_G0", /* name */
998 FALSE, /* partial_inplace */
999 0xffffffff, /* src_mask */
1000 0xffffffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1002
1003 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1004 0, /* rightshift */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1006 32, /* bitsize */
1007 TRUE, /* pc_relative */
1008 0, /* bitpos */
1009 complain_overflow_dont,/* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 "R_ARM_LDRS_PC_G1", /* name */
1012 FALSE, /* partial_inplace */
1013 0xffffffff, /* src_mask */
1014 0xffffffff, /* dst_mask */
1015 TRUE), /* pcrel_offset */
1016
1017 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1018 0, /* rightshift */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1020 32, /* bitsize */
1021 TRUE, /* pc_relative */
1022 0, /* bitpos */
1023 complain_overflow_dont,/* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 "R_ARM_LDRS_PC_G2", /* name */
1026 FALSE, /* partial_inplace */
1027 0xffffffff, /* src_mask */
1028 0xffffffff, /* dst_mask */
1029 TRUE), /* pcrel_offset */
1030
1031 HOWTO (R_ARM_LDC_PC_G0, /* type */
1032 0, /* rightshift */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1034 32, /* bitsize */
1035 TRUE, /* pc_relative */
1036 0, /* bitpos */
1037 complain_overflow_dont,/* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 "R_ARM_LDC_PC_G0", /* name */
1040 FALSE, /* partial_inplace */
1041 0xffffffff, /* src_mask */
1042 0xffffffff, /* dst_mask */
1043 TRUE), /* pcrel_offset */
1044
1045 HOWTO (R_ARM_LDC_PC_G1, /* type */
1046 0, /* rightshift */
1047 2, /* size (0 = byte, 1 = short, 2 = long) */
1048 32, /* bitsize */
1049 TRUE, /* pc_relative */
1050 0, /* bitpos */
1051 complain_overflow_dont,/* complain_on_overflow */
1052 bfd_elf_generic_reloc, /* special_function */
1053 "R_ARM_LDC_PC_G1", /* name */
1054 FALSE, /* partial_inplace */
1055 0xffffffff, /* src_mask */
1056 0xffffffff, /* dst_mask */
1057 TRUE), /* pcrel_offset */
1058
1059 HOWTO (R_ARM_LDC_PC_G2, /* type */
1060 0, /* rightshift */
1061 2, /* size (0 = byte, 1 = short, 2 = long) */
1062 32, /* bitsize */
1063 TRUE, /* pc_relative */
1064 0, /* bitpos */
1065 complain_overflow_dont,/* complain_on_overflow */
1066 bfd_elf_generic_reloc, /* special_function */
1067 "R_ARM_LDC_PC_G2", /* name */
1068 FALSE, /* partial_inplace */
1069 0xffffffff, /* src_mask */
1070 0xffffffff, /* dst_mask */
1071 TRUE), /* pcrel_offset */
1072
1073 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1074 0, /* rightshift */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1076 32, /* bitsize */
1077 TRUE, /* pc_relative */
1078 0, /* bitpos */
1079 complain_overflow_dont,/* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 "R_ARM_ALU_SB_G0_NC", /* name */
1082 FALSE, /* partial_inplace */
1083 0xffffffff, /* src_mask */
1084 0xffffffff, /* dst_mask */
1085 TRUE), /* pcrel_offset */
1086
1087 HOWTO (R_ARM_ALU_SB_G0, /* type */
1088 0, /* rightshift */
1089 2, /* size (0 = byte, 1 = short, 2 = long) */
1090 32, /* bitsize */
1091 TRUE, /* pc_relative */
1092 0, /* bitpos */
1093 complain_overflow_dont,/* complain_on_overflow */
1094 bfd_elf_generic_reloc, /* special_function */
1095 "R_ARM_ALU_SB_G0", /* name */
1096 FALSE, /* partial_inplace */
1097 0xffffffff, /* src_mask */
1098 0xffffffff, /* dst_mask */
1099 TRUE), /* pcrel_offset */
1100
1101 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1102 0, /* rightshift */
1103 2, /* size (0 = byte, 1 = short, 2 = long) */
1104 32, /* bitsize */
1105 TRUE, /* pc_relative */
1106 0, /* bitpos */
1107 complain_overflow_dont,/* complain_on_overflow */
1108 bfd_elf_generic_reloc, /* special_function */
1109 "R_ARM_ALU_SB_G1_NC", /* name */
1110 FALSE, /* partial_inplace */
1111 0xffffffff, /* src_mask */
1112 0xffffffff, /* dst_mask */
1113 TRUE), /* pcrel_offset */
1114
1115 HOWTO (R_ARM_ALU_SB_G1, /* type */
1116 0, /* rightshift */
1117 2, /* size (0 = byte, 1 = short, 2 = long) */
1118 32, /* bitsize */
1119 TRUE, /* pc_relative */
1120 0, /* bitpos */
1121 complain_overflow_dont,/* complain_on_overflow */
1122 bfd_elf_generic_reloc, /* special_function */
1123 "R_ARM_ALU_SB_G1", /* name */
1124 FALSE, /* partial_inplace */
1125 0xffffffff, /* src_mask */
1126 0xffffffff, /* dst_mask */
1127 TRUE), /* pcrel_offset */
1128
1129 HOWTO (R_ARM_ALU_SB_G2, /* type */
1130 0, /* rightshift */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1132 32, /* bitsize */
1133 TRUE, /* pc_relative */
1134 0, /* bitpos */
1135 complain_overflow_dont,/* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 "R_ARM_ALU_SB_G2", /* name */
1138 FALSE, /* partial_inplace */
1139 0xffffffff, /* src_mask */
1140 0xffffffff, /* dst_mask */
1141 TRUE), /* pcrel_offset */
1142
1143 HOWTO (R_ARM_LDR_SB_G0, /* type */
1144 0, /* rightshift */
1145 2, /* size (0 = byte, 1 = short, 2 = long) */
1146 32, /* bitsize */
1147 TRUE, /* pc_relative */
1148 0, /* bitpos */
1149 complain_overflow_dont,/* complain_on_overflow */
1150 bfd_elf_generic_reloc, /* special_function */
1151 "R_ARM_LDR_SB_G0", /* name */
1152 FALSE, /* partial_inplace */
1153 0xffffffff, /* src_mask */
1154 0xffffffff, /* dst_mask */
1155 TRUE), /* pcrel_offset */
1156
1157 HOWTO (R_ARM_LDR_SB_G1, /* type */
1158 0, /* rightshift */
1159 2, /* size (0 = byte, 1 = short, 2 = long) */
1160 32, /* bitsize */
1161 TRUE, /* pc_relative */
1162 0, /* bitpos */
1163 complain_overflow_dont,/* complain_on_overflow */
1164 bfd_elf_generic_reloc, /* special_function */
1165 "R_ARM_LDR_SB_G1", /* name */
1166 FALSE, /* partial_inplace */
1167 0xffffffff, /* src_mask */
1168 0xffffffff, /* dst_mask */
1169 TRUE), /* pcrel_offset */
1170
1171 HOWTO (R_ARM_LDR_SB_G2, /* type */
1172 0, /* rightshift */
1173 2, /* size (0 = byte, 1 = short, 2 = long) */
1174 32, /* bitsize */
1175 TRUE, /* pc_relative */
1176 0, /* bitpos */
1177 complain_overflow_dont,/* complain_on_overflow */
1178 bfd_elf_generic_reloc, /* special_function */
1179 "R_ARM_LDR_SB_G2", /* name */
1180 FALSE, /* partial_inplace */
1181 0xffffffff, /* src_mask */
1182 0xffffffff, /* dst_mask */
1183 TRUE), /* pcrel_offset */
1184
1185 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1186 0, /* rightshift */
1187 2, /* size (0 = byte, 1 = short, 2 = long) */
1188 32, /* bitsize */
1189 TRUE, /* pc_relative */
1190 0, /* bitpos */
1191 complain_overflow_dont,/* complain_on_overflow */
1192 bfd_elf_generic_reloc, /* special_function */
1193 "R_ARM_LDRS_SB_G0", /* name */
1194 FALSE, /* partial_inplace */
1195 0xffffffff, /* src_mask */
1196 0xffffffff, /* dst_mask */
1197 TRUE), /* pcrel_offset */
1198
1199 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1200 0, /* rightshift */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1202 32, /* bitsize */
1203 TRUE, /* pc_relative */
1204 0, /* bitpos */
1205 complain_overflow_dont,/* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 "R_ARM_LDRS_SB_G1", /* name */
1208 FALSE, /* partial_inplace */
1209 0xffffffff, /* src_mask */
1210 0xffffffff, /* dst_mask */
1211 TRUE), /* pcrel_offset */
1212
1213 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1214 0, /* rightshift */
1215 2, /* size (0 = byte, 1 = short, 2 = long) */
1216 32, /* bitsize */
1217 TRUE, /* pc_relative */
1218 0, /* bitpos */
1219 complain_overflow_dont,/* complain_on_overflow */
1220 bfd_elf_generic_reloc, /* special_function */
1221 "R_ARM_LDRS_SB_G2", /* name */
1222 FALSE, /* partial_inplace */
1223 0xffffffff, /* src_mask */
1224 0xffffffff, /* dst_mask */
1225 TRUE), /* pcrel_offset */
1226
1227 HOWTO (R_ARM_LDC_SB_G0, /* type */
1228 0, /* rightshift */
1229 2, /* size (0 = byte, 1 = short, 2 = long) */
1230 32, /* bitsize */
1231 TRUE, /* pc_relative */
1232 0, /* bitpos */
1233 complain_overflow_dont,/* complain_on_overflow */
1234 bfd_elf_generic_reloc, /* special_function */
1235 "R_ARM_LDC_SB_G0", /* name */
1236 FALSE, /* partial_inplace */
1237 0xffffffff, /* src_mask */
1238 0xffffffff, /* dst_mask */
1239 TRUE), /* pcrel_offset */
1240
1241 HOWTO (R_ARM_LDC_SB_G1, /* type */
1242 0, /* rightshift */
1243 2, /* size (0 = byte, 1 = short, 2 = long) */
1244 32, /* bitsize */
1245 TRUE, /* pc_relative */
1246 0, /* bitpos */
1247 complain_overflow_dont,/* complain_on_overflow */
1248 bfd_elf_generic_reloc, /* special_function */
1249 "R_ARM_LDC_SB_G1", /* name */
1250 FALSE, /* partial_inplace */
1251 0xffffffff, /* src_mask */
1252 0xffffffff, /* dst_mask */
1253 TRUE), /* pcrel_offset */
1254
1255 HOWTO (R_ARM_LDC_SB_G2, /* type */
1256 0, /* rightshift */
1257 2, /* size (0 = byte, 1 = short, 2 = long) */
1258 32, /* bitsize */
1259 TRUE, /* pc_relative */
1260 0, /* bitpos */
1261 complain_overflow_dont,/* complain_on_overflow */
1262 bfd_elf_generic_reloc, /* special_function */
1263 "R_ARM_LDC_SB_G2", /* name */
1264 FALSE, /* partial_inplace */
1265 0xffffffff, /* src_mask */
1266 0xffffffff, /* dst_mask */
1267 TRUE), /* pcrel_offset */
1268
1269 /* End of group relocations. */
1270
1271 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1272 0, /* rightshift */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1274 16, /* bitsize */
1275 FALSE, /* pc_relative */
1276 0, /* bitpos */
1277 complain_overflow_dont,/* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 "R_ARM_MOVW_BREL_NC", /* name */
1280 FALSE, /* partial_inplace */
1281 0x0000ffff, /* src_mask */
1282 0x0000ffff, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1284
1285 HOWTO (R_ARM_MOVT_BREL, /* type */
1286 0, /* rightshift */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1288 16, /* bitsize */
1289 FALSE, /* pc_relative */
1290 0, /* bitpos */
1291 complain_overflow_bitfield,/* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 "R_ARM_MOVT_BREL", /* name */
1294 FALSE, /* partial_inplace */
1295 0x0000ffff, /* src_mask */
1296 0x0000ffff, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1298
1299 HOWTO (R_ARM_MOVW_BREL, /* type */
1300 0, /* rightshift */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1302 16, /* bitsize */
1303 FALSE, /* pc_relative */
1304 0, /* bitpos */
1305 complain_overflow_dont,/* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 "R_ARM_MOVW_BREL", /* name */
1308 FALSE, /* partial_inplace */
1309 0x0000ffff, /* src_mask */
1310 0x0000ffff, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1312
1313 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1314 0, /* rightshift */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1316 16, /* bitsize */
1317 FALSE, /* pc_relative */
1318 0, /* bitpos */
1319 complain_overflow_dont,/* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 "R_ARM_THM_MOVW_BREL_NC",/* name */
1322 FALSE, /* partial_inplace */
1323 0x040f70ff, /* src_mask */
1324 0x040f70ff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1326
1327 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1328 0, /* rightshift */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1330 16, /* bitsize */
1331 FALSE, /* pc_relative */
1332 0, /* bitpos */
1333 complain_overflow_bitfield,/* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 "R_ARM_THM_MOVT_BREL", /* name */
1336 FALSE, /* partial_inplace */
1337 0x040f70ff, /* src_mask */
1338 0x040f70ff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1340
1341 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1342 0, /* rightshift */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1344 16, /* bitsize */
1345 FALSE, /* pc_relative */
1346 0, /* bitpos */
1347 complain_overflow_dont,/* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 "R_ARM_THM_MOVW_BREL", /* name */
1350 FALSE, /* partial_inplace */
1351 0x040f70ff, /* src_mask */
1352 0x040f70ff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1354
1355 EMPTY_HOWTO (90), /* Unallocated. */
1356 EMPTY_HOWTO (91),
1357 EMPTY_HOWTO (92),
1358 EMPTY_HOWTO (93),
1359
1360 HOWTO (R_ARM_PLT32_ABS, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 FALSE, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_dont,/* complain_on_overflow */
1367 bfd_elf_generic_reloc, /* special_function */
1368 "R_ARM_PLT32_ABS", /* name */
1369 FALSE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1373
1374 HOWTO (R_ARM_GOT_ABS, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 32, /* bitsize */
1378 FALSE, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_GOT_ABS", /* name */
1383 FALSE, /* partial_inplace */
1384 0xffffffff, /* src_mask */
1385 0xffffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1387
1388 HOWTO (R_ARM_GOT_PREL, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 32, /* bitsize */
1392 TRUE, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_dont, /* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_GOT_PREL", /* name */
1397 FALSE, /* partial_inplace */
1398 0xffffffff, /* src_mask */
1399 0xffffffff, /* dst_mask */
1400 TRUE), /* pcrel_offset */
1401
1402 HOWTO (R_ARM_GOT_BREL12, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 12, /* bitsize */
1406 FALSE, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_bitfield,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_GOT_BREL12", /* name */
1411 FALSE, /* partial_inplace */
1412 0x00000fff, /* src_mask */
1413 0x00000fff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1415
1416 HOWTO (R_ARM_GOTOFF12, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 12, /* bitsize */
1420 FALSE, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_bitfield,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_GOTOFF12", /* name */
1425 FALSE, /* partial_inplace */
1426 0x00000fff, /* src_mask */
1427 0x00000fff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1429
1430 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1431
1432 /* GNU extension to record C++ vtable member usage */
1433 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1434 0, /* rightshift */
1435 2, /* size (0 = byte, 1 = short, 2 = long) */
1436 0, /* bitsize */
1437 FALSE, /* pc_relative */
1438 0, /* bitpos */
1439 complain_overflow_dont, /* complain_on_overflow */
1440 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1441 "R_ARM_GNU_VTENTRY", /* name */
1442 FALSE, /* partial_inplace */
1443 0, /* src_mask */
1444 0, /* dst_mask */
1445 FALSE), /* pcrel_offset */
1446
1447 /* GNU extension to record C++ vtable hierarchy */
1448 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1449 0, /* rightshift */
1450 2, /* size (0 = byte, 1 = short, 2 = long) */
1451 0, /* bitsize */
1452 FALSE, /* pc_relative */
1453 0, /* bitpos */
1454 complain_overflow_dont, /* complain_on_overflow */
1455 NULL, /* special_function */
1456 "R_ARM_GNU_VTINHERIT", /* name */
1457 FALSE, /* partial_inplace */
1458 0, /* src_mask */
1459 0, /* dst_mask */
1460 FALSE), /* pcrel_offset */
1461
1462 HOWTO (R_ARM_THM_JUMP11, /* type */
1463 1, /* rightshift */
1464 1, /* size (0 = byte, 1 = short, 2 = long) */
1465 11, /* bitsize */
1466 TRUE, /* pc_relative */
1467 0, /* bitpos */
1468 complain_overflow_signed, /* complain_on_overflow */
1469 bfd_elf_generic_reloc, /* special_function */
1470 "R_ARM_THM_JUMP11", /* name */
1471 FALSE, /* partial_inplace */
1472 0x000007ff, /* src_mask */
1473 0x000007ff, /* dst_mask */
1474 TRUE), /* pcrel_offset */
1475
1476 HOWTO (R_ARM_THM_JUMP8, /* type */
1477 1, /* rightshift */
1478 1, /* size (0 = byte, 1 = short, 2 = long) */
1479 8, /* bitsize */
1480 TRUE, /* pc_relative */
1481 0, /* bitpos */
1482 complain_overflow_signed, /* complain_on_overflow */
1483 bfd_elf_generic_reloc, /* special_function */
1484 "R_ARM_THM_JUMP8", /* name */
1485 FALSE, /* partial_inplace */
1486 0x000000ff, /* src_mask */
1487 0x000000ff, /* dst_mask */
1488 TRUE), /* pcrel_offset */
1489
1490 /* TLS relocations */
1491 HOWTO (R_ARM_TLS_GD32, /* type */
1492 0, /* rightshift */
1493 2, /* size (0 = byte, 1 = short, 2 = long) */
1494 32, /* bitsize */
1495 FALSE, /* pc_relative */
1496 0, /* bitpos */
1497 complain_overflow_bitfield,/* complain_on_overflow */
1498 NULL, /* special_function */
1499 "R_ARM_TLS_GD32", /* name */
1500 TRUE, /* partial_inplace */
1501 0xffffffff, /* src_mask */
1502 0xffffffff, /* dst_mask */
1503 FALSE), /* pcrel_offset */
1504
1505 HOWTO (R_ARM_TLS_LDM32, /* type */
1506 0, /* rightshift */
1507 2, /* size (0 = byte, 1 = short, 2 = long) */
1508 32, /* bitsize */
1509 FALSE, /* pc_relative */
1510 0, /* bitpos */
1511 complain_overflow_bitfield,/* complain_on_overflow */
1512 bfd_elf_generic_reloc, /* special_function */
1513 "R_ARM_TLS_LDM32", /* name */
1514 TRUE, /* partial_inplace */
1515 0xffffffff, /* src_mask */
1516 0xffffffff, /* dst_mask */
1517 FALSE), /* pcrel_offset */
1518
1519 HOWTO (R_ARM_TLS_LDO32, /* type */
1520 0, /* rightshift */
1521 2, /* size (0 = byte, 1 = short, 2 = long) */
1522 32, /* bitsize */
1523 FALSE, /* pc_relative */
1524 0, /* bitpos */
1525 complain_overflow_bitfield,/* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 "R_ARM_TLS_LDO32", /* name */
1528 TRUE, /* partial_inplace */
1529 0xffffffff, /* src_mask */
1530 0xffffffff, /* dst_mask */
1531 FALSE), /* pcrel_offset */
1532
1533 HOWTO (R_ARM_TLS_IE32, /* type */
1534 0, /* rightshift */
1535 2, /* size (0 = byte, 1 = short, 2 = long) */
1536 32, /* bitsize */
1537 FALSE, /* pc_relative */
1538 0, /* bitpos */
1539 complain_overflow_bitfield,/* complain_on_overflow */
1540 NULL, /* special_function */
1541 "R_ARM_TLS_IE32", /* name */
1542 TRUE, /* partial_inplace */
1543 0xffffffff, /* src_mask */
1544 0xffffffff, /* dst_mask */
1545 FALSE), /* pcrel_offset */
1546
1547 HOWTO (R_ARM_TLS_LE32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 FALSE, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 bfd_elf_generic_reloc, /* special_function */
1555 "R_ARM_TLS_LE32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1560
1561 HOWTO (R_ARM_TLS_LDO12, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 12, /* bitsize */
1565 FALSE, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDO12", /* name */
1570 FALSE, /* partial_inplace */
1571 0x00000fff, /* src_mask */
1572 0x00000fff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1574
1575 HOWTO (R_ARM_TLS_LE12, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 12, /* bitsize */
1579 FALSE, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LE12", /* name */
1584 FALSE, /* partial_inplace */
1585 0x00000fff, /* src_mask */
1586 0x00000fff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1588
1589 HOWTO (R_ARM_TLS_IE12GP, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 12, /* bitsize */
1593 FALSE, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 bfd_elf_generic_reloc, /* special_function */
1597 "R_ARM_TLS_IE12GP", /* name */
1598 FALSE, /* partial_inplace */
1599 0x00000fff, /* src_mask */
1600 0x00000fff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1602};
1603
1604/* 112-127 private relocations
1605 128 R_ARM_ME_TOO, obsolete
1606 129-255 unallocated in AAELF.
1607
1608 249-255 extended, currently unused, relocations: */
1609
1610static reloc_howto_type elf32_arm_howto_table_2[4] =
1611{
1612 HOWTO (R_ARM_RREL32, /* type */
1613 0, /* rightshift */
1614 0, /* size (0 = byte, 1 = short, 2 = long) */
1615 0, /* bitsize */
1616 FALSE, /* pc_relative */
1617 0, /* bitpos */
1618 complain_overflow_dont,/* complain_on_overflow */
1619 bfd_elf_generic_reloc, /* special_function */
1620 "R_ARM_RREL32", /* name */
1621 FALSE, /* partial_inplace */
1622 0, /* src_mask */
1623 0, /* dst_mask */
1624 FALSE), /* pcrel_offset */
1625
1626 HOWTO (R_ARM_RABS32, /* type */
1627 0, /* rightshift */
1628 0, /* size (0 = byte, 1 = short, 2 = long) */
1629 0, /* bitsize */
1630 FALSE, /* pc_relative */
1631 0, /* bitpos */
1632 complain_overflow_dont,/* complain_on_overflow */
1633 bfd_elf_generic_reloc, /* special_function */
1634 "R_ARM_RABS32", /* name */
1635 FALSE, /* partial_inplace */
1636 0, /* src_mask */
1637 0, /* dst_mask */
1638 FALSE), /* pcrel_offset */
1639
1640 HOWTO (R_ARM_RPC24, /* type */
1641 0, /* rightshift */
1642 0, /* size (0 = byte, 1 = short, 2 = long) */
1643 0, /* bitsize */
1644 FALSE, /* pc_relative */
1645 0, /* bitpos */
1646 complain_overflow_dont,/* complain_on_overflow */
1647 bfd_elf_generic_reloc, /* special_function */
1648 "R_ARM_RPC24", /* name */
1649 FALSE, /* partial_inplace */
1650 0, /* src_mask */
1651 0, /* dst_mask */
1652 FALSE), /* pcrel_offset */
1653
1654 HOWTO (R_ARM_RBASE, /* type */
1655 0, /* rightshift */
1656 0, /* size (0 = byte, 1 = short, 2 = long) */
1657 0, /* bitsize */
1658 FALSE, /* pc_relative */
1659 0, /* bitpos */
1660 complain_overflow_dont,/* complain_on_overflow */
1661 bfd_elf_generic_reloc, /* special_function */
1662 "R_ARM_RBASE", /* name */
1663 FALSE, /* partial_inplace */
1664 0, /* src_mask */
1665 0, /* dst_mask */
1666 FALSE) /* pcrel_offset */
1667};
1668
1669static reloc_howto_type *
1670elf32_arm_howto_from_type (unsigned int r_type)
1671{
1672 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1673 return &elf32_arm_howto_table_1[r_type];
1674
1675 if (r_type >= R_ARM_RREL32
1676 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1677 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1678
1679 return NULL;
1680}
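
/* Editorial note (illustrative, not part of the original source): relocation
   types below ARRAY_SIZE (elf32_arm_howto_table_1) index the first table
   directly, while the extended types starting at R_ARM_RREL32 map into the
   second table, e.g.

     elf32_arm_howto_from_type (R_ARM_ABS32)
       == &elf32_arm_howto_table_1[R_ARM_ABS32];
     elf32_arm_howto_from_type (R_ARM_RABS32)
       == &elf32_arm_howto_table_2[R_ARM_RABS32 - R_ARM_RREL32];

   Types between the two ranges have no howto and yield NULL, which leaves
   bfd_reloc->howto NULL in elf32_arm_info_to_howto below.  */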
1681
1682static void
1683elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1684 Elf_Internal_Rela * elf_reloc)
1685{
1686 unsigned int r_type;
1687
1688 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1689 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1690}
1691
1692struct elf32_arm_reloc_map
1693 {
1694 bfd_reloc_code_real_type bfd_reloc_val;
1695 unsigned char elf_reloc_val;
1696 };
1697
 1698/* All entries in this list must also be present in elf32_arm_howto_table_1. */
1699static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1700 {
1701 {BFD_RELOC_NONE, R_ARM_NONE},
1702 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1703 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1704 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1705 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1706 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1707 {BFD_RELOC_32, R_ARM_ABS32},
1708 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1709 {BFD_RELOC_8, R_ARM_ABS8},
1710 {BFD_RELOC_16, R_ARM_ABS16},
1711 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1712 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1713 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1714 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1719 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1720 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1721 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1722 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1723 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1724 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1725 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1726 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1727 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1728 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1729 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1730 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1731 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1732 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1733 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1734 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1735 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1736 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1737 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1738 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1739 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1740 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1741 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1742 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1743 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1744 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1745 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1746 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1747 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1748 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1749 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1750 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1751 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1752 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1753 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1754 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1755 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1756 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1757 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1758 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1759 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1760 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1761 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1762 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1763 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1764 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1765 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1766 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1767 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1768 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1769 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1770 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1771 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1772 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1773 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1774 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1775 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1776 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1777 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1778 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1779 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1780 };
1781
1782static reloc_howto_type *
1783elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1784 bfd_reloc_code_real_type code)
1785{
1786 unsigned int i;
1787
1788 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1789 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1790 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1791
1792 return NULL;
1793}
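/* Usage sketch (added commentary, not part of the original source):
   the BFD-level lookup is a linear scan of elf32_arm_reloc_map, so

     reloc_howto_type *h
       = elf32_arm_reloc_type_lookup (abfd, BFD_RELOC_32);

   would return the R_ARM_ABS32 howto, while an unmapped BFD reloc
   code returns NULL.  The abfd argument is unused.  */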
1794
1795static reloc_howto_type *
1796elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1797 const char *r_name)
1798{
1799 unsigned int i;
1800
1801 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1802 if (elf32_arm_howto_table_1[i].name != NULL
1803 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1804 return &elf32_arm_howto_table_1[i];
1805
1806 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1807 if (elf32_arm_howto_table_2[i].name != NULL
1808 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1809 return &elf32_arm_howto_table_2[i];
1810
1811 return NULL;
1812}
1813
1814/* Support for core dump NOTE sections. */
1815
1816static bfd_boolean
1817elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1818{
1819 int offset;
1820 size_t size;
1821
1822 switch (note->descsz)
1823 {
1824 default:
1825 return FALSE;
1826
1827 case 148: /* Linux/ARM 32-bit. */
1828 /* pr_cursig */
1829 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1830
1831 /* pr_pid */
1832 elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
1833
1834 /* pr_reg */
1835 offset = 72;
1836 size = 72;
1837
1838 break;
1839 }
1840
1841 /* Make a ".reg/999" section. */
1842 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1843 size, note->descpos + offset);
1844}
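/* Layout note (added commentary; assumed layout, for clarity only):
   a 148-byte Linux/ARM elf_prstatus note is taken to be laid out as

     offset  12  pr_cursig   (2 bytes)
     offset  24  pr_pid      (4 bytes)
     offset  72  pr_reg      (72 bytes = 18 registers * 4:
                              r0-r15, cpsr, orig_r0)
     offset 144  pr_fpvalid  (4 bytes)

   which is why the ".reg" pseudosection above covers 72 bytes
   starting at note->descpos + 72.  */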
1845
1846static bfd_boolean
1847elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1848{
1849 switch (note->descsz)
1850 {
1851 default:
1852 return FALSE;
1853
1854 case 124: /* Linux/ARM elf_prpsinfo. */
1855 elf_tdata (abfd)->core_program
1856 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1857 elf_tdata (abfd)->core_command
1858 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1859 }
1860
1861 /* Note that for some reason, a spurious space is tacked
1862 onto the end of the args in at least some implementations,
1863 so strip it off if it exists. */
1864 {
1865 char *command = elf_tdata (abfd)->core_command;
1866 int n = strlen (command);
1867
1868 if (0 < n && command[n - 1] == ' ')
1869 command[n - 1] = '\0';
1870 }
1871
1872 return TRUE;
1873}
1874
1875#define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1876#define TARGET_LITTLE_NAME "elf32-littlearm"
1877#define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1878#define TARGET_BIG_NAME "elf32-bigarm"
1879
1880#define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1881#define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1882
1883typedef unsigned long int insn32;
1884typedef unsigned short int insn16;
1885
1886/* In lieu of proper flags, assume all EABIv4 or later objects are
1887 interworkable. */
1888#define INTERWORK_FLAG(abfd) \
1889 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1890 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1891 || ((abfd)->flags & BFD_LINKER_CREATED))
1892
1893 /* The linker script knows the section names for placement.
1894 The entry_names are used to do simple name mangling on the stubs.
1895 Given a function name and its type, the stub can be found. The
1896 name can be changed. The only requirement is that the %s be present. */
1897#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1898#define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1899
1900#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1901#define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1902
1903#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1904#define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1905
1906#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1907#define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1908
1909#define STUB_ENTRY_NAME "__%s_veneer"
1910
1911/* The name of the dynamic interpreter. This is put in the .interp
1912 section. */
1913#define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1914
1915#ifdef FOUR_WORD_PLT
1916
1917/* The first entry in a procedure linkage table looks like
1918 this. It is set up so that any shared library function that is
1919 called before the relocation has been set up calls the dynamic
1920 linker first. */
1921static const bfd_vma elf32_arm_plt0_entry [] =
1922 {
1923 0xe52de004, /* str lr, [sp, #-4]! */
1924 0xe59fe010, /* ldr lr, [pc, #16] */
1925 0xe08fe00e, /* add lr, pc, lr */
1926 0xe5bef008, /* ldr pc, [lr, #8]! */
1927 };
1928
1929/* Subsequent entries in a procedure linkage table look like
1930 this. */
1931static const bfd_vma elf32_arm_plt_entry [] =
1932 {
1933 0xe28fc600, /* add ip, pc, #NN */
1934 0xe28cca00, /* add ip, ip, #NN */
1935 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1936 0x00000000, /* unused */
1937 };
1938
1939#else
1940
1941/* The first entry in a procedure linkage table looks like
1942 this. It is set up so that any shared library function that is
1943 called before the relocation has been set up calls the dynamic
1944 linker first. */
1945static const bfd_vma elf32_arm_plt0_entry [] =
1946 {
1947 0xe52de004, /* str lr, [sp, #-4]! */
1948 0xe59fe004, /* ldr lr, [pc, #4] */
1949 0xe08fe00e, /* add lr, pc, lr */
1950 0xe5bef008, /* ldr pc, [lr, #8]! */
1951 0x00000000, /* &GOT[0] - . */
1952 };
1953
1954/* Subsequent entries in a procedure linkage table look like
1955 this. */
1956static const bfd_vma elf32_arm_plt_entry [] =
1957 {
1958 0xe28fc600, /* add ip, pc, #0xNN00000 */
1959 0xe28cca00, /* add ip, ip, #0xNN000 */
1960 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1961 };
1962
1963#endif
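/* Worked example (illustrative only; the displacement below is
   hypothetical and the pc-reads-ahead adjustment is ignored): the
   three PLT instructions above split the byte offset from the PLT
   entry to its GOT slot into one chunk per instruction, using the
   rotated-immediate fields of "add" and the 12-bit offset of "ldr".
   For a displacement of 0x0854321:

     add ip, pc, #0x0800000   ; bits 27-20
     add ip, ip, #0x54000     ; bits 19-12
     ldr pc, [ip, #0x321]!    ; bits 11-0

   0x0800000 + 0x54000 + 0x321 == 0x0854321.  */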
1964
1965/* The format of the first entry in the procedure linkage table
1966 for a VxWorks executable. */
1967static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1968 {
1969 0xe52dc008, /* str ip,[sp,#-8]! */
1970 0xe59fc000, /* ldr ip,[pc] */
1971 0xe59cf008, /* ldr pc,[ip,#8] */
1972 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1973 };
1974
1975/* The format of subsequent entries in a VxWorks executable. */
1976static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1977 {
1978 0xe59fc000, /* ldr ip,[pc] */
1979 0xe59cf000, /* ldr pc,[ip] */
1980 0x00000000, /* .long @got */
1981 0xe59fc000, /* ldr ip,[pc] */
1982 0xea000000, /* b _PLT */
1983 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1984 };
1985
1986/* The format of entries in a VxWorks shared library. */
1987static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1988 {
1989 0xe59fc000, /* ldr ip,[pc] */
1990 0xe79cf009, /* ldr pc,[ip,r9] */
1991 0x00000000, /* .long @got */
1992 0xe59fc000, /* ldr ip,[pc] */
1993 0xe599f008, /* ldr pc,[r9,#8] */
1994 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1995 };
1996
1997/* An initial stub used if the PLT entry is referenced from Thumb code. */
1998#define PLT_THUMB_STUB_SIZE 4
1999static const bfd_vma elf32_arm_plt_thumb_stub [] =
2000 {
2001 0x4778, /* bx pc */
2002 0x46c0 /* nop */
2003 };
2004
2005/* The entries in a PLT when using a DLL-based target with multiple
2006 address spaces. */
2007static const bfd_vma elf32_arm_symbian_plt_entry [] =
2008 {
2009 0xe51ff004, /* ldr pc, [pc, #-4] */
2010 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2011 };
2012
2013#define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2014#define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2015 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2016#define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2017#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2018#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
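/* For reference (added commentary; values derived from the macros
   above): the resulting reachable ranges are approximately

     ARM b/bl                : -33554424 .. +33554436  (about +/-32 MiB)
     Thumb (pre-Thumb-2) bl  :  -4194300 .. +4194306   (about +/-4 MiB)
     Thumb-2 b.w/bl          : -16777212 .. +16777218  (about +/-16 MiB)

   The +8/+4 terms account for the ARM/Thumb pipeline offset of the
   PC at the branch instruction.  */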
2019
2020enum stub_insn_type
2021 {
2022 THUMB16_TYPE = 1,
2023 THUMB32_TYPE,
2024 ARM_TYPE,
2025 DATA_TYPE
2026 };
2027
2028#define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2029/* A bit of a hack. A Thumb conditional branch, in which the proper condition
2030 is inserted in arm_build_one_stub(). */
2031#define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2032#define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2033#define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2034#define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2035#define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2036#define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2037
2038typedef struct
2039{
2040 bfd_vma data;
2041 enum stub_insn_type type;
2042 unsigned int r_type;
2043 int reloc_addend;
2044} insn_sequence;
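/* Expansion example (illustrative, derived from the macros above):

     ARM_INSN (0xe51ff004)         => { 0xe51ff004, ARM_TYPE,  R_ARM_NONE,  0 }
     DATA_WORD (0, R_ARM_ABS32, 0) => { 0, DATA_TYPE, R_ARM_ABS32, 0 }

   so each stub template below is simply an array of insn_sequence
   entries describing the instructions and literal words to emit.  */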
2045
2046/* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2047 to reach the stub if necessary. */
2048static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2049 {
2050 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2051 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2052 };
2053
2054/* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2055 available. */
2056static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2057 {
2058 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2059 ARM_INSN(0xe12fff1c), /* bx ip */
2060 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2061 };
2062
2063/* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2064static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2065 {
2066 THUMB16_INSN(0xb401), /* push {r0} */
2067 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2068 THUMB16_INSN(0x4684), /* mov ip, r0 */
2069 THUMB16_INSN(0xbc01), /* pop {r0} */
2070 THUMB16_INSN(0x4760), /* bx ip */
2071 THUMB16_INSN(0xbf00), /* nop */
2072 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2073 };
2074
2075/* V4T Thumb -> Thumb long branch stub. Using the stack is not
2076 allowed. */
2077static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2078 {
2079 THUMB16_INSN(0x4778), /* bx pc */
2080 THUMB16_INSN(0x46c0), /* nop */
2081 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2082 ARM_INSN(0xe12fff1c), /* bx ip */
2083 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2084 };
2085
2086/* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2087 available. */
2088static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2089 {
2090 THUMB16_INSN(0x4778), /* bx pc */
2091 THUMB16_INSN(0x46c0), /* nop */
2092 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2093 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2094 };
2095
2096/* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2097 one, when the destination is close enough. */
2098static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2099 {
2100 THUMB16_INSN(0x4778), /* bx pc */
2101 THUMB16_INSN(0x46c0), /* nop */
2102 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2103 };
2104
2105/* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2106 blx to reach the stub if necessary. */
2107static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2108 {
2109 ARM_INSN(0xe59fc000), /* ldr ip, [pc] */
2110 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2111 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2112 };
2113
2114/* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2115 blx to reach the stub if necessary. We cannot add into pc;
2116 it is not guaranteed to mode switch (the behaviour differs
2117 between ARMv6 and ARMv7). */
2118static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2119 {
2120 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2121 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2122 ARM_INSN(0xe12fff1c), /* bx ip */
2123 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2124 };
2125
2126 /* V4T ARM -> Thumb long branch stub, PIC. */
2127static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2128 {
2129 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2130 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2131 ARM_INSN(0xe12fff1c), /* bx ip */
2132 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2133 };
2134
2135/* V4T Thumb -> ARM long branch stub, PIC. */
2136static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2137 {
2138 THUMB16_INSN(0x4778), /* bx pc */
2139 THUMB16_INSN(0x46c0), /* nop */
2140 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2141 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2142 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2143 };
2144
2145/* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2146 architectures. */
2147static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2148 {
2149 THUMB16_INSN(0xb401), /* push {r0} */
2150 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2151 THUMB16_INSN(0x46fc), /* mov ip, pc */
2152 THUMB16_INSN(0x4484), /* add ip, r0 */
2153 THUMB16_INSN(0xbc01), /* pop {r0} */
2154 THUMB16_INSN(0x4760), /* bx ip */
2155 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2156 };
2157
2158/* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2159 allowed. */
2160static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2161 {
2162 THUMB16_INSN(0x4778), /* bx pc */
2163 THUMB16_INSN(0x46c0), /* nop */
2164 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2165 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2166 ARM_INSN(0xe12fff1c), /* bx ip */
2167 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2168 };
2169
2170/* Cortex-A8 erratum-workaround stubs. */
2171
2172/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2173 can't use a conditional branch to reach this stub). */
2174
2175static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2176 {
2177 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2178 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2179 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2180 };
2181
2182/* Stub used for b.w and bl.w instructions. */
2183
2184static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2185 {
2186 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2187 };
2188
2189static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2190 {
2191 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2192 };
2193
2194/* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2195 instruction (which switches to ARM mode) to point to this stub. Jump to the
2196 real destination using an ARM-mode branch. */
2197
2198static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2199 {
2200 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2201 };
2202
2203/* Section name for stubs is the associated section name plus this
2204 string. */
2205#define STUB_SUFFIX ".stub"
2206
2207/* One entry per long/short branch stub defined above. */
2208#define DEF_STUBS \
2209 DEF_STUB(long_branch_any_any) \
2210 DEF_STUB(long_branch_v4t_arm_thumb) \
2211 DEF_STUB(long_branch_thumb_only) \
2212 DEF_STUB(long_branch_v4t_thumb_thumb) \
2213 DEF_STUB(long_branch_v4t_thumb_arm) \
2214 DEF_STUB(short_branch_v4t_thumb_arm) \
2215 DEF_STUB(long_branch_any_arm_pic) \
2216 DEF_STUB(long_branch_any_thumb_pic) \
2217 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2220 DEF_STUB(long_branch_thumb_only_pic) \
2221 DEF_STUB(a8_veneer_b_cond) \
2222 DEF_STUB(a8_veneer_b) \
2223 DEF_STUB(a8_veneer_bl) \
2224 DEF_STUB(a8_veneer_blx)
2225
2226#define DEF_STUB(x) arm_stub_##x,
2227enum elf32_arm_stub_type {
2228 arm_stub_none,
2229 DEF_STUBS
2230 /* Note the first a8_veneer type */
2231 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2232};
2233#undef DEF_STUB
2234
2235typedef struct
2236{
2237 const insn_sequence* template_sequence;
2238 int template_size;
2239} stub_def;
2240
2241#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2242static const stub_def stub_definitions[] = {
2243 {NULL, 0},
2244 DEF_STUBS
2245};
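/* Expansion example (illustrative): with the two DEF_STUB definitions
   above, DEF_STUBS expands once into enumerators and once into table
   entries, e.g.

     enum:   arm_stub_long_branch_any_any, ...
     table:  { elf32_arm_stub_long_branch_any_any,
               ARRAY_SIZE (elf32_arm_stub_long_branch_any_any) }, ...

   Since the enum starts with arm_stub_none and the table starts with
   {NULL, 0}, stub_definitions can be indexed directly by an
   elf32_arm_stub_type value.  */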
2246
2247struct elf32_arm_stub_hash_entry
2248{
2249 /* Base hash table entry structure. */
2250 struct bfd_hash_entry root;
2251
2252 /* The stub section. */
2253 asection *stub_sec;
2254
2255 /* Offset within stub_sec of the beginning of this stub. */
2256 bfd_vma stub_offset;
2257
2258 /* Given the symbol's value and its section we can determine its final
2259 value when building the stubs (so the stub knows where to jump). */
2260 bfd_vma target_value;
2261 asection *target_section;
2262
2263 /* Offset to apply to relocation referencing target_value. */
2264 bfd_vma target_addend;
2265
2266 /* The instruction which caused this stub to be generated (only valid for
2267 Cortex-A8 erratum workaround stubs at present). */
2268 unsigned long orig_insn;
2269
2270 /* The stub type. */
2271 enum elf32_arm_stub_type stub_type;
2272 /* Its encoding size in bytes. */
2273 int stub_size;
2274 /* Its template. */
2275 const insn_sequence *stub_template;
2276 /* The size of the template (number of entries). */
2277 int stub_template_size;
2278
2279 /* The symbol table entry, if any, that this was derived from. */
2280 struct elf32_arm_link_hash_entry *h;
2281
2282 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2283 unsigned char st_type;
2284
2285 /* Where this stub is being called from, or, in the case of combined
2286 stub sections, the first input section in the group. */
2287 asection *id_sec;
2288
2289 /* The name for the local symbol at the start of this stub. The
2290 stub name in the hash table has to be unique; this does not, so
2291 it can be friendlier. */
2292 char *output_name;
2293};
2294
2295/* Used to build a map of a section. This is required for mixed-endian
2296 code/data. */
2297
2298typedef struct elf32_elf_section_map
2299{
2300 bfd_vma vma;
2301 char type;
2302}
2303elf32_arm_section_map;
2304
2305/* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2306
2307typedef enum
2308{
2309 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2310 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2311 VFP11_ERRATUM_ARM_VENEER,
2312 VFP11_ERRATUM_THUMB_VENEER
2313}
2314elf32_vfp11_erratum_type;
2315
2316typedef struct elf32_vfp11_erratum_list
2317{
2318 struct elf32_vfp11_erratum_list *next;
2319 bfd_vma vma;
2320 union
2321 {
2322 struct
2323 {
2324 struct elf32_vfp11_erratum_list *veneer;
2325 unsigned int vfp_insn;
2326 } b;
2327 struct
2328 {
2329 struct elf32_vfp11_erratum_list *branch;
2330 unsigned int id;
2331 } v;
2332 } u;
2333 elf32_vfp11_erratum_type type;
2334}
2335elf32_vfp11_erratum_list;
2336
2337typedef enum
2338{
2339 DELETE_EXIDX_ENTRY,
2340 INSERT_EXIDX_CANTUNWIND_AT_END
2341}
2342arm_unwind_edit_type;
2343
2344/* A (sorted) list of edits to apply to an unwind table. */
2345typedef struct arm_unwind_table_edit
2346{
2347 arm_unwind_edit_type type;
2348 /* Note: we sometimes want to insert an unwind entry corresponding to a
2349 section different from the one we're currently writing out, so record the
2350 (text) section this edit relates to here. */
2351 asection *linked_section;
2352 unsigned int index;
2353 struct arm_unwind_table_edit *next;
2354}
2355arm_unwind_table_edit;
2356
2357typedef struct _arm_elf_section_data
2358{
2359 /* Information about mapping symbols. */
2360 struct bfd_elf_section_data elf;
2361 unsigned int mapcount;
2362 unsigned int mapsize;
2363 elf32_arm_section_map *map;
2364 /* Information about CPU errata. */
2365 unsigned int erratumcount;
2366 elf32_vfp11_erratum_list *erratumlist;
2367 /* Information about unwind tables. */
2368 union
2369 {
2370 /* Unwind info attached to a text section. */
2371 struct
2372 {
2373 asection *arm_exidx_sec;
2374 } text;
2375
2376 /* Unwind info attached to an .ARM.exidx section. */
2377 struct
2378 {
2379 arm_unwind_table_edit *unwind_edit_list;
2380 arm_unwind_table_edit *unwind_edit_tail;
2381 } exidx;
2382 } u;
2383}
2384_arm_elf_section_data;
2385
2386#define elf32_arm_section_data(sec) \
2387 ((_arm_elf_section_data *) elf_section_data (sec))
2388
2389/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2390 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2391 so they may be created multiple times: we keep an array of these entries
2392 while relaxing, which we can refresh easily, and then create stubs for each
2393 potentially erratum-triggering instruction once we've settled on a solution. */
2394
2395struct a8_erratum_fix {
2396 bfd *input_bfd;
2397 asection *section;
2398 bfd_vma offset;
2399 bfd_vma addend;
2400 unsigned long orig_insn;
2401 char *stub_name;
2402 enum elf32_arm_stub_type stub_type;
2403 int st_type;
2404};
2405
2406/* A table of relocs applied to branches which might trigger Cortex-A8
2407 erratum. */
2408
2409struct a8_erratum_reloc {
2410 bfd_vma from;
2411 bfd_vma destination;
2412 struct elf32_arm_link_hash_entry *hash;
2413 const char *sym_name;
2414 unsigned int r_type;
2415 unsigned char st_type;
2416 bfd_boolean non_a8_stub;
2417};
2418
2419/* The size of the thread control block. */
2420#define TCB_SIZE 8
2421
2422struct elf_arm_obj_tdata
2423{
2424 struct elf_obj_tdata root;
2425
2426 /* tls_type for each local got entry. */
2427 char *local_got_tls_type;
2428
2429 /* Zero to warn when linking objects with incompatible enum sizes. */
2430 int no_enum_size_warning;
2431
2432 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2433 int no_wchar_size_warning;
2434};
2435
2436#define elf_arm_tdata(bfd) \
2437 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2438
2439#define elf32_arm_local_got_tls_type(bfd) \
2440 (elf_arm_tdata (bfd)->local_got_tls_type)
2441
2442#define is_arm_elf(bfd) \
2443 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2444 && elf_tdata (bfd) != NULL \
2445 && elf_object_id (bfd) == ARM_ELF_DATA)
2446
2447static bfd_boolean
2448elf32_arm_mkobject (bfd *abfd)
2449{
2450 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2451 ARM_ELF_DATA);
2452}
2453
2454/* The ARM linker needs to keep track of the number of relocs that it
2455 decides to copy in check_relocs for each symbol. This is so that
2456 it can discard PC relative relocs if it doesn't need them when
2457 linking with -Bsymbolic. We store the information in a field
2458 extending the regular ELF linker hash table. */
2459
2460/* This structure keeps track of the number of relocs we have copied
2461 for a given symbol. */
2462struct elf32_arm_relocs_copied
2463 {
2464 /* Next section. */
2465 struct elf32_arm_relocs_copied * next;
2466 /* A section in dynobj. */
2467 asection * section;
2468 /* Number of relocs copied in this section. */
2469 bfd_size_type count;
2470 /* Number of PC-relative relocs copied in this section. */
2471 bfd_size_type pc_count;
2472 };
2473
2474#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2475
2476/* Arm ELF linker hash entry. */
2477struct elf32_arm_link_hash_entry
2478 {
2479 struct elf_link_hash_entry root;
2480
2481 /* Number of PC relative relocs copied for this symbol. */
2482 struct elf32_arm_relocs_copied * relocs_copied;
2483
2484 /* We reference count Thumb references to a PLT entry separately,
2485 so that we can emit the Thumb trampoline only if needed. */
2486 bfd_signed_vma plt_thumb_refcount;
2487
2488 /* Some references from Thumb code may be eliminated by BL->BLX
2489 conversion, so record them separately. */
2490 bfd_signed_vma plt_maybe_thumb_refcount;
2491
2492 /* Since PLT entries have variable size if the Thumb prologue is
2493 used, we need to record the index into .got.plt instead of
2494 recomputing it from the PLT offset. */
2495 bfd_signed_vma plt_got_offset;
2496
2497#define GOT_UNKNOWN 0
2498#define GOT_NORMAL 1
2499#define GOT_TLS_GD 2
2500#define GOT_TLS_IE 4
2501 unsigned char tls_type;
2502
2503 /* The symbol marking the real symbol location for exported thumb
2504 symbols with Arm stubs. */
2505 struct elf_link_hash_entry *export_glue;
2506
2507 /* A pointer to the most recently used stub hash entry against this
2508 symbol. */
2509 struct elf32_arm_stub_hash_entry *stub_cache;
2510 };
2511
2512/* Traverse an arm ELF linker hash table. */
2513#define elf32_arm_link_hash_traverse(table, func, info) \
2514 (elf_link_hash_traverse \
2515 (&(table)->root, \
2516 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2517 (info)))
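/* Usage sketch (hypothetical callback, not part of the original
   source): the macro lets a callback see entries with their
   ARM-specific type, e.g.

     static bfd_boolean
     count_thumb_plt_refs (struct elf32_arm_link_hash_entry *eh, void *p)
     {
       *(bfd_signed_vma *) p += eh->plt_thumb_refcount;
       return TRUE;
     }

     elf32_arm_link_hash_traverse (htab, count_thumb_plt_refs, &total);

   where htab and total are assumed to be in scope.  */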
2518
2519/* Get the ARM elf linker hash table from a link_info structure. */
2520#define elf32_arm_hash_table(info) \
2521 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2522 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2523
2524#define arm_stub_hash_lookup(table, string, create, copy) \
2525 ((struct elf32_arm_stub_hash_entry *) \
2526 bfd_hash_lookup ((table), (string), (create), (copy)))
2527
2528/* Array to keep track of which stub sections have been created, and
2529 information on stub grouping. */
2530struct map_stub
2531{
2532 /* This is the section to which stubs in the group will be
2533 attached. */
2534 asection *link_sec;
2535 /* The stub section. */
2536 asection *stub_sec;
2537};
2538
2539/* ARM ELF linker hash table. */
2540struct elf32_arm_link_hash_table
2541{
2542 /* The main hash table. */
2543 struct elf_link_hash_table root;
2544
2545 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2546 bfd_size_type thumb_glue_size;
2547
2548 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2549 bfd_size_type arm_glue_size;
2550
2551 /* The size in bytes of section containing the ARMv4 BX veneers. */
2552 bfd_size_type bx_glue_size;
2553
2554 /* Offsets of ARMv4 BX veneers. Bit 1 is set if the veneer is present,
2555 and bit 0 is set once it has been populated. */
2556 bfd_vma bx_glue_offset[15];
2557
2558 /* The size in bytes of the section containing glue for VFP11 erratum
2559 veneers. */
2560 bfd_size_type vfp11_erratum_glue_size;
2561
2562 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2563 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2564 elf32_arm_write_section(). */
2565 struct a8_erratum_fix *a8_erratum_fixes;
2566 unsigned int num_a8_erratum_fixes;
2567
2568 /* An arbitrary input BFD chosen to hold the glue sections. */
2569 bfd * bfd_of_glue_owner;
2570
2571 /* Nonzero to output a BE8 image. */
2572 int byteswap_code;
2573
2574 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2575 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2576 int target1_is_rel;
2577
2578 /* The relocation to use for R_ARM_TARGET2 relocations. */
2579 int target2_reloc;
2580
2581 /* 0 = Ignore R_ARM_V4BX.
2582 1 = Convert BX to MOV PC.
2583 2 = Generate v4 interworking stubs. */
2584 int fix_v4bx;
2585
2586 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2587 int fix_cortex_a8;
2588
2589 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2590 int use_blx;
2591
2592 /* What sort of code sequences we should look for which may trigger the
2593 VFP11 denorm erratum. */
2594 bfd_arm_vfp11_fix vfp11_fix;
2595
2596 /* Global counter for the number of fixes we have emitted. */
2597 int num_vfp11_fixes;
2598
2599 /* Nonzero to force PIC branch veneers. */
2600 int pic_veneer;
2601
2602 /* The number of bytes in the initial entry in the PLT. */
2603 bfd_size_type plt_header_size;
2604
2605 /* The number of bytes in the subsequent PLT entries. */
2606 bfd_size_type plt_entry_size;
2607
2608 /* True if the target system is VxWorks. */
2609 int vxworks_p;
2610
2611 /* True if the target system is Symbian OS. */
2612 int symbian_p;
2613
2614 /* True if the target uses REL relocations. */
2615 int use_rel;
2616
2617 /* Short-cuts to get to dynamic linker sections. */
2618 asection *sgot;
2619 asection *sgotplt;
2620 asection *srelgot;
2621 asection *splt;
2622 asection *srelplt;
2623 asection *sdynbss;
2624 asection *srelbss;
2625
2626 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2627 asection *srelplt2;
2628
2629 /* Data for R_ARM_TLS_LDM32 relocations. */
2630 union
2631 {
2632 bfd_signed_vma refcount;
2633 bfd_vma offset;
2634 } tls_ldm_got;
2635
2636 /* Small local sym cache. */
2637 struct sym_cache sym_cache;
2638
2639 /* For convenience in allocate_dynrelocs. */
2640 bfd * obfd;
2641
2642 /* The stub hash table. */
2643 struct bfd_hash_table stub_hash_table;
2644
2645 /* Linker stub bfd. */
2646 bfd *stub_bfd;
2647
2648 /* Linker call-backs. */
2649 asection * (*add_stub_section) (const char *, asection *);
2650 void (*layout_sections_again) (void);
2651
2652 /* Array to keep track of which stub sections have been created, and
2653 information on stub grouping. */
2654 struct map_stub *stub_group;
2655
2656 /* Number of elements in stub_group. */
2657 int top_id;
2658
2659 /* Assorted information used by elf32_arm_size_stubs. */
2660 unsigned int bfd_count;
2661 int top_index;
2662 asection **input_list;
2663};
2664
2665/* Create an entry in an ARM ELF linker hash table. */
2666
2667static struct bfd_hash_entry *
2668elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2669 struct bfd_hash_table * table,
2670 const char * string)
2671{
2672 struct elf32_arm_link_hash_entry * ret =
2673 (struct elf32_arm_link_hash_entry *) entry;
2674
2675 /* Allocate the structure if it has not already been allocated by a
2676 subclass. */
2677 if (ret == NULL)
2678 ret = (struct elf32_arm_link_hash_entry *)
2679 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2680 if (ret == NULL)
2681 return (struct bfd_hash_entry *) ret;
2682
2683 /* Call the allocation method of the superclass. */
2684 ret = ((struct elf32_arm_link_hash_entry *)
2685 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2686 table, string));
2687 if (ret != NULL)
2688 {
2689 ret->relocs_copied = NULL;
2690 ret->tls_type = GOT_UNKNOWN;
2691 ret->plt_thumb_refcount = 0;
2692 ret->plt_maybe_thumb_refcount = 0;
2693 ret->plt_got_offset = -1;
2694 ret->export_glue = NULL;
2695
2696 ret->stub_cache = NULL;
2697 }
2698
2699 return (struct bfd_hash_entry *) ret;
2700}
2701
2702/* Initialize an entry in the stub hash table. */
2703
2704static struct bfd_hash_entry *
2705stub_hash_newfunc (struct bfd_hash_entry *entry,
2706 struct bfd_hash_table *table,
2707 const char *string)
2708{
2709 /* Allocate the structure if it has not already been allocated by a
2710 subclass. */
2711 if (entry == NULL)
2712 {
2713 entry = (struct bfd_hash_entry *)
2714 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
2715 if (entry == NULL)
2716 return entry;
2717 }
2718
2719 /* Call the allocation method of the superclass. */
2720 entry = bfd_hash_newfunc (entry, table, string);
2721 if (entry != NULL)
2722 {
2723 struct elf32_arm_stub_hash_entry *eh;
2724
2725 /* Initialize the local fields. */
2726 eh = (struct elf32_arm_stub_hash_entry *) entry;
2727 eh->stub_sec = NULL;
2728 eh->stub_offset = 0;
2729 eh->target_value = 0;
2730 eh->target_section = NULL;
2731 eh->target_addend = 0;
2732 eh->orig_insn = 0;
2733 eh->stub_type = arm_stub_none;
2734 eh->stub_size = 0;
2735 eh->stub_template = NULL;
2736 eh->stub_template_size = 0;
2737 eh->h = NULL;
2738 eh->id_sec = NULL;
2739 eh->output_name = NULL;
2740 }
2741
2742 return entry;
2743}
2744
2745/* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2746 shortcuts to them in our hash table. */
2747
2748static bfd_boolean
2749create_got_section (bfd *dynobj, struct bfd_link_info *info)
2750{
2751 struct elf32_arm_link_hash_table *htab;
2752
2753 htab = elf32_arm_hash_table (info);
2754 if (htab == NULL)
2755 return FALSE;
2756
2757 /* BPABI objects never have a GOT or associated sections. */
2758 if (htab->symbian_p)
2759 return TRUE;
2760
2761 if (! _bfd_elf_create_got_section (dynobj, info))
2762 return FALSE;
2763
2764 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2765 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2766 if (!htab->sgot || !htab->sgotplt)
2767 abort ();
2768
2769 htab->srelgot = bfd_get_section_by_name (dynobj,
2770 RELOC_SECTION (htab, ".got"));
2771 if (htab->srelgot == NULL)
2772 return FALSE;
2773 return TRUE;
2774}
2775
2776/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2777 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2778 hash table. */
2779
2780static bfd_boolean
2781elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2782{
2783 struct elf32_arm_link_hash_table *htab;
2784
2785 htab = elf32_arm_hash_table (info);
2786 if (htab == NULL)
2787 return FALSE;
2788
2789 if (!htab->sgot && !create_got_section (dynobj, info))
2790 return FALSE;
2791
2792 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2793 return FALSE;
2794
2795 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2796 htab->srelplt = bfd_get_section_by_name (dynobj,
2797 RELOC_SECTION (htab, ".plt"));
2798 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2799 if (!info->shared)
2800 htab->srelbss = bfd_get_section_by_name (dynobj,
2801 RELOC_SECTION (htab, ".bss"));
2802
2803 if (htab->vxworks_p)
2804 {
2805 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2806 return FALSE;
2807
2808 if (info->shared)
2809 {
2810 htab->plt_header_size = 0;
2811 htab->plt_entry_size
2812 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2813 }
2814 else
2815 {
2816 htab->plt_header_size
2817 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2818 htab->plt_entry_size
2819 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2820 }
2821 }
2822
2823 if (!htab->splt
2824 || !htab->srelplt
2825 || !htab->sdynbss
2826 || (!info->shared && !htab->srelbss))
2827 abort ();
2828
2829 return TRUE;
2830}
2831
2832/* Copy the extra info we tack onto an elf_link_hash_entry. */
2833
2834static void
2835elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2836 struct elf_link_hash_entry *dir,
2837 struct elf_link_hash_entry *ind)
2838{
2839 struct elf32_arm_link_hash_entry *edir, *eind;
2840
2841 edir = (struct elf32_arm_link_hash_entry *) dir;
2842 eind = (struct elf32_arm_link_hash_entry *) ind;
2843
2844 if (eind->relocs_copied != NULL)
2845 {
2846 if (edir->relocs_copied != NULL)
2847 {
2848 struct elf32_arm_relocs_copied **pp;
2849 struct elf32_arm_relocs_copied *p;
2850
2851 /* Add reloc counts against the indirect sym to the direct sym
2852 list. Merge any entries against the same section. */
2853 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2854 {
2855 struct elf32_arm_relocs_copied *q;
2856
2857 for (q = edir->relocs_copied; q != NULL; q = q->next)
2858 if (q->section == p->section)
2859 {
2860 q->pc_count += p->pc_count;
2861 q->count += p->count;
2862 *pp = p->next;
2863 break;
2864 }
2865 if (q == NULL)
2866 pp = &p->next;
2867 }
2868 *pp = edir->relocs_copied;
2869 }
2870
2871 edir->relocs_copied = eind->relocs_copied;
2872 eind->relocs_copied = NULL;
2873 }
2874
2875 if (ind->root.type == bfd_link_hash_indirect)
2876 {
2877 /* Copy over PLT info. */
2878 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2879 eind->plt_thumb_refcount = 0;
2880 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2881 eind->plt_maybe_thumb_refcount = 0;
2882
2883 if (dir->got.refcount <= 0)
2884 {
2885 edir->tls_type = eind->tls_type;
2886 eind->tls_type = GOT_UNKNOWN;
2887 }
2888 }
2889
2890 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2891}
2892
2893/* Create an ARM elf linker hash table. */
2894
2895static struct bfd_link_hash_table *
2896elf32_arm_link_hash_table_create (bfd *abfd)
2897{
2898 struct elf32_arm_link_hash_table *ret;
2899 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2900
2901 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
2902 if (ret == NULL)
2903 return NULL;
2904
2905 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2906 elf32_arm_link_hash_newfunc,
2907 sizeof (struct elf32_arm_link_hash_entry),
2908 ARM_ELF_DATA))
2909 {
2910 free (ret);
2911 return NULL;
2912 }
2913
2914 ret->sgot = NULL;
2915 ret->sgotplt = NULL;
2916 ret->srelgot = NULL;
2917 ret->splt = NULL;
2918 ret->srelplt = NULL;
2919 ret->sdynbss = NULL;
2920 ret->srelbss = NULL;
2921 ret->srelplt2 = NULL;
2922 ret->thumb_glue_size = 0;
2923 ret->arm_glue_size = 0;
2924 ret->bx_glue_size = 0;
2925 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2926 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2927 ret->vfp11_erratum_glue_size = 0;
2928 ret->num_vfp11_fixes = 0;
2929 ret->fix_cortex_a8 = 0;
2930 ret->bfd_of_glue_owner = NULL;
2931 ret->byteswap_code = 0;
2932 ret->target1_is_rel = 0;
2933 ret->target2_reloc = R_ARM_NONE;
2934#ifdef FOUR_WORD_PLT
2935 ret->plt_header_size = 16;
2936 ret->plt_entry_size = 16;
2937#else
2938 ret->plt_header_size = 20;
2939 ret->plt_entry_size = 12;
2940#endif
2941 ret->fix_v4bx = 0;
2942 ret->use_blx = 0;
2943 ret->vxworks_p = 0;
2944 ret->symbian_p = 0;
2945 ret->use_rel = 1;
2946 ret->sym_cache.abfd = NULL;
2947 ret->obfd = abfd;
2948 ret->tls_ldm_got.refcount = 0;
2949 ret->stub_bfd = NULL;
2950 ret->add_stub_section = NULL;
2951 ret->layout_sections_again = NULL;
2952 ret->stub_group = NULL;
2953 ret->top_id = 0;
2954 ret->bfd_count = 0;
2955 ret->top_index = 0;
2956 ret->input_list = NULL;
2957
2958 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2959 sizeof (struct elf32_arm_stub_hash_entry)))
2960 {
2961 free (ret);
2962 return NULL;
2963 }
2964
2965 return &ret->root.root;
2966}
2967
2968/* Free the derived linker hash table. */
2969
2970static void
2971elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2972{
2973 struct elf32_arm_link_hash_table *ret
2974 = (struct elf32_arm_link_hash_table *) hash;
2975
2976 bfd_hash_table_free (&ret->stub_hash_table);
2977 _bfd_generic_link_hash_table_free (hash);
2978}
2979
2980 /* Determine if we're dealing with a Thumb-only architecture. */
2981
2982static bfd_boolean
2983using_thumb_only (struct elf32_arm_link_hash_table *globals)
2984{
2985 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2986 Tag_CPU_arch);
2987 int profile;
2988
2989 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
2990 return TRUE;
2991
2992 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
2993 return FALSE;
2994
2995 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2996 Tag_CPU_arch_profile);
2997
2998 return profile == 'M';
2999}
3000
3001/* Determine if we're dealing with a Thumb-2 object. */
3002
3003static bfd_boolean
3004using_thumb2 (struct elf32_arm_link_hash_table *globals)
3005{
3006 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3007 Tag_CPU_arch);
3008 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3009}
3010
3011/* Determine what kind of NOPs are available. */
3012
3013static bfd_boolean
3014arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3015{
3016 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3017 Tag_CPU_arch);
3018 return arch == TAG_CPU_ARCH_V6T2
3019 || arch == TAG_CPU_ARCH_V6K
3020 || arch == TAG_CPU_ARCH_V7
3021 || arch == TAG_CPU_ARCH_V7E_M;
3022}
3023
3024static bfd_boolean
3025arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3026{
3027 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3028 Tag_CPU_arch);
3029 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3030 || arch == TAG_CPU_ARCH_V7E_M);
3031}
3032
3033static bfd_boolean
3034arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3035{
3036 switch (stub_type)
3037 {
3038 case arm_stub_long_branch_thumb_only:
3039 case arm_stub_long_branch_v4t_thumb_arm:
3040 case arm_stub_short_branch_v4t_thumb_arm:
3041 case arm_stub_long_branch_v4t_thumb_arm_pic:
3042 case arm_stub_long_branch_thumb_only_pic:
3043 return TRUE;
3044 case arm_stub_none:
3045 BFD_FAIL ();
3046 return FALSE;
3047 break;
3048 default:
3049 return FALSE;
3050 }
3051}
3052
3053/* Determine the type of stub needed, if any, for a call. */
3054
3055static enum elf32_arm_stub_type
3056arm_type_of_stub (struct bfd_link_info *info,
3057 asection *input_sec,
3058 const Elf_Internal_Rela *rel,
3059 int *actual_st_type,
3060 struct elf32_arm_link_hash_entry *hash,
3061 bfd_vma destination,
3062 asection *sym_sec,
3063 bfd *input_bfd,
3064 const char *name)
3065{
3066 bfd_vma location;
3067 bfd_signed_vma branch_offset;
3068 unsigned int r_type;
3069 struct elf32_arm_link_hash_table * globals;
3070 int thumb2;
3071 int thumb_only;
3072 enum elf32_arm_stub_type stub_type = arm_stub_none;
3073 int use_plt = 0;
3074 int st_type = *actual_st_type;
3075
3076 /* We don't know the actual type of destination in case it is of
3077 type STT_SECTION: give up. */
3078 if (st_type == STT_SECTION)
3079 return stub_type;
3080
3081 globals = elf32_arm_hash_table (info);
3082 if (globals == NULL)
3083 return stub_type;
3084
3085 thumb_only = using_thumb_only (globals);
3086
3087 thumb2 = using_thumb2 (globals);
3088
3089 /* Determine where the call point is. */
3090 location = (input_sec->output_offset
3091 + input_sec->output_section->vma
3092 + rel->r_offset);
3093
3094 r_type = ELF32_R_TYPE (rel->r_info);
3095
3096 /* Keep a simpler condition, for the sake of clarity. */
3097 if (globals->splt != NULL
3098 && hash != NULL
3099 && hash->root.plt.offset != (bfd_vma) -1)
3100 {
3101 use_plt = 1;
3102
3103 /* Note when dealing with PLT entries: the main PLT stub is in
3104 ARM mode, so if the branch is in Thumb mode, another
3105 Thumb->ARM stub will be inserted later just before the ARM
3106 PLT stub. We don't take this extra distance into account
3107 here, because if a long branch stub is needed, we'll add a
3108 Thumb->Arm one and branch directly to the ARM PLT entry,
3109 which avoids spreading offset corrections across several
3110 places. */
3111
3112 destination = (globals->splt->output_section->vma
3113 + globals->splt->output_offset
3114 + hash->root.plt.offset);
3115 st_type = STT_FUNC;
3116 }
3117
3118 branch_offset = (bfd_signed_vma)(destination - location);
3119
3120 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3121 {
3122 /* Handle cases where:
3123 - this call goes too far (different Thumb/Thumb2 max
3124 distance)
3125 - it's a Thumb->Arm call and blx is not available, or it's a
3126 Thumb->Arm branch (not bl). A stub is needed in this case,
3127 but only if this call is not through a PLT entry. Indeed,
3128 PLT stubs handle mode switching already.
3129 */
3130 if ((!thumb2
3131 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3132 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3133 || (thumb2
3134 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3135 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3136 || ((st_type != STT_ARM_TFUNC)
3137 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3138 || (r_type == R_ARM_THM_JUMP24))
3139 && !use_plt))
3140 {
3141 if (st_type == STT_ARM_TFUNC)
3142 {
3143 /* Thumb to thumb. */
3144 if (!thumb_only)
3145 {
3146 stub_type = (info->shared | globals->pic_veneer)
3147 /* PIC stubs. */
3148 ? ((globals->use_blx
3149 && (r_type == R_ARM_THM_CALL))
3150 /* V5T and above. Stub starts with ARM code, so
3151 we must be able to switch mode before
3152 reaching it, which is only possible for 'bl'
3153 (ie R_ARM_THM_CALL relocation). */
3154 ? arm_stub_long_branch_any_thumb_pic
3155 /* On V4T, use Thumb code only. */
3156 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3157
3158 /* non-PIC stubs. */
3159 : ((globals->use_blx
3160 && (r_type == R_ARM_THM_CALL))
3161 /* V5T and above. */
3162 ? arm_stub_long_branch_any_any
3163 /* V4T. */
3164 : arm_stub_long_branch_v4t_thumb_thumb);
3165 }
3166 else
3167 {
3168 stub_type = (info->shared | globals->pic_veneer)
3169 /* PIC stub. */
3170 ? arm_stub_long_branch_thumb_only_pic
3171 /* non-PIC stub. */
3172 : arm_stub_long_branch_thumb_only;
3173 }
3174 }
3175 else
3176 {
3177 /* Thumb to arm. */
3178 if (sym_sec != NULL
3179 && sym_sec->owner != NULL
3180 && !INTERWORK_FLAG (sym_sec->owner))
3181 {
3182 (*_bfd_error_handler)
3183 (_("%B(%s): warning: interworking not enabled.\n"
3184 " first occurrence: %B: Thumb call to ARM"),
3185 sym_sec->owner, input_bfd, name);
3186 }
3187
3188 stub_type = (info->shared | globals->pic_veneer)
3189 /* PIC stubs. */
3190 ? ((globals->use_blx
3191 && (r_type == R_ARM_THM_CALL))
3192 /* V5T and above. */
3193 ? arm_stub_long_branch_any_arm_pic
3194 /* V4T PIC stub. */
3195 : arm_stub_long_branch_v4t_thumb_arm_pic)
3196
3197 /* non-PIC stubs. */
3198 : ((globals->use_blx
3199 && (r_type == R_ARM_THM_CALL))
3200 /* V5T and above. */
3201 ? arm_stub_long_branch_any_any
3202 /* V4T. */
3203 : arm_stub_long_branch_v4t_thumb_arm);
3204
3205 /* Handle v4t short branches. */
3206 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3207 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3208 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3209 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3210 }
3211 }
3212 }
3213 else if (r_type == R_ARM_CALL
3214 || r_type == R_ARM_JUMP24
3215 || r_type == R_ARM_PLT32)
3216 {
3217 if (st_type == STT_ARM_TFUNC)
3218 {
3219 /* Arm to thumb. */
3220
3221 if (sym_sec != NULL
3222 && sym_sec->owner != NULL
3223 && !INTERWORK_FLAG (sym_sec->owner))
3224 {
3225 (*_bfd_error_handler)
3226 (_("%B(%s): warning: interworking not enabled.\n"
3227 " first occurrence: %B: ARM call to Thumb"),
3228 sym_sec->owner, input_bfd, name);
3229 }
3230
3231 /* We have an extra 2 bytes of reach because of
3232 the mode change (bit 24 (H) of the BLX encoding). */
3233 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3234 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3235 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3236 || (r_type == R_ARM_JUMP24)
3237 || (r_type == R_ARM_PLT32))
3238 {
3239 stub_type = (info->shared | globals->pic_veneer)
3240 /* PIC stubs. */
3241 ? ((globals->use_blx)
3242 /* V5T and above. */
3243 ? arm_stub_long_branch_any_thumb_pic
3244 /* V4T stub. */
3245 : arm_stub_long_branch_v4t_arm_thumb_pic)
3246
3247 /* non-PIC stubs. */
3248 : ((globals->use_blx)
3249 /* V5T and above. */
3250 ? arm_stub_long_branch_any_any
3251 /* V4T. */
3252 : arm_stub_long_branch_v4t_arm_thumb);
3253 }
3254 }
3255 else
3256 {
3257 /* Arm to arm. */
3258 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3259 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3260 {
3261 stub_type = (info->shared | globals->pic_veneer)
3262 /* PIC stubs. */
3263 ? arm_stub_long_branch_any_arm_pic
3264 /* non-PIC stubs. */
3265 : arm_stub_long_branch_any_any;
3266 }
3267 }
3268 }
3269
3270 /* If a stub is needed, record the actual destination type. */
3271 if (stub_type != arm_stub_none)
3272 *actual_st_type = st_type;
3273
3274 return stub_type;
3275}
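/* Example decision (illustrative, hypothetical distances): an
   R_ARM_THM_CALL from Thumb-2 code to an STT_FUNC (ARM) symbol
   roughly 20 MiB away exceeds THM2_MAX_FWD_BRANCH_OFFSET, so with
   use_blx set and no PIC veneers the function above would pick
   arm_stub_long_branch_any_any; the same call on a V4T target
   (no blx) would get arm_stub_long_branch_v4t_thumb_arm instead.  */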
3276
3277/* Build a name for an entry in the stub hash table. */
3278
3279static char *
3280elf32_arm_stub_name (const asection *input_section,
3281 const asection *sym_sec,
3282 const struct elf32_arm_link_hash_entry *hash,
3283 const Elf_Internal_Rela *rel,
3284 enum elf32_arm_stub_type stub_type)
3285{
3286 char *stub_name;
3287 bfd_size_type len;
3288
3289 if (hash)
3290 {
3291 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3292 stub_name = (char *) bfd_malloc (len);
3293 if (stub_name != NULL)
3294 sprintf (stub_name, "%08x_%s+%x_%d",
3295 input_section->id & 0xffffffff,
3296 hash->root.root.root.string,
3297 (int) rel->r_addend & 0xffffffff,
3298 (int) stub_type);
3299 }
3300 else
3301 {
3302 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3303 stub_name = (char *) bfd_malloc (len);
3304 if (stub_name != NULL)
3305 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3306 input_section->id & 0xffffffff,
3307 sym_sec->id & 0xffffffff,
3308 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3309 (int) rel->r_addend & 0xffffffff,
3310 (int) stub_type);
3311 }
3312
3313 return stub_name;
3314}
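/* Name format example (illustrative values): for a global symbol the
   name built above looks like

     "00000004_printf+0_7"

   i.e. input section id, symbol name, addend and stub type; for a
   local symbol the symbol name is replaced by the symbol section id
   and symbol index.  The exact numbers here are hypothetical.  */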
3315
3316/* Look up an entry in the stub hash. Stub entries are cached because
3317 creating the stub name takes a bit of time. */
3318
3319static struct elf32_arm_stub_hash_entry *
3320elf32_arm_get_stub_entry (const asection *input_section,
3321 const asection *sym_sec,
3322 struct elf_link_hash_entry *hash,
3323 const Elf_Internal_Rela *rel,
3324 struct elf32_arm_link_hash_table *htab,
3325 enum elf32_arm_stub_type stub_type)
3326{
3327 struct elf32_arm_stub_hash_entry *stub_entry;
3328 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3329 const asection *id_sec;
3330
3331 if ((input_section->flags & SEC_CODE) == 0)
3332 return NULL;
3333
3334 /* If this input section is part of a group of sections sharing one
3335 stub section, then use the id of the first section in the group.
3336 Stub names need to include a section id, as there may well be
3337 more than one stub used to reach, say, printf, and we need to
3338 distinguish between them. */
3339 id_sec = htab->stub_group[input_section->id].link_sec;
3340
3341 if (h != NULL && h->stub_cache != NULL
3342 && h->stub_cache->h == h
3343 && h->stub_cache->id_sec == id_sec
3344 && h->stub_cache->stub_type == stub_type)
3345 {
3346 stub_entry = h->stub_cache;
3347 }
3348 else
3349 {
3350 char *stub_name;
3351
3352 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3353 if (stub_name == NULL)
3354 return NULL;
3355
3356 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3357 stub_name, FALSE, FALSE);
3358 if (h != NULL)
3359 h->stub_cache = stub_entry;
3360
3361 free (stub_name);
3362 }
3363
3364 return stub_entry;
3365}
3366
3367/* Find or create a stub section. Returns a pointer to the stub section, and
3368 the section to which the stub section will be attached (in *LINK_SEC_P).
3369 LINK_SEC_P may be NULL. */
3370
3371static asection *
3372elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3373 struct elf32_arm_link_hash_table *htab)
3374{
3375 asection *link_sec;
3376 asection *stub_sec;
3377
3378 link_sec = htab->stub_group[section->id].link_sec;
3379 stub_sec = htab->stub_group[section->id].stub_sec;
3380 if (stub_sec == NULL)
3381 {
3382 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3383 if (stub_sec == NULL)
3384 {
3385 size_t namelen;
3386 bfd_size_type len;
3387 char *s_name;
3388
3389 namelen = strlen (link_sec->name);
3390 len = namelen + sizeof (STUB_SUFFIX);
3391 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3392 if (s_name == NULL)
3393 return NULL;
3394
3395 memcpy (s_name, link_sec->name, namelen);
3396 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3397 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3398 if (stub_sec == NULL)
3399 return NULL;
3400 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3401 }
3402 htab->stub_group[section->id].stub_sec = stub_sec;
3403 }
3404
3405 if (link_sec_p)
3406 *link_sec_p = link_sec;
3407
3408 return stub_sec;
3409}
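/* Naming sketch (illustrative): stub sections take the name of the
   group's link section plus STUB_SUFFIX, so input sections grouped
   under a section named ".text" would share a stub section called
   ".text.stub".  */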
3410
3411/* Add a new stub entry to the stub hash. Not all fields of the new
3412 stub entry are initialised. */
3413
3414static struct elf32_arm_stub_hash_entry *
3415elf32_arm_add_stub (const char *stub_name,
3416 asection *section,
3417 struct elf32_arm_link_hash_table *htab)
3418{
3419 asection *link_sec;
3420 asection *stub_sec;
3421 struct elf32_arm_stub_hash_entry *stub_entry;
3422
3423 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3424 if (stub_sec == NULL)
3425 return NULL;
3426
3427 /* Enter this entry into the linker stub hash table. */
3428 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3429 TRUE, FALSE);
3430 if (stub_entry == NULL)
3431 {
3432 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3433 section->owner,
3434 stub_name);
3435 return NULL;
3436 }
3437
3438 stub_entry->stub_sec = stub_sec;
3439 stub_entry->stub_offset = 0;
3440 stub_entry->id_sec = link_sec;
3441
3442 return stub_entry;
3443}
3444
3445/* Store an Arm insn into an output section not processed by
3446 elf32_arm_write_section. */
3447
3448static void
3449put_arm_insn (struct elf32_arm_link_hash_table * htab,
3450 bfd * output_bfd, bfd_vma val, void * ptr)
3451{
3452 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3453 bfd_putl32 (val, ptr);
3454 else
3455 bfd_putb32 (val, ptr);
3456}
3457
3458/* Store a 16-bit Thumb insn into an output section not processed by
3459 elf32_arm_write_section. */
3460
3461static void
3462put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3463 bfd * output_bfd, bfd_vma val, void * ptr)
3464{
3465 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3466 bfd_putl16 (val, ptr);
3467 else
3468 bfd_putb16 (val, ptr);
3469}
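/* Behaviour note (added commentary, derived from the helpers above):
   byteswap_code selects BE8-style images, where instructions stay
   little-endian even in a big-endian output:

     byteswap_code  output endianness  store used
     0              little             bfd_putl32 / bfd_putl16
     0              big                bfd_putb32 / bfd_putb16
     1              big                bfd_putl32 / bfd_putl16

   so only in the BE8 case (byteswap_code set, big-endian output) do
   instruction and data endianness differ.  */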
3470
3471static bfd_reloc_status_type elf32_arm_final_link_relocate
3472 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3473 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3474 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3475
3476static unsigned int
3477arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
3478{
3479 switch (stub_type)
3480 {
3481 case arm_stub_a8_veneer_b_cond:
3482 case arm_stub_a8_veneer_b:
3483 case arm_stub_a8_veneer_bl:
3484 return 2;
3485
3486 case arm_stub_long_branch_any_any:
3487 case arm_stub_long_branch_v4t_arm_thumb:
3488 case arm_stub_long_branch_thumb_only:
3489 case arm_stub_long_branch_v4t_thumb_thumb:
3490 case arm_stub_long_branch_v4t_thumb_arm:
3491 case arm_stub_short_branch_v4t_thumb_arm:
3492 case arm_stub_long_branch_any_arm_pic:
3493 case arm_stub_long_branch_any_thumb_pic:
3494 case arm_stub_long_branch_v4t_thumb_thumb_pic:
3495 case arm_stub_long_branch_v4t_arm_thumb_pic:
3496 case arm_stub_long_branch_v4t_thumb_arm_pic:
3497 case arm_stub_long_branch_thumb_only_pic:
3498 case arm_stub_a8_veneer_blx:
3499 return 4;
3500
3501 default:
3502 abort (); /* Should be unreachable. */
3503 }
3504}
3505
3506static bfd_boolean
3507arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3508 void * in_arg)
3509{
3510#define MAXRELOCS 2
3511 struct elf32_arm_stub_hash_entry *stub_entry;
3512 struct elf32_arm_link_hash_table *globals;
3513 struct bfd_link_info *info;
3514 asection *stub_sec;
3515 bfd *stub_bfd;
3516 bfd_byte *loc;
3517 bfd_vma sym_value;
3518 int template_size;
3519 int size;
3520 const insn_sequence *template_sequence;
3521 int i;
3522 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3523 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3524 int nrelocs = 0;
3525
3526 /* Massage our args to the form they really have. */
3527 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3528 info = (struct bfd_link_info *) in_arg;
3529
3530 globals = elf32_arm_hash_table (info);
3531 if (globals == NULL)
3532 return FALSE;
3533
3534 stub_sec = stub_entry->stub_sec;
3535
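  /* When the Cortex-A8 workaround is enabled, elf32_arm_build_stubs runs
     this function over the stub hash table twice: first with
     fix_cortex_a8 still positive, building only the 4-byte-aligned stubs,
     and then with fix_cortex_a8 set to -1, building the 2-byte-aligned
     Cortex-A8 veneers.  The test below skips whichever stubs do not
     belong to the current pass, so the A8 veneers end up last.  */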
3536 if ((globals->fix_cortex_a8 < 0)
3537 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
3538 /* We have to do less-strictly-aligned fixes last. */
3539 return TRUE;
3540
3541 /* Make a note of the offset within the stubs for this entry. */
3542 stub_entry->stub_offset = stub_sec->size;
3543 loc = stub_sec->contents + stub_entry->stub_offset;
3544
3545 stub_bfd = stub_sec->owner;
3546
3547 /* This is the address of the stub destination. */
3548 sym_value = (stub_entry->target_value
3549 + stub_entry->target_section->output_offset
3550 + stub_entry->target_section->output_section->vma);
3551
3552 template_sequence = stub_entry->stub_template;
3553 template_size = stub_entry->stub_template_size;
3554
3555 size = 0;
3556 for (i = 0; i < template_size; i++)
3557 {
3558 switch (template_sequence[i].type)
3559 {
3560 case THUMB16_TYPE:
3561 {
3562 bfd_vma data = (bfd_vma) template_sequence[i].data;
3563 if (template_sequence[i].reloc_addend != 0)
3564 {
3565 /* We've borrowed the reloc_addend field to mean we should
3566 insert a condition code into this (Thumb-1 branch)
3567 instruction. See THUMB16_BCOND_INSN. */
3568 BFD_ASSERT ((data & 0xff00) == 0xd000);
3569 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
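	      /* For example, a condition field of 0xb (LT) in bits 25:22
		 of the original Thumb-2 B<c>.W turns the 0xd000 template
		 opcode into 0xdb00, i.e. a 16-bit "blt".  */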
3570 }
3571 bfd_put_16 (stub_bfd, data, loc + size);
3572 size += 2;
3573 }
3574 break;
3575
3576 case THUMB32_TYPE:
3577 bfd_put_16 (stub_bfd,
3578 (template_sequence[i].data >> 16) & 0xffff,
3579 loc + size);
3580 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
3581 loc + size + 2);
3582 if (template_sequence[i].r_type != R_ARM_NONE)
3583 {
3584 stub_reloc_idx[nrelocs] = i;
3585 stub_reloc_offset[nrelocs++] = size;
3586 }
3587 size += 4;
3588 break;
3589
3590 case ARM_TYPE:
3591 bfd_put_32 (stub_bfd, template_sequence[i].data,
3592 loc + size);
3593 /* Handle cases where the target is encoded within the
3594 instruction. */
3595 if (template_sequence[i].r_type == R_ARM_JUMP24)
3596 {
3597 stub_reloc_idx[nrelocs] = i;
3598 stub_reloc_offset[nrelocs++] = size;
3599 }
3600 size += 4;
3601 break;
3602
3603 case DATA_TYPE:
3604 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
3605 stub_reloc_idx[nrelocs] = i;
3606 stub_reloc_offset[nrelocs++] = size;
3607 size += 4;
3608 break;
3609
3610 default:
3611 BFD_FAIL ();
3612 return FALSE;
3613 }
3614 }
3615
3616 stub_sec->size += size;
3617
3618 /* Stub size has already been computed in arm_size_one_stub. Check
3619 consistency. */
3620 BFD_ASSERT (size == stub_entry->stub_size);
3621
3622 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3623 if (stub_entry->st_type == STT_ARM_TFUNC)
3624 sym_value |= 1;
3625
3626  /* Assume each stub has at least one and at most MAXRELOCS entries
3627     to relocate.  */
3628 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3629
3630 for (i = 0; i < nrelocs; i++)
3631 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3632 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3633 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3634 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3635 {
3636 Elf_Internal_Rela rel;
3637 bfd_boolean unresolved_reloc;
3638 char *error_message;
3639 int sym_flags
3640 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3641 ? STT_ARM_TFUNC : 0;
3642 bfd_vma points_to = sym_value + stub_entry->target_addend;
3643
3644 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3645 rel.r_info = ELF32_R_INFO (0,
3646 template_sequence[stub_reloc_idx[i]].r_type);
3647 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
3648
3649 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3650 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3651 template should refer back to the instruction after the original
3652 branch. */
3653 points_to = sym_value;
3654
3655 /* There may be unintended consequences if this is not true. */
3656 BFD_ASSERT (stub_entry->h == NULL);
3657
3658 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3659 properly. We should probably use this function unconditionally,
3660 rather than only for certain relocations listed in the enclosing
3661 conditional, for the sake of consistency. */
3662 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3663 (template_sequence[stub_reloc_idx[i]].r_type),
3664 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3665 points_to, info, stub_entry->target_section, "", sym_flags,
3666 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3667 &error_message);
3668 }
3669 else
3670 {
3671 Elf_Internal_Rela rel;
3672 bfd_boolean unresolved_reloc;
3673 char *error_message;
3674 bfd_vma points_to = sym_value + stub_entry->target_addend
3675 + template_sequence[stub_reloc_idx[i]].reloc_addend;
3676
3677 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3678 rel.r_info = ELF32_R_INFO (0,
3679 template_sequence[stub_reloc_idx[i]].r_type);
3680 rel.r_addend = 0;
3681
3682 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3683 (template_sequence[stub_reloc_idx[i]].r_type),
3684 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3685 points_to, info, stub_entry->target_section, "", stub_entry->st_type,
3686 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3687 &error_message);
3688 }
3689
3690 return TRUE;
3691#undef MAXRELOCS
3692}
3693
3694/* Calculate the template, template size and instruction size for a stub.
3695   Return value is the total size, in bytes, of the stub's instructions.  */
3696
3697static unsigned int
3698find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3699 const insn_sequence **stub_template,
3700 int *stub_template_size)
3701{
3702 const insn_sequence *template_sequence = NULL;
3703 int template_size = 0, i;
3704 unsigned int size;
3705
3706 template_sequence = stub_definitions[stub_type].template_sequence;
3707 if (stub_template)
3708 *stub_template = template_sequence;
3709
3710 template_size = stub_definitions[stub_type].template_size;
3711 if (stub_template_size)
3712 *stub_template_size = template_size;
3713
3714 size = 0;
3715 for (i = 0; i < template_size; i++)
3716 {
3717 switch (template_sequence[i].type)
3718 {
3719 case THUMB16_TYPE:
3720 size += 2;
3721 break;
3722
3723 case ARM_TYPE:
3724 case THUMB32_TYPE:
3725 case DATA_TYPE:
3726 size += 4;
3727 break;
3728
3729 default:
3730 BFD_FAIL ();
3731 return 0;
3732 }
3733 }
3734
3735 return size;
3736}
3737
3738/* As arm_build_one_stub, but don't actually build the stub.  Just bump
3739   the offset so we know stub section sizes.  */
3740
3741static bfd_boolean
3742arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3743 void *in_arg ATTRIBUTE_UNUSED)
3744{
3745 struct elf32_arm_stub_hash_entry *stub_entry;
3746 const insn_sequence *template_sequence;
3747 int template_size, size;
3748
3749 /* Massage our args to the form they really have. */
3750 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3751
3752 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
3753 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
3754
3755 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
3756 &template_size);
3757
3758 stub_entry->stub_size = size;
3759 stub_entry->stub_template = template_sequence;
3760 stub_entry->stub_template_size = template_size;
3761
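  /* Reserve a multiple of 8 bytes for each stub.  The build pass
     (arm_build_one_stub) lays stubs out without this padding, so this
     only slightly over-estimates the space required.  */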
3762 size = (size + 7) & ~7;
3763 stub_entry->stub_sec->size += size;
3764
3765 return TRUE;
3766}
3767
3768/* External entry points for sizing and building linker stubs. */
3769
3770/* Set up various things so that we can make a list of input sections
3771 for each output section included in the link. Returns -1 on error,
3772 0 when no stubs will be needed, and 1 on success. */
3773
3774int
3775elf32_arm_setup_section_lists (bfd *output_bfd,
3776 struct bfd_link_info *info)
3777{
3778 bfd *input_bfd;
3779 unsigned int bfd_count;
3780 int top_id, top_index;
3781 asection *section;
3782 asection **input_list, **list;
3783 bfd_size_type amt;
3784 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3785
3786 if (htab == NULL)
3787 return 0;
3788 if (! is_elf_hash_table (htab))
3789 return 0;
3790
3791 /* Count the number of input BFDs and find the top input section id. */
3792 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3793 input_bfd != NULL;
3794 input_bfd = input_bfd->link_next)
3795 {
3796 bfd_count += 1;
3797 for (section = input_bfd->sections;
3798 section != NULL;
3799 section = section->next)
3800 {
3801 if (top_id < section->id)
3802 top_id = section->id;
3803 }
3804 }
3805 htab->bfd_count = bfd_count;
3806
3807 amt = sizeof (struct map_stub) * (top_id + 1);
3808 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
3809 if (htab->stub_group == NULL)
3810 return -1;
3811 htab->top_id = top_id;
3812
3813 /* We can't use output_bfd->section_count here to find the top output
3814 section index as some sections may have been removed, and
3815 _bfd_strip_section_from_output doesn't renumber the indices. */
3816 for (section = output_bfd->sections, top_index = 0;
3817 section != NULL;
3818 section = section->next)
3819 {
3820 if (top_index < section->index)
3821 top_index = section->index;
3822 }
3823
3824 htab->top_index = top_index;
3825 amt = sizeof (asection *) * (top_index + 1);
3826 input_list = (asection **) bfd_malloc (amt);
3827 htab->input_list = input_list;
3828 if (input_list == NULL)
3829 return -1;
3830
3831 /* For sections we aren't interested in, mark their entries with a
3832 value we can check later. */
3833 list = input_list + top_index;
3834 do
3835 *list = bfd_abs_section_ptr;
3836 while (list-- != input_list);
3837
3838 for (section = output_bfd->sections;
3839 section != NULL;
3840 section = section->next)
3841 {
3842 if ((section->flags & SEC_CODE) != 0)
3843 input_list[section->index] = NULL;
3844 }
3845
3846 return 1;
3847}
3848
3849/* The linker repeatedly calls this function for each input section,
3850 in the order that input sections are linked into output sections.
3851 Build lists of input sections to determine groupings between which
3852 we may insert linker stubs. */
3853
3854void
3855elf32_arm_next_input_section (struct bfd_link_info *info,
3856 asection *isec)
3857{
3858 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3859
3860 if (htab == NULL)
3861 return;
3862
3863 if (isec->output_section->index <= htab->top_index)
3864 {
3865 asection **list = htab->input_list + isec->output_section->index;
3866
3867 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3868 {
3869 /* Steal the link_sec pointer for our list. */
3870#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3871 /* This happens to make the list in reverse order,
3872 which we reverse later. */
3873 PREV_SEC (isec) = *list;
3874 *list = isec;
3875 }
3876 }
3877}
3878
3879/* See whether we can group stub sections together. Grouping stub
3880 sections may result in fewer stubs. More importantly, we need to
3881 put all .init* and .fini* stubs at the end of the .init or
3882 .fini output sections respectively, because glibc splits the
3883 _init and _fini functions into multiple parts. Putting a stub in
3884 the middle of a function is not a good idea. */
3885
3886static void
3887group_sections (struct elf32_arm_link_hash_table *htab,
3888 bfd_size_type stub_group_size,
3889 bfd_boolean stubs_always_after_branch)
3890{
3891 asection **list = htab->input_list;
3892
3893 do
3894 {
3895 asection *tail = *list;
3896 asection *head;
3897
3898 if (tail == bfd_abs_section_ptr)
3899 continue;
3900
3901 /* Reverse the list: we must avoid placing stubs at the
3902 beginning of the section because the beginning of the text
3903 section may be required for an interrupt vector in bare metal
3904 code. */
3905#define NEXT_SEC PREV_SEC
3906 head = NULL;
3907 while (tail != NULL)
3908 {
3909 /* Pop from tail. */
3910 asection *item = tail;
3911 tail = PREV_SEC (item);
3912
3913 /* Push on head. */
3914 NEXT_SEC (item) = head;
3915 head = item;
3916 }
3917
3918 while (head != NULL)
3919 {
3920 asection *curr;
3921 asection *next;
3922 bfd_vma stub_group_start = head->output_offset;
3923 bfd_vma end_of_next;
3924
3925 curr = head;
3926 while (NEXT_SEC (curr) != NULL)
3927 {
3928 next = NEXT_SEC (curr);
3929 end_of_next = next->output_offset + next->size;
3930 if (end_of_next - stub_group_start >= stub_group_size)
3931 /* End of NEXT is too far from start, so stop. */
3932 break;
3933 /* Add NEXT to the group. */
3934 curr = next;
3935 }
3936
3937 /* OK, the size from the start to the start of CURR is less
3938 than stub_group_size and thus can be handled by one stub
3939 section. (Or the head section is itself larger than
3940 stub_group_size, in which case we may be toast.)
3941 We should really be keeping track of the total size of
3942 stubs added here, as stubs contribute to the final output
3943 section size. */
3944 do
3945 {
3946 next = NEXT_SEC (head);
3947 /* Set up this stub group. */
3948 htab->stub_group[head->id].link_sec = curr;
3949 }
3950 while (head != curr && (head = next) != NULL);
3951
3952 /* But wait, there's more! Input sections up to stub_group_size
3953 bytes after the stub section can be handled by it too. */
3954 if (!stubs_always_after_branch)
3955 {
3956 stub_group_start = curr->output_offset + curr->size;
3957
3958 while (next != NULL)
3959 {
3960 end_of_next = next->output_offset + next->size;
3961 if (end_of_next - stub_group_start >= stub_group_size)
3962 /* End of NEXT is too far from stubs, so stop. */
3963 break;
3964 /* Add NEXT to the stub group. */
3965 head = next;
3966 next = NEXT_SEC (head);
3967 htab->stub_group[head->id].link_sec = curr;
3968 }
3969 }
3970 head = next;
3971 }
3972 }
3973 while (list++ != htab->input_list + htab->top_index);
3974
3975 free (htab->input_list);
3976#undef PREV_SEC
3977#undef NEXT_SEC
3978}
3979
3980/* Comparison function for sorting/searching relocations relating to Cortex-A8
3981 erratum fix. */
3982
3983static int
3984a8_reloc_compare (const void *a, const void *b)
3985{
3986 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
3987 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
3988
3989 if (ra->from < rb->from)
3990 return -1;
3991 else if (ra->from > rb->from)
3992 return 1;
3993 else
3994 return 0;
3995}
3996
3997static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3998 const char *, char **);
3999
4000/* Helper function to scan code for sequences which might trigger the Cortex-A8
4001 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4002 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4003 otherwise. */
4004
4005static bfd_boolean
4006cortex_a8_erratum_scan (bfd *input_bfd,
4007 struct bfd_link_info *info,
4008 struct a8_erratum_fix **a8_fixes_p,
4009 unsigned int *num_a8_fixes_p,
4010 unsigned int *a8_fix_table_size_p,
4011 struct a8_erratum_reloc *a8_relocs,
4012 unsigned int num_a8_relocs,
4013 unsigned prev_num_a8_fixes,
4014 bfd_boolean *stub_changed_p)
4015{
4016 asection *section;
4017 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4018 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4019 unsigned int num_a8_fixes = *num_a8_fixes_p;
4020 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4021
4022 if (htab == NULL)
4023 return FALSE;
4024
4025 for (section = input_bfd->sections;
4026 section != NULL;
4027 section = section->next)
4028 {
4029 bfd_byte *contents = NULL;
4030 struct _arm_elf_section_data *sec_data;
4031 unsigned int span;
4032 bfd_vma base_vma;
4033
4034 if (elf_section_type (section) != SHT_PROGBITS
4035 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4036 || (section->flags & SEC_EXCLUDE) != 0
4037 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
4038 || (section->output_section == bfd_abs_section_ptr))
4039 continue;
4040
4041 base_vma = section->output_section->vma + section->output_offset;
4042
4043 if (elf_section_data (section)->this_hdr.contents != NULL)
4044 contents = elf_section_data (section)->this_hdr.contents;
4045 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4046 return TRUE;
4047
4048 sec_data = elf32_arm_section_data (section);
4049
4050 for (span = 0; span < sec_data->mapcount; span++)
4051 {
4052 unsigned int span_start = sec_data->map[span].vma;
4053 unsigned int span_end = (span == sec_data->mapcount - 1)
4054 ? section->size : sec_data->map[span + 1].vma;
4055 unsigned int i;
4056 char span_type = sec_data->map[span].type;
4057 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4058
4059 if (span_type != 't')
4060 continue;
4061
4062 /* Span is entirely within a single 4KB region: skip scanning. */
4063 if (((base_vma + span_start) & ~0xfff)
4064 == ((base_vma + span_end) & ~0xfff))
4065 continue;
4066
4067 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4068
4069 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4070 * The branch target is in the same 4KB region as the
4071 first half of the branch.
4072 * The instruction before the branch is a 32-bit
4073 length non-branch instruction. */
4074 for (i = span_start; i < span_end;)
4075 {
4076 unsigned int insn = bfd_getl16 (&contents[i]);
4077 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4078 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4079
4080 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4081 insn_32bit = TRUE;
4082
4083 if (insn_32bit)
4084 {
4085 /* Load the rest of the insn (in manual-friendly order). */
4086 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4087
4088 /* Encoding T4: B<c>.W. */
4089 is_b = (insn & 0xf800d000) == 0xf0009000;
4090 /* Encoding T1: BL<c>.W. */
4091 is_bl = (insn & 0xf800d000) == 0xf000d000;
4092 /* Encoding T2: BLX<c>.W. */
4093 is_blx = (insn & 0xf800d000) == 0xf000c000;
4094 /* Encoding T3: B<c>.W (not permitted in IT block). */
4095 is_bcc = (insn & 0xf800d000) == 0xf0008000
4096 && (insn & 0x07f00000) != 0x03800000;
4097 }
4098
4099 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4100
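	      /* A candidate for the erratum is a 32-bit branch whose first
		 halfword occupies the last two bytes of a 4KB page (so the
		 instruction straddles a page boundary) and which directly
		 follows a 32-bit non-branch instruction.  */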
4101 if (((base_vma + i) & 0xfff) == 0xffe
4102 && insn_32bit
4103 && is_32bit_branch
4104 && last_was_32bit
4105 && ! last_was_branch)
4106 {
4107 bfd_signed_vma offset = 0;
4108 bfd_boolean force_target_arm = FALSE;
4109 bfd_boolean force_target_thumb = FALSE;
4110 bfd_vma target;
4111 enum elf32_arm_stub_type stub_type = arm_stub_none;
4112 struct a8_erratum_reloc key, *found;
4113
4114 key.from = base_vma + i;
4115 found = (struct a8_erratum_reloc *)
4116 bsearch (&key, a8_relocs, num_a8_relocs,
4117 sizeof (struct a8_erratum_reloc),
4118 &a8_reloc_compare);
4119
4120 if (found)
4121 {
4122 char *error_message = NULL;
4123 struct elf_link_hash_entry *entry;
4124 bfd_boolean use_plt = FALSE;
4125
4126 /* We don't care about the error returned from this
4127 function, only if there is glue or not. */
4128 entry = find_thumb_glue (info, found->sym_name,
4129 &error_message);
4130
4131 if (entry)
4132 found->non_a8_stub = TRUE;
4133
4134 /* Keep a simpler condition, for the sake of clarity. */
4135 if (htab->splt != NULL && found->hash != NULL
4136 && found->hash->root.plt.offset != (bfd_vma) -1)
4137 use_plt = TRUE;
4138
4139 if (found->r_type == R_ARM_THM_CALL)
4140 {
4141 if (found->st_type != STT_ARM_TFUNC || use_plt)
4142 force_target_arm = TRUE;
4143 else
4144 force_target_thumb = TRUE;
4145 }
4146 }
4147
4148 /* Check if we have an offending branch instruction. */
4149
4150 if (found && found->non_a8_stub)
4151 /* We've already made a stub for this instruction, e.g.
4152 it's a long branch or a Thumb->ARM stub. Assume that
4153 stub will suffice to work around the A8 erratum (see
4154 setting of always_after_branch above). */
4155 ;
4156 else if (is_bcc)
4157 {
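			  /* Conditional branch, encoding T3: the offset is
			     SignExtend (S:J2:J1:imm6:imm11:'0'), reassembled
			     here field by field.  */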
4158 offset = (insn & 0x7ff) << 1;
4159 offset |= (insn & 0x3f0000) >> 4;
4160 offset |= (insn & 0x2000) ? 0x40000 : 0;
4161 offset |= (insn & 0x800) ? 0x80000 : 0;
4162 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4163 if (offset & 0x100000)
4164 offset |= ~ ((bfd_signed_vma) 0xfffff);
4165 stub_type = arm_stub_a8_veneer_b_cond;
4166 }
4167 else if (is_b || is_bl || is_blx)
4168 {
4169 int s = (insn & 0x4000000) != 0;
4170 int j1 = (insn & 0x2000) != 0;
4171 int j2 = (insn & 0x800) != 0;
4172 int i1 = !(j1 ^ s);
4173 int i2 = !(j2 ^ s);
4174
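			  /* B.W/BL/BLX, encodings T4/T1/T2: the offset is
			     SignExtend (S:I1:I2:imm10:imm11:'0'), where
			     I1 = NOT (J1 EOR S) and I2 = NOT (J2 EOR S).  */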
4175 offset = (insn & 0x7ff) << 1;
4176 offset |= (insn & 0x3ff0000) >> 4;
4177 offset |= i2 << 22;
4178 offset |= i1 << 23;
4179 offset |= s << 24;
4180 if (offset & 0x1000000)
4181 offset |= ~ ((bfd_signed_vma) 0xffffff);
4182
4183 if (is_blx)
4184 offset &= ~ ((bfd_signed_vma) 3);
4185
4186 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4187 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4188 }
4189
4190 if (stub_type != arm_stub_none)
4191 {
4192 bfd_vma pc_for_insn = base_vma + i + 4;
4193
4194 /* The original instruction is a BL, but the target is
4195 an ARM instruction. If we were not making a stub,
4196 the BL would have been converted to a BLX. Use the
4197 BLX stub instead in that case. */
4198 if (htab->use_blx && force_target_arm
4199 && stub_type == arm_stub_a8_veneer_bl)
4200 {
4201 stub_type = arm_stub_a8_veneer_blx;
4202 is_blx = TRUE;
4203 is_bl = FALSE;
4204 }
4205 /* Conversely, if the original instruction was
4206 BLX but the target is Thumb mode, use the BL
4207 stub. */
4208 else if (force_target_thumb
4209 && stub_type == arm_stub_a8_veneer_blx)
4210 {
4211 stub_type = arm_stub_a8_veneer_bl;
4212 is_blx = FALSE;
4213 is_bl = TRUE;
4214 }
4215
4216 if (is_blx)
4217 pc_for_insn &= ~ ((bfd_vma) 3);
4218
4219 /* If we found a relocation, use the proper destination,
4220 not the offset in the (unrelocated) instruction.
4221 Note this is always done if we switched the stub type
4222 above. */
4223 if (found)
4224 offset =
4225 (bfd_signed_vma) (found->destination - pc_for_insn);
4226
4227 target = pc_for_insn + offset;
4228
4229 /* The BLX stub is ARM-mode code. Adjust the offset to
4230 take the different PC value (+8 instead of +4) into
4231 account. */
4232 if (stub_type == arm_stub_a8_veneer_blx)
4233 offset += 4;
4234
4235 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4236 {
4237 char *stub_name = NULL;
4238
4239 if (num_a8_fixes == a8_fix_table_size)
4240 {
4241 a8_fix_table_size *= 2;
4242 a8_fixes = (struct a8_erratum_fix *)
4243 bfd_realloc (a8_fixes,
4244 sizeof (struct a8_erratum_fix)
4245 * a8_fix_table_size);
4246 }
4247
4248 if (num_a8_fixes < prev_num_a8_fixes)
4249 {
4250 /* If we're doing a subsequent scan,
4251 check if we've found the same fix as
4252 before, and try and reuse the stub
4253 name. */
4254 stub_name = a8_fixes[num_a8_fixes].stub_name;
4255 if ((a8_fixes[num_a8_fixes].section != section)
4256 || (a8_fixes[num_a8_fixes].offset != i))
4257 {
4258 free (stub_name);
4259 stub_name = NULL;
4260 *stub_changed_p = TRUE;
4261 }
4262 }
4263
4264 if (!stub_name)
4265 {
4266 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4267 if (stub_name != NULL)
4268 sprintf (stub_name, "%x:%x", section->id, i);
4269 }
4270
4271 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4272 a8_fixes[num_a8_fixes].section = section;
4273 a8_fixes[num_a8_fixes].offset = i;
4274 a8_fixes[num_a8_fixes].addend = offset;
4275 a8_fixes[num_a8_fixes].orig_insn = insn;
4276 a8_fixes[num_a8_fixes].stub_name = stub_name;
4277 a8_fixes[num_a8_fixes].stub_type = stub_type;
4278 a8_fixes[num_a8_fixes].st_type =
4279 is_blx ? STT_FUNC : STT_ARM_TFUNC;
4280
4281 num_a8_fixes++;
4282 }
4283 }
4284 }
4285
4286 i += insn_32bit ? 4 : 2;
4287 last_was_32bit = insn_32bit;
4288 last_was_branch = is_32bit_branch;
4289 }
4290 }
4291
4292 if (elf_section_data (section)->this_hdr.contents == NULL)
4293 free (contents);
4294 }
4295
4296 *a8_fixes_p = a8_fixes;
4297 *num_a8_fixes_p = num_a8_fixes;
4298 *a8_fix_table_size_p = a8_fix_table_size;
4299
4300 return FALSE;
4301}
4302
4303/* Determine and set the size of the stub section for a final link.
4304
4305 The basic idea here is to examine all the relocations looking for
4306 PC-relative calls to a target that is unreachable with a "bl"
4307 instruction. */
4308
4309bfd_boolean
4310elf32_arm_size_stubs (bfd *output_bfd,
4311 bfd *stub_bfd,
4312 struct bfd_link_info *info,
4313 bfd_signed_vma group_size,
4314 asection * (*add_stub_section) (const char *, asection *),
4315 void (*layout_sections_again) (void))
4316{
4317 bfd_size_type stub_group_size;
4318 bfd_boolean stubs_always_after_branch;
4319 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4320 struct a8_erratum_fix *a8_fixes = NULL;
4321 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4322 struct a8_erratum_reloc *a8_relocs = NULL;
4323 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4324
4325 if (htab == NULL)
4326 return FALSE;
4327
4328 if (htab->fix_cortex_a8)
4329 {
4330 a8_fixes = (struct a8_erratum_fix *)
4331 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4332 a8_relocs = (struct a8_erratum_reloc *)
4333 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4334 }
4335
4336 /* Propagate mach to stub bfd, because it may not have been
4337 finalized when we created stub_bfd. */
4338 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4339 bfd_get_mach (output_bfd));
4340
4341 /* Stash our params away. */
4342 htab->stub_bfd = stub_bfd;
4343 htab->add_stub_section = add_stub_section;
4344 htab->layout_sections_again = layout_sections_again;
4345 stubs_always_after_branch = group_size < 0;
4346
4347 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4348 as the first half of a 32-bit branch straddling two 4K pages. This is a
4349 crude way of enforcing that. */
4350 if (htab->fix_cortex_a8)
4351 stubs_always_after_branch = 1;
4352
4353 if (group_size < 0)
4354 stub_group_size = -group_size;
4355 else
4356 stub_group_size = group_size;
4357
4358 if (stub_group_size == 1)
4359 {
4360 /* Default values. */
4361      /* The Thumb branch range of +-4MB has to be used as the default
4362	 maximum group size (a given section can contain both ARM and
4363	 Thumb code, so the worst case has to be taken into account).
4364
4365	 This value is about 24K less than that, which allows for 2025
4366	 12-byte stubs.  If we exceed that, then we will fail to link.
4367	 The user will have to relink with an explicit group size
4368	 option.  */
4369 stub_group_size = 4170000;
4370 }
4371
4372 group_sections (htab, stub_group_size, stubs_always_after_branch);
4373
4374 /* If we're applying the cortex A8 fix, we need to determine the
4375 program header size now, because we cannot change it later --
4376 that could alter section placements. Notice the A8 erratum fix
4377 ends up requiring the section addresses to remain unchanged
4378 modulo the page size. That's something we cannot represent
4379 inside BFD, and we don't want to force the section alignment to
4380 be the page size. */
4381 if (htab->fix_cortex_a8)
4382 (*htab->layout_sections_again) ();
4383
4384 while (1)
4385 {
4386 bfd *input_bfd;
4387 unsigned int bfd_indx;
4388 asection *stub_sec;
4389 bfd_boolean stub_changed = FALSE;
4390 unsigned prev_num_a8_fixes = num_a8_fixes;
4391
4392 num_a8_fixes = 0;
4393 for (input_bfd = info->input_bfds, bfd_indx = 0;
4394 input_bfd != NULL;
4395 input_bfd = input_bfd->link_next, bfd_indx++)
4396 {
4397 Elf_Internal_Shdr *symtab_hdr;
4398 asection *section;
4399 Elf_Internal_Sym *local_syms = NULL;
4400
4401 num_a8_relocs = 0;
4402
4403 /* We'll need the symbol table in a second. */
4404 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4405 if (symtab_hdr->sh_info == 0)
4406 continue;
4407
4408 /* Walk over each section attached to the input bfd. */
4409 for (section = input_bfd->sections;
4410 section != NULL;
4411 section = section->next)
4412 {
4413 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4414
4415 /* If there aren't any relocs, then there's nothing more
4416 to do. */
4417 if ((section->flags & SEC_RELOC) == 0
4418 || section->reloc_count == 0
4419 || (section->flags & SEC_CODE) == 0)
4420 continue;
4421
4422 /* If this section is a link-once section that will be
4423 discarded, then don't create any stubs. */
4424 if (section->output_section == NULL
4425 || section->output_section->owner != output_bfd)
4426 continue;
4427
4428 /* Get the relocs. */
4429 internal_relocs
4430 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4431 NULL, info->keep_memory);
4432 if (internal_relocs == NULL)
4433 goto error_ret_free_local;
4434
4435 /* Now examine each relocation. */
4436 irela = internal_relocs;
4437 irelaend = irela + section->reloc_count;
4438 for (; irela < irelaend; irela++)
4439 {
4440 unsigned int r_type, r_indx;
4441 enum elf32_arm_stub_type stub_type;
4442 struct elf32_arm_stub_hash_entry *stub_entry;
4443 asection *sym_sec;
4444 bfd_vma sym_value;
4445 bfd_vma destination;
4446 struct elf32_arm_link_hash_entry *hash;
4447 const char *sym_name;
4448 char *stub_name;
4449 const asection *id_sec;
4450 int st_type;
4451 bfd_boolean created_stub = FALSE;
4452
4453 r_type = ELF32_R_TYPE (irela->r_info);
4454 r_indx = ELF32_R_SYM (irela->r_info);
4455
4456 if (r_type >= (unsigned int) R_ARM_max)
4457 {
4458 bfd_set_error (bfd_error_bad_value);
4459 error_ret_free_internal:
4460 if (elf_section_data (section)->relocs == NULL)
4461 free (internal_relocs);
4462 goto error_ret_free_local;
4463 }
4464
4465 /* Only look for stubs on branch instructions. */
4466 if ((r_type != (unsigned int) R_ARM_CALL)
4467 && (r_type != (unsigned int) R_ARM_THM_CALL)
4468 && (r_type != (unsigned int) R_ARM_JUMP24)
4469 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4470 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4471 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4472 && (r_type != (unsigned int) R_ARM_PLT32))
4473 continue;
4474
4475 /* Now determine the call target, its name, value,
4476 section. */
4477 sym_sec = NULL;
4478 sym_value = 0;
4479 destination = 0;
4480 hash = NULL;
4481 sym_name = NULL;
4482 if (r_indx < symtab_hdr->sh_info)
4483 {
4484 /* It's a local symbol. */
4485 Elf_Internal_Sym *sym;
4486
4487 if (local_syms == NULL)
4488 {
4489 local_syms
4490 = (Elf_Internal_Sym *) symtab_hdr->contents;
4491 if (local_syms == NULL)
4492 local_syms
4493 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4494 symtab_hdr->sh_info, 0,
4495 NULL, NULL, NULL);
4496 if (local_syms == NULL)
4497 goto error_ret_free_internal;
4498 }
4499
4500 sym = local_syms + r_indx;
4501 if (sym->st_shndx == SHN_UNDEF)
4502 sym_sec = bfd_und_section_ptr;
4503 else if (sym->st_shndx == SHN_ABS)
4504 sym_sec = bfd_abs_section_ptr;
4505 else if (sym->st_shndx == SHN_COMMON)
4506 sym_sec = bfd_com_section_ptr;
4507 else
4508 sym_sec =
4509 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
4510
4511 if (!sym_sec)
4512 /* This is an undefined symbol. It can never
4513 be resolved. */
4514 continue;
4515
4516 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4517 sym_value = sym->st_value;
4518 destination = (sym_value + irela->r_addend
4519 + sym_sec->output_offset
4520 + sym_sec->output_section->vma);
4521 st_type = ELF_ST_TYPE (sym->st_info);
4522 sym_name
4523 = bfd_elf_string_from_elf_section (input_bfd,
4524 symtab_hdr->sh_link,
4525 sym->st_name);
4526 }
4527 else
4528 {
4529 /* It's an external symbol. */
4530 int e_indx;
4531
4532 e_indx = r_indx - symtab_hdr->sh_info;
4533 hash = ((struct elf32_arm_link_hash_entry *)
4534 elf_sym_hashes (input_bfd)[e_indx]);
4535
4536 while (hash->root.root.type == bfd_link_hash_indirect
4537 || hash->root.root.type == bfd_link_hash_warning)
4538 hash = ((struct elf32_arm_link_hash_entry *)
4539 hash->root.root.u.i.link);
4540
4541 if (hash->root.root.type == bfd_link_hash_defined
4542 || hash->root.root.type == bfd_link_hash_defweak)
4543 {
4544			  struct elf32_arm_link_hash_table *globals
4545			    = elf32_arm_hash_table (info);
4546
4547			  sym_sec = hash->root.root.u.def.section;
4548			  sym_value = hash->root.root.u.def.value;
4549
4550 /* For a destination in a shared library,
4551 use the PLT stub as target address to
4552 decide whether a branch stub is
4553 needed. */
4554 if (globals != NULL
4555 && globals->splt != NULL
4556 && hash != NULL
4557 && hash->root.plt.offset != (bfd_vma) -1)
4558 {
4559 sym_sec = globals->splt;
4560 sym_value = hash->root.plt.offset;
4561 if (sym_sec->output_section != NULL)
4562 destination = (sym_value
4563 + sym_sec->output_offset
4564 + sym_sec->output_section->vma);
4565 }
4566 else if (sym_sec->output_section != NULL)
4567 destination = (sym_value + irela->r_addend
4568 + sym_sec->output_offset
4569 + sym_sec->output_section->vma);
4570 }
4571 else if ((hash->root.root.type == bfd_link_hash_undefined)
4572 || (hash->root.root.type == bfd_link_hash_undefweak))
4573 {
4574 /* For a shared library, use the PLT stub as
4575 target address to decide whether a long
4576 branch stub is needed.
4577 For absolute code, they cannot be handled. */
4578 struct elf32_arm_link_hash_table *globals =
4579 elf32_arm_hash_table (info);
4580
4581 if (globals != NULL
4582 && globals->splt != NULL
4583 && hash != NULL
4584 && hash->root.plt.offset != (bfd_vma) -1)
4585 {
4586 sym_sec = globals->splt;
4587 sym_value = hash->root.plt.offset;
4588 if (sym_sec->output_section != NULL)
4589 destination = (sym_value
4590 + sym_sec->output_offset
4591 + sym_sec->output_section->vma);
4592 }
4593 else
4594 continue;
4595 }
4596 else
4597 {
4598 bfd_set_error (bfd_error_bad_value);
4599 goto error_ret_free_internal;
4600 }
4601 st_type = ELF_ST_TYPE (hash->root.type);
4602 sym_name = hash->root.root.root.string;
4603 }
4604
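	      /* The do { } while (0) below exists only so that "break" can
		 abandon stub creation for this relocation while still
		 falling through to the Cortex-A8 erratum bookkeeping that
		 follows it.  */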
4605 do
4606 {
4607 /* Determine what (if any) linker stub is needed. */
4608 stub_type = arm_type_of_stub (info, section, irela,
4609 &st_type, hash,
4610 destination, sym_sec,
4611 input_bfd, sym_name);
4612 if (stub_type == arm_stub_none)
4613 break;
4614
4615 /* Support for grouping stub sections. */
4616 id_sec = htab->stub_group[section->id].link_sec;
4617
4618 /* Get the name of this stub. */
4619 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4620 irela, stub_type);
4621 if (!stub_name)
4622 goto error_ret_free_internal;
4623
4624 /* We've either created a stub for this reloc already,
4625 or we are about to. */
4626 created_stub = TRUE;
4627
4628 stub_entry = arm_stub_hash_lookup
4629 (&htab->stub_hash_table, stub_name,
4630 FALSE, FALSE);
4631 if (stub_entry != NULL)
4632 {
4633 /* The proper stub has already been created. */
4634 free (stub_name);
4635 stub_entry->target_value = sym_value;
4636 break;
4637 }
4638
4639 stub_entry = elf32_arm_add_stub (stub_name, section,
4640 htab);
4641 if (stub_entry == NULL)
4642 {
4643 free (stub_name);
4644 goto error_ret_free_internal;
4645 }
4646
4647 stub_entry->target_value = sym_value;
4648 stub_entry->target_section = sym_sec;
4649 stub_entry->stub_type = stub_type;
4650 stub_entry->h = hash;
4651 stub_entry->st_type = st_type;
4652
4653 if (sym_name == NULL)
4654 sym_name = "unnamed";
4655 stub_entry->output_name = (char *)
4656 bfd_alloc (htab->stub_bfd,
4657 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4658 + strlen (sym_name));
4659 if (stub_entry->output_name == NULL)
4660 {
4661 free (stub_name);
4662 goto error_ret_free_internal;
4663 }
4664
4665 /* For historical reasons, use the existing names for
4666 ARM-to-Thumb and Thumb-to-ARM stubs. */
4667 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4668 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4669 && st_type != STT_ARM_TFUNC)
4670 sprintf (stub_entry->output_name,
4671 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4672 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4673 || (r_type == (unsigned int) R_ARM_JUMP24))
4674 && st_type == STT_ARM_TFUNC)
4675 sprintf (stub_entry->output_name,
4676 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4677 else
4678 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4679 sym_name);
4680
4681 stub_changed = TRUE;
4682 }
4683 while (0);
4684
4685 /* Look for relocations which might trigger Cortex-A8
4686 erratum. */
4687 if (htab->fix_cortex_a8
4688 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4689 || r_type == (unsigned int) R_ARM_THM_JUMP19
4690 || r_type == (unsigned int) R_ARM_THM_CALL
4691 || r_type == (unsigned int) R_ARM_THM_XPC22))
4692 {
4693 bfd_vma from = section->output_section->vma
4694 + section->output_offset
4695 + irela->r_offset;
4696
4697 if ((from & 0xfff) == 0xffe)
4698 {
4699 /* Found a candidate. Note we haven't checked the
4700 destination is within 4K here: if we do so (and
4701 don't create an entry in a8_relocs) we can't tell
4702 that a branch should have been relocated when
4703 scanning later. */
4704 if (num_a8_relocs == a8_reloc_table_size)
4705 {
4706 a8_reloc_table_size *= 2;
4707 a8_relocs = (struct a8_erratum_reloc *)
4708 bfd_realloc (a8_relocs,
4709 sizeof (struct a8_erratum_reloc)
4710 * a8_reloc_table_size);
4711 }
4712
4713 a8_relocs[num_a8_relocs].from = from;
4714 a8_relocs[num_a8_relocs].destination = destination;
4715 a8_relocs[num_a8_relocs].r_type = r_type;
4716 a8_relocs[num_a8_relocs].st_type = st_type;
4717 a8_relocs[num_a8_relocs].sym_name = sym_name;
4718 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4719 a8_relocs[num_a8_relocs].hash = hash;
4720
4721 num_a8_relocs++;
4722 }
4723 }
4724 }
4725
4726 /* We're done with the internal relocs, free them. */
4727 if (elf_section_data (section)->relocs == NULL)
4728 free (internal_relocs);
4729 }
4730
4731 if (htab->fix_cortex_a8)
4732 {
4733 /* Sort relocs which might apply to Cortex-A8 erratum. */
4734 qsort (a8_relocs, num_a8_relocs,
4735 sizeof (struct a8_erratum_reloc),
4736 &a8_reloc_compare);
4737
4738 /* Scan for branches which might trigger Cortex-A8 erratum. */
4739 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4740 &num_a8_fixes, &a8_fix_table_size,
4741 a8_relocs, num_a8_relocs,
4742 prev_num_a8_fixes, &stub_changed)
4743 != 0)
4744 goto error_ret_free_local;
4745 }
4746 }
4747
4748 if (prev_num_a8_fixes != num_a8_fixes)
4749 stub_changed = TRUE;
4750
4751 if (!stub_changed)
4752 break;
4753
4754 /* OK, we've added some stubs. Find out the new size of the
4755 stub sections. */
4756 for (stub_sec = htab->stub_bfd->sections;
4757 stub_sec != NULL;
4758 stub_sec = stub_sec->next)
4759 {
4760 /* Ignore non-stub sections. */
4761 if (!strstr (stub_sec->name, STUB_SUFFIX))
4762 continue;
4763
4764 stub_sec->size = 0;
4765 }
4766
4767 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4768
4769 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4770 if (htab->fix_cortex_a8)
4771 for (i = 0; i < num_a8_fixes; i++)
4772 {
4773 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4774 a8_fixes[i].section, htab);
4775
4776 if (stub_sec == NULL)
4777 goto error_ret_free_local;
4778
4779 stub_sec->size
4780 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4781 NULL);
4782 }
4783
4784
4785 /* Ask the linker to do its stuff. */
4786 (*htab->layout_sections_again) ();
4787 }
4788
4789 /* Add stubs for Cortex-A8 erratum fixes now. */
4790 if (htab->fix_cortex_a8)
4791 {
4792 for (i = 0; i < num_a8_fixes; i++)
4793 {
4794 struct elf32_arm_stub_hash_entry *stub_entry;
4795 char *stub_name = a8_fixes[i].stub_name;
4796 asection *section = a8_fixes[i].section;
4797 unsigned int section_id = a8_fixes[i].section->id;
4798 asection *link_sec = htab->stub_group[section_id].link_sec;
4799 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4800 const insn_sequence *template_sequence;
4801 int template_size, size = 0;
4802
4803 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4804 TRUE, FALSE);
4805 if (stub_entry == NULL)
4806 {
4807	      (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
4808 section->owner,
4809 stub_name);
4810 return FALSE;
4811 }
4812
4813 stub_entry->stub_sec = stub_sec;
4814 stub_entry->stub_offset = 0;
4815 stub_entry->id_sec = link_sec;
4816 stub_entry->stub_type = a8_fixes[i].stub_type;
4817 stub_entry->target_section = a8_fixes[i].section;
4818 stub_entry->target_value = a8_fixes[i].offset;
4819 stub_entry->target_addend = a8_fixes[i].addend;
4820 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4821 stub_entry->st_type = a8_fixes[i].st_type;
4822
4823 size = find_stub_size_and_template (a8_fixes[i].stub_type,
4824 &template_sequence,
4825 &template_size);
4826
4827 stub_entry->stub_size = size;
4828 stub_entry->stub_template = template_sequence;
4829 stub_entry->stub_template_size = template_size;
4830 }
4831
4832 /* Stash the Cortex-A8 erratum fix array for use later in
4833 elf32_arm_write_section(). */
4834 htab->a8_erratum_fixes = a8_fixes;
4835 htab->num_a8_erratum_fixes = num_a8_fixes;
4836 }
4837 else
4838 {
4839 htab->a8_erratum_fixes = NULL;
4840 htab->num_a8_erratum_fixes = 0;
4841 }
4842 return TRUE;
4843
4844 error_ret_free_local:
4845 return FALSE;
4846}
4847
4848/* Build all the stubs associated with the current output file. The
4849 stubs are kept in a hash table attached to the main linker hash
4850 table. We also set up the .plt entries for statically linked PIC
4851 functions here. This function is called via arm_elf_finish in the
4852 linker. */
4853
4854bfd_boolean
4855elf32_arm_build_stubs (struct bfd_link_info *info)
4856{
4857 asection *stub_sec;
4858 struct bfd_hash_table *table;
4859 struct elf32_arm_link_hash_table *htab;
4860
4861 htab = elf32_arm_hash_table (info);
4862 if (htab == NULL)
4863 return FALSE;
4864
4865 for (stub_sec = htab->stub_bfd->sections;
4866 stub_sec != NULL;
4867 stub_sec = stub_sec->next)
4868 {
4869 bfd_size_type size;
4870
4871 /* Ignore non-stub sections. */
4872 if (!strstr (stub_sec->name, STUB_SUFFIX))
4873 continue;
4874
4875 /* Allocate memory to hold the linker stubs. */
4876 size = stub_sec->size;
4877 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
4878 if (stub_sec->contents == NULL && size != 0)
4879 return FALSE;
4880 stub_sec->size = 0;
4881 }
4882
4883 /* Build the stubs as directed by the stub hash table. */
4884 table = &htab->stub_hash_table;
4885 bfd_hash_traverse (table, arm_build_one_stub, info);
4886 if (htab->fix_cortex_a8)
4887 {
4888 /* Place the cortex a8 stubs last. */
4889 htab->fix_cortex_a8 = -1;
4890 bfd_hash_traverse (table, arm_build_one_stub, info);
4891 }
4892
4893 return TRUE;
4894}
4895
4896/* Locate the Thumb encoded calling stub for NAME. */
4897
4898static struct elf_link_hash_entry *
4899find_thumb_glue (struct bfd_link_info *link_info,
4900 const char *name,
4901 char **error_message)
4902{
4903 char *tmp_name;
4904 struct elf_link_hash_entry *hash;
4905 struct elf32_arm_link_hash_table *hash_table;
4906
4907 /* We need a pointer to the armelf specific hash table. */
4908 hash_table = elf32_arm_hash_table (link_info);
4909 if (hash_table == NULL)
4910 return NULL;
4911
4912 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4913 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4914
4915 BFD_ASSERT (tmp_name);
4916
4917 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4918
4919 hash = elf_link_hash_lookup
4920 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4921
4922 if (hash == NULL
4923 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4924 tmp_name, name) == -1)
4925 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4926
4927 free (tmp_name);
4928
4929 return hash;
4930}
4931
4932/* Locate the ARM encoded calling stub for NAME. */
4933
4934static struct elf_link_hash_entry *
4935find_arm_glue (struct bfd_link_info *link_info,
4936 const char *name,
4937 char **error_message)
4938{
4939 char *tmp_name;
4940 struct elf_link_hash_entry *myh;
4941 struct elf32_arm_link_hash_table *hash_table;
4942
4943 /* We need a pointer to the elfarm specific hash table. */
4944 hash_table = elf32_arm_hash_table (link_info);
4945 if (hash_table == NULL)
4946 return NULL;
4947
4948 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4949 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4950
4951 BFD_ASSERT (tmp_name);
4952
4953 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4954
4955 myh = elf_link_hash_lookup
4956 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4957
4958 if (myh == NULL
4959 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4960 tmp_name, name) == -1)
4961 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4962
4963 free (tmp_name);
4964
4965 return myh;
4966}
4967
4968/* ARM->Thumb glue (static images):
4969
4970 .arm
4971 __func_from_arm:
4972 ldr r12, __func_addr
4973 bx r12
4974 __func_addr:
4975 .word func @ behave as if you saw an ARM_32 reloc.
4976
4977 (v5t static images)
4978 .arm
4979 __func_from_arm:
4980 ldr pc, __func_addr
4981 __func_addr:
4982 .word func @ behave as if you saw an ARM_32 reloc.
4983
4984 (relocatable images)
4985 .arm
4986 __func_from_arm:
4987 ldr r12, __func_offset
4988 add r12, r12, pc
4989 bx r12
4990 __func_offset:
4991 .word func - . */
4992
4993#define ARM2THUMB_STATIC_GLUE_SIZE 12
4994static const insn32 a2t1_ldr_insn = 0xe59fc000;
4995static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4996static const insn32 a2t3_func_addr_insn = 0x00000001;
4997
4998#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4999static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
5000static const insn32 a2t2v5_func_addr_insn = 0x00000001;
5001
5002#define ARM2THUMB_PIC_GLUE_SIZE 16
5003static const insn32 a2t1p_ldr_insn = 0xe59fc004;
5004static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
5005static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
5006
5007/* Thumb->ARM:                          Thumb->(non-interworking aware) ARM
5008
5009     .thumb                              .thumb
5010     .align 2                            .align 2
5011 __func_from_thumb:                  __func_from_thumb:
5012     bx pc                               push  {r6, lr}
5013     nop                                 ldr   r6, __func_addr
5014     .arm                                mov   lr, pc
5015     b func                              bx    r6
5016                                         .arm
5017                                     ;; back_to_thumb
5018                                         ldmia r13! {r6, lr}
5019                                         bx    lr
5020                                     __func_addr:
5021                                         .word func  */
5022
5023#define THUMB2ARM_GLUE_SIZE 8
5024static const insn16 t2a1_bx_pc_insn = 0x4778;
5025static const insn16 t2a2_noop_insn = 0x46c0;
5026static const insn32 t2a3_b_insn = 0xea000000;
5027
5028#define VFP11_ERRATUM_VENEER_SIZE 8
5029
5030#define ARM_BX_VENEER_SIZE 12
5031static const insn32 armbx1_tst_insn = 0xe3100001;
5032static const insn32 armbx2_moveq_insn = 0x01a0f000;
5033static const insn32 armbx3_bx_insn = 0xe12fff10;
5034
5035#ifndef ELFARM_NABI_C_INCLUDED
5036static void
5037arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5038{
5039 asection * s;
5040 bfd_byte * contents;
5041
5042 if (size == 0)
5043 {
5044 /* Do not include empty glue sections in the output. */
5045 if (abfd != NULL)
5046 {
5047 s = bfd_get_section_by_name (abfd, name);
5048 if (s != NULL)
5049 s->flags |= SEC_EXCLUDE;
5050 }
5051 return;
5052 }
5053
5054 BFD_ASSERT (abfd != NULL);
5055
5056 s = bfd_get_section_by_name (abfd, name);
5057 BFD_ASSERT (s != NULL);
5058
5059 contents = (bfd_byte *) bfd_alloc (abfd, size);
5060
5061 BFD_ASSERT (s->size == size);
5062 s->contents = contents;
5063}
5064
5065bfd_boolean
5066bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5067{
5068 struct elf32_arm_link_hash_table * globals;
5069
5070 globals = elf32_arm_hash_table (info);
5071 BFD_ASSERT (globals != NULL);
5072
5073 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5074 globals->arm_glue_size,
5075 ARM2THUMB_GLUE_SECTION_NAME);
5076
5077 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5078 globals->thumb_glue_size,
5079 THUMB2ARM_GLUE_SECTION_NAME);
5080
5081 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5082 globals->vfp11_erratum_glue_size,
5083 VFP11_ERRATUM_VENEER_SECTION_NAME);
5084
5085 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5086 globals->bx_glue_size,
5087 ARM_BX_GLUE_SECTION_NAME);
5088
5089 return TRUE;
5090}
5091
5092/* Allocate space and symbols for calling a Thumb function from Arm mode.
5093 returns the symbol identifying the stub. */
5094
5095static struct elf_link_hash_entry *
5096record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5097 struct elf_link_hash_entry * h)
5098{
5099 const char * name = h->root.root.string;
5100 asection * s;
5101 char * tmp_name;
5102 struct elf_link_hash_entry * myh;
5103 struct bfd_link_hash_entry * bh;
5104 struct elf32_arm_link_hash_table * globals;
5105 bfd_vma val;
5106 bfd_size_type size;
5107
5108 globals = elf32_arm_hash_table (link_info);
5109 BFD_ASSERT (globals != NULL);
5110 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5111
5112 s = bfd_get_section_by_name
5113 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5114
5115 BFD_ASSERT (s != NULL);
5116
5117 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5118 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5119
5120 BFD_ASSERT (tmp_name);
5121
5122 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5123
5124 myh = elf_link_hash_lookup
5125 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5126
5127 if (myh != NULL)
5128 {
5129 /* We've already seen this guy. */
5130 free (tmp_name);
5131 return myh;
5132 }
5133
5134 /* The only trick here is using hash_table->arm_glue_size as the value.
5135 Even though the section isn't allocated yet, this is where we will be
5136 putting it. The +1 on the value marks that the stub has not been
5137 output yet - not that it is a Thumb function. */
5138 bh = NULL;
5139 val = globals->arm_glue_size + 1;
5140 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5141 tmp_name, BSF_GLOBAL, s, val,
5142 NULL, TRUE, FALSE, &bh);
5143
5144 myh = (struct elf_link_hash_entry *) bh;
5145 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5146 myh->forced_local = 1;
5147
5148 free (tmp_name);
5149
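  /* Reserve space for whichever glue variant will be emitted later:
     the PIC sequence for shared or PIC links, the short v5 sequence
     (a single LDR to PC, which interworks on ARMv5T and later) when
     use_blx is set, and otherwise the classic LDR/BX pair followed by
     the address word.  */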
5150 if (link_info->shared || globals->root.is_relocatable_executable
5151 || globals->pic_veneer)
5152 size = ARM2THUMB_PIC_GLUE_SIZE;
5153 else if (globals->use_blx)
5154 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5155 else
5156 size = ARM2THUMB_STATIC_GLUE_SIZE;
5157
5158 s->size += size;
5159 globals->arm_glue_size += size;
5160
5161 return myh;
5162}
5163
5164/* Allocate space for ARMv4 BX veneers. */
5165
5166static void
5167record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5168{
5169 asection * s;
5170 struct elf32_arm_link_hash_table *globals;
5171 char *tmp_name;
5172 struct elf_link_hash_entry *myh;
5173 struct bfd_link_hash_entry *bh;
5174 bfd_vma val;
5175
5176 /* BX PC does not need a veneer. */
5177 if (reg == 15)
5178 return;
5179
5180 globals = elf32_arm_hash_table (link_info);
5181 BFD_ASSERT (globals != NULL);
5182 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5183
5184 /* Check if this veneer has already been allocated. */
5185 if (globals->bx_glue_offset[reg])
5186 return;
5187
5188 s = bfd_get_section_by_name
5189 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5190
5191 BFD_ASSERT (s != NULL);
5192
5193 /* Add symbol for veneer. */
5194 tmp_name = (char *)
5195 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5196
5197 BFD_ASSERT (tmp_name);
5198
5199 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5200
5201 myh = elf_link_hash_lookup
5202 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5203
5204 BFD_ASSERT (myh == NULL);
5205
5206 bh = NULL;
5207 val = globals->bx_glue_size;
5208 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5209 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5210 NULL, TRUE, FALSE, &bh);
5211
5212 myh = (struct elf_link_hash_entry *) bh;
5213 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5214 myh->forced_local = 1;
5215
5216 s->size += ARM_BX_VENEER_SIZE;
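  /* The low bits of bx_glue_offset[reg] act as flags: setting bit 1 here
     records that space for this veneer has been allocated (and keeps the
     entry non-zero even when bx_glue_size is still 0); the real section
     offset is recovered by masking those bits off again when the veneer
     is used.  */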
5217 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5218 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5219}
5220
5221
5222/* Add an entry to the code/data map for section SEC. */
5223
5224static void
5225elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5226{
5227 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5228 unsigned int newidx;
5229
5230 if (sec_data->map == NULL)
5231 {
5232 sec_data->map = (elf32_arm_section_map *)
5233 bfd_malloc (sizeof (elf32_arm_section_map));
5234 sec_data->mapcount = 0;
5235 sec_data->mapsize = 1;
5236 }
5237
5238 newidx = sec_data->mapcount++;
5239
5240 if (sec_data->mapcount > sec_data->mapsize)
5241 {
5242 sec_data->mapsize *= 2;
5243 sec_data->map = (elf32_arm_section_map *)
5244 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5245 * sizeof (elf32_arm_section_map));
5246 }
5247
5248 if (sec_data->map)
5249 {
5250 sec_data->map[newidx].vma = vma;
5251 sec_data->map[newidx].type = type;
5252 }
5253}
5254
5255
5256/* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5257 veneers are handled for now. */
5258
5259static bfd_vma
5260record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5261 elf32_vfp11_erratum_list *branch,
5262 bfd *branch_bfd,
5263 asection *branch_sec,
5264 unsigned int offset)
5265{
5266 asection *s;
5267 struct elf32_arm_link_hash_table *hash_table;
5268 char *tmp_name;
5269 struct elf_link_hash_entry *myh;
5270 struct bfd_link_hash_entry *bh;
5271 bfd_vma val;
5272 struct _arm_elf_section_data *sec_data;
5273 elf32_vfp11_erratum_list *newerr;
5274
5275 hash_table = elf32_arm_hash_table (link_info);
5276 BFD_ASSERT (hash_table != NULL);
5277 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5278
5279 s = bfd_get_section_by_name
5280 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5281
5282 sec_data = elf32_arm_section_data (s);
5283
5284 BFD_ASSERT (s != NULL);
5285
5286 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5287 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5288
5289 BFD_ASSERT (tmp_name);
5290
5291 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5292 hash_table->num_vfp11_fixes);
5293
5294 myh = elf_link_hash_lookup
5295 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5296
5297 BFD_ASSERT (myh == NULL);
5298
5299 bh = NULL;
5300 val = hash_table->vfp11_erratum_glue_size;
5301 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5302 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5303 NULL, TRUE, FALSE, &bh);
5304
5305 myh = (struct elf_link_hash_entry *) bh;
5306 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5307 myh->forced_local = 1;
5308
5309 /* Link veneer back to calling location. */
5310 sec_data->erratumcount += 1;
5311 newerr = (elf32_vfp11_erratum_list *)
5312 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5313
5314 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5315 newerr->vma = -1;
5316 newerr->u.v.branch = branch;
5317 newerr->u.v.id = hash_table->num_vfp11_fixes;
5318 branch->u.b.veneer = newerr;
5319
5320 newerr->next = sec_data->erratumlist;
5321 sec_data->erratumlist = newerr;
5322
5323 /* A symbol for the return from the veneer. */
5324 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5325 hash_table->num_vfp11_fixes);
5326
5327 myh = elf_link_hash_lookup
5328 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5329
5330 if (myh != NULL)
5331 abort ();
5332
5333 bh = NULL;
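  /* The veneer returns to the instruction immediately after the
     offending VFP instruction, i.e. 4 bytes past OFFSET (only ARM-mode
     veneers, with 4-byte instructions, are handled here).  */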
5334 val = offset + 4;
5335 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5336 branch_sec, val, NULL, TRUE, FALSE, &bh);
5337
5338 myh = (struct elf_link_hash_entry *) bh;
5339 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5340 myh->forced_local = 1;
5341
5342 free (tmp_name);
5343
5344 /* Generate a mapping symbol for the veneer section, and explicitly add an
5345 entry for that symbol to the code/data map for the section. */
5346 if (hash_table->vfp11_erratum_glue_size == 0)
5347 {
5348 bh = NULL;
5349 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5350 ever requires this erratum fix. */
5351 _bfd_generic_link_add_one_symbol (link_info,
5352 hash_table->bfd_of_glue_owner, "$a",
5353 BSF_LOCAL, s, 0, NULL,
5354 TRUE, FALSE, &bh);
5355
5356 myh = (struct elf_link_hash_entry *) bh;
5357 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5358 myh->forced_local = 1;
5359
5360 /* The elf32_arm_init_maps function only cares about symbols from input
5361 BFDs. We must make a note of this generated mapping symbol
5362 ourselves so that code byteswapping works properly in
5363 elf32_arm_write_section. */
5364 elf32_arm_section_map_add (s, 'a', 0);
5365 }
5366
5367 s->size += VFP11_ERRATUM_VENEER_SIZE;
5368 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5369 hash_table->num_vfp11_fixes++;
5370
5371 /* The offset of the veneer. */
5372 return val;
5373}
5374
5375#define ARM_GLUE_SECTION_FLAGS \
5376 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5377 | SEC_READONLY | SEC_LINKER_CREATED)
5378
5379/* Create a fake section for use by the ARM backend of the linker. */
5380
5381static bfd_boolean
5382arm_make_glue_section (bfd * abfd, const char * name)
5383{
5384 asection * sec;
5385
5386 sec = bfd_get_section_by_name (abfd, name);
5387 if (sec != NULL)
5388 /* Already made. */
5389 return TRUE;
5390
5391 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5392
5393 if (sec == NULL
5394 || !bfd_set_section_alignment (abfd, sec, 2))
5395 return FALSE;
5396
5397 /* Set the gc mark to prevent the section from being removed by garbage
5398 collection, despite the fact that no relocs refer to this section. */
5399 sec->gc_mark = 1;
5400
5401 return TRUE;
5402}
5403
5404/* Add the glue sections to ABFD. This function is called from the
5405 linker scripts in ld/emultempl/{armelf}.em. */
5406
5407bfd_boolean
5408bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5409 struct bfd_link_info *info)
5410{
5411 /* If we are only performing a partial
5412 link do not bother adding the glue. */
5413 if (info->relocatable)
5414 return TRUE;
5415
5416 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5417 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5418 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5419 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5420}
5421
5422/* Select a BFD to be used to hold the sections used by the glue code.
5423 This function is called from the linker scripts in ld/emultempl/
5424 {armelf/pe}.em. */
5425
5426bfd_boolean
5427bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5428{
5429 struct elf32_arm_link_hash_table *globals;
5430
5431 /* If we are only performing a partial link
5432 do not bother getting a bfd to hold the glue. */
5433 if (info->relocatable)
5434 return TRUE;
5435
5436 /* Make sure we don't attach the glue sections to a dynamic object. */
5437 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5438
5439 globals = elf32_arm_hash_table (info);
5440 BFD_ASSERT (globals != NULL);
5441
5442 if (globals->bfd_of_glue_owner != NULL)
5443 return TRUE;
5444
5445 /* Save the bfd for later use. */
5446 globals->bfd_of_glue_owner = abfd;
5447
5448 return TRUE;
5449}
5450
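/* A descriptive note on the check below (an assumption drawn from the ARM
   build attribute values): Tag_CPU_arch values above 2 correspond to
   ARMv5T and later, which provide the BLX instruction, so interworking
   branches can switch state without a veneer.  */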
5451static void
5452check_use_blx (struct elf32_arm_link_hash_table *globals)
5453{
5454 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5455 Tag_CPU_arch) > 2)
5456 globals->use_blx = 1;
5457}
5458
5459bfd_boolean
5460bfd_elf32_arm_process_before_allocation (bfd *abfd,
5461 struct bfd_link_info *link_info)
5462{
5463 Elf_Internal_Shdr *symtab_hdr;
5464 Elf_Internal_Rela *internal_relocs = NULL;
5465 Elf_Internal_Rela *irel, *irelend;
5466 bfd_byte *contents = NULL;
5467
5468 asection *sec;
5469 struct elf32_arm_link_hash_table *globals;
5470
5471 /* If we are only performing a partial link do not bother
5472 to construct any glue. */
5473 if (link_info->relocatable)
5474 return TRUE;
5475
5476 /* Here we have a bfd that is to be included in the link. We have a
5477 hook to do reloc rummaging, before section sizes are nailed down. */
5478 globals = elf32_arm_hash_table (link_info);
5479 BFD_ASSERT (globals != NULL);
5480
5481 check_use_blx (globals);
5482
5483 if (globals->byteswap_code && !bfd_big_endian (abfd))
5484 {
5485 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5486 abfd);
5487 return FALSE;
5488 }
5489
5490 /* PR 5398: If we have not decided to include any loadable sections in
5491 the output then we will not have a glue owner bfd. This is OK, it
5492 just means that there is nothing else for us to do here. */
5493 if (globals->bfd_of_glue_owner == NULL)
5494 return TRUE;
5495
5496 /* Rummage around all the relocs and map the glue vectors. */
5497 sec = abfd->sections;
5498
5499 if (sec == NULL)
5500 return TRUE;
5501
5502 for (; sec != NULL; sec = sec->next)
5503 {
5504 if (sec->reloc_count == 0)
5505 continue;
5506
5507 if ((sec->flags & SEC_EXCLUDE) != 0)
5508 continue;
5509
5510 symtab_hdr = & elf_symtab_hdr (abfd);
5511
5512 /* Load the relocs. */
5513 internal_relocs
5514 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5515
5516 if (internal_relocs == NULL)
5517 goto error_return;
5518
5519 irelend = internal_relocs + sec->reloc_count;
5520 for (irel = internal_relocs; irel < irelend; irel++)
5521 {
5522 long r_type;
5523 unsigned long r_index;
5524
5525 struct elf_link_hash_entry *h;
5526
5527 r_type = ELF32_R_TYPE (irel->r_info);
5528 r_index = ELF32_R_SYM (irel->r_info);
5529
5530 /* These are the only relocation types we care about. */
5531 if ( r_type != R_ARM_PC24
5532 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5533 continue;
5534
5535 /* Get the section contents if we haven't done so already. */
5536 if (contents == NULL)
5537 {
5538 /* Get cached copy if it exists. */
5539 if (elf_section_data (sec)->this_hdr.contents != NULL)
5540 contents = elf_section_data (sec)->this_hdr.contents;
5541 else
5542 {
5543 /* Go get them off disk. */
5544 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5545 goto error_return;
5546 }
5547 }
5548
5549 if (r_type == R_ARM_V4BX)
5550 {
5551 int reg;
5552
5553 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5554 record_arm_bx_glue (link_info, reg);
5555 continue;
5556 }
5557
5558 /* If the relocation is not against a symbol it cannot concern us. */
5559 h = NULL;
5560
5561 /* We don't care about local symbols. */
5562 if (r_index < symtab_hdr->sh_info)
5563 continue;
5564
5565 /* This is an external symbol. */
5566 r_index -= symtab_hdr->sh_info;
5567 h = (struct elf_link_hash_entry *)
5568 elf_sym_hashes (abfd)[r_index];
5569
5570 /* If the relocation is against a static symbol it must be within
5571 the current section and so cannot be a cross ARM/Thumb relocation. */
5572 if (h == NULL)
5573 continue;
5574
5575 /* If the call will go through a PLT entry then we do not need
5576 glue. */
5577 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5578 continue;
5579
5580 switch (r_type)
5581 {
5582 case R_ARM_PC24:
5583 /* This one is a call from arm code. We need to look up
5584 the target of the call. If it is a thumb target, we
5585 insert glue. */
5586 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5587 record_arm_to_thumb_glue (link_info, h);
5588 break;
5589
5590 default:
5591 abort ();
5592 }
5593 }
5594
5595 if (contents != NULL
5596 && elf_section_data (sec)->this_hdr.contents != contents)
5597 free (contents);
5598 contents = NULL;
5599
5600 if (internal_relocs != NULL
5601 && elf_section_data (sec)->relocs != internal_relocs)
5602 free (internal_relocs);
5603 internal_relocs = NULL;
5604 }
5605
5606 return TRUE;
5607
5608error_return:
5609 if (contents != NULL
5610 && elf_section_data (sec)->this_hdr.contents != contents)
5611 free (contents);
5612 if (internal_relocs != NULL
5613 && elf_section_data (sec)->relocs != internal_relocs)
5614 free (internal_relocs);
5615
5616 return FALSE;
5617}
5618#endif
5619
5620
5621/* Initialise maps of ARM/Thumb/data for input BFDs. */
5622
5623void
5624bfd_elf32_arm_init_maps (bfd *abfd)
5625{
5626 Elf_Internal_Sym *isymbuf;
5627 Elf_Internal_Shdr *hdr;
5628 unsigned int i, localsyms;
5629
5630 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5631 if (! is_arm_elf (abfd))
5632 return;
5633
5634 if ((abfd->flags & DYNAMIC) != 0)
5635 return;
5636
5637 hdr = & elf_symtab_hdr (abfd);
5638 localsyms = hdr->sh_info;
5639
5640 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5641 should contain the number of local symbols, which should come before any
5642 global symbols. Mapping symbols are always local. */
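  /* For example, a section holding ARM code, then a literal pool, then
     Thumb code normally carries the local mapping symbols $a, $d and $t
     at the corresponding offsets; only the character following the '$'
     is recorded in the section map below.  */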
5643 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5644 NULL);
5645
5646 /* No internal symbols read? Skip this BFD. */
5647 if (isymbuf == NULL)
5648 return;
5649
5650 for (i = 0; i < localsyms; i++)
5651 {
5652 Elf_Internal_Sym *isym = &isymbuf[i];
5653 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5654 const char *name;
5655
5656 if (sec != NULL
5657 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5658 {
5659 name = bfd_elf_string_from_elf_section (abfd,
5660 hdr->sh_link, isym->st_name);
5661
5662 if (bfd_is_arm_special_symbol_name (name,
5663 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5664 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5665 }
5666 }
5667}
5668
5669
5670/* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5671 say what they wanted. */
5672
5673void
5674bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5675{
5676 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5677 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5678
5679 if (globals == NULL)
5680 return;
5681
5682 if (globals->fix_cortex_a8 == -1)
5683 {
5684 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5685 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5686 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5687 || out_attr[Tag_CPU_arch_profile].i == 0))
5688 globals->fix_cortex_a8 = 1;
5689 else
5690 globals->fix_cortex_a8 = 0;
5691 }
5692}
5693
5694
5695void
5696bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5697{
5698 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5699 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5700
5701 if (globals == NULL)
5702 return;
5703 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5704 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5705 {
5706 switch (globals->vfp11_fix)
5707 {
5708 case BFD_ARM_VFP11_FIX_DEFAULT:
5709 case BFD_ARM_VFP11_FIX_NONE:
5710 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5711 break;
5712
5713 default:
5714 /* Give a warning, but do as the user requests anyway. */
5715 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5716 "workaround is not necessary for target architecture"), obfd);
5717 }
5718 }
5719 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5720 /* For earlier architectures, we might need the workaround, but do not
5721 enable it by default. If the user is running on broken hardware, they
5722 must enable the erratum fix explicitly. */
5723 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5724}
5725
5726
5727enum bfd_arm_vfp11_pipe
5728{
5729 VFP11_FMAC,
5730 VFP11_LS,
5731 VFP11_DS,
5732 VFP11_BAD
5733};
5734
5735/* Return a VFP register number. This is encoded as RX:X for single-precision
5736 registers, or X:RX for double-precision registers, where RX is the group of
5737 four bits in the instruction encoding and X is the single extension bit.
5738 RX and X fields are specified using their lowest (starting) bit. The return
5739 value is:
5740
5741 0...31: single-precision registers s0...s31
5742 32...63: double-precision registers d0...d31.
5743
5744 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5745 encounter VFP3 instructions, so we allow the full range for DP registers. */
5746
5747static unsigned int
5748bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5749 unsigned int x)
5750{
5751 if (is_double)
5752 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5753 else
5754 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5755}
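/* For instance, with IS_DOUBLE false, RX == 12 and X == 22 (the Fd field),
   an instruction with bits [15:12] == 3 and bit 22 == 1 encodes s7
   ((3 << 1) | 1); with IS_DOUBLE true the same fields encode d19
   (32 + (3 | (1 << 4))).  */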
5756
5757/* Set bits in *WMASK according to a register number REG as encoded by
5758 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5759
5760static void
5761bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5762{
5763 if (reg < 32)
5764 *wmask |= 1 << reg;
5765 else if (reg < 48)
5766 *wmask |= 3 << ((reg - 32) * 2);
5767}
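/* E.g. s5 (REG == 5) sets bit 5 only, whereas d3 (REG == 35) sets bits 6
   and 7, the two SP registers (s6 and s7) that alias it.  */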
5768
5769/* Return TRUE if WMASK overwrites anything in REGS. */
5770
5771static bfd_boolean
5772bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5773{
5774 int i;
5775
5776 for (i = 0; i < numregs; i++)
5777 {
5778 unsigned int reg = regs[i];
5779
5780 if (reg < 32 && (wmask & (1 << reg)) != 0)
5781 return TRUE;
5782
5783 reg -= 32;
5784
5785 if (reg >= 16)
5786 continue;
5787
5788 if ((wmask & (3 << (reg * 2))) != 0)
5789 return TRUE;
5790 }
5791
5792 return FALSE;
5793}
5794
5795/* In this function, we're interested in two things: finding input registers
5796 for VFP data-processing instructions, and finding the set of registers which
5797 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5798 hold the written set, so FLDM etc. are easy to deal with (we're only
5799 interested in 32 SP registers or 16 DP registers, due to the VFP version
5800 implemented by the chip in question). DP registers are marked by setting
5801 both SP registers in the write mask. */
5802
5803static enum bfd_arm_vfp11_pipe
5804bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5805 int *numregs)
5806{
5807 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
5808 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5809
5810 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5811 {
5812 unsigned int pqrs;
5813 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5814 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5815
5816 pqrs = ((insn & 0x00800000) >> 20)
5817 | ((insn & 0x00300000) >> 19)
5818 | ((insn & 0x00000040) >> 6);
5819
5820 switch (pqrs)
5821 {
5822 case 0: /* fmac[sd]. */
5823 case 1: /* fnmac[sd]. */
5824 case 2: /* fmsc[sd]. */
5825 case 3: /* fnmsc[sd]. */
5826 vpipe = VFP11_FMAC;
5827 bfd_arm_vfp11_write_mask (destmask, fd);
5828 regs[0] = fd;
5829 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5830 regs[2] = fm;
5831 *numregs = 3;
5832 break;
5833
5834 case 4: /* fmul[sd]. */
5835 case 5: /* fnmul[sd]. */
5836 case 6: /* fadd[sd]. */
5837 case 7: /* fsub[sd]. */
5838 vpipe = VFP11_FMAC;
5839 goto vfp_binop;
5840
5841 case 8: /* fdiv[sd]. */
5842 vpipe = VFP11_DS;
5843 vfp_binop:
5844 bfd_arm_vfp11_write_mask (destmask, fd);
5845 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5846 regs[1] = fm;
5847 *numregs = 2;
5848 break;
5849
5850 case 15: /* extended opcode. */
5851 {
5852 unsigned int extn = ((insn >> 15) & 0x1e)
5853 | ((insn >> 7) & 1);
5854
5855 switch (extn)
5856 {
5857 case 0: /* fcpy[sd]. */
5858 case 1: /* fabs[sd]. */
5859 case 2: /* fneg[sd]. */
5860 case 8: /* fcmp[sd]. */
5861 case 9: /* fcmpe[sd]. */
5862 case 10: /* fcmpz[sd]. */
5863 case 11: /* fcmpez[sd]. */
5864 case 16: /* fuito[sd]. */
5865 case 17: /* fsito[sd]. */
5866 case 24: /* ftoui[sd]. */
5867 case 25: /* ftouiz[sd]. */
5868 case 26: /* ftosi[sd]. */
5869 case 27: /* ftosiz[sd]. */
5870 /* These instructions will not bounce due to underflow. */
5871 *numregs = 0;
5872 vpipe = VFP11_FMAC;
5873 break;
5874
5875 case 3: /* fsqrt[sd]. */
5876 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5877 registers to cause the erratum in previous instructions. */
5878 bfd_arm_vfp11_write_mask (destmask, fd);
5879 vpipe = VFP11_DS;
5880 break;
5881
5882 case 15: /* fcvt{ds,sd}. */
5883 {
5884 int rnum = 0;
5885
5886 bfd_arm_vfp11_write_mask (destmask, fd);
5887
5888 /* Only FCVTSD can underflow. */
5889 if ((insn & 0x100) != 0)
5890 regs[rnum++] = fm;
5891
5892 *numregs = rnum;
5893
5894 vpipe = VFP11_FMAC;
5895 }
5896 break;
5897
5898 default:
5899 return VFP11_BAD;
5900 }
5901 }
5902 break;
5903
5904 default:
5905 return VFP11_BAD;
5906 }
5907 }
5908 /* Two-register transfer. */
5909 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5910 {
5911 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5912
5913 if ((insn & 0x100000) == 0)
5914 {
5915 if (is_double)
5916 bfd_arm_vfp11_write_mask (destmask, fm);
5917 else
5918 {
5919 bfd_arm_vfp11_write_mask (destmask, fm);
5920 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5921 }
5922 }
5923
5924 vpipe = VFP11_LS;
5925 }
5926 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5927 {
5928 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5929 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5930
5931 switch (puw)
5932 {
5933 case 0: /* Two-reg transfer. We should catch these above. */
5934 abort ();
5935
5936 case 2: /* fldm[sdx]. */
5937 case 3:
5938 case 5:
5939 {
5940 unsigned int i, offset = insn & 0xff;
5941
5942 if (is_double)
5943 offset >>= 1;
5944
5945 for (i = fd; i < fd + offset; i++)
5946 bfd_arm_vfp11_write_mask (destmask, i);
5947 }
5948 break;
5949
5950 case 4: /* fld[sd]. */
5951 case 6:
5952 bfd_arm_vfp11_write_mask (destmask, fd);
5953 break;
5954
5955 default:
5956 return VFP11_BAD;
5957 }
5958
5959 vpipe = VFP11_LS;
5960 }
5961 /* Single-register transfer. Note L==0. */
5962 else if ((insn & 0x0f100e10) == 0x0e000a10)
5963 {
5964 unsigned int opcode = (insn >> 21) & 7;
5965 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5966
5967 switch (opcode)
5968 {
5969 case 0: /* fmsr/fmdlr. */
5970 case 1: /* fmdhr. */
5971 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5972 destination register. I don't know if this is exactly right,
5973 but it is the conservative choice. */
5974 bfd_arm_vfp11_write_mask (destmask, fn);
5975 break;
5976
5977 case 7: /* fmxr. */
5978 break;
5979 }
5980
5981 vpipe = VFP11_LS;
5982 }
5983
5984 return vpipe;
5985}
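/* A worked example: the word 0xee000a81 ("fmacs s0, s1, s2") matches the
   data-processing pattern with pqrs == 0, so the function returns
   VFP11_FMAC, sets bit 0 of *DESTMASK (s0 is written) and fills REGS with
   {0, 1, 2} (s0, s1 and s2), with *NUMREGS == 3.  */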
5986
5987
5988static int elf32_arm_compare_mapping (const void * a, const void * b);
5989
5990
5991/* Look for potentially-troublesome code sequences which might trigger the
5992 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5993 (available from ARM) for details of the erratum. A short version is
5994 described in ld.texinfo. */
5995
5996bfd_boolean
5997bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5998{
5999 asection *sec;
6000 bfd_byte *contents = NULL;
6001 int state = 0;
6002 int regs[3], numregs = 0;
6003 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6004 int use_vector;
6005
6006 if (globals == NULL)
6007 return FALSE;
6008 use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
6009 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6010 The states transition as follows:
6011
6012 0 -> 1 (vector) or 0 -> 2 (scalar)
6013 A VFP FMAC-pipeline instruction has been seen. Fill
6014 regs[0]..regs[numregs-1] with its input operands. Remember this
6015 instruction in 'first_fmac'.
6016
6017 1 -> 2
6018 Any instruction, except for a VFP instruction which overwrites
6019 regs[*].
6020
6021 1 -> 3 [ -> 0 ] or
6022 2 -> 3 [ -> 0 ]
6023 A VFP instruction has been seen which overwrites any of regs[*].
6024 We must make a veneer! Reset state to 0 before examining next
6025 instruction.
6026
6027 2 -> 0
6028 If we fail to match anything in state 2, reset to state 0 and reset
6029 the instruction pointer to the instruction after 'first_fmac'.
6030
6031 If the VFP11 vector mode is in use, there must be at least two unrelated
6032 instructions between anti-dependent VFP11 instructions to properly avoid
6033 triggering the erratum, hence the use of the extra state 1. */
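  /* For example, with the scalar fix (use_vector == 0) the sequence

         fmacs  s0, s1, s2    @ state 0 -> 2, regs = {s0, s1, s2}
         fadds  s1, s3, s4    @ writes s1, an input of the fmacs: 2 -> 3

     reaches state 3, so a veneer is recorded for the fmacs (the VFP
     insn is later emitted out of line and reached via a branch written
     over its original location); had the fadds written, say, s5, the
     scan would drop back to state 0 and resume at the instruction
     following the fmacs.  */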
6034
6035 /* If we are only performing a partial link do not bother
6036 to construct any glue. */
6037 if (link_info->relocatable)
6038 return TRUE;
6039
6040 /* Skip if this bfd does not correspond to an ELF image. */
6041 if (! is_arm_elf (abfd))
6042 return TRUE;
6043
6044 /* We should have chosen a fix type by the time we get here. */
6045 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6046
6047 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6048 return TRUE;
6049
6050 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6051 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6052 return TRUE;
6053
6054 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6055 {
6056 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6057 struct _arm_elf_section_data *sec_data;
6058
6059 /* If we don't have executable progbits, we're not interested in this
6060 section. Also skip if section is to be excluded. */
6061 if (elf_section_type (sec) != SHT_PROGBITS
6062 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6063 || (sec->flags & SEC_EXCLUDE) != 0
6064 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
6065 || sec->output_section == bfd_abs_section_ptr
6066 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6067 continue;
6068
6069 sec_data = elf32_arm_section_data (sec);
6070
6071 if (sec_data->mapcount == 0)
6072 continue;
6073
6074 if (elf_section_data (sec)->this_hdr.contents != NULL)
6075 contents = elf_section_data (sec)->this_hdr.contents;
6076 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6077 goto error_return;
6078
6079 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6080 elf32_arm_compare_mapping);
6081
6082 for (span = 0; span < sec_data->mapcount; span++)
6083 {
6084 unsigned int span_start = sec_data->map[span].vma;
6085 unsigned int span_end = (span == sec_data->mapcount - 1)
6086 ? sec->size : sec_data->map[span + 1].vma;
6087 char span_type = sec_data->map[span].type;
6088
6089 /* FIXME: Only ARM mode is supported at present. We may need to
6090 support Thumb-2 mode also at some point. */
6091 if (span_type != 'a')
6092 continue;
6093
6094 for (i = span_start; i < span_end;)
6095 {
6096 unsigned int next_i = i + 4;
6097 unsigned int insn = bfd_big_endian (abfd)
6098 ? (contents[i] << 24)
6099 | (contents[i + 1] << 16)
6100 | (contents[i + 2] << 8)
6101 | contents[i + 3]
6102 : (contents[i + 3] << 24)
6103 | (contents[i + 2] << 16)
6104 | (contents[i + 1] << 8)
6105 | contents[i];
6106 unsigned int writemask = 0;
6107 enum bfd_arm_vfp11_pipe vpipe;
6108
6109 switch (state)
6110 {
6111 case 0:
6112 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6113 &numregs);
6114 /* I'm assuming the VFP11 erratum can trigger with denorm
6115 operands on either the FMAC or the DS pipeline. This might
6116 lead to slightly overenthusiastic veneer insertion. */
6117 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6118 {
6119 state = use_vector ? 1 : 2;
6120 first_fmac = i;
6121 veneer_of_insn = insn;
6122 }
6123 break;
6124
6125 case 1:
6126 {
6127 int other_regs[3], other_numregs;
6128 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6129 other_regs,
6130 &other_numregs);
6131 if (vpipe != VFP11_BAD
6132 && bfd_arm_vfp11_antidependency (writemask, regs,
6133 numregs))
6134 state = 3;
6135 else
6136 state = 2;
6137 }
6138 break;
6139
6140 case 2:
6141 {
6142 int other_regs[3], other_numregs;
6143 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6144 other_regs,
6145 &other_numregs);
6146 if (vpipe != VFP11_BAD
6147 && bfd_arm_vfp11_antidependency (writemask, regs,
6148 numregs))
6149 state = 3;
6150 else
6151 {
6152 state = 0;
6153 next_i = first_fmac + 4;
6154 }
6155 }
6156 break;
6157
6158 case 3:
6159 abort (); /* Should be unreachable. */
6160 }
6161
6162 if (state == 3)
6163 {
6164 elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
6165 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6166
6167 elf32_arm_section_data (sec)->erratumcount += 1;
6168
6169 newerr->u.b.vfp_insn = veneer_of_insn;
6170
6171 switch (span_type)
6172 {
6173 case 'a':
6174 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6175 break;
6176
6177 default:
6178 abort ();
6179 }
6180
6181 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6182 first_fmac);
6183
6184 newerr->vma = -1;
6185
6186 newerr->next = sec_data->erratumlist;
6187 sec_data->erratumlist = newerr;
6188
6189 state = 0;
6190 }
6191
6192 i = next_i;
6193 }
6194 }
6195
6196 if (contents != NULL
6197 && elf_section_data (sec)->this_hdr.contents != contents)
6198 free (contents);
6199 contents = NULL;
6200 }
6201
6202 return TRUE;
6203
6204error_return:
6205 if (contents != NULL
6206 && elf_section_data (sec)->this_hdr.contents != contents)
6207 free (contents);
6208
6209 return FALSE;
6210}
6211
6212/* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6213 after sections have been laid out, using specially-named symbols. */
6214
6215void
6216bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6217 struct bfd_link_info *link_info)
6218{
6219 asection *sec;
6220 struct elf32_arm_link_hash_table *globals;
6221 char *tmp_name;
6222
6223 if (link_info->relocatable)
6224 return;
6225
6226 /* Skip if this bfd does not correspond to an ELF image. */
6227 if (! is_arm_elf (abfd))
6228 return;
6229
6230 globals = elf32_arm_hash_table (link_info);
6231 if (globals == NULL)
6232 return;
6233
6234 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6235 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6236
6237 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6238 {
6239 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6240 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6241
6242 for (; errnode != NULL; errnode = errnode->next)
6243 {
6244 struct elf_link_hash_entry *myh;
6245 bfd_vma vma;
6246
6247 switch (errnode->type)
6248 {
6249 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6250 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6251 /* Find veneer symbol. */
6252 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6253 errnode->u.b.veneer->u.v.id);
6254
6255 myh = elf_link_hash_lookup
6256 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6257
6258 if (myh == NULL)
6259 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6260 "`%s'"), abfd, tmp_name);
6261
6262 vma = myh->root.u.def.section->output_section->vma
6263 + myh->root.u.def.section->output_offset
6264 + myh->root.u.def.value;
6265
6266 errnode->u.b.veneer->vma = vma;
6267 break;
6268
6269 case VFP11_ERRATUM_ARM_VENEER:
6270 case VFP11_ERRATUM_THUMB_VENEER:
6271 /* Find return location. */
6272 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6273 errnode->u.v.id);
6274
6275 myh = elf_link_hash_lookup
6276 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6277
6278 if (myh == NULL)
6279 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6280 "`%s'"), abfd, tmp_name);
6281
6282 vma = myh->root.u.def.section->output_section->vma
6283 + myh->root.u.def.section->output_offset
6284 + myh->root.u.def.value;
6285
6286 errnode->u.v.branch->vma = vma;
6287 break;
6288
6289 default:
6290 abort ();
6291 }
6292 }
6293 }
6294
6295 free (tmp_name);
6296}
6297
6298
6299/* Set target relocation values needed during linking. */
6300
6301void
6302bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6303 struct bfd_link_info *link_info,
6304 int target1_is_rel,
6305 char * target2_type,
6306 int fix_v4bx,
6307 int use_blx,
6308 bfd_arm_vfp11_fix vfp11_fix,
6309 int no_enum_warn, int no_wchar_warn,
6310 int pic_veneer, int fix_cortex_a8)
6311{
6312 struct elf32_arm_link_hash_table *globals;
6313
6314 globals = elf32_arm_hash_table (link_info);
6315 if (globals == NULL)
6316 return;
6317
6318 globals->target1_is_rel = target1_is_rel;
6319 if (strcmp (target2_type, "rel") == 0)
6320 globals->target2_reloc = R_ARM_REL32;
6321 else if (strcmp (target2_type, "abs") == 0)
6322 globals->target2_reloc = R_ARM_ABS32;
6323 else if (strcmp (target2_type, "got-rel") == 0)
6324 globals->target2_reloc = R_ARM_GOT_PREL;
6325 else
6326 {
6327 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6328 target2_type);
6329 }
6330 globals->fix_v4bx = fix_v4bx;
6331 globals->use_blx |= use_blx;
6332 globals->vfp11_fix = vfp11_fix;
6333 globals->pic_veneer = pic_veneer;
6334 globals->fix_cortex_a8 = fix_cortex_a8;
6335
6336 BFD_ASSERT (is_arm_elf (output_bfd));
6337 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6338 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6339}
6340
6341/* Replace the target offset of a Thumb bl or b.w instruction. */
6342
6343static void
6344insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6345{
6346 bfd_vma upper;
6347 bfd_vma lower;
6348 int reloc_sign;
6349
6350 BFD_ASSERT ((offset & 1) == 0);
6351
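  /* Thumb-2 B.W/BL encoding: the upper halfword holds S (the sign) and
     imm10 (offset bits [21:12]); the lower halfword holds J1, J2 and
     imm11 (offset bits [11:1]), where I1 = NOT(J1 EOR S) supplies offset
     bit 23 and I2 = NOT(J2 EOR S) supplies bit 22.  Bit 12 of the lower
     halfword (which distinguishes BL from BLX) is preserved.  */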
6352 upper = bfd_get_16 (abfd, insn);
6353 lower = bfd_get_16 (abfd, insn + 2);
6354 reloc_sign = (offset < 0) ? 1 : 0;
6355 upper = (upper & ~(bfd_vma) 0x7ff)
6356 | ((offset >> 12) & 0x3ff)
6357 | (reloc_sign << 10);
6358 lower = (lower & ~(bfd_vma) 0x2fff)
6359 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6360 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6361 | ((offset >> 1) & 0x7ff);
6362 bfd_put_16 (abfd, upper, insn);
6363 bfd_put_16 (abfd, lower, insn + 2);
6364}
6365
6366/* Thumb code calling an ARM function. */
6367
6368static int
6369elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6370 const char * name,
6371 bfd * input_bfd,
6372 bfd * output_bfd,
6373 asection * input_section,
6374 bfd_byte * hit_data,
6375 asection * sym_sec,
6376 bfd_vma offset,
6377 bfd_signed_vma addend,
6378 bfd_vma val,
6379 char **error_message)
6380{
6381 asection * s = 0;
6382 bfd_vma my_offset;
6383 long int ret_offset;
6384 struct elf_link_hash_entry * myh;
6385 struct elf32_arm_link_hash_table * globals;
6386
6387 myh = find_thumb_glue (info, name, error_message);
6388 if (myh == NULL)
6389 return FALSE;
6390
6391 globals = elf32_arm_hash_table (info);
6392 BFD_ASSERT (globals != NULL);
6393 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6394
6395 my_offset = myh->root.u.def.value;
6396
6397 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6398 THUMB2ARM_GLUE_SECTION_NAME);
6399
6400 BFD_ASSERT (s != NULL);
6401 BFD_ASSERT (s->contents != NULL);
6402 BFD_ASSERT (s->output_section != NULL);
6403
6404 if ((my_offset & 0x01) == 0x01)
6405 {
6406 if (sym_sec != NULL
6407 && sym_sec->owner != NULL
6408 && !INTERWORK_FLAG (sym_sec->owner))
6409 {
6410 (*_bfd_error_handler)
6411 (_("%B(%s): warning: interworking not enabled.\n"
6412 " first occurrence: %B: thumb call to arm"),
6413 sym_sec->owner, input_bfd, name);
6414
6415 return FALSE;
6416 }
6417
6418 --my_offset;
6419 myh->root.u.def.value = my_offset;
6420
6421 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6422 s->contents + my_offset);
6423
6424 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6425 s->contents + my_offset + 2);
6426
6427 ret_offset =
6428 /* Address of destination of the stub. */
6429 ((bfd_signed_vma) val)
6430 - ((bfd_signed_vma)
6431 /* Offset from the start of the current section
6432 to the start of the stubs. */
6433 (s->output_offset
6434 /* Offset of the start of this stub from the start of the stubs. */
6435 + my_offset
6436 /* Address of the start of the current section. */
6437 + s->output_section->vma)
6438 /* The branch instruction is 4 bytes into the stub. */
6439 + 4
6440 /* ARM branches work from the pc of the instruction + 8. */
6441 + 8);
6442
6443 put_arm_insn (globals, output_bfd,
6444 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6445 s->contents + my_offset + 4);
6446 }
6447
6448 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6449
6450 /* Now go back and fix up the original BL insn to point to here. */
6451 ret_offset =
6452 /* Address of where the stub is located. */
6453 (s->output_section->vma + s->output_offset + my_offset)
6454 /* Address of where the BL is located. */
6455 - (input_section->output_section->vma + input_section->output_offset
6456 + offset)
6457 /* Addend in the relocation. */
6458 - addend
6459 /* Biassing for PC-relative addressing. */
6460 - 8;
6461
6462 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6463
6464 return TRUE;
6465}
6466
6467/* Populate an Arm to Thumb stub. Returns the stub symbol. */
6468
6469static struct elf_link_hash_entry *
6470elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6471 const char * name,
6472 bfd * input_bfd,
6473 bfd * output_bfd,
6474 asection * sym_sec,
6475 bfd_vma val,
6476 asection * s,
6477 char ** error_message)
6478{
6479 bfd_vma my_offset;
6480 long int ret_offset;
6481 struct elf_link_hash_entry * myh;
6482 struct elf32_arm_link_hash_table * globals;
6483
6484 myh = find_arm_glue (info, name, error_message);
6485 if (myh == NULL)
6486 return NULL;
6487
6488 globals = elf32_arm_hash_table (info);
6489 BFD_ASSERT (globals != NULL);
6490 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6491
6492 my_offset = myh->root.u.def.value;
6493
6494 if ((my_offset & 0x01) == 0x01)
6495 {
6496 if (sym_sec != NULL
6497 && sym_sec->owner != NULL
6498 && !INTERWORK_FLAG (sym_sec->owner))
6499 {
6500 (*_bfd_error_handler)
6501 (_("%B(%s): warning: interworking not enabled.\n"
6502 " first occurrence: %B: arm call to thumb"),
6503 sym_sec->owner, input_bfd, name);
6504 }
6505
6506 --my_offset;
6507 myh->root.u.def.value = my_offset;
6508
6509 if (info->shared || globals->root.is_relocatable_executable
6510 || globals->pic_veneer)
6511 {
6512 /* For relocatable objects we can't use absolute addresses,
6513 so construct the address from a relative offset. */
6514 /* TODO: If the offset is small it's probably worth
6515 constructing the address with adds. */
6516 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6517 s->contents + my_offset);
6518 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6519 s->contents + my_offset + 4);
6520 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6521 s->contents + my_offset + 8);
6522 /* Adjust the offset by 4 for the position of the add,
6523 and 8 for the pipeline offset. */
6524 ret_offset = (val - (s->output_offset
6525 + s->output_section->vma
6526 + my_offset + 12))
6527 | 1;
6528 bfd_put_32 (output_bfd, ret_offset,
6529 s->contents + my_offset + 12);
6530 }
6531 else if (globals->use_blx)
6532 {
6533 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6534 s->contents + my_offset);
6535
6536 /* It's a thumb address. Add the low order bit. */
6537 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6538 s->contents + my_offset + 4);
6539 }
6540 else
6541 {
6542 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6543 s->contents + my_offset);
6544
6545 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6546 s->contents + my_offset + 4);
6547
6548 /* It's a thumb address. Add the low order bit. */
6549 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6550 s->contents + my_offset + 8);
6551
6552 my_offset += 12;
6553 }
6554 }
6555
6556 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6557
6558 return myh;
6559}
6560
6561/* Arm code calling a Thumb function. */
6562
6563static int
6564elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6565 const char * name,
6566 bfd * input_bfd,
6567 bfd * output_bfd,
6568 asection * input_section,
6569 bfd_byte * hit_data,
6570 asection * sym_sec,
6571 bfd_vma offset,
6572 bfd_signed_vma addend,
6573 bfd_vma val,
6574 char **error_message)
6575{
6576 unsigned long int tmp;
6577 bfd_vma my_offset;
6578 asection * s;
6579 long int ret_offset;
6580 struct elf_link_hash_entry * myh;
6581 struct elf32_arm_link_hash_table * globals;
6582
6583 globals = elf32_arm_hash_table (info);
6584 BFD_ASSERT (globals != NULL);
6585 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6586
6587 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6588 ARM2THUMB_GLUE_SECTION_NAME);
6589 BFD_ASSERT (s != NULL);
6590 BFD_ASSERT (s->contents != NULL);
6591 BFD_ASSERT (s->output_section != NULL);
6592
6593 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6594 sym_sec, val, s, error_message);
6595 if (!myh)
6596 return FALSE;
6597
6598 my_offset = myh->root.u.def.value;
6599 tmp = bfd_get_32 (input_bfd, hit_data);
6600 tmp = tmp & 0xFF000000;
6601
6602 /* The branch is taken with a PC that is 8 bytes past the instruction, so subtract 8. */
6603 ret_offset = (s->output_offset
6604 + my_offset
6605 + s->output_section->vma
6606 - (input_section->output_offset
6607 + input_section->output_section->vma
6608 + offset + addend)
6609 - 8);
6610
6611 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6612
6613 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6614
6615 return TRUE;
6616}
6617
6618/* Populate Arm stub for an exported Thumb function. */
6619
6620static bfd_boolean
6621elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6622{
6623 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6624 asection * s;
6625 struct elf_link_hash_entry * myh;
6626 struct elf32_arm_link_hash_entry *eh;
6627 struct elf32_arm_link_hash_table * globals;
6628 asection *sec;
6629 bfd_vma val;
6630 char *error_message;
6631
6632 eh = elf32_arm_hash_entry (h);
6633 /* Allocate stubs for exported Thumb functions on v4t. */
6634 if (eh->export_glue == NULL)
6635 return TRUE;
6636
6637 globals = elf32_arm_hash_table (info);
6638 BFD_ASSERT (globals != NULL);
6639 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6640
6641 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6642 ARM2THUMB_GLUE_SECTION_NAME);
6643 BFD_ASSERT (s != NULL);
6644 BFD_ASSERT (s->contents != NULL);
6645 BFD_ASSERT (s->output_section != NULL);
6646
6647 sec = eh->export_glue->root.u.def.section;
6648
6649 BFD_ASSERT (sec->output_section != NULL);
6650
6651 val = eh->export_glue->root.u.def.value + sec->output_offset
6652 + sec->output_section->vma;
6653
6654 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6655 h->root.u.def.section->owner,
6656 globals->obfd, sec, val, s,
6657 &error_message);
6658 BFD_ASSERT (myh);
6659 return TRUE;
6660}
6661
6662/* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6663
6664static bfd_vma
6665elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6666{
6667 bfd_byte *p;
6668 bfd_vma glue_addr;
6669 asection *s;
6670 struct elf32_arm_link_hash_table *globals;
6671
6672 globals = elf32_arm_hash_table (info);
6673 BFD_ASSERT (globals != NULL);
6674 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6675
6676 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6677 ARM_BX_GLUE_SECTION_NAME);
6678 BFD_ASSERT (s != NULL);
6679 BFD_ASSERT (s->contents != NULL);
6680 BFD_ASSERT (s->output_section != NULL);
6681
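  /* The bottom two bits of bx_glue_offset[reg] are flag bits: bit 1 is
     set once a veneer slot has been reserved for this register and bit 0
     once its contents have been written; the remaining bits hold the
     offset of the veneer within the glue section.  */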
6682 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6683
6684 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6685
6686 if ((globals->bx_glue_offset[reg] & 1) == 0)
6687 {
6688 p = s->contents + glue_addr;
6689 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6690 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6691 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6692 globals->bx_glue_offset[reg] |= 1;
6693 }
6694
6695 return glue_addr + s->output_section->vma + s->output_offset;
6696}
6697
6698/* Generate Arm stubs for exported Thumb symbols. */
6699static void
6700elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6701 struct bfd_link_info *link_info)
6702{
6703 struct elf32_arm_link_hash_table * globals;
6704
6705 if (link_info == NULL)
6706 /* Ignore this if we are not called by the ELF backend linker. */
6707 return;
6708
6709 globals = elf32_arm_hash_table (link_info);
6710 if (globals == NULL)
6711 return;
6712
6713 /* If blx is available then exported Thumb symbols are OK and there is
6714 nothing to do. */
6715 if (globals->use_blx)
6716 return;
6717
6718 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6719 link_info);
6720}
6721
6722/* Some relocations map to different relocations depending on the
6723 target. Return the real relocation. */
6724
6725static int
6726arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6727 int r_type)
6728{
6729 switch (r_type)
6730 {
6731 case R_ARM_TARGET1:
6732 if (globals->target1_is_rel)
6733 return R_ARM_REL32;
6734 else
6735 return R_ARM_ABS32;
6736
6737 case R_ARM_TARGET2:
6738 return globals->target2_reloc;
6739
6740 default:
6741 return r_type;
6742 }
6743}
6744
6745/* Return the base VMA address which should be subtracted from real addresses
6746 when resolving @dtpoff relocation.
6747 This is PT_TLS segment p_vaddr. */
6748
6749static bfd_vma
6750dtpoff_base (struct bfd_link_info *info)
6751{
6752 /* If tls_sec is NULL, we should have signalled an error already. */
6753 if (elf_hash_table (info)->tls_sec == NULL)
6754 return 0;
6755 return elf_hash_table (info)->tls_sec->vma;
6756}
6757
6758/* Return the relocation value for @tpoff relocation
6759 if STT_TLS virtual address is ADDRESS. */
6760
6761static bfd_vma
6762tpoff (struct bfd_link_info *info, bfd_vma address)
6763{
6764 struct elf_link_hash_table *htab = elf_hash_table (info);
6765 bfd_vma base;
6766
6767 /* If tls_sec is NULL, we should have signalled an error already. */
6768 if (htab->tls_sec == NULL)
6769 return 0;
6770 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6771 return address - htab->tls_sec->vma + base;
6772}
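/* As an illustration: assuming TCB_SIZE is 8 and the TLS section requires
   no more than 8-byte alignment, a thread-local variable placed at
   tls_sec->vma + 0x10 gets a tpoff of 0x10 + 8 == 0x18, i.e. its offset
   from the thread pointer.  */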
6773
6774/* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6775 VALUE is the relocation value. */
6776
6777static bfd_reloc_status_type
6778elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6779{
6780 if (value > 0xfff)
6781 return bfd_reloc_overflow;
6782
6783 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6784 bfd_put_32 (abfd, value, data);
6785 return bfd_reloc_ok;
6786}
6787
6788/* For a given value of n, calculate the value of G_n as required to
6789 deal with group relocations. We return it in the form of an
6790 encoded constant-and-rotation, together with the final residual. If n is
6791 specified as less than zero, then final_residual is filled with the
6792 input value and no further action is performed. */
6793
6794static bfd_vma
6795calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6796{
6797 int current_n;
6798 bfd_vma g_n;
6799 bfd_vma encoded_g_n = 0;
6800 bfd_vma residual = value; /* Also known as Y_n. */
6801
6802 for (current_n = 0; current_n <= n; current_n++)
6803 {
6804 int shift;
6805
6806 /* Calculate which part of the value to mask. */
6807 if (residual == 0)
6808 shift = 0;
6809 else
6810 {
6811 int msb;
6812
6813 /* Determine the most significant bit in the residual and
6814 align the resulting value to a 2-bit boundary. */
6815 for (msb = 30; msb >= 0; msb -= 2)
6816 if (residual & (3 << msb))
6817 break;
6818
6819 /* The desired shift is now (msb - 6), or zero, whichever
6820 is the greater. */
6821 shift = msb - 6;
6822 if (shift < 0)
6823 shift = 0;
6824 }
6825
6826 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6827 g_n = residual & (0xff << shift);
6828 encoded_g_n = (g_n >> shift)
6829 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6830
6831 /* Calculate the residual for the next time around. */
6832 residual &= ~g_n;
6833 }
6834
6835 *final_residual = residual;
6836
6837 return encoded_g_n;
6838}
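/* For example, for VALUE == 0x12345678 and N == 0 the most significant
   2-bit-aligned byte occupies bits [29:22], so G_0 == 0x12000000; the
   function returns the encoded form 0x548 (imm8 0x48 with rotation field
   5, i.e. rotate right by 10) and leaves *FINAL_RESIDUAL == 0x00345678.  */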
6839
6840/* Given an ARM instruction, determine whether it is an ADD or a SUB.
6841 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6842
6843static int
6844identify_add_or_sub (bfd_vma insn)
6845{
6846 int opcode = insn & 0x1e00000;
6847
6848 if (opcode == 1 << 23) /* ADD */
6849 return 1;
6850
6851 if (opcode == 1 << 22) /* SUB */
6852 return -1;
6853
6854 return 0;
6855}
6856
6857/* Perform a relocation as part of a final link. */
6858
6859static bfd_reloc_status_type
6860elf32_arm_final_link_relocate (reloc_howto_type * howto,
6861 bfd * input_bfd,
6862 bfd * output_bfd,
6863 asection * input_section,
6864 bfd_byte * contents,
6865 Elf_Internal_Rela * rel,
6866 bfd_vma value,
6867 struct bfd_link_info * info,
6868 asection * sym_sec,
6869 const char * sym_name,
6870 int sym_flags,
6871 struct elf_link_hash_entry * h,
6872 bfd_boolean * unresolved_reloc_p,
6873 char ** error_message)
6874{
6875 unsigned long r_type = howto->type;
6876 unsigned long r_symndx;
6877 bfd_byte * hit_data = contents + rel->r_offset;
6878 bfd * dynobj = NULL;
6879 bfd_vma * local_got_offsets;
6880 asection * sgot = NULL;
6881 asection * splt = NULL;
6882 asection * sreloc = NULL;
6883 bfd_vma addend;
6884 bfd_signed_vma signed_addend;
6885 struct elf32_arm_link_hash_table * globals;
6886
6887 globals = elf32_arm_hash_table (info);
6888 if (globals == NULL)
6889 return bfd_reloc_notsupported;
6890
6891 BFD_ASSERT (is_arm_elf (input_bfd));
6892
6893 /* Some relocation types map to different relocations depending on the
6894 target. We pick the right one here. */
6895 r_type = arm_real_reloc_type (globals, r_type);
6896 if (r_type != howto->type)
6897 howto = elf32_arm_howto_from_type (r_type);
6898
6899 /* If the start address has been set, then set the EF_ARM_HASENTRY
6900 flag. Setting this more than once is redundant, but the cost is
6901 not too high, and it keeps the code simple.
6902
6903 The test is done here, rather than somewhere else, because the
6904 start address is only set just before the final link commences.
6905
6906 Note - if the user deliberately sets a start address of 0, the
6907 flag will not be set. */
6908 if (bfd_get_start_address (output_bfd) != 0)
6909 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6910
6911 dynobj = elf_hash_table (info)->dynobj;
6912 if (dynobj)
6913 {
6914 sgot = bfd_get_section_by_name (dynobj, ".got");
6915 splt = bfd_get_section_by_name (dynobj, ".plt");
6916 }
6917 local_got_offsets = elf_local_got_offsets (input_bfd);
6918 r_symndx = ELF32_R_SYM (rel->r_info);
6919
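  /* For REL-style targets the addend lives in the instruction itself:
     extract it and sign-extend it from the width given by SRC_MASK.  */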
6920 if (globals->use_rel)
6921 {
6922 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6923
6924 if (addend & ((howto->src_mask + 1) >> 1))
6925 {
6926 signed_addend = -1;
6927 signed_addend &= ~ howto->src_mask;
6928 signed_addend |= addend;
6929 }
6930 else
6931 signed_addend = addend;
6932 }
6933 else
6934 addend = signed_addend = rel->r_addend;
6935
6936 switch (r_type)
6937 {
6938 case R_ARM_NONE:
6939 /* We don't need to find a value for this symbol. It's just a
6940 marker. */
6941 *unresolved_reloc_p = FALSE;
6942 return bfd_reloc_ok;
6943
6944 case R_ARM_ABS12:
6945 if (!globals->vxworks_p)
6946 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6947
6948 case R_ARM_PC24:
6949 case R_ARM_ABS32:
6950 case R_ARM_ABS32_NOI:
6951 case R_ARM_REL32:
6952 case R_ARM_REL32_NOI:
6953 case R_ARM_CALL:
6954 case R_ARM_JUMP24:
6955 case R_ARM_XPC25:
6956 case R_ARM_PREL31:
6957 case R_ARM_PLT32:
6958 /* Handle relocations which should use the PLT entry. ABS32/REL32
6959 will use the symbol's value, which may point to a PLT entry, but we
6960 don't need to handle that here. If we created a PLT entry, all
6961 branches in this object should go to it, except if the PLT is too
6962 far away, in which case a long branch stub should be inserted. */
6963 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6964 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6965 && r_type != R_ARM_CALL
6966 && r_type != R_ARM_JUMP24
6967 && r_type != R_ARM_PLT32)
6968 && h != NULL
6969 && splt != NULL
6970 && h->plt.offset != (bfd_vma) -1)
6971 {
6972 /* If we've created a .plt section, and assigned a PLT entry to
6973 this function, it should not be known to bind locally. If
6974 it were, we would have cleared the PLT entry. */
6975 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6976
6977 value = (splt->output_section->vma
6978 + splt->output_offset
6979 + h->plt.offset);
6980 *unresolved_reloc_p = FALSE;
6981 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6982 contents, rel->r_offset, value,
6983 rel->r_addend);
6984 }
6985
6986 /* When generating a shared object or relocatable executable, these
6987 relocations are copied into the output file to be resolved at
6988 run time. */
6989 if ((info->shared || globals->root.is_relocatable_executable)
6990 && (input_section->flags & SEC_ALLOC)
6991 && !(globals->vxworks_p
6992 && strcmp (input_section->output_section->name,
6993 ".tls_vars") == 0)
6994 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6995 || !SYMBOL_CALLS_LOCAL (info, h))
6996 && (!strstr (input_section->name, STUB_SUFFIX))
6997 && (h == NULL
6998 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6999 || h->root.type != bfd_link_hash_undefweak)
7000 && r_type != R_ARM_PC24
7001 && r_type != R_ARM_CALL
7002 && r_type != R_ARM_JUMP24
7003 && r_type != R_ARM_PREL31
7004 && r_type != R_ARM_PLT32)
7005 {
7006 Elf_Internal_Rela outrel;
7007 bfd_byte *loc;
7008 bfd_boolean skip, relocate;
7009
7010 *unresolved_reloc_p = FALSE;
7011
7012 if (sreloc == NULL)
7013 {
7014 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
7015 ! globals->use_rel);
7016
7017 if (sreloc == NULL)
7018 return bfd_reloc_notsupported;
7019 }
7020
7021 skip = FALSE;
7022 relocate = FALSE;
7023
7024 outrel.r_addend = addend;
7025 outrel.r_offset =
7026 _bfd_elf_section_offset (output_bfd, info, input_section,
7027 rel->r_offset);
7028 if (outrel.r_offset == (bfd_vma) -1)
7029 skip = TRUE;
7030 else if (outrel.r_offset == (bfd_vma) -2)
7031 skip = TRUE, relocate = TRUE;
7032 outrel.r_offset += (input_section->output_section->vma
7033 + input_section->output_offset);
7034
7035 if (skip)
7036 memset (&outrel, 0, sizeof outrel);
7037 else if (h != NULL
7038 && h->dynindx != -1
7039 && (!info->shared
7040 || !info->symbolic
7041 || !h->def_regular))
7042 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
7043 else
7044 {
7045 int symbol;
7046
7047 /* This symbol is local, or marked to become local. */
7048 if (sym_flags == STT_ARM_TFUNC)
7049 value |= 1;
7050 if (globals->symbian_p)
7051 {
7052 asection *osec;
7053
7054 /* On Symbian OS, the data segment and text segment
7055 can be relocated independently. Therefore, we
7056 must indicate the segment to which this
7057 relocation is relative. The BPABI allows us to
7058 use any symbol in the right segment; we just use
7059 the section symbol as it is convenient. (We
7060 cannot use the symbol given by "h" directly as it
7061 will not appear in the dynamic symbol table.)
7062
7063 Note that the dynamic linker ignores the section
7064 symbol value, so we don't subtract osec->vma
7065 from the emitted reloc addend. */
7066 if (sym_sec)
7067 osec = sym_sec->output_section;
7068 else
7069 osec = input_section->output_section;
7070 symbol = elf_section_data (osec)->dynindx;
7071 if (symbol == 0)
7072 {
7073 struct elf_link_hash_table *htab = elf_hash_table (info);
7074
7075 if ((osec->flags & SEC_READONLY) == 0
7076 && htab->data_index_section != NULL)
7077 osec = htab->data_index_section;
7078 else
7079 osec = htab->text_index_section;
7080 symbol = elf_section_data (osec)->dynindx;
7081 }
7082 BFD_ASSERT (symbol != 0);
7083 }
7084 else
7085 /* On SVR4-ish systems, the dynamic loader cannot
7086 relocate the text and data segments independently,
7087 so the symbol does not matter. */
7088 symbol = 0;
7089 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
7090 if (globals->use_rel)
7091 relocate = TRUE;
7092 else
7093 outrel.r_addend += value;
7094 }
7095
7096 loc = sreloc->contents;
7097 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
7098 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7099
7100 /* If this reloc is against an external symbol, we do not want to
7101 fiddle with the addend. Otherwise, we need to include the symbol
7102 value so that it becomes an addend for the dynamic reloc. */
7103 if (! relocate)
7104 return bfd_reloc_ok;
7105
7106 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7107 contents, rel->r_offset, value,
7108 (bfd_vma) 0);
7109 }
7110 else switch (r_type)
7111 {
7112 case R_ARM_ABS12:
7113 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
7114
7115 case R_ARM_XPC25: /* Arm BLX instruction. */
7116 case R_ARM_CALL:
7117 case R_ARM_JUMP24:
7118 case R_ARM_PC24: /* Arm B/BL instruction. */
7119 case R_ARM_PLT32:
7120 {
7121 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7122
7123 if (r_type == R_ARM_XPC25)
7124 {
7125 /* Check for Arm calling Arm function. */
7126 /* FIXME: Should we translate the instruction into a BL
7127 instruction instead ? */
7128 if (sym_flags != STT_ARM_TFUNC)
7129 (*_bfd_error_handler)
7130 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
7131 input_bfd,
7132 h ? h->root.root.string : "(local)");
7133 }
7134 else if (r_type == R_ARM_PC24)
7135 {
7136 /* Check for Arm calling Thumb function. */
7137 if (sym_flags == STT_ARM_TFUNC)
7138 {
7139 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
7140 output_bfd, input_section,
7141 hit_data, sym_sec, rel->r_offset,
7142 signed_addend, value,
7143 error_message))
7144 return bfd_reloc_ok;
7145 else
7146 return bfd_reloc_dangerous;
7147 }
7148 }
7149
7150 /* Check if a stub has to be inserted because the
7151 destination is too far or we are changing mode. */
7152 if ( r_type == R_ARM_CALL
7153 || r_type == R_ARM_JUMP24
7154 || r_type == R_ARM_PLT32)
7155 {
7156 enum elf32_arm_stub_type stub_type = arm_stub_none;
7157 struct elf32_arm_link_hash_entry *hash;
7158
7159 hash = (struct elf32_arm_link_hash_entry *) h;
7160 stub_type = arm_type_of_stub (info, input_section, rel,
7161 &sym_flags, hash,
7162 value, sym_sec,
7163 input_bfd, sym_name);
7164
7165 if (stub_type != arm_stub_none)
7166 {
7167 /* The target is out of reach, so redirect the
7168 branch to the local stub for this function. */
7169
7170 stub_entry = elf32_arm_get_stub_entry (input_section,
7171 sym_sec, h,
7172 rel, globals,
7173 stub_type);
7174 if (stub_entry != NULL)
7175 value = (stub_entry->stub_offset
7176 + stub_entry->stub_sec->output_offset
7177 + stub_entry->stub_sec->output_section->vma);
7178 }
7179 else
7180 {
7181 /* If the call goes through a PLT entry, make sure to
7182 check distance to the right destination address. */
7183 if (h != NULL
7184 && splt != NULL
7185 && h->plt.offset != (bfd_vma) -1)
7186 {
7187 value = (splt->output_section->vma
7188 + splt->output_offset
7189 + h->plt.offset);
7190 *unresolved_reloc_p = FALSE;
7191 /* The PLT entry is in ARM mode, regardless of the
7192 target function. */
7193 sym_flags = STT_FUNC;
7194 }
7195 }
7196 }
7197
7198 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7199 where:
7200 S is the address of the symbol in the relocation.
7201 P is address of the instruction being relocated.
7202 A is the addend (extracted from the instruction) in bytes.
7203
7204 S is held in 'value'.
7205 P is the base address of the section containing the
7206 instruction plus the offset of the reloc into that
7207 section, ie:
7208 (input_section->output_section->vma +
7209 input_section->output_offset +
7210 rel->r_offset).
7211 A is the addend, converted into bytes, ie:
7212 (signed_addend * 4)
7213
7214 Note: None of these operations have knowledge of the pipeline
7215 size of the processor, thus it is up to the assembler to
7216 encode this information into the addend. */
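      /* A worked example: for a REL-format R_ARM_PC24 branch at
         P == 0x8020 whose instruction field holds the conventional
         addend of -2 (i.e. A == -8 bytes) and whose target is
         S == 0x8100, the sum is 0x8100 - 0x8020 - 8 == 0xd8; shifted
         right by 2 this leaves 0x36 in the 24-bit field, and at run
         time the branch, taken with a PC of P + 8, lands on 0x8100.  */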
7217 value -= (input_section->output_section->vma
7218 + input_section->output_offset);
7219 value -= rel->r_offset;
7220 if (globals->use_rel)
7221 value += (signed_addend << howto->size);
7222 else
7223 /* RELA addends do not have to be adjusted by howto->size. */
7224 value += signed_addend;
7225
7226 signed_addend = value;
7227 signed_addend >>= howto->rightshift;
7228
7229 /* A branch to an undefined weak symbol is turned into a jump to
7230 the next instruction unless a PLT entry will be created.
7231 Do the same for local undefined symbols (but not for STN_UNDEF).
7232 The jump to the next instruction is optimized as a NOP depending
7233 on the architecture. */
7234 if (h ? (h->root.type == bfd_link_hash_undefweak
7235 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7236 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
7237 {
7238 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
7239
7240 if (arch_has_arm_nop (globals))
7241 value |= 0x0320f000;
7242 else
7243 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
7244 }
7245 else
7246 {
7247 /* Perform a signed range check. */
7248 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7249 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7250 return bfd_reloc_overflow;
7251
7252 addend = (value & 2);
7253
7254 value = (signed_addend & howto->dst_mask)
7255 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7256
7257 if (r_type == R_ARM_CALL)
7258 {
7259 /* Set the H bit in the BLX instruction. */
7260 if (sym_flags == STT_ARM_TFUNC)
7261 {
7262 if (addend)
7263 value |= (1 << 24);
7264 else
7265 value &= ~(bfd_vma)(1 << 24);
7266 }
7267
7268 /* Select the correct instruction (BL or BLX). */
7269 /* Only if we are not handling a BL to a stub. In this
7270 case, mode switching is performed by the stub. */
7271 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7272 value |= (1 << 28);
7273 else
7274 {
7275 value &= ~(bfd_vma)(1 << 28);
7276 value |= (1 << 24);
7277 }
7278 }
7279 }
7280 }
7281 break;
7282
7283 case R_ARM_ABS32:
7284 value += addend;
7285 if (sym_flags == STT_ARM_TFUNC)
7286 value |= 1;
7287 break;
7288
7289 case R_ARM_ABS32_NOI:
7290 value += addend;
7291 break;
7292
7293 case R_ARM_REL32:
7294 value += addend;
7295 if (sym_flags == STT_ARM_TFUNC)
7296 value |= 1;
7297 value -= (input_section->output_section->vma
7298 + input_section->output_offset + rel->r_offset);
7299 break;
7300
7301 case R_ARM_REL32_NOI:
7302 value += addend;
7303 value -= (input_section->output_section->vma
7304 + input_section->output_offset + rel->r_offset);
7305 break;
7306
7307 case R_ARM_PREL31:
7308 value -= (input_section->output_section->vma
7309 + input_section->output_offset + rel->r_offset);
7310 value += signed_addend;
7311 if (! h || h->root.type != bfd_link_hash_undefweak)
7312 {
7313 /* Check for overflow. */
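	      /* The field holds a signed 31-bit offset, so bits 30 and
		 31 of VALUE must agree; value ^ (value >> 1) has bit 30
		 set exactly when they differ.  */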
7314 if ((value ^ (value >> 1)) & (1 << 30))
7315 return bfd_reloc_overflow;
7316 }
7317 value &= 0x7fffffff;
7318 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7319 if (sym_flags == STT_ARM_TFUNC)
7320 value |= 1;
7321 break;
7322 }
7323
7324 bfd_put_32 (input_bfd, value, hit_data);
7325 return bfd_reloc_ok;
7326
7327 case R_ARM_ABS8:
7328 value += addend;
7329
7330 /* There is no way to tell whether the user intended to use a signed or
7331 unsigned addend. When checking for overflow we accept either,
7332 as specified by the AAELF. */
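      /* The accepted range is therefore the union of the unsigned
	 (0 .. 0xff) and signed (-0x80 .. 0x7f) 8-bit ranges,
	 i.e. -0x80 .. 0xff.  */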
7333 if ((long) value > 0xff || (long) value < -0x80)
7334 return bfd_reloc_overflow;
7335
7336 bfd_put_8 (input_bfd, value, hit_data);
7337 return bfd_reloc_ok;
7338
7339 case R_ARM_ABS16:
7340 value += addend;
7341
7342 /* See comment for R_ARM_ABS8. */
7343 if ((long) value > 0xffff || (long) value < -0x8000)
7344 return bfd_reloc_overflow;
7345
7346 bfd_put_16 (input_bfd, value, hit_data);
7347 return bfd_reloc_ok;
7348
7349 case R_ARM_THM_ABS5:
7350 /* Support ldr and str instructions for the thumb. */
7351 if (globals->use_rel)
7352 {
7353 /* Need to refetch addend. */
7354 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7355 /* ??? Need to determine shift amount from operand size. */
7356 addend >>= howto->rightshift;
7357 }
7358 value += addend;
7359
7360 /* ??? Isn't value unsigned? */
7361 if ((long) value > 0x1f || (long) value < -0x10)
7362 return bfd_reloc_overflow;
7363
7364 /* ??? Value needs to be properly shifted into place first. */
7365 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7366 bfd_put_16 (input_bfd, value, hit_data);
7367 return bfd_reloc_ok;
7368
7369 case R_ARM_THM_ALU_PREL_11_0:
7370 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7371 {
7372 bfd_vma insn;
7373 bfd_signed_vma relocation;
7374
7375 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7376 | bfd_get_16 (input_bfd, hit_data + 2);
7377
7378 if (globals->use_rel)
7379 {
7380 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7381 | ((insn & (1 << 26)) >> 15);
7382 if (insn & 0xf00000)
7383 signed_addend = -signed_addend;
7384 }
7385
7386 relocation = value + signed_addend;
7387 relocation -= (input_section->output_section->vma
7388 + input_section->output_offset
7389 + rel->r_offset);
7390
7391 value = abs (relocation);
7392
7393 if (value >= 0x1000)
7394 return bfd_reloc_overflow;
7395
7396 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7397 | ((value & 0x700) << 4)
7398 | ((value & 0x800) << 15);
7399 if (relocation < 0)
7400 insn |= 0xa00000;
7401
7402 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7403 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7404
7405 return bfd_reloc_ok;
7406 }
7407
7408 case R_ARM_THM_PC8:
7409 /* PR 10073: This reloc is not generated by the GNU toolchain,
7410 but it is supported for compatibility with third party libraries
7411	 generated by other compilers, specifically the ARM and IAR compilers.  */
7412 {
7413 bfd_vma insn;
7414 bfd_signed_vma relocation;
7415
7416 insn = bfd_get_16 (input_bfd, hit_data);
7417
7418 if (globals->use_rel)
7419 addend = (insn & 0x00ff) << 2;
7420
7421 relocation = value + addend;
7422 relocation -= (input_section->output_section->vma
7423 + input_section->output_offset
7424 + rel->r_offset);
7425
7426 value = abs (relocation);
7427
7428 /* We do not check for overflow of this reloc. Although strictly
7429 speaking this is incorrect, it appears to be necessary in order
7430 to work with IAR generated relocs. Since GCC and GAS do not
7431 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7432 a problem for them. */
7433 value &= 0x3fc;
7434
7435 insn = (insn & 0xff00) | (value >> 2);
7436
7437 bfd_put_16 (input_bfd, insn, hit_data);
7438
7439 return bfd_reloc_ok;
7440 }
7441
7442 case R_ARM_THM_PC12:
7443 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7444 {
7445 bfd_vma insn;
7446 bfd_signed_vma relocation;
7447
7448 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7449 | bfd_get_16 (input_bfd, hit_data + 2);
7450
7451 if (globals->use_rel)
7452 {
7453 signed_addend = insn & 0xfff;
7454 if (!(insn & (1 << 23)))
7455 signed_addend = -signed_addend;
7456 }
7457
7458 relocation = value + signed_addend;
7459 relocation -= (input_section->output_section->vma
7460 + input_section->output_offset
7461 + rel->r_offset);
7462
7463 value = abs (relocation);
7464
7465 if (value >= 0x1000)
7466 return bfd_reloc_overflow;
7467
7468 insn = (insn & 0xff7ff000) | value;
7469 if (relocation >= 0)
7470 insn |= (1 << 23);
7471
7472 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7473 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7474
7475 return bfd_reloc_ok;
7476 }
7477
7478 case R_ARM_THM_XPC22:
7479 case R_ARM_THM_CALL:
7480 case R_ARM_THM_JUMP24:
7481 /* Thumb BL (branch long instruction). */
7482 {
7483 bfd_vma relocation;
7484 bfd_vma reloc_sign;
7485 bfd_boolean overflow = FALSE;
7486 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7487 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7488 bfd_signed_vma reloc_signed_max;
7489 bfd_signed_vma reloc_signed_min;
7490 bfd_vma check;
7491 bfd_signed_vma signed_check;
7492 int bitsize;
7493 const int thumb2 = using_thumb2 (globals);
7494
7495 /* A branch to an undefined weak symbol is turned into a jump to
7496 the next instruction unless a PLT entry will be created.
7497 The jump to the next instruction is optimized as a NOP.W for
7498 Thumb-2 enabled architectures. */
7499 if (h && h->root.type == bfd_link_hash_undefweak
7500 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7501 {
7502 if (arch_has_thumb2_nop (globals))
7503 {
7504 bfd_put_16 (input_bfd, 0xf3af, hit_data);
7505 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
7506 }
7507 else
7508 {
7509 bfd_put_16 (input_bfd, 0xe000, hit_data);
7510 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7511 }
7512 return bfd_reloc_ok;
7513 }
7514
7515 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7516 with Thumb-1) involving the J1 and J2 bits. */
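	/* The Thumb-2 BL/BLX immediate is laid out as S:imm10 in the
	   first halfword and J1:J2:imm11 in the second, with
	   I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S); the byte offset is
	   SignExtend(S:I1:I2:imm10:imm11:'0').  */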
7517 if (globals->use_rel)
7518 {
7519 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7520 bfd_vma upper = upper_insn & 0x3ff;
7521 bfd_vma lower = lower_insn & 0x7ff;
7522 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7523 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7524 bfd_vma i1 = j1 ^ s ? 0 : 1;
7525 bfd_vma i2 = j2 ^ s ? 0 : 1;
7526
7527 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7528 /* Sign extend. */
7529 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7530
7531 signed_addend = addend;
7532 }
7533
7534 if (r_type == R_ARM_THM_XPC22)
7535 {
7536 /* Check for Thumb to Thumb call. */
7537 /* FIXME: Should we translate the instruction into a BL
7538 instruction instead ? */
7539 if (sym_flags == STT_ARM_TFUNC)
7540 (*_bfd_error_handler)
7541 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7542 input_bfd,
7543 h ? h->root.root.string : "(local)");
7544 }
7545 else
7546 {
7547 /* If it is not a call to Thumb, assume call to Arm.
7548 If it is a call relative to a section name, then it is not a
7549 function call at all, but rather a long jump. Calls through
7550 the PLT do not require stubs. */
7551 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7552 && (h == NULL || splt == NULL
7553 || h->plt.offset == (bfd_vma) -1))
7554 {
7555 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7556 {
7557 /* Convert BL to BLX. */
7558 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7559 }
7560 else if (( r_type != R_ARM_THM_CALL)
7561 && (r_type != R_ARM_THM_JUMP24))
7562 {
7563 if (elf32_thumb_to_arm_stub
7564 (info, sym_name, input_bfd, output_bfd, input_section,
7565 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7566 error_message))
7567 return bfd_reloc_ok;
7568 else
7569 return bfd_reloc_dangerous;
7570 }
7571 }
7572 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7573 && r_type == R_ARM_THM_CALL)
7574 {
7575 /* Make sure this is a BL. */
7576 lower_insn |= 0x1800;
7577 }
7578 }
7579
7580 enum elf32_arm_stub_type stub_type = arm_stub_none;
7581 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7582 {
7583 /* Check if a stub has to be inserted because the destination
7584 is too far. */
7585 struct elf32_arm_stub_hash_entry *stub_entry;
7586 struct elf32_arm_link_hash_entry *hash;
7587
7588 hash = (struct elf32_arm_link_hash_entry *) h;
7589
7590 stub_type = arm_type_of_stub (info, input_section, rel,
7591 &sym_flags, hash, value, sym_sec,
7592 input_bfd, sym_name);
7593
7594 if (stub_type != arm_stub_none)
7595 {
7596 /* The target is out of reach or we are changing modes, so
7597 redirect the branch to the local stub for this
7598 function. */
7599 stub_entry = elf32_arm_get_stub_entry (input_section,
7600 sym_sec, h,
7601 rel, globals,
7602 stub_type);
7603 if (stub_entry != NULL)
7604 value = (stub_entry->stub_offset
7605 + stub_entry->stub_sec->output_offset
7606 + stub_entry->stub_sec->output_section->vma);
7607
7608 /* If this call becomes a call to Arm, force BLX. */
7609 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7610 {
7611 if ((stub_entry
7612 && !arm_stub_is_thumb (stub_entry->stub_type))
7613 || (sym_flags != STT_ARM_TFUNC))
7614 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7615 }
7616 }
7617 }
7618
7619 /* Handle calls via the PLT. */
7620 if (stub_type == arm_stub_none
7621 && h != NULL
7622 && splt != NULL
7623 && h->plt.offset != (bfd_vma) -1)
7624 {
7625 value = (splt->output_section->vma
7626 + splt->output_offset
7627 + h->plt.offset);
7628
7629 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7630 {
7631 /* If the Thumb BLX instruction is available, convert
7632 the BL to a BLX instruction to call the ARM-mode
7633 PLT entry. */
7634 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7635 sym_flags = STT_FUNC;
7636 }
7637 else
7638 {
7639 /* Target the Thumb stub before the ARM PLT entry. */
7640 value -= PLT_THUMB_STUB_SIZE;
7641 sym_flags = STT_ARM_TFUNC;
7642 }
7643 *unresolved_reloc_p = FALSE;
7644 }
7645
7646 relocation = value + signed_addend;
7647
7648 relocation -= (input_section->output_section->vma
7649 + input_section->output_offset
7650 + rel->r_offset);
7651
7652 check = relocation >> howto->rightshift;
7653
7654 /* If this is a signed value, the rightshift just dropped
7655 leading 1 bits (assuming twos complement). */
7656 if ((bfd_signed_vma) relocation >= 0)
7657 signed_check = check;
7658 else
7659 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7660
7661	    /* Calculate the permissible maximum and minimum values for
7662 this relocation according to whether we're relocating for
7663 Thumb-2 or not. */
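	/* With Thumb-2 the BL/BLX (and B.W) offset has 25 bits
	   (+/- 16MB); without Thumb-2 the original BL encoding only
	   provides 23 bits (+/- 4MB), hence the two-bit reduction.  */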
7664 bitsize = howto->bitsize;
7665 if (!thumb2)
7666 bitsize -= 2;
7667 reloc_signed_max = (1 << (bitsize - 1)) - 1;
7668 reloc_signed_min = ~reloc_signed_max;
7669
7670 /* Assumes two's complement. */
7671 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7672 overflow = TRUE;
7673
7674 if ((lower_insn & 0x5000) == 0x4000)
7675 /* For a BLX instruction, make sure that the relocation is rounded up
7676 to a word boundary. This follows the semantics of the instruction
7677 which specifies that bit 1 of the target address will come from bit
7678 1 of the base address. */
7679 relocation = (relocation + 2) & ~ 3;
7680
7681 /* Put RELOCATION back into the insn. Assumes two's complement.
7682 We use the Thumb-2 encoding, which is safe even if dealing with
7683 a Thumb-1 instruction by virtue of our overflow check above. */
7684 reloc_sign = (signed_check < 0) ? 1 : 0;
7685 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7686 | ((relocation >> 12) & 0x3ff)
7687 | (reloc_sign << 10);
7688 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7689 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7690 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7691 | ((relocation >> 1) & 0x7ff);
7692
7693 /* Put the relocated value back in the object file: */
7694 bfd_put_16 (input_bfd, upper_insn, hit_data);
7695 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7696
7697 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7698 }
7699 break;
7700
7701 case R_ARM_THM_JUMP19:
7702 /* Thumb32 conditional branch instruction. */
7703 {
7704 bfd_vma relocation;
7705 bfd_boolean overflow = FALSE;
7706 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7707 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7708 bfd_signed_vma reloc_signed_max = 0xffffe;
7709 bfd_signed_vma reloc_signed_min = -0x100000;
7710 bfd_signed_vma signed_check;
7711
7712 /* Need to refetch the addend, reconstruct the top three bits,
7713 and squish the two 11 bit pieces together. */
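	/* For the Thumb-2 conditional branch (B<cond>.W) the byte
	   offset is SignExtend(S:J2:J1:imm6:imm11:'0'); unlike BL/BLX,
	   J1 and J2 are used directly, without the XOR-with-S
	   inversion.  */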
7714 if (globals->use_rel)
7715 {
7716 bfd_vma S = (upper_insn & 0x0400) >> 10;
7717 bfd_vma upper = (upper_insn & 0x003f);
7718 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7719 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7720 bfd_vma lower = (lower_insn & 0x07ff);
7721
7722 upper |= J1 << 6;
7723 upper |= J2 << 7;
7724 upper |= (!S) << 8;
7725 upper -= 0x0100; /* Sign extend. */
7726
7727 addend = (upper << 12) | (lower << 1);
7728 signed_addend = addend;
7729 }
7730
7731 /* Handle calls via the PLT. */
7732 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7733 {
7734 value = (splt->output_section->vma
7735 + splt->output_offset
7736 + h->plt.offset);
7737 /* Target the Thumb stub before the ARM PLT entry. */
7738 value -= PLT_THUMB_STUB_SIZE;
7739 *unresolved_reloc_p = FALSE;
7740 }
7741
7742 /* ??? Should handle interworking? GCC might someday try to
7743 use this for tail calls. */
7744
7745 relocation = value + signed_addend;
7746 relocation -= (input_section->output_section->vma
7747 + input_section->output_offset
7748 + rel->r_offset);
7749 signed_check = (bfd_signed_vma) relocation;
7750
7751 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7752 overflow = TRUE;
7753
7754 /* Put RELOCATION back into the insn. */
7755 {
7756 bfd_vma S = (relocation & 0x00100000) >> 20;
7757 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7758 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7759 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7760 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7761
7762 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7763 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7764 }
7765
7766 /* Put the relocated value back in the object file: */
7767 bfd_put_16 (input_bfd, upper_insn, hit_data);
7768 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7769
7770 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7771 }
7772
7773 case R_ARM_THM_JUMP11:
7774 case R_ARM_THM_JUMP8:
7775 case R_ARM_THM_JUMP6:
7776      /* Thumb B (branch) instruction.  */
7777 {
7778 bfd_signed_vma relocation;
7779 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7780 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7781 bfd_signed_vma signed_check;
7782
7783	/* CBZ cannot jump backward.  */
7784 if (r_type == R_ARM_THM_JUMP6)
7785 reloc_signed_min = 0;
7786
7787 if (globals->use_rel)
7788 {
7789 /* Need to refetch addend. */
7790 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7791 if (addend & ((howto->src_mask + 1) >> 1))
7792 {
7793 signed_addend = -1;
7794 signed_addend &= ~ howto->src_mask;
7795 signed_addend |= addend;
7796 }
7797 else
7798 signed_addend = addend;
7799 /* The value in the insn has been right shifted. We need to
7800 undo this, so that we can perform the address calculation
7801 in terms of bytes. */
7802 signed_addend <<= howto->rightshift;
7803 }
7804 relocation = value + signed_addend;
7805
7806 relocation -= (input_section->output_section->vma
7807 + input_section->output_offset
7808 + rel->r_offset);
7809
7810 relocation >>= howto->rightshift;
7811 signed_check = relocation;
7812
7813 if (r_type == R_ARM_THM_JUMP6)
7814 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7815 else
7816 relocation &= howto->dst_mask;
7817 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7818
7819 bfd_put_16 (input_bfd, relocation, hit_data);
7820
7821 /* Assumes two's complement. */
7822 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7823 return bfd_reloc_overflow;
7824
7825 return bfd_reloc_ok;
7826 }
7827
7828 case R_ARM_ALU_PCREL7_0:
7829 case R_ARM_ALU_PCREL15_8:
7830 case R_ARM_ALU_PCREL23_15:
7831 {
7832 bfd_vma insn;
7833 bfd_vma relocation;
7834
7835 insn = bfd_get_32 (input_bfd, hit_data);
7836 if (globals->use_rel)
7837 {
7838 /* Extract the addend. */
7839 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7840 signed_addend = addend;
7841 }
7842 relocation = value + signed_addend;
7843
7844 relocation -= (input_section->output_section->vma
7845 + input_section->output_offset
7846 + rel->r_offset);
7847 insn = (insn & ~0xfff)
7848 | ((howto->bitpos << 7) & 0xf00)
7849 | ((relocation >> howto->bitpos) & 0xff);
7850	     bfd_put_32 (input_bfd, insn, hit_data);
7851 }
7852 return bfd_reloc_ok;
7853
7854 case R_ARM_GNU_VTINHERIT:
7855 case R_ARM_GNU_VTENTRY:
7856 return bfd_reloc_ok;
7857
7858 case R_ARM_GOTOFF32:
7859 /* Relocation is relative to the start of the
7860 global offset table. */
7861
7862 BFD_ASSERT (sgot != NULL);
7863 if (sgot == NULL)
7864 return bfd_reloc_notsupported;
7865
7866 /* If we are addressing a Thumb function, we need to adjust the
7867 address by one, so that attempts to call the function pointer will
7868 correctly interpret it as Thumb code. */
7869 if (sym_flags == STT_ARM_TFUNC)
7870 value += 1;
7871
7872 /* Note that sgot->output_offset is not involved in this
7873 calculation. We always want the start of .got. If we
7874 define _GLOBAL_OFFSET_TABLE in a different way, as is
7875 permitted by the ABI, we might have to change this
7876 calculation. */
7877 value -= sgot->output_section->vma;
7878 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7879 contents, rel->r_offset, value,
7880 rel->r_addend);
7881
7882 case R_ARM_GOTPC:
7883 /* Use global offset table as symbol value. */
7884 BFD_ASSERT (sgot != NULL);
7885
7886 if (sgot == NULL)
7887 return bfd_reloc_notsupported;
7888
7889 *unresolved_reloc_p = FALSE;
7890 value = sgot->output_section->vma;
7891 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7892 contents, rel->r_offset, value,
7893 rel->r_addend);
7894
7895 case R_ARM_GOT32:
7896 case R_ARM_GOT_PREL:
7897 /* Relocation is to the entry for this symbol in the
7898 global offset table. */
7899 if (sgot == NULL)
7900 return bfd_reloc_notsupported;
7901
7902 if (h != NULL)
7903 {
7904 bfd_vma off;
7905 bfd_boolean dyn;
7906
7907 off = h->got.offset;
7908 BFD_ASSERT (off != (bfd_vma) -1);
7909 dyn = globals->root.dynamic_sections_created;
7910
7911 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7912 || (info->shared
7913 && SYMBOL_REFERENCES_LOCAL (info, h))
7914 || (ELF_ST_VISIBILITY (h->other)
7915 && h->root.type == bfd_link_hash_undefweak))
7916 {
7917 /* This is actually a static link, or it is a -Bsymbolic link
7918 and the symbol is defined locally. We must initialize this
7919 entry in the global offset table. Since the offset must
7920 always be a multiple of 4, we use the least significant bit
7921 to record whether we have initialized it already.
7922
7923 When doing a dynamic link, we create a .rel(a).got relocation
7924 entry to initialize the value. This is done in the
7925 finish_dynamic_symbol routine. */
7926 if ((off & 1) != 0)
7927 off &= ~1;
7928 else
7929 {
7930 /* If we are addressing a Thumb function, we need to
7931 adjust the address by one, so that attempts to
7932 call the function pointer will correctly
7933 interpret it as Thumb code. */
7934 if (sym_flags == STT_ARM_TFUNC)
7935 value |= 1;
7936
7937 bfd_put_32 (output_bfd, value, sgot->contents + off);
7938 h->got.offset |= 1;
7939 }
7940 }
7941 else
7942 *unresolved_reloc_p = FALSE;
7943
7944 value = sgot->output_offset + off;
7945 }
7946 else
7947 {
7948 bfd_vma off;
7949
7950 BFD_ASSERT (local_got_offsets != NULL &&
7951 local_got_offsets[r_symndx] != (bfd_vma) -1);
7952
7953 off = local_got_offsets[r_symndx];
7954
7955 /* The offset must always be a multiple of 4. We use the
7956 least significant bit to record whether we have already
7957 generated the necessary reloc. */
7958 if ((off & 1) != 0)
7959 off &= ~1;
7960 else
7961 {
7962 /* If we are addressing a Thumb function, we need to
7963 adjust the address by one, so that attempts to
7964 call the function pointer will correctly
7965 interpret it as Thumb code. */
7966 if (sym_flags == STT_ARM_TFUNC)
7967 value |= 1;
7968
7969 if (globals->use_rel)
7970 bfd_put_32 (output_bfd, value, sgot->contents + off);
7971
7972 if (info->shared)
7973 {
7974 asection * srelgot;
7975 Elf_Internal_Rela outrel;
7976 bfd_byte *loc;
7977
7978 srelgot = (bfd_get_section_by_name
7979 (dynobj, RELOC_SECTION (globals, ".got")));
7980 BFD_ASSERT (srelgot != NULL);
7981
7982 outrel.r_addend = addend + value;
7983 outrel.r_offset = (sgot->output_section->vma
7984 + sgot->output_offset
7985 + off);
7986 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7987 loc = srelgot->contents;
7988 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7989 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7990 }
7991
7992 local_got_offsets[r_symndx] |= 1;
7993 }
7994
7995 value = sgot->output_offset + off;
7996 }
7997 if (r_type != R_ARM_GOT32)
7998 value += sgot->output_section->vma;
7999
8000 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8001 contents, rel->r_offset, value,
8002 rel->r_addend);
8003
8004 case R_ARM_TLS_LDO32:
8005 value = value - dtpoff_base (info);
8006
8007 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8008 contents, rel->r_offset, value,
8009 rel->r_addend);
8010
8011 case R_ARM_TLS_LDM32:
8012 {
8013 bfd_vma off;
8014
8015 if (globals->sgot == NULL)
8016 abort ();
8017
8018 off = globals->tls_ldm_got.offset;
8019
8020 if ((off & 1) != 0)
8021 off &= ~1;
8022 else
8023 {
8024 /* If we don't know the module number, create a relocation
8025 for it. */
8026 if (info->shared)
8027 {
8028 Elf_Internal_Rela outrel;
8029 bfd_byte *loc;
8030
8031 if (globals->srelgot == NULL)
8032 abort ();
8033
8034 outrel.r_addend = 0;
8035 outrel.r_offset = (globals->sgot->output_section->vma
8036 + globals->sgot->output_offset + off);
8037 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
8038
8039 if (globals->use_rel)
8040 bfd_put_32 (output_bfd, outrel.r_addend,
8041 globals->sgot->contents + off);
8042
8043 loc = globals->srelgot->contents;
8044 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
8045 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8046 }
8047 else
8048 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
8049
8050 globals->tls_ldm_got.offset |= 1;
8051 }
8052
8053 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8054 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8055
8056 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8057 contents, rel->r_offset, value,
8058 rel->r_addend);
8059 }
8060
8061 case R_ARM_TLS_GD32:
8062 case R_ARM_TLS_IE32:
8063 {
8064 bfd_vma off;
8065 int indx;
8066 char tls_type;
8067
8068 if (globals->sgot == NULL)
8069 abort ();
8070
8071 indx = 0;
8072 if (h != NULL)
8073 {
8074 bfd_boolean dyn;
8075 dyn = globals->root.dynamic_sections_created;
8076 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
8077 && (!info->shared
8078 || !SYMBOL_REFERENCES_LOCAL (info, h)))
8079 {
8080 *unresolved_reloc_p = FALSE;
8081 indx = h->dynindx;
8082 }
8083 off = h->got.offset;
8084 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
8085 }
8086 else
8087 {
8088 if (local_got_offsets == NULL)
8089 abort ();
8090 off = local_got_offsets[r_symndx];
8091 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
8092 }
8093
8094 if (tls_type == GOT_UNKNOWN)
8095 abort ();
8096
8097 if ((off & 1) != 0)
8098 off &= ~1;
8099 else
8100 {
8101 bfd_boolean need_relocs = FALSE;
8102 Elf_Internal_Rela outrel;
8103 bfd_byte *loc = NULL;
8104 int cur_off = off;
8105
8106 /* The GOT entries have not been initialized yet. Do it
8107 now, and emit any relocations. If both an IE GOT and a
8108 GD GOT are necessary, we emit the GD first. */
8109
8110 if ((info->shared || indx != 0)
8111 && (h == NULL
8112 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8113 || h->root.type != bfd_link_hash_undefweak))
8114 {
8115 need_relocs = TRUE;
8116 if (globals->srelgot == NULL)
8117 abort ();
8118 loc = globals->srelgot->contents;
8119 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
8120 }
8121
8122 if (tls_type & GOT_TLS_GD)
8123 {
8124 if (need_relocs)
8125 {
8126 outrel.r_addend = 0;
8127 outrel.r_offset = (globals->sgot->output_section->vma
8128 + globals->sgot->output_offset
8129 + cur_off);
8130 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
8131
8132 if (globals->use_rel)
8133 bfd_put_32 (output_bfd, outrel.r_addend,
8134 globals->sgot->contents + cur_off);
8135
8136 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8137 globals->srelgot->reloc_count++;
8138 loc += RELOC_SIZE (globals);
8139
8140 if (indx == 0)
8141 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8142 globals->sgot->contents + cur_off + 4);
8143 else
8144 {
8145 outrel.r_addend = 0;
8146 outrel.r_info = ELF32_R_INFO (indx,
8147 R_ARM_TLS_DTPOFF32);
8148 outrel.r_offset += 4;
8149
8150 if (globals->use_rel)
8151 bfd_put_32 (output_bfd, outrel.r_addend,
8152 globals->sgot->contents + cur_off + 4);
8153
8154
8155 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8156 globals->srelgot->reloc_count++;
8157 loc += RELOC_SIZE (globals);
8158 }
8159 }
8160 else
8161 {
8162 /* If we are not emitting relocations for a
8163 general dynamic reference, then we must be in a
8164 static link or an executable link with the
8165 symbol binding locally. Mark it as belonging
8166 to module 1, the executable. */
8167 bfd_put_32 (output_bfd, 1,
8168 globals->sgot->contents + cur_off);
8169 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8170 globals->sgot->contents + cur_off + 4);
8171 }
8172
8173 cur_off += 8;
8174 }
8175
8176 if (tls_type & GOT_TLS_IE)
8177 {
8178 if (need_relocs)
8179 {
8180 if (indx == 0)
8181 outrel.r_addend = value - dtpoff_base (info);
8182 else
8183 outrel.r_addend = 0;
8184 outrel.r_offset = (globals->sgot->output_section->vma
8185 + globals->sgot->output_offset
8186 + cur_off);
8187 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
8188
8189 if (globals->use_rel)
8190 bfd_put_32 (output_bfd, outrel.r_addend,
8191 globals->sgot->contents + cur_off);
8192
8193 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8194 globals->srelgot->reloc_count++;
8195 loc += RELOC_SIZE (globals);
8196 }
8197 else
8198 bfd_put_32 (output_bfd, tpoff (info, value),
8199 globals->sgot->contents + cur_off);
8200 cur_off += 4;
8201 }
8202
8203 if (h != NULL)
8204 h->got.offset |= 1;
8205 else
8206 local_got_offsets[r_symndx] |= 1;
8207 }
8208
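      /* A GD entry occupies two GOT words (DTPMOD followed by DTPOFF).
	 If the symbol also needed an IE entry and this relocation is
	 the IE one, step past the GD pair to reach the TPOFF word.  */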
8209 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8210 off += 8;
8211 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8212 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8213
8214 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8215 contents, rel->r_offset, value,
8216 rel->r_addend);
8217 }
8218
8219 case R_ARM_TLS_LE32:
8220 if (info->shared)
8221 {
8222 (*_bfd_error_handler)
8223 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
8224 input_bfd, input_section,
8225 (long) rel->r_offset, howto->name);
8226	  return bfd_reloc_notsupported;
8227 }
8228 else
8229 value = tpoff (info, value);
8230
8231 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8232 contents, rel->r_offset, value,
8233 rel->r_addend);
8234
8235 case R_ARM_V4BX:
8236 if (globals->fix_v4bx)
8237 {
8238 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8239
8240 /* Ensure that we have a BX instruction. */
8241 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8242
8243 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8244 {
8245 /* Branch to veneer. */
8246 bfd_vma glue_addr;
8247 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8248 glue_addr -= input_section->output_section->vma
8249 + input_section->output_offset
8250 + rel->r_offset + 8;
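	      /* The extra 8 is the ARM pipeline offset: a B instruction
		 branches to its own address plus 8 plus four times its
		 signed 24-bit immediate, so the immediate encoded below
		 is (target - insn_address - 8) >> 2.  */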
8251 insn = (insn & 0xf0000000) | 0x0a000000
8252 | ((glue_addr >> 2) & 0x00ffffff);
8253 }
8254 else
8255 {
8256 /* Preserve Rm (lowest four bits) and the condition code
8257 (highest four bits). Other bits encode MOV PC,Rm. */
8258 insn = (insn & 0xf000000f) | 0x01a0f000;
8259 }
8260
8261 bfd_put_32 (input_bfd, insn, hit_data);
8262 }
8263 return bfd_reloc_ok;
8264
8265 case R_ARM_MOVW_ABS_NC:
8266 case R_ARM_MOVT_ABS:
8267 case R_ARM_MOVW_PREL_NC:
8268 case R_ARM_MOVT_PREL:
8269      /* Until we properly support segment-base-relative addressing, we
8270	 assume the segment base to be zero, as for the group relocations.
8271 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8272 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8273 case R_ARM_MOVW_BREL_NC:
8274 case R_ARM_MOVW_BREL:
8275 case R_ARM_MOVT_BREL:
8276 {
8277 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8278
8279 if (globals->use_rel)
8280 {
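	    /* The MOVW/MOVT immediate is split into imm4 (bits 19-16)
	       and imm12 (bits 11-0); reassemble it and sign extend the
	       16-bit result, which (x ^ 0x8000) - 0x8000 does
	       regardless of the width of bfd_signed_vma.  */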
8281 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8282 signed_addend = (addend ^ 0x8000) - 0x8000;
8283 }
8284
8285 value += signed_addend;
8286
8287 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8288 value -= (input_section->output_section->vma
8289 + input_section->output_offset + rel->r_offset);
8290
8291 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8292 return bfd_reloc_overflow;
8293
8294 if (sym_flags == STT_ARM_TFUNC)
8295 value |= 1;
8296
8297 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8298 || r_type == R_ARM_MOVT_BREL)
8299 value >>= 16;
8300
8301 insn &= 0xfff0f000;
8302 insn |= value & 0xfff;
8303 insn |= (value & 0xf000) << 4;
8304 bfd_put_32 (input_bfd, insn, hit_data);
8305 }
8306 return bfd_reloc_ok;
8307
8308 case R_ARM_THM_MOVW_ABS_NC:
8309 case R_ARM_THM_MOVT_ABS:
8310 case R_ARM_THM_MOVW_PREL_NC:
8311 case R_ARM_THM_MOVT_PREL:
8312      /* Until we properly support segment-base-relative addressing, we
8313	 assume the segment base to be zero, as for the above relocations.
8314 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8315 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8316 as R_ARM_THM_MOVT_ABS. */
8317 case R_ARM_THM_MOVW_BREL_NC:
8318 case R_ARM_THM_MOVW_BREL:
8319 case R_ARM_THM_MOVT_BREL:
8320 {
8321 bfd_vma insn;
8322
8323 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8324 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8325
8326 if (globals->use_rel)
8327 {
8328 addend = ((insn >> 4) & 0xf000)
8329 | ((insn >> 15) & 0x0800)
8330 | ((insn >> 4) & 0x0700)
8331 | (insn & 0x00ff);
8332 signed_addend = (addend ^ 0x8000) - 0x8000;
8333 }
8334
8335 value += signed_addend;
8336
8337 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8338 value -= (input_section->output_section->vma
8339 + input_section->output_offset + rel->r_offset);
8340
8341 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8342 return bfd_reloc_overflow;
8343
8344 if (sym_flags == STT_ARM_TFUNC)
8345 value |= 1;
8346
8347 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8348 || r_type == R_ARM_THM_MOVT_BREL)
8349 value >>= 16;
8350
8351 insn &= 0xfbf08f00;
8352 insn |= (value & 0xf000) << 4;
8353 insn |= (value & 0x0800) << 15;
8354 insn |= (value & 0x0700) << 4;
8355 insn |= (value & 0x00ff);
8356
8357 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8358 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8359 }
8360 return bfd_reloc_ok;
8361
8362 case R_ARM_ALU_PC_G0_NC:
8363 case R_ARM_ALU_PC_G1_NC:
8364 case R_ARM_ALU_PC_G0:
8365 case R_ARM_ALU_PC_G1:
8366 case R_ARM_ALU_PC_G2:
8367 case R_ARM_ALU_SB_G0_NC:
8368 case R_ARM_ALU_SB_G1_NC:
8369 case R_ARM_ALU_SB_G0:
8370 case R_ARM_ALU_SB_G1:
8371 case R_ARM_ALU_SB_G2:
8372 {
8373 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8374 bfd_vma pc = input_section->output_section->vma
8375 + input_section->output_offset + rel->r_offset;
8376 /* sb should be the origin of the *segment* containing the symbol.
8377 It is not clear how to obtain this OS-dependent value, so we
8378 make an arbitrary choice of zero. */
8379 bfd_vma sb = 0;
8380 bfd_vma residual;
8381 bfd_vma g_n;
8382 bfd_signed_vma signed_value;
8383 int group = 0;
8384
8385 /* Determine which group of bits to select. */
8386 switch (r_type)
8387 {
8388 case R_ARM_ALU_PC_G0_NC:
8389 case R_ARM_ALU_PC_G0:
8390 case R_ARM_ALU_SB_G0_NC:
8391 case R_ARM_ALU_SB_G0:
8392 group = 0;
8393 break;
8394
8395 case R_ARM_ALU_PC_G1_NC:
8396 case R_ARM_ALU_PC_G1:
8397 case R_ARM_ALU_SB_G1_NC:
8398 case R_ARM_ALU_SB_G1:
8399 group = 1;
8400 break;
8401
8402 case R_ARM_ALU_PC_G2:
8403 case R_ARM_ALU_SB_G2:
8404 group = 2;
8405 break;
8406
8407 default:
8408 abort ();
8409 }
8410
8411 /* If REL, extract the addend from the insn. If RELA, it will
8412 have already been fetched for us. */
8413 if (globals->use_rel)
8414 {
8415 int negative;
8416 bfd_vma constant = insn & 0xff;
8417 bfd_vma rotation = (insn & 0xf00) >> 8;
8418
8419 if (rotation == 0)
8420 signed_addend = constant;
8421 else
8422 {
8423 /* Compensate for the fact that in the instruction, the
8424 rotation is stored in multiples of 2 bits. */
8425 rotation *= 2;
8426
8427 /* Rotate "constant" right by "rotation" bits. */
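	    /* In the ARM encoding this 12-bit field is a 4-bit rotation
	       and an 8-bit constant: for example (illustrative only),
	       0xf01 denotes 0x01 rotated right by 2 * 0xf = 30 bits
	       within a 32-bit word, i.e. the value 4.  */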
8428 signed_addend = (constant >> rotation) |
8429 (constant << (8 * sizeof (bfd_vma) - rotation));
8430 }
8431
8432 /* Determine if the instruction is an ADD or a SUB.
8433 (For REL, this determines the sign of the addend.) */
8434 negative = identify_add_or_sub (insn);
8435 if (negative == 0)
8436 {
8437 (*_bfd_error_handler)
8438 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8439 input_bfd, input_section,
8440 (long) rel->r_offset, howto->name);
8441 return bfd_reloc_overflow;
8442 }
8443
8444 signed_addend *= negative;
8445 }
8446
8447 /* Compute the value (X) to go in the place. */
8448 if (r_type == R_ARM_ALU_PC_G0_NC
8449 || r_type == R_ARM_ALU_PC_G1_NC
8450 || r_type == R_ARM_ALU_PC_G0
8451 || r_type == R_ARM_ALU_PC_G1
8452 || r_type == R_ARM_ALU_PC_G2)
8453 /* PC relative. */
8454 signed_value = value - pc + signed_addend;
8455 else
8456 /* Section base relative. */
8457 signed_value = value - sb + signed_addend;
8458
8459 /* If the target symbol is a Thumb function, then set the
8460 Thumb bit in the address. */
8461 if (sym_flags == STT_ARM_TFUNC)
8462 signed_value |= 1;
8463
8464 /* Calculate the value of the relevant G_n, in encoded
8465 constant-with-rotation format. */
8466 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8467 &residual);
8468
8469 /* Check for overflow if required. */
8470 if ((r_type == R_ARM_ALU_PC_G0
8471 || r_type == R_ARM_ALU_PC_G1
8472 || r_type == R_ARM_ALU_PC_G2
8473 || r_type == R_ARM_ALU_SB_G0
8474 || r_type == R_ARM_ALU_SB_G1
8475 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8476 {
8477 (*_bfd_error_handler)
8478 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8479 input_bfd, input_section,
8480 (long) rel->r_offset, abs (signed_value), howto->name);
8481 return bfd_reloc_overflow;
8482 }
8483
8484 /* Mask out the value and the ADD/SUB part of the opcode; take care
8485 not to destroy the S bit. */
8486 insn &= 0xff1ff000;
8487
8488 /* Set the opcode according to whether the value to go in the
8489 place is negative. */
8490 if (signed_value < 0)
8491 insn |= 1 << 22;
8492 else
8493 insn |= 1 << 23;
8494
8495 /* Encode the offset. */
8496 insn |= g_n;
8497
8498 bfd_put_32 (input_bfd, insn, hit_data);
8499 }
8500 return bfd_reloc_ok;
8501
8502 case R_ARM_LDR_PC_G0:
8503 case R_ARM_LDR_PC_G1:
8504 case R_ARM_LDR_PC_G2:
8505 case R_ARM_LDR_SB_G0:
8506 case R_ARM_LDR_SB_G1:
8507 case R_ARM_LDR_SB_G2:
8508 {
8509 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8510 bfd_vma pc = input_section->output_section->vma
8511 + input_section->output_offset + rel->r_offset;
8512 bfd_vma sb = 0; /* See note above. */
8513 bfd_vma residual;
8514 bfd_signed_vma signed_value;
8515 int group = 0;
8516
8517 /* Determine which groups of bits to calculate. */
8518 switch (r_type)
8519 {
8520 case R_ARM_LDR_PC_G0:
8521 case R_ARM_LDR_SB_G0:
8522 group = 0;
8523 break;
8524
8525 case R_ARM_LDR_PC_G1:
8526 case R_ARM_LDR_SB_G1:
8527 group = 1;
8528 break;
8529
8530 case R_ARM_LDR_PC_G2:
8531 case R_ARM_LDR_SB_G2:
8532 group = 2;
8533 break;
8534
8535 default:
8536 abort ();
8537 }
8538
8539 /* If REL, extract the addend from the insn. If RELA, it will
8540 have already been fetched for us. */
8541 if (globals->use_rel)
8542 {
8543 int negative = (insn & (1 << 23)) ? 1 : -1;
8544 signed_addend = negative * (insn & 0xfff);
8545 }
8546
8547 /* Compute the value (X) to go in the place. */
8548 if (r_type == R_ARM_LDR_PC_G0
8549 || r_type == R_ARM_LDR_PC_G1
8550 || r_type == R_ARM_LDR_PC_G2)
8551 /* PC relative. */
8552 signed_value = value - pc + signed_addend;
8553 else
8554 /* Section base relative. */
8555 signed_value = value - sb + signed_addend;
8556
8557 /* Calculate the value of the relevant G_{n-1} to obtain
8558 the residual at that stage. */
8559 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8560
8561 /* Check for overflow. */
8562 if (residual >= 0x1000)
8563 {
8564 (*_bfd_error_handler)
8565 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8566 input_bfd, input_section,
8567 (long) rel->r_offset, abs (signed_value), howto->name);
8568 return bfd_reloc_overflow;
8569 }
8570
8571 /* Mask out the value and U bit. */
8572 insn &= 0xff7ff000;
8573
8574 /* Set the U bit if the value to go in the place is non-negative. */
8575 if (signed_value >= 0)
8576 insn |= 1 << 23;
8577
8578 /* Encode the offset. */
8579 insn |= residual;
8580
8581 bfd_put_32 (input_bfd, insn, hit_data);
8582 }
8583 return bfd_reloc_ok;
8584
8585 case R_ARM_LDRS_PC_G0:
8586 case R_ARM_LDRS_PC_G1:
8587 case R_ARM_LDRS_PC_G2:
8588 case R_ARM_LDRS_SB_G0:
8589 case R_ARM_LDRS_SB_G1:
8590 case R_ARM_LDRS_SB_G2:
8591 {
8592 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8593 bfd_vma pc = input_section->output_section->vma
8594 + input_section->output_offset + rel->r_offset;
8595 bfd_vma sb = 0; /* See note above. */
8596 bfd_vma residual;
8597 bfd_signed_vma signed_value;
8598 int group = 0;
8599
8600 /* Determine which groups of bits to calculate. */
8601 switch (r_type)
8602 {
8603 case R_ARM_LDRS_PC_G0:
8604 case R_ARM_LDRS_SB_G0:
8605 group = 0;
8606 break;
8607
8608 case R_ARM_LDRS_PC_G1:
8609 case R_ARM_LDRS_SB_G1:
8610 group = 1;
8611 break;
8612
8613 case R_ARM_LDRS_PC_G2:
8614 case R_ARM_LDRS_SB_G2:
8615 group = 2;
8616 break;
8617
8618 default:
8619 abort ();
8620 }
8621
8622 /* If REL, extract the addend from the insn. If RELA, it will
8623 have already been fetched for us. */
8624 if (globals->use_rel)
8625 {
8626 int negative = (insn & (1 << 23)) ? 1 : -1;
8627 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8628 }
8629
8630 /* Compute the value (X) to go in the place. */
8631 if (r_type == R_ARM_LDRS_PC_G0
8632 || r_type == R_ARM_LDRS_PC_G1
8633 || r_type == R_ARM_LDRS_PC_G2)
8634 /* PC relative. */
8635 signed_value = value - pc + signed_addend;
8636 else
8637 /* Section base relative. */
8638 signed_value = value - sb + signed_addend;
8639
8640 /* Calculate the value of the relevant G_{n-1} to obtain
8641 the residual at that stage. */
8642 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8643
8644 /* Check for overflow. */
8645 if (residual >= 0x100)
8646 {
8647 (*_bfd_error_handler)
8648 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8649 input_bfd, input_section,
8650 (long) rel->r_offset, abs (signed_value), howto->name);
8651 return bfd_reloc_overflow;
8652 }
8653
8654 /* Mask out the value and U bit. */
8655 insn &= 0xff7ff0f0;
8656
8657 /* Set the U bit if the value to go in the place is non-negative. */
8658 if (signed_value >= 0)
8659 insn |= 1 << 23;
8660
8661 /* Encode the offset. */
8662 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8663
8664 bfd_put_32 (input_bfd, insn, hit_data);
8665 }
8666 return bfd_reloc_ok;
8667
8668 case R_ARM_LDC_PC_G0:
8669 case R_ARM_LDC_PC_G1:
8670 case R_ARM_LDC_PC_G2:
8671 case R_ARM_LDC_SB_G0:
8672 case R_ARM_LDC_SB_G1:
8673 case R_ARM_LDC_SB_G2:
8674 {
8675 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8676 bfd_vma pc = input_section->output_section->vma
8677 + input_section->output_offset + rel->r_offset;
8678 bfd_vma sb = 0; /* See note above. */
8679 bfd_vma residual;
8680 bfd_signed_vma signed_value;
8681 int group = 0;
8682
8683 /* Determine which groups of bits to calculate. */
8684 switch (r_type)
8685 {
8686 case R_ARM_LDC_PC_G0:
8687 case R_ARM_LDC_SB_G0:
8688 group = 0;
8689 break;
8690
8691 case R_ARM_LDC_PC_G1:
8692 case R_ARM_LDC_SB_G1:
8693 group = 1;
8694 break;
8695
8696 case R_ARM_LDC_PC_G2:
8697 case R_ARM_LDC_SB_G2:
8698 group = 2;
8699 break;
8700
8701 default:
8702 abort ();
8703 }
8704
8705 /* If REL, extract the addend from the insn. If RELA, it will
8706 have already been fetched for us. */
8707 if (globals->use_rel)
8708 {
8709 int negative = (insn & (1 << 23)) ? 1 : -1;
8710 signed_addend = negative * ((insn & 0xff) << 2);
8711 }
8712
8713 /* Compute the value (X) to go in the place. */
8714 if (r_type == R_ARM_LDC_PC_G0
8715 || r_type == R_ARM_LDC_PC_G1
8716 || r_type == R_ARM_LDC_PC_G2)
8717 /* PC relative. */
8718 signed_value = value - pc + signed_addend;
8719 else
8720 /* Section base relative. */
8721 signed_value = value - sb + signed_addend;
8722
8723 /* Calculate the value of the relevant G_{n-1} to obtain
8724 the residual at that stage. */
8725 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8726
8727 /* Check for overflow. (The absolute value to go in the place must be
8728 divisible by four and, after having been divided by four, must
8729 fit in eight bits.) */
8730 if ((residual & 0x3) != 0 || residual >= 0x400)
8731 {
8732 (*_bfd_error_handler)
8733 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8734 input_bfd, input_section,
8735 (long) rel->r_offset, abs (signed_value), howto->name);
8736 return bfd_reloc_overflow;
8737 }
8738
8739 /* Mask out the value and U bit. */
8740 insn &= 0xff7fff00;
8741
8742 /* Set the U bit if the value to go in the place is non-negative. */
8743 if (signed_value >= 0)
8744 insn |= 1 << 23;
8745
8746 /* Encode the offset. */
8747 insn |= residual >> 2;
8748
8749 bfd_put_32 (input_bfd, insn, hit_data);
8750 }
8751 return bfd_reloc_ok;
8752
8753 default:
8754 return bfd_reloc_notsupported;
8755 }
8756}
8757
8758/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8759static void
8760arm_add_to_rel (bfd * abfd,
8761 bfd_byte * address,
8762 reloc_howto_type * howto,
8763 bfd_signed_vma increment)
8764{
8765 bfd_signed_vma addend;
8766
8767 if (howto->type == R_ARM_THM_CALL
8768 || howto->type == R_ARM_THM_JUMP24)
8769 {
8770 int upper_insn, lower_insn;
8771 int upper, lower;
8772
8773 upper_insn = bfd_get_16 (abfd, address);
8774 lower_insn = bfd_get_16 (abfd, address + 2);
8775 upper = upper_insn & 0x7ff;
8776 lower = lower_insn & 0x7ff;
8777
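      /* UPPER and LOWER are the 11-bit immediate fields of the two
	 halves of a Thumb BL; UPPER supplies bits 12-22 and LOWER
	 bits 1-11 of the byte offset (bit 0 is always zero).  */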
8778 addend = (upper << 12) | (lower << 1);
8779 addend += increment;
8780 addend >>= 1;
8781
8782 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8783 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8784
8785 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8786 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8787 }
8788 else
8789 {
8790 bfd_vma contents;
8791
8792 contents = bfd_get_32 (abfd, address);
8793
8794 /* Get the (signed) value from the instruction. */
8795 addend = contents & howto->src_mask;
8796 if (addend & ((howto->src_mask + 1) >> 1))
8797 {
8798 bfd_signed_vma mask;
8799
8800 mask = -1;
8801 mask &= ~ howto->src_mask;
8802 addend |= mask;
8803 }
8804
8805      /* Add in the increment (which is a byte value).  */
8806 switch (howto->type)
8807 {
8808 default:
8809 addend += increment;
8810 break;
8811
8812 case R_ARM_PC24:
8813 case R_ARM_PLT32:
8814 case R_ARM_CALL:
8815 case R_ARM_JUMP24:
8816 addend <<= howto->size;
8817 addend += increment;
8818
8819 /* Should we check for overflow here ? */
8820
8821 /* Drop any undesired bits. */
8822 addend >>= howto->rightshift;
8823 break;
8824 }
8825
8826 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8827
8828 bfd_put_32 (abfd, contents, address);
8829 }
8830}
8831
8832#define IS_ARM_TLS_RELOC(R_TYPE) \
8833 ((R_TYPE) == R_ARM_TLS_GD32 \
8834 || (R_TYPE) == R_ARM_TLS_LDO32 \
8835 || (R_TYPE) == R_ARM_TLS_LDM32 \
8836 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8837 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8838 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8839 || (R_TYPE) == R_ARM_TLS_LE32 \
8840 || (R_TYPE) == R_ARM_TLS_IE32)
8841
8842/* Relocate an ARM ELF section. */
8843
8844static bfd_boolean
8845elf32_arm_relocate_section (bfd * output_bfd,
8846 struct bfd_link_info * info,
8847 bfd * input_bfd,
8848 asection * input_section,
8849 bfd_byte * contents,
8850 Elf_Internal_Rela * relocs,
8851 Elf_Internal_Sym * local_syms,
8852 asection ** local_sections)
8853{
8854 Elf_Internal_Shdr *symtab_hdr;
8855 struct elf_link_hash_entry **sym_hashes;
8856 Elf_Internal_Rela *rel;
8857 Elf_Internal_Rela *relend;
8858 const char *name;
8859 struct elf32_arm_link_hash_table * globals;
8860
8861 globals = elf32_arm_hash_table (info);
8862 if (globals == NULL)
8863 return FALSE;
8864
8865 symtab_hdr = & elf_symtab_hdr (input_bfd);
8866 sym_hashes = elf_sym_hashes (input_bfd);
8867
8868 rel = relocs;
8869 relend = relocs + input_section->reloc_count;
8870 for (; rel < relend; rel++)
8871 {
8872 int r_type;
8873 reloc_howto_type * howto;
8874 unsigned long r_symndx;
8875 Elf_Internal_Sym * sym;
8876 asection * sec;
8877 struct elf_link_hash_entry * h;
8878 bfd_vma relocation;
8879 bfd_reloc_status_type r;
8880 arelent bfd_reloc;
8881 char sym_type;
8882 bfd_boolean unresolved_reloc = FALSE;
8883 char *error_message = NULL;
8884
8885 r_symndx = ELF32_R_SYM (rel->r_info);
8886 r_type = ELF32_R_TYPE (rel->r_info);
8887 r_type = arm_real_reloc_type (globals, r_type);
8888
8889 if ( r_type == R_ARM_GNU_VTENTRY
8890 || r_type == R_ARM_GNU_VTINHERIT)
8891 continue;
8892
8893 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8894 howto = bfd_reloc.howto;
8895
8896 h = NULL;
8897 sym = NULL;
8898 sec = NULL;
8899
8900 if (r_symndx < symtab_hdr->sh_info)
8901 {
8902 sym = local_syms + r_symndx;
8903 sym_type = ELF32_ST_TYPE (sym->st_info);
8904 sec = local_sections[r_symndx];
8905
8906 /* An object file might have a reference to a local
8907 undefined symbol. This is a daft object file, but we
8908 should at least do something about it. V4BX & NONE
8909 relocations do not use the symbol and are explicitly
8910 allowed to use the undefined symbol, so allow those.
8911 Likewise for relocations against STN_UNDEF. */
8912 if (r_type != R_ARM_V4BX
8913 && r_type != R_ARM_NONE
8914 && r_symndx != STN_UNDEF
8915 && bfd_is_und_section (sec)
8916 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
8917 {
8918 if (!info->callbacks->undefined_symbol
8919 (info, bfd_elf_string_from_elf_section
8920 (input_bfd, symtab_hdr->sh_link, sym->st_name),
8921 input_bfd, input_section,
8922 rel->r_offset, TRUE))
8923 return FALSE;
8924 }
8925
8926 if (globals->use_rel)
8927 {
8928 relocation = (sec->output_section->vma
8929 + sec->output_offset
8930 + sym->st_value);
8931 if (!info->relocatable
8932 && (sec->flags & SEC_MERGE)
8933 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8934 {
8935 asection *msec;
8936 bfd_vma addend, value;
8937
8938 switch (r_type)
8939 {
8940 case R_ARM_MOVW_ABS_NC:
8941 case R_ARM_MOVT_ABS:
8942 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8943 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8944 addend = (addend ^ 0x8000) - 0x8000;
8945 break;
8946
8947 case R_ARM_THM_MOVW_ABS_NC:
8948 case R_ARM_THM_MOVT_ABS:
8949 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8950 << 16;
8951 value |= bfd_get_16 (input_bfd,
8952 contents + rel->r_offset + 2);
8953 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8954 | ((value & 0x04000000) >> 15);
8955 addend = (addend ^ 0x8000) - 0x8000;
8956 break;
8957
8958 default:
8959 if (howto->rightshift
8960 || (howto->src_mask & (howto->src_mask + 1)))
8961 {
8962 (*_bfd_error_handler)
8963 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8964 input_bfd, input_section,
8965 (long) rel->r_offset, howto->name);
8966 return FALSE;
8967 }
8968
8969 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8970
8971 /* Get the (signed) value from the instruction. */
8972 addend = value & howto->src_mask;
8973 if (addend & ((howto->src_mask + 1) >> 1))
8974 {
8975 bfd_signed_vma mask;
8976
8977 mask = -1;
8978 mask &= ~ howto->src_mask;
8979 addend |= mask;
8980 }
8981 break;
8982 }
8983
8984 msec = sec;
8985 addend =
8986 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8987 - relocation;
8988 addend += msec->output_section->vma + msec->output_offset;
8989
8990		  /* Cases here must match those in the preceding
8991 switch statement. */
8992 switch (r_type)
8993 {
8994 case R_ARM_MOVW_ABS_NC:
8995 case R_ARM_MOVT_ABS:
8996 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8997 | (addend & 0xfff);
8998 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8999 break;
9000
9001 case R_ARM_THM_MOVW_ABS_NC:
9002 case R_ARM_THM_MOVT_ABS:
9003 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
9004 | (addend & 0xff) | ((addend & 0x0800) << 15);
9005 bfd_put_16 (input_bfd, value >> 16,
9006 contents + rel->r_offset);
9007 bfd_put_16 (input_bfd, value,
9008 contents + rel->r_offset + 2);
9009 break;
9010
9011 default:
9012 value = (value & ~ howto->dst_mask)
9013 | (addend & howto->dst_mask);
9014 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
9015 break;
9016 }
9017 }
9018 }
9019 else
9020 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
9021 }
9022 else
9023 {
9024 bfd_boolean warned;
9025
9026 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
9027 r_symndx, symtab_hdr, sym_hashes,
9028 h, sec, relocation,
9029 unresolved_reloc, warned);
9030
9031 sym_type = h->type;
9032 }
9033
9034 if (sec != NULL && elf_discarded_section (sec))
9035 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
9036 rel, relend, howto, contents);
9037
9038 if (info->relocatable)
9039 {
9040 /* This is a relocatable link. We don't have to change
9041 anything, unless the reloc is against a section symbol,
9042 in which case we have to adjust according to where the
9043 section symbol winds up in the output section. */
9044 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
9045 {
9046 if (globals->use_rel)
9047 arm_add_to_rel (input_bfd, contents + rel->r_offset,
9048 howto, (bfd_signed_vma) sec->output_offset);
9049 else
9050 rel->r_addend += sec->output_offset;
9051 }
9052 continue;
9053 }
9054
9055 if (h != NULL)
9056 name = h->root.root.string;
9057 else
9058 {
9059 name = (bfd_elf_string_from_elf_section
9060 (input_bfd, symtab_hdr->sh_link, sym->st_name));
9061 if (name == NULL || *name == '\0')
9062 name = bfd_section_name (input_bfd, sec);
9063 }
9064
9065 if (r_symndx != STN_UNDEF
9066 && r_type != R_ARM_NONE
9067 && (h == NULL
9068 || h->root.type == bfd_link_hash_defined
9069 || h->root.type == bfd_link_hash_defweak)
9070 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
9071 {
9072 (*_bfd_error_handler)
9073 ((sym_type == STT_TLS
9074 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
9075 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
9076 input_bfd,
9077 input_section,
9078 (long) rel->r_offset,
9079 howto->name,
9080 name);
9081 }
9082
9083 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
9084 input_section, contents, rel,
9085 relocation, info, sec, name,
9086 (h ? ELF_ST_TYPE (h->type) :
9087 ELF_ST_TYPE (sym->st_info)), h,
9088 &unresolved_reloc, &error_message);
9089
9090 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
9091 because such sections are not SEC_ALLOC and thus ld.so will
9092 not process them. */
9093 if (unresolved_reloc
9094 && !((input_section->flags & SEC_DEBUGGING) != 0
9095 && h->def_dynamic))
9096 {
9097 (*_bfd_error_handler)
9098 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
9099 input_bfd,
9100 input_section,
9101 (long) rel->r_offset,
9102 howto->name,
9103 h->root.root.string);
9104 return FALSE;
9105 }
9106
9107 if (r != bfd_reloc_ok)
9108 {
9109 switch (r)
9110 {
9111 case bfd_reloc_overflow:
9112 /* If the overflowing reloc was to an undefined symbol,
9113 we have already printed one error message and there
9114 is no point complaining again. */
9115 if ((! h ||
9116 h->root.type != bfd_link_hash_undefined)
9117 && (!((*info->callbacks->reloc_overflow)
9118 (info, (h ? &h->root : NULL), name, howto->name,
9119 (bfd_vma) 0, input_bfd, input_section,
9120 rel->r_offset))))
9121 return FALSE;
9122 break;
9123
9124 case bfd_reloc_undefined:
9125 if (!((*info->callbacks->undefined_symbol)
9126 (info, name, input_bfd, input_section,
9127 rel->r_offset, TRUE)))
9128 return FALSE;
9129 break;
9130
9131 case bfd_reloc_outofrange:
9132 error_message = _("out of range");
9133 goto common_error;
9134
9135 case bfd_reloc_notsupported:
9136 error_message = _("unsupported relocation");
9137 goto common_error;
9138
9139 case bfd_reloc_dangerous:
9140 /* error_message should already be set. */
9141 goto common_error;
9142
9143 default:
9144 error_message = _("unknown error");
9145 /* Fall through. */
9146
9147 common_error:
9148 BFD_ASSERT (error_message != NULL);
9149 if (!((*info->callbacks->reloc_dangerous)
9150 (info, error_message, input_bfd, input_section,
9151 rel->r_offset)))
9152 return FALSE;
9153 break;
9154 }
9155 }
9156 }
9157
9158 return TRUE;
9159}
9160
9161/* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
9162 adds the edit to the start of the list. (The list must be built in order of
9163 ascending TINDEX: the function's callers are primarily responsible for
9164 maintaining that condition). */
9165
9166static void
9167add_unwind_table_edit (arm_unwind_table_edit **head,
9168 arm_unwind_table_edit **tail,
9169 arm_unwind_edit_type type,
9170 asection *linked_section,
9171 unsigned int tindex)
9172{
9173 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
9174 xmalloc (sizeof (arm_unwind_table_edit));
9175
9176 new_edit->type = type;
9177 new_edit->linked_section = linked_section;
9178 new_edit->index = tindex;
9179
9180 if (tindex > 0)
9181 {
9182 new_edit->next = NULL;
9183
9184 if (*tail)
9185 (*tail)->next = new_edit;
9186
9187 (*tail) = new_edit;
9188
9189 if (!*head)
9190 (*head) = new_edit;
9191 }
9192 else
9193 {
9194 new_edit->next = *head;
9195
9196 if (!*tail)
9197 *tail = new_edit;
9198
9199 *head = new_edit;
9200 }
9201}
9202
9203static _arm_elf_section_data *get_arm_elf_section_data (asection *);
9204
9205/* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
9206static void
9207adjust_exidx_size(asection *exidx_sec, int adjust)
9208{
9209 asection *out_sec;
9210
9211 if (!exidx_sec->rawsize)
9212 exidx_sec->rawsize = exidx_sec->size;
9213
9214 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9215 out_sec = exidx_sec->output_section;
9216 /* Adjust size of output section. */
9217  bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
9218}
9219
9220/* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9221static void
9222insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
9223{
9224 struct _arm_elf_section_data *exidx_arm_data;
9225
9226 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9227 add_unwind_table_edit (
9228 &exidx_arm_data->u.exidx.unwind_edit_list,
9229 &exidx_arm_data->u.exidx.unwind_edit_tail,
9230 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
9231
9232 adjust_exidx_size(exidx_sec, 8);
9233}
9234
9235/* Scan .ARM.exidx tables, and create a list describing edits which should be
9236 made to those tables, such that:
9237
9238 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9239 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9240 codes which have been inlined into the index).
9241
9242 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
9243
9244 The edits are applied when the tables are written
9245 (in elf32_arm_write_section).
9246*/
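/* Each .ARM.exidx table entry is two 32-bit words: the first is a PREL31
   offset to the function it describes, and the second is either the
   EXIDX_CANTUNWIND marker (1), an inline unwind description (bit 31 set),
   or a PREL31 offset to an entry in .ARM.extab.  The scan loop below
   classifies entries using only the second word.  */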
9247
9248bfd_boolean
9249elf32_arm_fix_exidx_coverage (asection **text_section_order,
9250 unsigned int num_text_sections,
9251 struct bfd_link_info *info,
9252 bfd_boolean merge_exidx_entries)
9253{
9254 bfd *inp;
9255 unsigned int last_second_word = 0, i;
9256 asection *last_exidx_sec = NULL;
9257 asection *last_text_sec = NULL;
9258 int last_unwind_type = -1;
9259
9260 /* Walk over all EXIDX sections, and create backlinks from the corresponding
9261 text sections. */
9262 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9263 {
9264 asection *sec;
9265
9266 for (sec = inp->sections; sec != NULL; sec = sec->next)
9267 {
9268 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9269 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9270
9271 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9272 continue;
9273
9274 if (elf_sec->linked_to)
9275 {
9276 Elf_Internal_Shdr *linked_hdr
9277 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9278 struct _arm_elf_section_data *linked_sec_arm_data
9279 = get_arm_elf_section_data (linked_hdr->bfd_section);
9280
9281 if (linked_sec_arm_data == NULL)
9282 continue;
9283
9284 /* Link this .ARM.exidx section back from the text section it
9285 describes. */
9286 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9287 }
9288 }
9289 }
9290
9291 /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
9292 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9293 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
9294
9295 for (i = 0; i < num_text_sections; i++)
9296 {
9297 asection *sec = text_section_order[i];
9298 asection *exidx_sec;
9299 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9300 struct _arm_elf_section_data *exidx_arm_data;
9301 bfd_byte *contents = NULL;
9302 int deleted_exidx_bytes = 0;
9303 bfd_vma j;
9304 arm_unwind_table_edit *unwind_edit_head = NULL;
9305 arm_unwind_table_edit *unwind_edit_tail = NULL;
9306 Elf_Internal_Shdr *hdr;
9307 bfd *ibfd;
9308
9309 if (arm_data == NULL)
9310 continue;
9311
9312 exidx_sec = arm_data->u.text.arm_exidx_sec;
9313 if (exidx_sec == NULL)
9314 {
9315 /* Section has no unwind data. */
9316 if (last_unwind_type == 0 || !last_exidx_sec)
9317 continue;
9318
9319 /* Ignore zero sized sections. */
9320 if (sec->size == 0)
9321 continue;
9322
9323 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9324 last_unwind_type = 0;
9325 continue;
9326 }
9327
9328 /* Skip /DISCARD/ sections. */
9329 if (bfd_is_abs_section (exidx_sec->output_section))
9330 continue;
9331
9332 hdr = &elf_section_data (exidx_sec)->this_hdr;
9333 if (hdr->sh_type != SHT_ARM_EXIDX)
9334 continue;
9335
9336 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9337 if (exidx_arm_data == NULL)
9338 continue;
9339
9340 ibfd = exidx_sec->owner;
9341
9342 if (hdr->contents != NULL)
9343 contents = hdr->contents;
9344 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9345 /* An error? */
9346 continue;
9347
9348 for (j = 0; j < hdr->sh_size; j += 8)
9349 {
9350 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9351 int unwind_type;
9352 int elide = 0;
9353
9354 /* An EXIDX_CANTUNWIND entry. */
9355 if (second_word == 1)
9356 {
9357 if (last_unwind_type == 0)
9358 elide = 1;
9359 unwind_type = 0;
9360 }
9361 /* Inlined unwinding data. Merge if equal to previous. */
9362 else if ((second_word & 0x80000000) != 0)
9363 {
9364 if (merge_exidx_entries
9365 && last_second_word == second_word && last_unwind_type == 1)
9366 elide = 1;
9367 unwind_type = 1;
9368 last_second_word = second_word;
9369 }
9370 /* Normal table entry. In theory we could merge these too,
9371 but duplicate entries are likely to be much less common. */
9372 else
9373 unwind_type = 2;
9374
9375 if (elide)
9376 {
9377 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9378 DELETE_EXIDX_ENTRY, NULL, j / 8);
9379
9380 deleted_exidx_bytes += 8;
9381 }
9382
9383 last_unwind_type = unwind_type;
9384 }
9385
9386 /* Free contents if we allocated it ourselves. */
9387 if (contents != hdr->contents)
9388 free (contents);
9389
9390 /* Record edits to be applied later (in elf32_arm_write_section). */
9391 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9392 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9393
9394 if (deleted_exidx_bytes > 0)
9395 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
9396
9397 last_exidx_sec = exidx_sec;
9398 last_text_sec = sec;
9399 }
9400
9401 /* Add terminating CANTUNWIND entry. */
9402 if (last_exidx_sec && last_unwind_type != 0)
9403 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9404
9405 return TRUE;
9406}
9407
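/* Write the glue section called NAME from IBFD into the output file.
   elf32_arm_write_section is given a chance to process (and possibly take
   care of writing) the contents first; sections which are missing or
   excluded from the link are silently skipped.  */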
9408static bfd_boolean
9409elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9410 bfd *ibfd, const char *name)
9411{
9412 asection *sec, *osec;
9413
9414 sec = bfd_get_section_by_name (ibfd, name);
9415 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9416 return TRUE;
9417
9418 osec = sec->output_section;
9419 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9420 return TRUE;
9421
9422 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9423 sec->output_offset, sec->size))
9424 return FALSE;
9425
9426 return TRUE;
9427}
9428
9429static bfd_boolean
9430elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9431{
9432 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9433 asection *sec, *osec;
9434
9435 if (globals == NULL)
9436 return FALSE;
9437
9438 /* Invoke the regular ELF backend linker to do all the work. */
9439 if (!bfd_elf_final_link (abfd, info))
9440 return FALSE;
9441
9442 /* Process stub sections (e.g. BE8 encoding, ...).  */
9443 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
9444 int i;
9445 for (i = 0; i < htab->top_id; i++)
9446 {
9447 sec = htab->stub_group[i].stub_sec;
9448 /* Only process it once, in its link_sec slot. */
9449 if (sec && i == htab->stub_group[i].link_sec->id)
9450 {
9451 osec = sec->output_section;
9452 elf32_arm_write_section (abfd, info, sec, sec->contents);
9453 if (! bfd_set_section_contents (abfd, osec, sec->contents,
9454 sec->output_offset, sec->size))
9455 return FALSE;
9456 }
9457 }
9458
9459 /* Write out any glue sections now that we have created all the
9460 stubs. */
9461 if (globals->bfd_of_glue_owner != NULL)
9462 {
9463 if (! elf32_arm_output_glue_section (info, abfd,
9464 globals->bfd_of_glue_owner,
9465 ARM2THUMB_GLUE_SECTION_NAME))
9466 return FALSE;
9467
9468 if (! elf32_arm_output_glue_section (info, abfd,
9469 globals->bfd_of_glue_owner,
9470 THUMB2ARM_GLUE_SECTION_NAME))
9471 return FALSE;
9472
9473 if (! elf32_arm_output_glue_section (info, abfd,
9474 globals->bfd_of_glue_owner,
9475 VFP11_ERRATUM_VENEER_SECTION_NAME))
9476 return FALSE;
9477
9478 if (! elf32_arm_output_glue_section (info, abfd,
9479 globals->bfd_of_glue_owner,
9480 ARM_BX_GLUE_SECTION_NAME))
9481 return FALSE;
9482 }
9483
9484 return TRUE;
9485}
9486
9487/* Set the right machine number. */
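/* The machine is taken from the ARM note section when one is present;
   failing that, the Maverick floating-point flag in e_flags selects the
   ep9312, and otherwise the machine is left as the generic ARM default.  */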
9488
9489static bfd_boolean
9490elf32_arm_object_p (bfd *abfd)
9491{
9492 unsigned int mach;
9493
9494 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9495
9496 if (mach != bfd_mach_arm_unknown)
9497 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9498
9499 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9500 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9501
9502 else
9503 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9504
9505 return TRUE;
9506}
9507
9508/* Function to keep ARM specific flags in the ELF header. */
9509
9510static bfd_boolean
9511elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9512{
9513 if (elf_flags_init (abfd)
9514 && elf_elfheader (abfd)->e_flags != flags)
9515 {
9516 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9517 {
9518 if (flags & EF_ARM_INTERWORK)
9519 (*_bfd_error_handler)
9520 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9521 abfd);
9522 else
9523 _bfd_error_handler
9524 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9525 abfd);
9526 }
9527 }
9528 else
9529 {
9530 elf_elfheader (abfd)->e_flags = flags;
9531 elf_flags_init (abfd) = TRUE;
9532 }
9533
9534 return TRUE;
9535}
9536
9537/* Copy backend specific data from one object module to another. */
9538
9539static bfd_boolean
9540elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9541{
9542 flagword in_flags;
9543 flagword out_flags;
9544
9545 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9546 return TRUE;
9547
9548 in_flags = elf_elfheader (ibfd)->e_flags;
9549 out_flags = elf_elfheader (obfd)->e_flags;
9550
9551 if (elf_flags_init (obfd)
9552 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9553 && in_flags != out_flags)
9554 {
9555 /* Cannot mix APCS26 and APCS32 code. */
9556 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9557 return FALSE;
9558
9559 /* Cannot mix float APCS and non-float APCS code. */
9560 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9561 return FALSE;
9562
9563 /* If the src and dest have different interworking flags
9564 then turn off the interworking bit. */
9565 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9566 {
9567 if (out_flags & EF_ARM_INTERWORK)
9568 _bfd_error_handler
9569 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9570 obfd, ibfd);
9571
9572 in_flags &= ~EF_ARM_INTERWORK;
9573 }
9574
9575 /* Likewise for PIC, though don't warn for this case. */
9576 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9577 in_flags &= ~EF_ARM_PIC;
9578 }
9579
9580 elf_elfheader (obfd)->e_flags = in_flags;
9581 elf_flags_init (obfd) = TRUE;
9582
9583 /* Also copy the EI_OSABI field. */
9584 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9585 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9586
9587 /* Copy object attributes. */
9588 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9589
9590 return TRUE;
9591}
9592
9593/* Values for Tag_ABI_PCS_R9_use. */
9594enum
9595{
9596 AEABI_R9_V6,
9597 AEABI_R9_SB,
9598 AEABI_R9_TLS,
9599 AEABI_R9_unused
9600};
9601
9602/* Values for Tag_ABI_PCS_RW_data. */
9603enum
9604{
9605 AEABI_PCS_RW_data_absolute,
9606 AEABI_PCS_RW_data_PCrel,
9607 AEABI_PCS_RW_data_SBrel,
9608 AEABI_PCS_RW_data_unused
9609};
9610
9611/* Values for Tag_ABI_enum_size. */
9612enum
9613{
9614 AEABI_enum_unused,
9615 AEABI_enum_short,
9616 AEABI_enum_wide,
9617 AEABI_enum_forced_wide
9618};
9619
9620/* Determine whether an object attribute tag takes an integer, a
9621 string or both. */
9622
9623static int
9624elf32_arm_obj_attrs_arg_type (int tag)
9625{
9626 if (tag == Tag_compatibility)
9627 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9628 else if (tag == Tag_nodefaults)
9629 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9630 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9631 return ATTR_TYPE_FLAG_STR_VAL;
9632 else if (tag < 32)
9633 return ATTR_TYPE_FLAG_INT_VAL;
9634 else
9635 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9636}
9637
9638/* The ABI defines that Tag_conformance should be emitted first, and that
9639 Tag_nodefaults should be second (if either is defined). This sets those
9640 two positions, and bumps up the position of all the remaining tags to
9641 compensate. */
9642static int
9643elf32_arm_obj_attrs_order (int num)
9644{
9645 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
9646 return Tag_conformance;
9647 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
9648 return Tag_nodefaults;
9649 if ((num - 2) < Tag_nodefaults)
9650 return num - 2;
9651 if ((num - 1) < Tag_conformance)
9652 return num - 1;
9653 return num;
9654}
9655
9656/* Attribute numbers >=64 (mod 128) can be safely ignored. */
9657static bfd_boolean
9658elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
9659{
9660 if ((tag & 127) < 64)
9661 {
9662 _bfd_error_handler
9663 (_("%B: Unknown mandatory EABI object attribute %d"),
9664 abfd, tag);
9665 bfd_set_error (bfd_error_bad_value);
9666 return FALSE;
9667 }
9668 else
9669 {
9670 _bfd_error_handler
9671 (_("Warning: %B: Unknown EABI object attribute %d"),
9672 abfd, tag);
9673 return TRUE;
9674 }
9675}
9676
9677/* Read the architecture from the Tag_also_compatible_with attribute, if any.
9678 Returns -1 if no architecture could be read. */
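/* The attribute's value is the ABI-defined encoding of a (tag, value) pair:
   two ULEB128 bytes followed by a NUL terminator.  The checks below accept
   it only when the tag is Tag_CPU_arch and the value fits in a single
   ULEB128 byte.  */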
9679
9680static int
9681get_secondary_compatible_arch (bfd *abfd)
9682{
9683 obj_attribute *attr =
9684 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9685
9686 /* Note: the tag and its argument below are uleb128 values, though
9687 currently-defined values fit in one byte for each. */
9688 if (attr->s
9689 && attr->s[0] == Tag_CPU_arch
9690 && (attr->s[1] & 128) != 128
9691 && attr->s[2] == 0)
9692 return attr->s[1];
9693
9694 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9695 return -1;
9696}
9697
9698/* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9699 The tag is removed if ARCH is -1. */
9700
9701static void
9702set_secondary_compatible_arch (bfd *abfd, int arch)
9703{
9704 obj_attribute *attr =
9705 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9706
9707 if (arch == -1)
9708 {
9709 attr->s = NULL;
9710 return;
9711 }
9712
9713 /* Note: the tag and its argument below are uleb128 values, though
9714 currently-defined values fit in one byte for each. */
9715 if (!attr->s)
9716 attr->s = (char *) bfd_alloc (abfd, 3);
9717 attr->s[0] = Tag_CPU_arch;
9718 attr->s[1] = arch;
9719 attr->s[2] = '\0';
9720}
9721
9722/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9723 into account. */
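/* The merge is table-driven: for combinations above TAG_CPU_ARCH_V6KZ the
   array for the higher of the two tags (comb[tagh - T(V6T2)]) is indexed by
   the lower tag, and an entry of -1 marks an impossible combination.  */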
9724
9725static int
9726tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9727 int newtag, int secondary_compat)
9728{
9729#define T(X) TAG_CPU_ARCH_##X
9730 int tagl, tagh, result;
9731 const int v6t2[] =
9732 {
9733 T(V6T2), /* PRE_V4. */
9734 T(V6T2), /* V4. */
9735 T(V6T2), /* V4T. */
9736 T(V6T2), /* V5T. */
9737 T(V6T2), /* V5TE. */
9738 T(V6T2), /* V5TEJ. */
9739 T(V6T2), /* V6. */
9740 T(V7), /* V6KZ. */
9741 T(V6T2) /* V6T2. */
9742 };
9743 const int v6k[] =
9744 {
9745 T(V6K), /* PRE_V4. */
9746 T(V6K), /* V4. */
9747 T(V6K), /* V4T. */
9748 T(V6K), /* V5T. */
9749 T(V6K), /* V5TE. */
9750 T(V6K), /* V5TEJ. */
9751 T(V6K), /* V6. */
9752 T(V6KZ), /* V6KZ. */
9753 T(V7), /* V6T2. */
9754 T(V6K) /* V6K. */
9755 };
9756 const int v7[] =
9757 {
9758 T(V7), /* PRE_V4. */
9759 T(V7), /* V4. */
9760 T(V7), /* V4T. */
9761 T(V7), /* V5T. */
9762 T(V7), /* V5TE. */
9763 T(V7), /* V5TEJ. */
9764 T(V7), /* V6. */
9765 T(V7), /* V6KZ. */
9766 T(V7), /* V6T2. */
9767 T(V7), /* V6K. */
9768 T(V7) /* V7. */
9769 };
9770 const int v6_m[] =
9771 {
9772 -1, /* PRE_V4. */
9773 -1, /* V4. */
9774 T(V6K), /* V4T. */
9775 T(V6K), /* V5T. */
9776 T(V6K), /* V5TE. */
9777 T(V6K), /* V5TEJ. */
9778 T(V6K), /* V6. */
9779 T(V6KZ), /* V6KZ. */
9780 T(V7), /* V6T2. */
9781 T(V6K), /* V6K. */
9782 T(V7), /* V7. */
9783 T(V6_M) /* V6_M. */
9784 };
9785 const int v6s_m[] =
9786 {
9787 -1, /* PRE_V4. */
9788 -1, /* V4. */
9789 T(V6K), /* V4T. */
9790 T(V6K), /* V5T. */
9791 T(V6K), /* V5TE. */
9792 T(V6K), /* V5TEJ. */
9793 T(V6K), /* V6. */
9794 T(V6KZ), /* V6KZ. */
9795 T(V7), /* V6T2. */
9796 T(V6K), /* V6K. */
9797 T(V7), /* V7. */
9798 T(V6S_M), /* V6_M. */
9799 T(V6S_M) /* V6S_M. */
9800 };
9801 const int v7e_m[] =
9802 {
9803 -1, /* PRE_V4. */
9804 -1, /* V4. */
9805 T(V7E_M), /* V4T. */
9806 T(V7E_M), /* V5T. */
9807 T(V7E_M), /* V5TE. */
9808 T(V7E_M), /* V5TEJ. */
9809 T(V7E_M), /* V6. */
9810 T(V7E_M), /* V6KZ. */
9811 T(V7E_M), /* V6T2. */
9812 T(V7E_M), /* V6K. */
9813 T(V7E_M), /* V7. */
9814 T(V7E_M), /* V6_M. */
9815 T(V7E_M), /* V6S_M. */
9816 T(V7E_M) /* V7E_M. */
9817 };
9818 const int v4t_plus_v6_m[] =
9819 {
9820 -1, /* PRE_V4. */
9821 -1, /* V4. */
9822 T(V4T), /* V4T. */
9823 T(V5T), /* V5T. */
9824 T(V5TE), /* V5TE. */
9825 T(V5TEJ), /* V5TEJ. */
9826 T(V6), /* V6. */
9827 T(V6KZ), /* V6KZ. */
9828 T(V6T2), /* V6T2. */
9829 T(V6K), /* V6K. */
9830 T(V7), /* V7. */
9831 T(V6_M), /* V6_M. */
9832 T(V6S_M), /* V6S_M. */
9833 T(V7E_M), /* V7E_M. */
9834 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9835 };
9836 const int *comb[] =
9837 {
9838 v6t2,
9839 v6k,
9840 v7,
9841 v6_m,
9842 v6s_m,
9843 v7e_m,
9844 /* Pseudo-architecture. */
9845 v4t_plus_v6_m
9846 };
9847
9848 /* Check we've not got a higher architecture than we know about. */
9849
9850 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
9851 {
9852 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9853 return -1;
9854 }
9855
9856 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9857
9858 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9859 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9860 oldtag = T(V4T_PLUS_V6_M);
9861
9862 /* And override the new tag if we have a Tag_also_compatible_with on the
9863 input. */
9864
9865 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9866 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9867 newtag = T(V4T_PLUS_V6_M);
9868
9869 tagl = (oldtag < newtag) ? oldtag : newtag;
9870 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9871
9872 /* Architectures before V6KZ add features monotonically. */
9873 if (tagh <= TAG_CPU_ARCH_V6KZ)
9874 return result;
9875
9876 result = comb[tagh - T(V6T2)][tagl];
9877
9878 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9879 as the canonical version. */
9880 if (result == T(V4T_PLUS_V6_M))
9881 {
9882 result = T(V4T);
9883 *secondary_compat_out = T(V6_M);
9884 }
9885 else
9886 *secondary_compat_out = -1;
9887
9888 if (result == -1)
9889 {
9890 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9891 ibfd, oldtag, newtag);
9892 return -1;
9893 }
9894
9895 return result;
9896#undef T
9897}
9898
9899/* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9900 are conflicting attributes. */
9901
9902static bfd_boolean
9903elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9904{
9905 obj_attribute *in_attr;
9906 obj_attribute *out_attr;
9907 /* Some tags have 0 = don't care, 1 = strong requirement,
9908 2 = weak requirement. */
9909 static const int order_021[3] = {0, 2, 1};
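  /* order_021[v] is the precedence rank of value v: a strong requirement (1)
     outranks a weak one (2), which in turn outranks "don't care" (0).  */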
9910 int i;
9911 bfd_boolean result = TRUE;
9912
9913 /* Skip the linker stubs file. This preserves previous behavior
9914 of accepting unknown attributes in the first input file - but
9915 is that a bug? */
9916 if (ibfd->flags & BFD_LINKER_CREATED)
9917 return TRUE;
9918
9919 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9920 {
9921 /* This is the first object. Copy the attributes. */
9922 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9923
9924 out_attr = elf_known_obj_attributes_proc (obfd);
9925
9926 /* Use the Tag_null value to indicate the attributes have been
9927 initialized. */
9928 out_attr[0].i = 1;
9929
9930 /* We do not output objects with Tag_MPextension_use_legacy - we move
9931 the attribute's value to Tag_MPextension_use. */
9932 if (out_attr[Tag_MPextension_use_legacy].i != 0)
9933 {
9934 if (out_attr[Tag_MPextension_use].i != 0
9935 && out_attr[Tag_MPextension_use_legacy].i
9936 != out_attr[Tag_MPextension_use].i)
9937 {
9938 _bfd_error_handler
9939 (_("Error: %B has both the current and legacy "
9940 "Tag_MPextension_use attributes"), ibfd);
9941 result = FALSE;
9942 }
9943
9944 out_attr[Tag_MPextension_use] =
9945 out_attr[Tag_MPextension_use_legacy];
9946 out_attr[Tag_MPextension_use_legacy].type = 0;
9947 out_attr[Tag_MPextension_use_legacy].i = 0;
9948 }
9949
9950 return result;
9951 }
9952
9953 in_attr = elf_known_obj_attributes_proc (ibfd);
9954 out_attr = elf_known_obj_attributes_proc (obfd);
9955 /* This must happen before Tag_ABI_FP_number_model is merged, since the check below relies on the not-yet-merged values to tell whether either object uses floating point at all.  */
9956 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9957 {
9958 /* Ignore mismatches if the object doesn't use floating point. */
9959 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9960 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9961 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9962 {
9963 _bfd_error_handler
9964 (_("error: %B uses VFP register arguments, %B does not"),
9965 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
9966 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
9967 result = FALSE;
9968 }
9969 }
9970
9971 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9972 {
9973 /* Merge this attribute with existing attributes. */
9974 switch (i)
9975 {
9976 case Tag_CPU_raw_name:
9977 case Tag_CPU_name:
9978 /* These are merged after Tag_CPU_arch. */
9979 break;
9980
9981 case Tag_ABI_optimization_goals:
9982 case Tag_ABI_FP_optimization_goals:
9983 /* Use the first value seen. */
9984 break;
9985
9986 case Tag_CPU_arch:
9987 {
9988 int secondary_compat = -1, secondary_compat_out = -1;
9989 unsigned int saved_out_attr = out_attr[i].i;
9990 static const char *name_table[] = {
9991 /* These aren't real CPU names, but we can't guess
9992 that from the architecture version alone. */
9993 "Pre v4",
9994 "ARM v4",
9995 "ARM v4T",
9996 "ARM v5T",
9997 "ARM v5TE",
9998 "ARM v5TEJ",
9999 "ARM v6",
10000 "ARM v6KZ",
10001 "ARM v6T2",
10002 "ARM v6K",
10003 "ARM v7",
10004 "ARM v6-M",
10005 "ARM v6S-M"
10006 };
10007
10008 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
10009 secondary_compat = get_secondary_compatible_arch (ibfd);
10010 secondary_compat_out = get_secondary_compatible_arch (obfd);
10011 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
10012 &secondary_compat_out,
10013 in_attr[i].i,
10014 secondary_compat);
10015 set_secondary_compatible_arch (obfd, secondary_compat_out);
10016
10017 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
10018 if (out_attr[i].i == saved_out_attr)
10019 ; /* Leave the names alone. */
10020 else if (out_attr[i].i == in_attr[i].i)
10021 {
10022 /* The output architecture has been changed to match the
10023 input architecture. Use the input names. */
10024 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
10025 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
10026 : NULL;
10027 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
10028 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
10029 : NULL;
10030 }
10031 else
10032 {
10033 out_attr[Tag_CPU_name].s = NULL;
10034 out_attr[Tag_CPU_raw_name].s = NULL;
10035 }
10036
10037 /* If we still don't have a value for Tag_CPU_name,
10038 make one up now. Tag_CPU_raw_name remains blank. */
10039 if (out_attr[Tag_CPU_name].s == NULL
10040 && out_attr[i].i < ARRAY_SIZE (name_table))
10041 out_attr[Tag_CPU_name].s =
10042 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
10043 }
10044 break;
10045
10046 case Tag_ARM_ISA_use:
10047 case Tag_THUMB_ISA_use:
10048 case Tag_WMMX_arch:
10049 case Tag_Advanced_SIMD_arch:
10050 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
10051 case Tag_ABI_FP_rounding:
10052 case Tag_ABI_FP_exceptions:
10053 case Tag_ABI_FP_user_exceptions:
10054 case Tag_ABI_FP_number_model:
10055 case Tag_FP_HP_extension:
10056 case Tag_CPU_unaligned_access:
10057 case Tag_T2EE_use:
10058 case Tag_MPextension_use:
10059 /* Use the largest value specified. */
10060 if (in_attr[i].i > out_attr[i].i)
10061 out_attr[i].i = in_attr[i].i;
10062 break;
10063
10064 case Tag_ABI_align_preserved:
10065 case Tag_ABI_PCS_RO_data:
10066 /* Use the smallest value specified. */
10067 if (in_attr[i].i < out_attr[i].i)
10068 out_attr[i].i = in_attr[i].i;
10069 break;
10070
10071 case Tag_ABI_align_needed:
10072 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
10073 && (in_attr[Tag_ABI_align_preserved].i == 0
10074 || out_attr[Tag_ABI_align_preserved].i == 0))
10075 {
10076 /* This error message should be enabled once all non-conformant
10077 binaries in the toolchain have had the attributes set
10078 properly.
10079 _bfd_error_handler
10080 (_("error: %B: 8-byte data alignment conflicts with %B"),
10081 obfd, ibfd);
10082 result = FALSE; */
10083 }
10084 /* Fall through. */
10085 case Tag_ABI_FP_denormal:
10086 case Tag_ABI_PCS_GOT_use:
10087 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
10088 value if greater than 2 (for future-proofing). */
10089 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
10090 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
10091 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
10092 out_attr[i].i = in_attr[i].i;
10093 break;
10094
10095 case Tag_Virtualization_use:
10096 /* The virtualization tag effectively stores two bits of
10097 information: the intended use of TrustZone (in bit 0), and the
10098 intended use of Virtualization (in bit 1). */
10099 if (out_attr[i].i == 0)
10100 out_attr[i].i = in_attr[i].i;
10101 else if (in_attr[i].i != 0
10102 && in_attr[i].i != out_attr[i].i)
10103 {
10104 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
10105 out_attr[i].i = 3;
10106 else
10107 {
10108 _bfd_error_handler
10109 (_("error: %B: unable to merge virtualization attributes "
10110 "with %B"),
10111 obfd, ibfd);
10112 result = FALSE;
10113 }
10114 }
10115 break;
10116
10117 case Tag_CPU_arch_profile:
10118 if (out_attr[i].i != in_attr[i].i)
10119 {
10120 /* 0 will merge with anything.
10121 'A' and 'S' merge to 'A'.
10122 'R' and 'S' merge to 'R'.
10123 'M' and 'A|R|S' is an error. */
10124 if (out_attr[i].i == 0
10125 || (out_attr[i].i == 'S'
10126 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
10127 out_attr[i].i = in_attr[i].i;
10128 else if (in_attr[i].i == 0
10129 || (in_attr[i].i == 'S'
10130 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
10131 ; /* Do nothing. */
10132 else
10133 {
10134 _bfd_error_handler
10135 (_("error: %B: Conflicting architecture profiles %c/%c"),
10136 ibfd,
10137 in_attr[i].i ? in_attr[i].i : '0',
10138 out_attr[i].i ? out_attr[i].i : '0');
10139 result = FALSE;
10140 }
10141 }
10142 break;
10143 case Tag_FP_arch:
10144 {
10145 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since the
10146 meaning of Tag_ABI_HardFP_use when it is 0 depends on Tag_FP_arch:
10147 it means absence of FP hardware if Tag_FP_arch is also zero,
10148 otherwise it effectively means SP + DP. */
10149
10150 static const struct
10151 {
10152 int ver;
10153 int regs;
10154 } vfp_versions[7] =
10155 {
10156 {0, 0},
10157 {1, 16},
10158 {2, 16},
10159 {3, 32},
10160 {3, 16},
10161 {4, 32},
10162 {4, 16}
10163 };
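	    /* The table above is indexed by the Tag_FP_arch value; per the
	       ABI encoding, 1/2 are VFPv1/VFPv2 (16 D registers), 3/4 are
	       VFPv3 with 32 and 16 registers, and 5/6 are VFPv4 with 32 and
	       16 registers.  */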
10164 int ver;
10165 int regs;
10166 int newval;
10167
10168 /* If the output has no requirement about FP hardware,
10169 follow the requirement of the input. */
10170 if (out_attr[i].i == 0)
10171 {
10172 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
10173 out_attr[i].i = in_attr[i].i;
10174 out_attr[Tag_ABI_HardFP_use].i
10175 = in_attr[Tag_ABI_HardFP_use].i;
10176 break;
10177 }
10178 /* If the input has no requirement about FP hardware, do
10179 nothing. */
10180 else if (in_attr[i].i == 0)
10181 {
10182 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
10183 break;
10184 }
10185
10186 /* Both the input and the output have nonzero Tag_FP_arch.
10187 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
10188
10189 /* If both the input and the output have zero Tag_ABI_HardFP_use,
10190 do nothing. */
10191 if (in_attr[Tag_ABI_HardFP_use].i == 0
10192 && out_attr[Tag_ABI_HardFP_use].i == 0)
10193 ;
10194 /* If the input and the output have different Tag_ABI_HardFP_use,
10195 the combination of them is 3 (SP & DP). */
10196 else if (in_attr[Tag_ABI_HardFP_use].i
10197 != out_attr[Tag_ABI_HardFP_use].i)
10198 out_attr[Tag_ABI_HardFP_use].i = 3;
10199
10200 /* Now we can handle Tag_FP_arch. */
10201
10202 /* Values greater than 6 aren't defined, so just pick the
10203 biggest.  */
10204 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
10205 {
10206 out_attr[i] = in_attr[i];
10207 break;
10208 }
10209 /* The output uses the superset of input features
10210 (ISA version) and registers. */
10211 ver = vfp_versions[in_attr[i].i].ver;
10212 if (ver < vfp_versions[out_attr[i].i].ver)
10213 ver = vfp_versions[out_attr[i].i].ver;
10214 regs = vfp_versions[in_attr[i].i].regs;
10215 if (regs < vfp_versions[out_attr[i].i].regs)
10216 regs = vfp_versions[out_attr[i].i].regs;
10217 /* This assumes that all possible supersets are also valid
10218 options. */
10219 for (newval = 6; newval > 0; newval--)
10220 {
10221 if (regs == vfp_versions[newval].regs
10222 && ver == vfp_versions[newval].ver)
10223 break;
10224 }
10225 out_attr[i].i = newval;
10226 }
10227 break;
10228 case Tag_PCS_config:
10229 if (out_attr[i].i == 0)
10230 out_attr[i].i = in_attr[i].i;
10231 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
10232 {
10233 /* It's sometimes ok to mix different configs, so this is only
10234 a warning. */
10235 _bfd_error_handler
10236 (_("Warning: %B: Conflicting platform configuration"), ibfd);
10237 }
10238 break;
10239 case Tag_ABI_PCS_R9_use:
10240 if (in_attr[i].i != out_attr[i].i
10241 && out_attr[i].i != AEABI_R9_unused
10242 && in_attr[i].i != AEABI_R9_unused)
10243 {
10244 _bfd_error_handler
10245 (_("error: %B: Conflicting use of R9"), ibfd);
10246 result = FALSE;
10247 }
10248 if (out_attr[i].i == AEABI_R9_unused)
10249 out_attr[i].i = in_attr[i].i;
10250 break;
10251 case Tag_ABI_PCS_RW_data:
10252 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
10253 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
10254 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
10255 {
10256 _bfd_error_handler
10257 (_("error: %B: SB relative addressing conflicts with use of R9"),
10258 ibfd);
10259 result = FALSE;
10260 }
10261 /* Use the smallest value specified. */
10262 if (in_attr[i].i < out_attr[i].i)
10263 out_attr[i].i = in_attr[i].i;
10264 break;
10265 case Tag_ABI_PCS_wchar_t:
10266 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
10267 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
10268 {
10269 _bfd_error_handler
10270 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
10271 ibfd, in_attr[i].i, out_attr[i].i);
10272 }
10273 else if (in_attr[i].i && !out_attr[i].i)
10274 out_attr[i].i = in_attr[i].i;
10275 break;
10276 case Tag_ABI_enum_size:
10277 if (in_attr[i].i != AEABI_enum_unused)
10278 {
10279 if (out_attr[i].i == AEABI_enum_unused
10280 || out_attr[i].i == AEABI_enum_forced_wide)
10281 {
10282 /* The existing object is compatible with anything.
10283 Use whatever requirements the new object has. */
10284 out_attr[i].i = in_attr[i].i;
10285 }
10286 else if (in_attr[i].i != AEABI_enum_forced_wide
10287 && out_attr[i].i != in_attr[i].i
10288 && !elf_arm_tdata (obfd)->no_enum_size_warning)
10289 {
10290 static const char *aeabi_enum_names[] =
10291 { "", "variable-size", "32-bit", "" };
10292 const char *in_name =
10293 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10294 ? aeabi_enum_names[in_attr[i].i]
10295 : "<unknown>";
10296 const char *out_name =
10297 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10298 ? aeabi_enum_names[out_attr[i].i]
10299 : "<unknown>";
10300 _bfd_error_handler
10301 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
10302 ibfd, in_name, out_name);
10303 }
10304 }
10305 break;
10306 case Tag_ABI_VFP_args:
10307 /* Already done.  */
10308 break;
10309 case Tag_ABI_WMMX_args:
10310 if (in_attr[i].i != out_attr[i].i)
10311 {
10312 _bfd_error_handler
10313 (_("error: %B uses iWMMXt register arguments, %B does not"),
10314 ibfd, obfd);
10315 result = FALSE;
10316 }
10317 break;
10318 case Tag_compatibility:
10319 /* Merged in target-independent code. */
10320 break;
10321 case Tag_ABI_HardFP_use:
10322 /* This is handled along with Tag_FP_arch. */
10323 break;
10324 case Tag_ABI_FP_16bit_format:
10325 if (in_attr[i].i != 0 && out_attr[i].i != 0)
10326 {
10327 if (in_attr[i].i != out_attr[i].i)
10328 {
10329 _bfd_error_handler
10330 (_("error: fp16 format mismatch between %B and %B"),
10331 ibfd, obfd);
10332 result = FALSE;
10333 }
10334 }
10335 if (in_attr[i].i != 0)
10336 out_attr[i].i = in_attr[i].i;
10337 break;
10338
10339 case Tag_DIV_use:
10340 /* This tag is set to zero if we can use UDIV and SDIV in Thumb
10341 mode on a v7-M or v7-R CPU; to one if we can not use UDIV or
10342 SDIV at all; and to two if we can use UDIV or SDIV on a v7-A
10343 CPU. We will merge as follows: If the input attribute's value
10344 is one then the output attribute's value remains unchanged. If
10345 the input attribute's value is zero or two then if the output
10346 attribute's value is one the output value is set to the input
10347 value, otherwise the output value must be the same as the
10348 input's.  */
10349 if (in_attr[i].i != 1 && out_attr[i].i != 1)
10350 {
10351 if (in_attr[i].i != out_attr[i].i)
10352 {
10353 _bfd_error_handler
10354 (_("DIV usage mismatch between %B and %B"),
10355 ibfd, obfd);
10356 result = FALSE;
10357 }
10358 }
10359
10360 if (in_attr[i].i != 1)
10361 out_attr[i].i = in_attr[i].i;
10362
10363 break;
10364
10365 case Tag_MPextension_use_legacy:
10366 /* We don't output objects with Tag_MPextension_use_legacy - we
10367 move the value to Tag_MPextension_use. */
10368 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
10369 {
10370 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
10371 {
10372 _bfd_error_handler
10373 (_("%B has both the current and legacy "
10374 "Tag_MPextension_use attributes"),
10375 ibfd);
10376 result = FALSE;
10377 }
10378 }
10379
10380 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
10381 out_attr[Tag_MPextension_use] = in_attr[i];
10382
10383 break;
10384
10385 case Tag_nodefaults:
10386 /* This tag is set if it exists, but the value is unused (and is
10387 typically zero). We don't actually need to do anything here -
10388 the merge happens automatically when the type flags are merged
10389 below. */
10390 break;
10391 case Tag_also_compatible_with:
10392 /* Already done in Tag_CPU_arch. */
10393 break;
10394 case Tag_conformance:
10395 /* Keep the attribute if it matches. Throw it away otherwise.
10396 No attribute means no claim to conform. */
10397 if (!in_attr[i].s || !out_attr[i].s
10398 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
10399 out_attr[i].s = NULL;
10400 break;
10401
10402 default:
10403 result
10404 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
10405 }
10406
10407 /* If out_attr was copied from in_attr then it won't have a type yet. */
10408 if (in_attr[i].type && !out_attr[i].type)
10409 out_attr[i].type = in_attr[i].type;
10410 }
10411
10412 /* Merge Tag_compatibility attributes and any common GNU ones. */
10413 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
10414 return FALSE;
10415
10416 /* Check for any attributes not known on ARM. */
10417 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
10418
10419 return result;
10420}
10421
10422
10423/* Return TRUE if the two EABI versions are compatible.  */
10424
10425static bfd_boolean
10426elf32_arm_versions_compatible (unsigned iver, unsigned over)
10427{
10428 /* v4 and v5 are the same spec before and after it was released,
10429 so allow mixing them. */
10430 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10431 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10432 return TRUE;
10433
10434 return (iver == over);
10435}
10436
10437/* Merge backend specific data from an object file to the output
10438 object file when linking. */
10439
10440static bfd_boolean
10441elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
10442
10443/* Display the flags field. */
10444
10445static bfd_boolean
10446elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10447{
10448 FILE * file = (FILE *) ptr;
10449 unsigned long flags;
10450
10451 BFD_ASSERT (abfd != NULL && ptr != NULL);
10452
10453 /* Print normal ELF private data. */
10454 _bfd_elf_print_private_bfd_data (abfd, ptr);
10455
10456 flags = elf_elfheader (abfd)->e_flags;
10457 /* Ignore init flag - it may not be set, despite the flags field
10458 containing valid data. */
10459
10460 /* xgettext:c-format */
10461 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10462
10463 switch (EF_ARM_EABI_VERSION (flags))
10464 {
10465 case EF_ARM_EABI_UNKNOWN:
10466 /* The following flag bits are GNU extensions and not part of the
10467 official ARM ELF extended ABI. Hence they are only decoded if
10468 the EABI version is not set. */
10469 if (flags & EF_ARM_INTERWORK)
10470 fprintf (file, _(" [interworking enabled]"));
10471
10472 if (flags & EF_ARM_APCS_26)
10473 fprintf (file, " [APCS-26]");
10474 else
10475 fprintf (file, " [APCS-32]");
10476
10477 if (flags & EF_ARM_VFP_FLOAT)
10478 fprintf (file, _(" [VFP float format]"));
10479 else if (flags & EF_ARM_MAVERICK_FLOAT)
10480 fprintf (file, _(" [Maverick float format]"));
10481 else
10482 fprintf (file, _(" [FPA float format]"));
10483
10484 if (flags & EF_ARM_APCS_FLOAT)
10485 fprintf (file, _(" [floats passed in float registers]"));
10486
10487 if (flags & EF_ARM_PIC)
10488 fprintf (file, _(" [position independent]"));
10489
10490 if (flags & EF_ARM_NEW_ABI)
10491 fprintf (file, _(" [new ABI]"));
10492
10493 if (flags & EF_ARM_OLD_ABI)
10494 fprintf (file, _(" [old ABI]"));
10495
10496 if (flags & EF_ARM_SOFT_FLOAT)
10497 fprintf (file, _(" [software FP]"));
10498
10499 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10500 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10501 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10502 | EF_ARM_MAVERICK_FLOAT);
10503 break;
10504
10505 case EF_ARM_EABI_VER1:
10506 fprintf (file, _(" [Version1 EABI]"));
10507
10508 if (flags & EF_ARM_SYMSARESORTED)
10509 fprintf (file, _(" [sorted symbol table]"));
10510 else
10511 fprintf (file, _(" [unsorted symbol table]"));
10512
10513 flags &= ~ EF_ARM_SYMSARESORTED;
10514 break;
10515
10516 case EF_ARM_EABI_VER2:
10517 fprintf (file, _(" [Version2 EABI]"));
10518
10519 if (flags & EF_ARM_SYMSARESORTED)
10520 fprintf (file, _(" [sorted symbol table]"));
10521 else
10522 fprintf (file, _(" [unsorted symbol table]"));
10523
10524 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10525 fprintf (file, _(" [dynamic symbols use segment index]"));
10526
10527 if (flags & EF_ARM_MAPSYMSFIRST)
10528 fprintf (file, _(" [mapping symbols precede others]"));
10529
10530 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10531 | EF_ARM_MAPSYMSFIRST);
10532 break;
10533
10534 case EF_ARM_EABI_VER3:
10535 fprintf (file, _(" [Version3 EABI]"));
10536 break;
10537
10538 case EF_ARM_EABI_VER4:
10539 fprintf (file, _(" [Version4 EABI]"));
10540 goto eabi;
10541
10542 case EF_ARM_EABI_VER5:
10543 fprintf (file, _(" [Version5 EABI]"));
10544 eabi:
10545 if (flags & EF_ARM_BE8)
10546 fprintf (file, _(" [BE8]"));
10547
10548 if (flags & EF_ARM_LE8)
10549 fprintf (file, _(" [LE8]"));
10550
10551 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10552 break;
10553
10554 default:
10555 fprintf (file, _(" <EABI version unrecognised>"));
10556 break;
10557 }
10558
10559 flags &= ~ EF_ARM_EABIMASK;
10560
10561 if (flags & EF_ARM_RELEXEC)
10562 fprintf (file, _(" [relocatable executable]"));
10563
10564 if (flags & EF_ARM_HASENTRY)
10565 fprintf (file, _(" [has entry point]"));
10566
10567 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10568
10569 if (flags)
10570 fprintf (file, _("<Unrecognised flag bits set>"));
10571
10572 fputc ('\n', file);
10573
10574 return TRUE;
10575}
10576
10577static int
10578elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10579{
10580 switch (ELF_ST_TYPE (elf_sym->st_info))
10581 {
10582 case STT_ARM_TFUNC:
10583 return ELF_ST_TYPE (elf_sym->st_info);
10584
10585 case STT_ARM_16BIT:
10586 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10587 This allows us to distinguish between data used by Thumb instructions
10588 and non-data (which is probably code) inside Thumb regions of an
10589 executable. */
10590 if (type != STT_OBJECT && type != STT_TLS)
10591 return ELF_ST_TYPE (elf_sym->st_info);
10592 break;
10593
10594 default:
10595 break;
10596 }
10597
10598 return type;
10599}
10600
10601static asection *
10602elf32_arm_gc_mark_hook (asection *sec,
10603 struct bfd_link_info *info,
10604 Elf_Internal_Rela *rel,
10605 struct elf_link_hash_entry *h,
10606 Elf_Internal_Sym *sym)
10607{
10608 if (h != NULL)
10609 switch (ELF32_R_TYPE (rel->r_info))
10610 {
10611 case R_ARM_GNU_VTINHERIT:
10612 case R_ARM_GNU_VTENTRY:
10613 return NULL;
10614 }
10615
10616 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10617}
10618
10619/* Update the got entry reference counts for the section being removed. */
10620
10621static bfd_boolean
10622elf32_arm_gc_sweep_hook (bfd * abfd,
10623 struct bfd_link_info * info,
10624 asection * sec,
10625 const Elf_Internal_Rela * relocs)
10626{
10627 Elf_Internal_Shdr *symtab_hdr;
10628 struct elf_link_hash_entry **sym_hashes;
10629 bfd_signed_vma *local_got_refcounts;
10630 const Elf_Internal_Rela *rel, *relend;
10631 struct elf32_arm_link_hash_table * globals;
10632
10633 if (info->relocatable)
10634 return TRUE;
10635
10636 globals = elf32_arm_hash_table (info);
10637 if (globals == NULL)
10638 return FALSE;
10639
10640 elf_section_data (sec)->local_dynrel = NULL;
10641
10642 symtab_hdr = & elf_symtab_hdr (abfd);
10643 sym_hashes = elf_sym_hashes (abfd);
10644 local_got_refcounts = elf_local_got_refcounts (abfd);
10645
10646 check_use_blx (globals);
10647
10648 relend = relocs + sec->reloc_count;
10649 for (rel = relocs; rel < relend; rel++)
10650 {
10651 unsigned long r_symndx;
10652 struct elf_link_hash_entry *h = NULL;
10653 int r_type;
10654
10655 r_symndx = ELF32_R_SYM (rel->r_info);
10656 if (r_symndx >= symtab_hdr->sh_info)
10657 {
10658 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10659 while (h->root.type == bfd_link_hash_indirect
10660 || h->root.type == bfd_link_hash_warning)
10661 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10662 }
10663
10664 r_type = ELF32_R_TYPE (rel->r_info);
10665 r_type = arm_real_reloc_type (globals, r_type);
10666 switch (r_type)
10667 {
10668 case R_ARM_GOT32:
10669 case R_ARM_GOT_PREL:
10670 case R_ARM_TLS_GD32:
10671 case R_ARM_TLS_IE32:
10672 if (h != NULL)
10673 {
10674 if (h->got.refcount > 0)
10675 h->got.refcount -= 1;
10676 }
10677 else if (local_got_refcounts != NULL)
10678 {
10679 if (local_got_refcounts[r_symndx] > 0)
10680 local_got_refcounts[r_symndx] -= 1;
10681 }
10682 break;
10683
10684 case R_ARM_TLS_LDM32:
10685 globals->tls_ldm_got.refcount -= 1;
10686 break;
10687
10688 case R_ARM_ABS32:
10689 case R_ARM_ABS32_NOI:
10690 case R_ARM_REL32:
10691 case R_ARM_REL32_NOI:
10692 case R_ARM_PC24:
10693 case R_ARM_PLT32:
10694 case R_ARM_CALL:
10695 case R_ARM_JUMP24:
10696 case R_ARM_PREL31:
10697 case R_ARM_THM_CALL:
10698 case R_ARM_THM_JUMP24:
10699 case R_ARM_THM_JUMP19:
10700 case R_ARM_MOVW_ABS_NC:
10701 case R_ARM_MOVT_ABS:
10702 case R_ARM_MOVW_PREL_NC:
10703 case R_ARM_MOVT_PREL:
10704 case R_ARM_THM_MOVW_ABS_NC:
10705 case R_ARM_THM_MOVT_ABS:
10706 case R_ARM_THM_MOVW_PREL_NC:
10707 case R_ARM_THM_MOVT_PREL:
10708 /* Should the interworking branches be here also? */
10709
10710 if (h != NULL)
10711 {
10712 struct elf32_arm_link_hash_entry *eh;
10713 struct elf32_arm_relocs_copied **pp;
10714 struct elf32_arm_relocs_copied *p;
10715
10716 eh = (struct elf32_arm_link_hash_entry *) h;
10717
10718 if (h->plt.refcount > 0)
10719 {
10720 h->plt.refcount -= 1;
10721 if (r_type == R_ARM_THM_CALL)
10722 eh->plt_maybe_thumb_refcount--;
10723
10724 if (r_type == R_ARM_THM_JUMP24
10725 || r_type == R_ARM_THM_JUMP19)
10726 eh->plt_thumb_refcount--;
10727 }
10728
10729 if (r_type == R_ARM_ABS32
10730 || r_type == R_ARM_REL32
10731 || r_type == R_ARM_ABS32_NOI
10732 || r_type == R_ARM_REL32_NOI)
10733 {
10734 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10735 pp = &p->next)
10736 if (p->section == sec)
10737 {
10738 p->count -= 1;
10739 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10740 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10741 p->pc_count -= 1;
10742 if (p->count == 0)
10743 *pp = p->next;
10744 break;
10745 }
10746 }
10747 }
10748 break;
10749
10750 default:
10751 break;
10752 }
10753 }
10754
10755 return TRUE;
10756}
10757
10758/* Look through the relocs for a section during the first phase. */
10759
10760static bfd_boolean
10761elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10762 asection *sec, const Elf_Internal_Rela *relocs)
10763{
10764 Elf_Internal_Shdr *symtab_hdr;
10765 struct elf_link_hash_entry **sym_hashes;
10766 const Elf_Internal_Rela *rel;
10767 const Elf_Internal_Rela *rel_end;
10768 bfd *dynobj;
10769 asection *sreloc;
10770 struct elf32_arm_link_hash_table *htab;
10771 bfd_boolean needs_plt;
10772 unsigned long nsyms;
10773
10774 if (info->relocatable)
10775 return TRUE;
10776
10777 BFD_ASSERT (is_arm_elf (abfd));
10778
10779 htab = elf32_arm_hash_table (info);
10780 if (htab == NULL)
10781 return FALSE;
10782
10783 sreloc = NULL;
10784
10785 /* Create dynamic sections for relocatable executables so that we can
10786 copy relocations. */
10787 if (htab->root.is_relocatable_executable
10788 && ! htab->root.dynamic_sections_created)
10789 {
10790 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10791 return FALSE;
10792 }
10793
10794 dynobj = elf_hash_table (info)->dynobj;
10795 symtab_hdr = & elf_symtab_hdr (abfd);
10796 sym_hashes = elf_sym_hashes (abfd);
10797 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10798
10799 rel_end = relocs + sec->reloc_count;
10800 for (rel = relocs; rel < rel_end; rel++)
10801 {
10802 struct elf_link_hash_entry *h;
10803 struct elf32_arm_link_hash_entry *eh;
10804 unsigned long r_symndx;
10805 int r_type;
10806
10807 r_symndx = ELF32_R_SYM (rel->r_info);
10808 r_type = ELF32_R_TYPE (rel->r_info);
10809 r_type = arm_real_reloc_type (htab, r_type);
10810
10811 if (r_symndx >= nsyms
10812 /* PR 9934: It is possible to have relocations that do not
10813 refer to symbols, thus it is also possible to have an
10814 object file containing relocations but no symbol table. */
10815 && (r_symndx > STN_UNDEF || nsyms > 0))
10816 {
10817 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10818 r_symndx);
10819 return FALSE;
10820 }
10821
10822 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10823 h = NULL;
10824 else
10825 {
10826 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10827 while (h->root.type == bfd_link_hash_indirect
10828 || h->root.type == bfd_link_hash_warning)
10829 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10830 }
10831
10832 eh = (struct elf32_arm_link_hash_entry *) h;
10833
10834 switch (r_type)
10835 {
10836 case R_ARM_GOT32:
10837 case R_ARM_GOT_PREL:
10838 case R_ARM_TLS_GD32:
10839 case R_ARM_TLS_IE32:
10840 /* This symbol requires a global offset table entry. */
10841 {
10842 int tls_type, old_tls_type;
10843
10844 switch (r_type)
10845 {
10846 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10847 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10848 default: tls_type = GOT_NORMAL; break;
10849 }
10850
10851 if (h != NULL)
10852 {
10853 h->got.refcount++;
10854 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10855 }
10856 else
10857 {
10858 bfd_signed_vma *local_got_refcounts;
10859
10860 /* This is a global offset table entry for a local symbol. */
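	      /* The refcount array and the per-symbol TLS type bytes below
	         are carved out of a single allocation: sh_info counters
	         followed by sh_info type bytes.  */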
10861 local_got_refcounts = elf_local_got_refcounts (abfd);
10862 if (local_got_refcounts == NULL)
10863 {
10864 bfd_size_type size;
10865
10866 size = symtab_hdr->sh_info;
10867 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10868 local_got_refcounts = (bfd_signed_vma *)
10869 bfd_zalloc (abfd, size);
10870 if (local_got_refcounts == NULL)
10871 return FALSE;
10872 elf_local_got_refcounts (abfd) = local_got_refcounts;
10873 elf32_arm_local_got_tls_type (abfd)
10874 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10875 }
10876 local_got_refcounts[r_symndx] += 1;
10877 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10878 }
10879
10880 /* We will already have issued an error message if there is a
10881 TLS / non-TLS mismatch, based on the symbol type. We don't
10882 support any linker relaxations. So just combine any TLS
10883 types needed. */
10884 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10885 && tls_type != GOT_NORMAL)
10886 tls_type |= old_tls_type;
10887
10888 if (old_tls_type != tls_type)
10889 {
10890 if (h != NULL)
10891 elf32_arm_hash_entry (h)->tls_type = tls_type;
10892 else
10893 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10894 }
10895 }
10896 /* Fall through. */
10897
10898 case R_ARM_TLS_LDM32:
10899 if (r_type == R_ARM_TLS_LDM32)
10900 htab->tls_ldm_got.refcount++;
10901 /* Fall through. */
10902
10903 case R_ARM_GOTOFF32:
10904 case R_ARM_GOTPC:
10905 if (htab->sgot == NULL)
10906 {
10907 if (htab->root.dynobj == NULL)
10908 htab->root.dynobj = abfd;
10909 if (!create_got_section (htab->root.dynobj, info))
10910 return FALSE;
10911 }
10912 break;
10913
10914 case R_ARM_ABS12:
10915 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10916 ldr __GOTT_INDEX__ offsets. */
10917 if (!htab->vxworks_p)
10918 break;
10919 /* Fall through. */
10920
10921 case R_ARM_PC24:
10922 case R_ARM_PLT32:
10923 case R_ARM_CALL:
10924 case R_ARM_JUMP24:
10925 case R_ARM_PREL31:
10926 case R_ARM_THM_CALL:
10927 case R_ARM_THM_JUMP24:
10928 case R_ARM_THM_JUMP19:
10929 needs_plt = 1;
10930 goto normal_reloc;
10931
10932 case R_ARM_MOVW_ABS_NC:
10933 case R_ARM_MOVT_ABS:
10934 case R_ARM_THM_MOVW_ABS_NC:
10935 case R_ARM_THM_MOVT_ABS:
10936 if (info->shared)
10937 {
10938 (*_bfd_error_handler)
10939 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10940 abfd, elf32_arm_howto_table_1[r_type].name,
10941 (h) ? h->root.root.string : "a local symbol");
10942 bfd_set_error (bfd_error_bad_value);
10943 return FALSE;
10944 }
10945
10946 /* Fall through. */
10947 case R_ARM_ABS32:
10948 case R_ARM_ABS32_NOI:
10949 case R_ARM_REL32:
10950 case R_ARM_REL32_NOI:
10951 case R_ARM_MOVW_PREL_NC:
10952 case R_ARM_MOVT_PREL:
10953 case R_ARM_THM_MOVW_PREL_NC:
10954 case R_ARM_THM_MOVT_PREL:
10955 needs_plt = 0;
10956 normal_reloc:
10957
10958 /* Should the interworking branches be listed here? */
10959 if (h != NULL)
10960 {
10961 /* If this reloc is in a read-only section, we might
10962 need a copy reloc. We can't check reliably at this
10963 stage whether the section is read-only, as input
10964 sections have not yet been mapped to output sections.
10965 Tentatively set the flag for now, and correct in
10966 adjust_dynamic_symbol. */
10967 if (!info->shared)
10968 h->non_got_ref = 1;
10969
10970 /* We may need a .plt entry if the function this reloc
10971 refers to is in a different object. We can't tell for
10972 sure yet, because something later might force the
10973 symbol local. */
10974 if (needs_plt)
10975 h->needs_plt = 1;
10976
10977 /* If we create a PLT entry, this relocation will reference
10978 it, even if it's an ABS32 relocation. */
10979 h->plt.refcount += 1;
10980
10981 /* It's too early to use htab->use_blx here, so we have to
10982 record possible blx references separately from
10983 relocs that definitely need a thumb stub. */
10984
10985 if (r_type == R_ARM_THM_CALL)
10986 eh->plt_maybe_thumb_refcount += 1;
10987
10988 if (r_type == R_ARM_THM_JUMP24
10989 || r_type == R_ARM_THM_JUMP19)
10990 eh->plt_thumb_refcount += 1;
10991 }
10992
10993 /* If we are creating a shared library or relocatable executable,
10994 and this is a reloc against a global symbol, or a non PC
10995 relative reloc against a local symbol, then we need to copy
10996 the reloc into the shared library. However, if we are linking
10997 with -Bsymbolic, we do not need to copy a reloc against a
10998 global symbol which is defined in an object we are
10999 including in the link (i.e., DEF_REGULAR is set). At
11000 this point we have not seen all the input files, so it is
11001 possible that DEF_REGULAR is not set now but will be set
11002 later (it is never cleared). We account for that
11003 possibility below by storing information in the
11004 relocs_copied field of the hash table entry. */
11005 if ((info->shared || htab->root.is_relocatable_executable)
11006 && (sec->flags & SEC_ALLOC) != 0
11007 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
11008 || (h != NULL && ! h->needs_plt
11009 && (! info->symbolic || ! h->def_regular))))
11010 {
11011 struct elf32_arm_relocs_copied *p, **head;
11012
11013 /* When creating a shared object, we must copy these
11014 reloc types into the output file. We create a reloc
11015 section in dynobj and make room for this reloc. */
11016 if (sreloc == NULL)
11017 {
11018 sreloc = _bfd_elf_make_dynamic_reloc_section
11019 (sec, dynobj, 2, abfd, ! htab->use_rel);
11020
11021 if (sreloc == NULL)
11022 return FALSE;
11023
11024 /* BPABI objects never have dynamic relocations mapped. */
11025 if (htab->symbian_p)
11026 {
11027 flagword flags;
11028
11029 flags = bfd_get_section_flags (dynobj, sreloc);
11030 flags &= ~(SEC_LOAD | SEC_ALLOC);
11031 bfd_set_section_flags (dynobj, sreloc, flags);
11032 }
11033 }
11034
11035 /* If this is a global symbol, we count the number of
11036 relocations we need for this symbol. */
11037 if (h != NULL)
11038 {
11039 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
11040 }
11041 else
11042 {
11043 /* Track dynamic relocs needed for local syms too.
11044 We really need local syms available to do this
11045 easily. Oh well. */
11046 asection *s;
11047 void *vpp;
11048 Elf_Internal_Sym *isym;
11049
11050 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
11051 abfd, r_symndx);
11052 if (isym == NULL)
11053 return FALSE;
11054
11055 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
11056 if (s == NULL)
11057 s = sec;
11058
11059 vpp = &elf_section_data (s)->local_dynrel;
11060 head = (struct elf32_arm_relocs_copied **) vpp;
11061 }
11062
11063 p = *head;
11064 if (p == NULL || p->section != sec)
11065 {
11066 bfd_size_type amt = sizeof *p;
11067
11068 p = (struct elf32_arm_relocs_copied *)
11069 bfd_alloc (htab->root.dynobj, amt);
11070 if (p == NULL)
11071 return FALSE;
11072 p->next = *head;
11073 *head = p;
11074 p->section = sec;
11075 p->count = 0;
11076 p->pc_count = 0;
11077 }
11078
11079 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
11080 p->pc_count += 1;
11081 p->count += 1;
11082 }
11083 break;
11084
11085 /* This relocation describes the C++ object vtable hierarchy.
11086 Reconstruct it for later use during GC. */
11087 case R_ARM_GNU_VTINHERIT:
11088 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
11089 return FALSE;
11090 break;
11091
11092 /* This relocation describes which C++ vtable entries are actually
11093 used. Record for later use during GC. */
11094 case R_ARM_GNU_VTENTRY:
11095 BFD_ASSERT (h != NULL);
11096 if (h != NULL
11097 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
11098 return FALSE;
11099 break;
11100 }
11101 }
11102
11103 return TRUE;
11104}
11105
11106/* Unwinding tables are not referenced directly. This pass marks them as
11107 required if the corresponding code section is marked. */
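/* An SHT_ARM_EXIDX section's sh_link field holds the section header index
   of the text section it describes, so an index table is marked as soon as
   that text section has been marked.  */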
11108
11109static bfd_boolean
11110elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
11111 elf_gc_mark_hook_fn gc_mark_hook)
11112{
11113 bfd *sub;
11114 Elf_Internal_Shdr **elf_shdrp;
11115 bfd_boolean again;
11116
11117 /* Marking EH data may cause additional code sections to be marked,
11118 requiring multiple passes. */
11119 again = TRUE;
11120 while (again)
11121 {
11122 again = FALSE;
11123 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
11124 {
11125 asection *o;
11126
11127 if (! is_arm_elf (sub))
11128 continue;
11129
11130 elf_shdrp = elf_elfsections (sub);
11131 for (o = sub->sections; o != NULL; o = o->next)
11132 {
11133 Elf_Internal_Shdr *hdr;
11134
11135 hdr = &elf_section_data (o)->this_hdr;
11136 if (hdr->sh_type == SHT_ARM_EXIDX
11137 && hdr->sh_link
11138 && hdr->sh_link < elf_numsections (sub)
11139 && !o->gc_mark
11140 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
11141 {
11142 again = TRUE;
11143 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
11144 return FALSE;
11145 }
11146 }
11147 }
11148 }
11149
11150 return TRUE;
11151}
11152
11153/* Treat mapping symbols as special target symbols. */
11154
11155static bfd_boolean
11156elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11157{
11158 return bfd_is_arm_special_symbol_name (sym->name,
11159 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11160}
11161
11162/* This is a copy of elf_find_function() from elf.c except that
11163 ARM mapping symbols are ignored when looking for function names
11164 and STT_ARM_TFUNC is considered to be a function type. */
11165
11166static bfd_boolean
11167arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11168 asection * section,
11169 asymbol ** symbols,
11170 bfd_vma offset,
11171 const char ** filename_ptr,
11172 const char ** functionname_ptr)
11173{
11174 const char * filename = NULL;
11175 asymbol * func = NULL;
11176 bfd_vma low_func = 0;
11177 asymbol ** p;
11178
11179 for (p = symbols; *p != NULL; p++)
11180 {
11181 elf_symbol_type *q;
11182
11183 q = (elf_symbol_type *) *p;
11184
11185 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11186 {
11187 default:
11188 break;
11189 case STT_FILE:
11190 filename = bfd_asymbol_name (&q->symbol);
11191 break;
11192 case STT_FUNC:
11193 case STT_ARM_TFUNC:
11194 case STT_NOTYPE:
11195 /* Skip mapping symbols. */
11196 if ((q->symbol.flags & BSF_LOCAL)
11197 && bfd_is_arm_special_symbol_name (q->symbol.name,
11198 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11199 continue;
11200 /* Fall through. */
11201 if (bfd_get_section (&q->symbol) == section
11202 && q->symbol.value >= low_func
11203 && q->symbol.value <= offset)
11204 {
11205 func = (asymbol *) q;
11206 low_func = q->symbol.value;
11207 }
11208 break;
11209 }
11210 }
11211
11212 if (func == NULL)
11213 return FALSE;
11214
11215 if (filename_ptr)
11216 *filename_ptr = filename;
11217 if (functionname_ptr)
11218 *functionname_ptr = bfd_asymbol_name (func);
11219
11220 return TRUE;
11221}
11222
11223
11224/* Find the nearest line to a particular section and offset, for error
11225 reporting. This code is a duplicate of the code in elf.c, except
11226 that it uses arm_elf_find_function. */
11227
11228static bfd_boolean
11229elf32_arm_find_nearest_line (bfd * abfd,
11230 asection * section,
11231 asymbol ** symbols,
11232 bfd_vma offset,
11233 const char ** filename_ptr,
11234 const char ** functionname_ptr,
11235 unsigned int * line_ptr)
11236{
11237 bfd_boolean found = FALSE;
11238
11239 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11240
11241 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11242 filename_ptr, functionname_ptr,
11243 line_ptr, 0,
11244 & elf_tdata (abfd)->dwarf2_find_line_info))
11245 {
11246 if (!*functionname_ptr)
11247 arm_elf_find_function (abfd, section, symbols, offset,
11248 *filename_ptr ? NULL : filename_ptr,
11249 functionname_ptr);
11250
11251 return TRUE;
11252 }
11253
11254 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11255 & found, filename_ptr,
11256 functionname_ptr, line_ptr,
11257 & elf_tdata (abfd)->line_info))
11258 return FALSE;
11259
11260 if (found && (*functionname_ptr || *line_ptr))
11261 return TRUE;
11262
11263 if (symbols == NULL)
11264 return FALSE;
11265
11266 if (! arm_elf_find_function (abfd, section, symbols, offset,
11267 filename_ptr, functionname_ptr))
11268 return FALSE;
11269
11270 *line_ptr = 0;
11271 return TRUE;
11272}
11273
11274static bfd_boolean
11275elf32_arm_find_inliner_info (bfd * abfd,
11276 const char ** filename_ptr,
11277 const char ** functionname_ptr,
11278 unsigned int * line_ptr)
11279{
11280 bfd_boolean found;
11281 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11282 functionname_ptr, line_ptr,
11283 & elf_tdata (abfd)->dwarf2_find_line_info);
11284 return found;
11285}
11286
11287/* Adjust a symbol defined by a dynamic object and referenced by a
11288 regular object. The current definition is in some section of the
11289 dynamic object, but we're not including those sections. We have to
11290 change the definition to something the rest of the link can
11291 understand. */
11292
11293static bfd_boolean
11294elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11295 struct elf_link_hash_entry * h)
11296{
11297 bfd * dynobj;
11298 asection * s;
11299 struct elf32_arm_link_hash_entry * eh;
11300 struct elf32_arm_link_hash_table *globals;
11301
11302 globals = elf32_arm_hash_table (info);
11303 if (globals == NULL)
11304 return FALSE;
11305
11306 dynobj = elf_hash_table (info)->dynobj;
11307
11308 /* Make sure we know what is going on here. */
11309 BFD_ASSERT (dynobj != NULL
11310 && (h->needs_plt
11311 || h->u.weakdef != NULL
11312 || (h->def_dynamic
11313 && h->ref_regular
11314 && !h->def_regular)));
11315
11316 eh = (struct elf32_arm_link_hash_entry *) h;
11317
11318 /* If this is a function, put it in the procedure linkage table. We
11319 will fill in the contents of the procedure linkage table later,
11320 when we know the address of the .got section. */
11321 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11322 || h->needs_plt)
11323 {
11324 if (h->plt.refcount <= 0
11325 || SYMBOL_CALLS_LOCAL (info, h)
11326 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11327 && h->root.type == bfd_link_hash_undefweak))
11328 {
11329 /* This case can occur if we saw a PLT32 reloc in an input
11330 file, but the symbol was never referred to by a dynamic
11331 object, or if all references were garbage collected. In
11332 such a case, we don't actually need to build a procedure
11333 linkage table, and we can just do a PC24 reloc instead. */
11334 h->plt.offset = (bfd_vma) -1;
11335 eh->plt_thumb_refcount = 0;
11336 eh->plt_maybe_thumb_refcount = 0;
11337 h->needs_plt = 0;
11338 }
11339
11340 return TRUE;
11341 }
11342 else
11343 {
11344 /* It's possible that we incorrectly decided a .plt reloc was
11345 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11346 in check_relocs. We can't decide accurately between function
11347 and non-function syms in check_relocs; objects loaded later in
11348 the link may change h->type. So fix it now. */
11349 h->plt.offset = (bfd_vma) -1;
11350 eh->plt_thumb_refcount = 0;
11351 eh->plt_maybe_thumb_refcount = 0;
11352 }
11353
11354 /* If this is a weak symbol, and there is a real definition, the
11355 processor independent code will have arranged for us to see the
11356 real definition first, and we can just use the same value. */
11357 if (h->u.weakdef != NULL)
11358 {
11359 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11360 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11361 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11362 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11363 return TRUE;
11364 }
11365
11366 /* If there are no non-GOT references, we do not need a copy
11367 relocation. */
11368 if (!h->non_got_ref)
11369 return TRUE;
11370
11371 /* This is a reference to a symbol defined by a dynamic object which
11372 is not a function. */
11373
11374 /* If we are creating a shared library, we must presume that the
11375 only references to the symbol are via the global offset table.
11376 For such cases we need not do anything here; the relocations will
11377 be handled correctly by relocate_section. Relocatable executables
11378 can reference data in shared objects directly, so we don't need to
11379 do anything here. */
11380 if (info->shared || globals->root.is_relocatable_executable)
11381 return TRUE;
11382
11383 if (h->size == 0)
11384 {
11385 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11386 h->root.root.string);
11387 return TRUE;
11388 }
11389
11390 /* We must allocate the symbol in our .dynbss section, which will
11391 become part of the .bss section of the executable. There will be
11392 an entry for this symbol in the .dynsym section. The dynamic
11393 object will contain position independent code, so all references
11394 from the dynamic object to this symbol will go through the global
11395 offset table. The dynamic linker will use the .dynsym entry to
11396 determine the address it must put in the global offset table, so
11397 both the dynamic object and the regular object will refer to the
11398 same memory location for the variable. */
11399 s = bfd_get_section_by_name (dynobj, ".dynbss");
11400 BFD_ASSERT (s != NULL);
11401
11402 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11403 copy the initial value out of the dynamic object and into the
11404 runtime process image. We need to remember the offset into the
11405 .rel(a).bss section we are going to use. */
11406 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11407 {
11408 asection *srel;
11409
11410 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11411 BFD_ASSERT (srel != NULL);
11412 srel->size += RELOC_SIZE (globals);
11413 h->needs_copy = 1;
11414 }
11415
11416 return _bfd_elf_adjust_dynamic_copy (h, s);
11417}
11418
11419/* Allocate space in .plt, .got and associated reloc sections for
11420 dynamic relocs. */
11421
11422static bfd_boolean
11423allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11424{
11425 struct bfd_link_info *info;
11426 struct elf32_arm_link_hash_table *htab;
11427 struct elf32_arm_link_hash_entry *eh;
11428 struct elf32_arm_relocs_copied *p;
11429 bfd_signed_vma thumb_refs;
11430
11431 eh = (struct elf32_arm_link_hash_entry *) h;
11432
11433 if (h->root.type == bfd_link_hash_indirect)
11434 return TRUE;
11435
11436 if (h->root.type == bfd_link_hash_warning)
11437 /* When warning symbols are created, they **replace** the "real"
11438 entry in the hash table, thus we never get to see the real
11439 symbol in a hash traversal. So look at it now. */
11440 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11441
11442 info = (struct bfd_link_info *) inf;
11443 htab = elf32_arm_hash_table (info);
11444 if (htab == NULL)
11445 return FALSE;
11446
11447 if (htab->root.dynamic_sections_created
11448 && h->plt.refcount > 0)
11449 {
11450 /* Make sure this symbol is output as a dynamic symbol.
11451 Undefined weak syms won't yet be marked as dynamic. */
11452 if (h->dynindx == -1
11453 && !h->forced_local)
11454 {
11455 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11456 return FALSE;
11457 }
11458
11459 if (info->shared
11460 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11461 {
11462 asection *s = htab->splt;
11463
11464 /* If this is the first .plt entry, make room for the special
11465 first entry. */
11466 if (s->size == 0)
11467 s->size += htab->plt_header_size;
11468
11469 h->plt.offset = s->size;
11470
11471 /* If we will insert a Thumb trampoline before this PLT, leave room
11472 for it. */
11473 thumb_refs = eh->plt_thumb_refcount;
11474 if (!htab->use_blx)
11475 thumb_refs += eh->plt_maybe_thumb_refcount;
11476
11477 if (thumb_refs > 0)
11478 {
11479 h->plt.offset += PLT_THUMB_STUB_SIZE;
11480 s->size += PLT_THUMB_STUB_SIZE;
11481 }
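/* (Illustration: the trampoline reserved here is the Thumb "bx pc; nop"
   pair emitted later from elf32_arm_plt_thumb_stub; it occupies
   PLT_THUMB_STUB_SIZE bytes immediately before the ARM PLT entry so
   that Thumb callers on cores without BLX can reach ARM state.)  */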
11482
11483 /* If this symbol is not defined in a regular file, and we are
11484 not generating a shared library, then set the symbol to this
11485 location in the .plt. This is required to make function
11486 pointers compare as equal between the normal executable and
11487 the shared library. */
11488 if (! info->shared
11489 && !h->def_regular)
11490 {
11491 h->root.u.def.section = s;
11492 h->root.u.def.value = h->plt.offset;
11493
11494 /* Make sure the function is not marked as Thumb, in case
11495 it is the target of an ABS32 relocation, which will
11496 point to the PLT entry. */
11497 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11498 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11499 }
11500
11501 /* Make room for this entry. */
11502 s->size += htab->plt_entry_size;
11503
11504 if (!htab->symbian_p)
11505 {
11506 /* We also need to make an entry in the .got.plt section, which
11507 will be placed in the .got section by the linker script. */
11508 eh->plt_got_offset = htab->sgotplt->size;
11509 htab->sgotplt->size += 4;
11510 }
11511
11512 /* We also need to make an entry in the .rel(a).plt section. */
11513 htab->srelplt->size += RELOC_SIZE (htab);
11514
11515 /* VxWorks executables have a second set of relocations for
11516 each PLT entry. They go in a separate relocation section,
11517 which is processed by the kernel loader. */
11518 if (htab->vxworks_p && !info->shared)
11519 {
11520 /* There is a relocation for the initial PLT entry:
11521 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11522 if (h->plt.offset == htab->plt_header_size)
11523 htab->srelplt2->size += RELOC_SIZE (htab);
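/* (Only the first PLT entry can have an offset equal to
   plt_header_size, so the _GLOBAL_OFFSET_TABLE_ reloc is counted at
   most once.)  */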
11524
11525 /* There are two extra relocations for each subsequent
11526 PLT entry: an R_ARM_32 relocation for the GOT entry,
11527 and an R_ARM_32 relocation for the PLT entry. */
11528 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11529 }
11530 }
11531 else
11532 {
11533 h->plt.offset = (bfd_vma) -1;
11534 h->needs_plt = 0;
11535 }
11536 }
11537 else
11538 {
11539 h->plt.offset = (bfd_vma) -1;
11540 h->needs_plt = 0;
11541 }
11542
11543 if (h->got.refcount > 0)
11544 {
11545 asection *s;
11546 bfd_boolean dyn;
11547 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11548 int indx;
11549
11550 /* Make sure this symbol is output as a dynamic symbol.
11551 Undefined weak syms won't yet be marked as dynamic. */
11552 if (h->dynindx == -1
11553 && !h->forced_local)
11554 {
11555 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11556 return FALSE;
11557 }
11558
11559 if (!htab->symbian_p)
11560 {
11561 s = htab->sgot;
11562 h->got.offset = s->size;
11563
11564 if (tls_type == GOT_UNKNOWN)
11565 abort ();
11566
11567 if (tls_type == GOT_NORMAL)
11568 /* Non-TLS symbols need one GOT slot. */
11569 s->size += 4;
11570 else
11571 {
11572 if (tls_type & GOT_TLS_GD)
11573 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11574 s->size += 8;
11575 if (tls_type & GOT_TLS_IE)
11576 /* R_ARM_TLS_IE32 needs one GOT slot. */
11577 s->size += 4;
11578 }
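/* (Worked example: a symbol carrying both GOT_TLS_GD and GOT_TLS_IE
   therefore reserves 12 bytes from h->got.offset -- a two-word
   {module index, dtp-relative offset} pair for the GD access plus one
   tp-relative word for the IE access.)  */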
11579
11580 dyn = htab->root.dynamic_sections_created;
11581
11582 indx = 0;
11583 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11584 && (!info->shared
11585 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11586 indx = h->dynindx;
11587
11588 if (tls_type != GOT_NORMAL
11589 && (info->shared || indx != 0)
11590 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11591 || h->root.type != bfd_link_hash_undefweak))
11592 {
11593 if (tls_type & GOT_TLS_IE)
11594 htab->srelgot->size += RELOC_SIZE (htab);
11595
11596 if (tls_type & GOT_TLS_GD)
11597 htab->srelgot->size += RELOC_SIZE (htab);
11598
11599 if ((tls_type & GOT_TLS_GD) && indx != 0)
11600 htab->srelgot->size += RELOC_SIZE (htab);
11601 }
11602 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11603 || h->root.type != bfd_link_hash_undefweak)
11604 && (info->shared
11605 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11606 htab->srelgot->size += RELOC_SIZE (htab);
11607 }
11608 }
11609 else
11610 h->got.offset = (bfd_vma) -1;
11611
11612 /* Allocate stubs for exported Thumb functions on v4t. */
11613 if (!htab->use_blx && h->dynindx != -1
11614 && h->def_regular
11615 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11616 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11617 {
11618 struct elf_link_hash_entry * th;
11619 struct bfd_link_hash_entry * bh;
11620 struct elf_link_hash_entry * myh;
11621 char name[1024];
11622 asection *s;
11623 bh = NULL;
11624 /* Create a new symbol to register the real location of the function. */
11625 s = h->root.u.def.section;
11626 sprintf (name, "__real_%s", h->root.root.string);
11627 _bfd_generic_link_add_one_symbol (info, s->owner,
11628 name, BSF_GLOBAL, s,
11629 h->root.u.def.value,
11630 NULL, TRUE, FALSE, &bh);
11631
11632 myh = (struct elf_link_hash_entry *) bh;
11633 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11634 myh->forced_local = 1;
11635 eh->export_glue = myh;
11636 th = record_arm_to_thumb_glue (info, h);
11637 /* Point the symbol at the stub. */
11638 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11639 h->root.u.def.section = th->root.u.def.section;
11640 h->root.u.def.value = th->root.u.def.value & ~1;
11641 }
11642
11643 if (eh->relocs_copied == NULL)
11644 return TRUE;
11645
11646 /* In the shared -Bsymbolic case, discard space allocated for
11647 dynamic pc-relative relocs against symbols which turn out to be
11648 defined in regular objects. For the normal shared case, discard
11649 space for pc-relative relocs that have become local due to symbol
11650 visibility changes. */
11651
11652 if (info->shared || htab->root.is_relocatable_executable)
11653 {
11654 /* The only relocs that use pc_count are R_ARM_REL32 and
11655 R_ARM_REL32_NOI, which will appear on something like
11656 ".long foo - .". We want calls to protected symbols to resolve
11657 directly to the function rather than going via the plt. If people
11658 want function pointer comparisons to work as expected then they
11659 should avoid writing assembly like ".long foo - .". */
11660 if (SYMBOL_CALLS_LOCAL (info, h))
11661 {
11662 struct elf32_arm_relocs_copied **pp;
11663
11664 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11665 {
11666 p->count -= p->pc_count;
11667 p->pc_count = 0;
11668 if (p->count == 0)
11669 *pp = p->next;
11670 else
11671 pp = &p->next;
11672 }
11673 }
11674
11675 if (htab->vxworks_p)
11676 {
11677 struct elf32_arm_relocs_copied **pp;
11678
11679 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11680 {
11681 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11682 *pp = p->next;
11683 else
11684 pp = &p->next;
11685 }
11686 }
11687
11688 /* Also discard relocs on undefined weak syms with non-default
11689 visibility. */
11690 if (eh->relocs_copied != NULL
11691 && h->root.type == bfd_link_hash_undefweak)
11692 {
11693 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11694 eh->relocs_copied = NULL;
11695
11696 /* Make sure undefined weak symbols are output as a dynamic
11697 symbol in PIEs. */
11698 else if (h->dynindx == -1
11699 && !h->forced_local)
11700 {
11701 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11702 return FALSE;
11703 }
11704 }
11705
11706 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11707 && h->root.type == bfd_link_hash_new)
11708 {
11709 /* Output absolute symbols so that we can create relocations
11710 against them. For normal symbols we output a relocation
11711 against the section that contains them. */
11712 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11713 return FALSE;
11714 }
11715
11716 }
11717 else
11718 {
11719 /* For the non-shared case, discard space for relocs against
11720 symbols which turn out to need copy relocs or are not
11721 dynamic. */
11722
11723 if (!h->non_got_ref
11724 && ((h->def_dynamic
11725 && !h->def_regular)
11726 || (htab->root.dynamic_sections_created
11727 && (h->root.type == bfd_link_hash_undefweak
11728 || h->root.type == bfd_link_hash_undefined))))
11729 {
11730 /* Make sure this symbol is output as a dynamic symbol.
11731 Undefined weak syms won't yet be marked as dynamic. */
11732 if (h->dynindx == -1
11733 && !h->forced_local)
11734 {
11735 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11736 return FALSE;
11737 }
11738
11739 /* If that succeeded, we know we'll be keeping all the
11740 relocs. */
11741 if (h->dynindx != -1)
11742 goto keep;
11743 }
11744
11745 eh->relocs_copied = NULL;
11746
11747 keep: ;
11748 }
11749
11750 /* Finally, allocate space. */
11751 for (p = eh->relocs_copied; p != NULL; p = p->next)
11752 {
11753 asection *sreloc = elf_section_data (p->section)->sreloc;
11754 sreloc->size += p->count * RELOC_SIZE (htab);
11755 }
11756
11757 return TRUE;
11758}
11759
11760/* Find any dynamic relocs that apply to read-only sections. */
11761
11762static bfd_boolean
11763elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11764{
11765 struct elf32_arm_link_hash_entry * eh;
11766 struct elf32_arm_relocs_copied * p;
11767
11768 if (h->root.type == bfd_link_hash_warning)
11769 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11770
11771 eh = (struct elf32_arm_link_hash_entry *) h;
11772 for (p = eh->relocs_copied; p != NULL; p = p->next)
11773 {
11774 asection *s = p->section;
11775
11776 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11777 {
11778 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11779
11780 info->flags |= DF_TEXTREL;
11781
11782 /* Not an error, just cut short the traversal. */
11783 return FALSE;
11784 }
11785 }
11786 return TRUE;
11787}
11788
11789void
11790bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11791 int byteswap_code)
11792{
11793 struct elf32_arm_link_hash_table *globals;
11794
11795 globals = elf32_arm_hash_table (info);
11796 if (globals == NULL)
11797 return;
11798
11799 globals->byteswap_code = byteswap_code;
11800}
11801
11802/* Set the sizes of the dynamic sections. */
11803
11804static bfd_boolean
11805elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11806 struct bfd_link_info * info)
11807{
11808 bfd * dynobj;
11809 asection * s;
11810 bfd_boolean plt;
11811 bfd_boolean relocs;
11812 bfd *ibfd;
11813 struct elf32_arm_link_hash_table *htab;
11814
11815 htab = elf32_arm_hash_table (info);
11816 if (htab == NULL)
11817 return FALSE;
11818
11819 dynobj = elf_hash_table (info)->dynobj;
11820 BFD_ASSERT (dynobj != NULL);
11821 check_use_blx (htab);
11822
11823 if (elf_hash_table (info)->dynamic_sections_created)
11824 {
11825 /* Set the contents of the .interp section to the interpreter. */
11826 if (info->executable)
11827 {
11828 s = bfd_get_section_by_name (dynobj, ".interp");
11829 BFD_ASSERT (s != NULL);
11830 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11831 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11832 }
11833 }
11834
11835 /* Set up .got offsets for local syms, and space for local dynamic
11836 relocs. */
11837 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11838 {
11839 bfd_signed_vma *local_got;
11840 bfd_signed_vma *end_local_got;
11841 char *local_tls_type;
11842 bfd_size_type locsymcount;
11843 Elf_Internal_Shdr *symtab_hdr;
11844 asection *srel;
11845 bfd_boolean is_vxworks = htab->vxworks_p;
11846
11847 if (! is_arm_elf (ibfd))
11848 continue;
11849
11850 for (s = ibfd->sections; s != NULL; s = s->next)
11851 {
11852 struct elf32_arm_relocs_copied *p;
11853
11854 for (p = (struct elf32_arm_relocs_copied *)
11855 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11856 {
11857 if (!bfd_is_abs_section (p->section)
11858 && bfd_is_abs_section (p->section->output_section))
11859 {
11860 /* Input section has been discarded, either because
11861 it is a copy of a linkonce section or due to
11862 linker script /DISCARD/, so we'll be discarding
11863 the relocs too. */
11864 }
11865 else if (is_vxworks
11866 && strcmp (p->section->output_section->name,
11867 ".tls_vars") == 0)
11868 {
11869 /* Relocations in vxworks .tls_vars sections are
11870 handled specially by the loader. */
11871 }
11872 else if (p->count != 0)
11873 {
11874 srel = elf_section_data (p->section)->sreloc;
11875 srel->size += p->count * RELOC_SIZE (htab);
11876 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11877 info->flags |= DF_TEXTREL;
11878 }
11879 }
11880 }
11881
11882 local_got = elf_local_got_refcounts (ibfd);
11883 if (!local_got)
11884 continue;
11885
11886 symtab_hdr = & elf_symtab_hdr (ibfd);
11887 locsymcount = symtab_hdr->sh_info;
11888 end_local_got = local_got + locsymcount;
11889 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11890 s = htab->sgot;
11891 srel = htab->srelgot;
11892 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11893 {
11894 if (*local_got > 0)
11895 {
11896 *local_got = s->size;
11897 if (*local_tls_type & GOT_TLS_GD)
11898 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11899 s->size += 8;
11900 if (*local_tls_type & GOT_TLS_IE)
11901 s->size += 4;
11902 if (*local_tls_type == GOT_NORMAL)
11903 s->size += 4;
11904
11905 if (info->shared || *local_tls_type == GOT_TLS_GD)
11906 srel->size += RELOC_SIZE (htab);
11907 }
11908 else
11909 *local_got = (bfd_vma) -1;
11910 }
11911 }
11912
11913 if (htab->tls_ldm_got.refcount > 0)
11914 {
11915 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11916 for R_ARM_TLS_LDM32 relocations. */
11917 htab->tls_ldm_got.offset = htab->sgot->size;
11918 htab->sgot->size += 8;
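/* (The two words hold the tls_index pair for the current module --
   module ID plus a zero offset; when linking -shared the module ID is
   filled in at run time via the relocation counted just below.)  */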
11919 if (info->shared)
11920 htab->srelgot->size += RELOC_SIZE (htab);
11921 }
11922 else
11923 htab->tls_ldm_got.offset = -1;
11924
11925 /* Allocate global sym .plt and .got entries, and space for global
11926 sym dynamic relocs. */
11927 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11928
11929 /* Here we rummage through the found bfds to collect glue information. */
11930 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11931 {
11932 if (! is_arm_elf (ibfd))
11933 continue;
11934
11935 /* Initialise mapping tables for code/data. */
11936 bfd_elf32_arm_init_maps (ibfd);
11937
11938 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11939 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11940 /* xgettext:c-format */
11941 _bfd_error_handler (_("Errors encountered processing file %s"),
11942 ibfd->filename);
11943 }
11944
11945 /* Allocate space for the glue sections now that we've sized them. */
11946 bfd_elf32_arm_allocate_interworking_sections (info);
11947
11948 /* The check_relocs and adjust_dynamic_symbol entry points have
11949 determined the sizes of the various dynamic sections. Allocate
11950 memory for them. */
11951 plt = FALSE;
11952 relocs = FALSE;
11953 for (s = dynobj->sections; s != NULL; s = s->next)
11954 {
11955 const char * name;
11956
11957 if ((s->flags & SEC_LINKER_CREATED) == 0)
11958 continue;
11959
11960 /* It's OK to base decisions on the section name, because none
11961 of the dynobj section names depend upon the input files. */
11962 name = bfd_get_section_name (dynobj, s);
11963
11964 if (strcmp (name, ".plt") == 0)
11965 {
11966 /* Remember whether there is a PLT. */
11967 plt = s->size != 0;
11968 }
11969 else if (CONST_STRNEQ (name, ".rel"))
11970 {
11971 if (s->size != 0)
11972 {
11973 /* Remember whether there are any reloc sections other
11974 than .rel(a).plt and .rela.plt.unloaded. */
11975 if (s != htab->srelplt && s != htab->srelplt2)
11976 relocs = TRUE;
11977
11978 /* We use the reloc_count field as a counter if we need
11979 to copy relocs into the output file. */
11980 s->reloc_count = 0;
11981 }
11982 }
11983 else if (! CONST_STRNEQ (name, ".got")
11984 && strcmp (name, ".dynbss") != 0)
11985 {
11986 /* It's not one of our sections, so don't allocate space. */
11987 continue;
11988 }
11989
11990 if (s->size == 0)
11991 {
11992 /* If we don't need this section, strip it from the
11993 output file. This is mostly to handle .rel(a).bss and
11994 .rel(a).plt. We must create both sections in
11995 create_dynamic_sections, because they must be created
11996 before the linker maps input sections to output
11997 sections. The linker does that before
11998 adjust_dynamic_symbol is called, and it is that
11999 function which decides whether anything needs to go
12000 into these sections. */
12001 s->flags |= SEC_EXCLUDE;
12002 continue;
12003 }
12004
12005 if ((s->flags & SEC_HAS_CONTENTS) == 0)
12006 continue;
12007
12008 /* Allocate memory for the section contents. */
12009 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
12010 if (s->contents == NULL)
12011 return FALSE;
12012 }
12013
12014 if (elf_hash_table (info)->dynamic_sections_created)
12015 {
12016 /* Add some entries to the .dynamic section. We fill in the
12017 values later, in elf32_arm_finish_dynamic_sections, but we
12018 must add the entries now so that we get the correct size for
12019 the .dynamic section. The DT_DEBUG entry is filled in by the
12020 dynamic linker and used by the debugger. */
12021#define add_dynamic_entry(TAG, VAL) \
12022 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
12023
12024 if (info->executable)
12025 {
12026 if (!add_dynamic_entry (DT_DEBUG, 0))
12027 return FALSE;
12028 }
12029
12030 if (plt)
12031 {
12032 if ( !add_dynamic_entry (DT_PLTGOT, 0)
12033 || !add_dynamic_entry (DT_PLTRELSZ, 0)
12034 || !add_dynamic_entry (DT_PLTREL,
12035 htab->use_rel ? DT_REL : DT_RELA)
12036 || !add_dynamic_entry (DT_JMPREL, 0))
12037 return FALSE;
12038 }
12039
12040 if (relocs)
12041 {
12042 if (htab->use_rel)
12043 {
12044 if (!add_dynamic_entry (DT_REL, 0)
12045 || !add_dynamic_entry (DT_RELSZ, 0)
12046 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
12047 return FALSE;
12048 }
12049 else
12050 {
12051 if (!add_dynamic_entry (DT_RELA, 0)
12052 || !add_dynamic_entry (DT_RELASZ, 0)
12053 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
12054 return FALSE;
12055 }
12056 }
12057
12058 /* If any dynamic relocs apply to a read-only section,
12059 then we need a DT_TEXTREL entry. */
12060 if ((info->flags & DF_TEXTREL) == 0)
12061 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
12062 info);
12063
12064 if ((info->flags & DF_TEXTREL) != 0)
12065 {
12066 if (!add_dynamic_entry (DT_TEXTREL, 0))
12067 return FALSE;
12068 }
12069 if (htab->vxworks_p
12070 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
12071 return FALSE;
12072 }
12073#undef add_dynamic_entry
12074
12075 return TRUE;
12076}
12077
12078/* Finish up dynamic symbol handling. We set the contents of various
12079 dynamic sections here. */
12080
12081static bfd_boolean
12082elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
12083 struct bfd_link_info * info,
12084 struct elf_link_hash_entry * h,
12085 Elf_Internal_Sym * sym)
12086{
12087 bfd * dynobj;
12088 struct elf32_arm_link_hash_table *htab;
12089 struct elf32_arm_link_hash_entry *eh;
12090
12091 dynobj = elf_hash_table (info)->dynobj;
12092 htab = elf32_arm_hash_table (info);
12093 if (htab == NULL)
12094 return FALSE;
12095
12096 eh = (struct elf32_arm_link_hash_entry *) h;
12097
12098 if (h->plt.offset != (bfd_vma) -1)
12099 {
12100 asection * splt;
12101 asection * srel;
12102 bfd_byte *loc;
12103 bfd_vma plt_index;
12104 Elf_Internal_Rela rel;
12105
12106 /* This symbol has an entry in the procedure linkage table. Set
12107 it up. */
12108
12109 BFD_ASSERT (h->dynindx != -1);
12110
12111 splt = bfd_get_section_by_name (dynobj, ".plt");
12112 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
12113 BFD_ASSERT (splt != NULL && srel != NULL);
12114
12115 /* Fill in the entry in the procedure linkage table. */
12116 if (htab->symbian_p)
12117 {
12118 put_arm_insn (htab, output_bfd,
12119 elf32_arm_symbian_plt_entry[0],
12120 splt->contents + h->plt.offset);
12121 bfd_put_32 (output_bfd,
12122 elf32_arm_symbian_plt_entry[1],
12123 splt->contents + h->plt.offset + 4);
12124
12125 /* Fill in the entry in the .rel.plt section. */
12126 rel.r_offset = (splt->output_section->vma
12127 + splt->output_offset
12128 + h->plt.offset + 4);
12129 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12130
12131 /* Get the index in the procedure linkage table which
12132 corresponds to this symbol. This is the index of this symbol
12133 in all the symbols for which we are making plt entries. The
12134 first entry in the procedure linkage table is reserved. */
12135 plt_index = ((h->plt.offset - htab->plt_header_size)
12136 / htab->plt_entry_size);
12137 }
12138 else
12139 {
12140 bfd_vma got_offset, got_address, plt_address;
12141 bfd_vma got_displacement;
12142 asection * sgot;
12143 bfd_byte * ptr;
12144
12145 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12146 BFD_ASSERT (sgot != NULL);
12147
12148 /* Get the offset into the .got.plt table of the entry that
12149 corresponds to this function. */
12150 got_offset = eh->plt_got_offset;
12151
12152 /* Get the index in the procedure linkage table which
12153 corresponds to this symbol. This is the index of this symbol
12154 in all the symbols for which we are making plt entries. The
12155 first three entries in .got.plt are reserved; after that
12156 symbols appear in the same order as in .plt. */
12157 plt_index = (got_offset - 12) / 4;
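/* (For example: with the three reserved words occupying offsets 0-11,
   a got_offset of 12 gives plt_index 0, 16 gives 1, 20 gives 2, ...)  */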
12158
12159 /* Calculate the address of the GOT entry. */
12160 got_address = (sgot->output_section->vma
12161 + sgot->output_offset
12162 + got_offset);
12163
12164 /* ...and the address of the PLT entry. */
12165 plt_address = (splt->output_section->vma
12166 + splt->output_offset
12167 + h->plt.offset);
12168
12169 ptr = htab->splt->contents + h->plt.offset;
12170 if (htab->vxworks_p && info->shared)
12171 {
12172 unsigned int i;
12173 bfd_vma val;
12174
12175 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12176 {
12177 val = elf32_arm_vxworks_shared_plt_entry[i];
12178 if (i == 2)
12179 val |= got_address - sgot->output_section->vma;
12180 if (i == 5)
12181 val |= plt_index * RELOC_SIZE (htab);
12182 if (i == 2 || i == 5)
12183 bfd_put_32 (output_bfd, val, ptr);
12184 else
12185 put_arm_insn (htab, output_bfd, val, ptr);
12186 }
12187 }
12188 else if (htab->vxworks_p)
12189 {
12190 unsigned int i;
12191 bfd_vma val;
12192
12193 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12194 {
12195 val = elf32_arm_vxworks_exec_plt_entry[i];
12196 if (i == 2)
12197 val |= got_address;
12198 if (i == 4)
12199 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12200 if (i == 5)
12201 val |= plt_index * RELOC_SIZE (htab);
12202 if (i == 2 || i == 5)
12203 bfd_put_32 (output_bfd, val, ptr);
12204 else
12205 put_arm_insn (htab, output_bfd, val, ptr);
12206 }
12207
12208 loc = (htab->srelplt2->contents
12209 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12210
12211 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12212 referencing the GOT for this PLT entry. */
12213 rel.r_offset = plt_address + 8;
12214 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12215 rel.r_addend = got_offset;
12216 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12217 loc += RELOC_SIZE (htab);
12218
12219 /* Create the R_ARM_ABS32 relocation referencing the
12220 beginning of the PLT for this GOT entry. */
12221 rel.r_offset = got_address;
12222 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12223 rel.r_addend = 0;
12224 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12225 }
12226 else
12227 {
12228 bfd_signed_vma thumb_refs;
12229 /* Calculate the displacement between the PLT slot and the
12230 entry in the GOT. The eight-byte offset accounts for the
12231 value produced by adding to pc in the first instruction
12232 of the PLT stub. */
12233 got_displacement = got_address - (plt_address + 8);
12234
12235 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
12236
12237 thumb_refs = eh->plt_thumb_refcount;
12238 if (!htab->use_blx)
12239 thumb_refs += eh->plt_maybe_thumb_refcount;
12240
12241 if (thumb_refs > 0)
12242 {
12243 put_thumb_insn (htab, output_bfd,
12244 elf32_arm_plt_thumb_stub[0], ptr - 4);
12245 put_thumb_insn (htab, output_bfd,
12246 elf32_arm_plt_thumb_stub[1], ptr - 2);
12247 }
12248
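/* (Worked example, assuming the usual add/add/ldr PLT entry: a
   displacement of 0x0812345 is split into its top eight bits (0x08,
   first insn), middle eight bits (0x12, second insn) and low twelve
   bits (0x345, the ldr offset), which the entry reassembles at run
   time.)  */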
12249 put_arm_insn (htab, output_bfd,
12250 elf32_arm_plt_entry[0]
12251 | ((got_displacement & 0x0ff00000) >> 20),
12252 ptr + 0);
12253 put_arm_insn (htab, output_bfd,
12254 elf32_arm_plt_entry[1]
12255 | ((got_displacement & 0x000ff000) >> 12),
12256 ptr + 4);
12257 put_arm_insn (htab, output_bfd,
12258 elf32_arm_plt_entry[2]
12259 | (got_displacement & 0x00000fff),
12260 ptr + 8);
12261#ifdef FOUR_WORD_PLT
12262 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12263#endif
12264 }
12265
12266 /* Fill in the entry in the global offset table. */
12267 bfd_put_32 (output_bfd,
12268 (splt->output_section->vma
12269 + splt->output_offset),
12270 sgot->contents + got_offset);
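/* (This initial value points at the start of .plt, i.e. the lazy
   binding stub; the dynamic linker replaces it with the real function
   address when it resolves the R_ARM_JUMP_SLOT reloc emitted below.)  */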
12271
12272 /* Fill in the entry in the .rel(a).plt section. */
12273 rel.r_addend = 0;
12274 rel.r_offset = got_address;
12275 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12276 }
12277
12278 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12279 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12280
12281 if (!h->def_regular)
12282 {
12283 /* Mark the symbol as undefined, rather than as defined in
12284 the .plt section. Leave the value alone. */
12285 sym->st_shndx = SHN_UNDEF;
12286 /* If the symbol is weak, we do need to clear the value.
12287 Otherwise, the PLT entry would provide a definition for
12288 the symbol even if the symbol wasn't defined anywhere,
12289 and so a pointer to the symbol would never compare equal to NULL. */
12290 if (!h->ref_regular_nonweak)
12291 sym->st_value = 0;
12292 }
12293 }
12294
12295 if (h->got.offset != (bfd_vma) -1
12296 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12297 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12298 {
12299 asection * sgot;
12300 asection * srel;
12301 Elf_Internal_Rela rel;
12302 bfd_byte *loc;
12303 bfd_vma offset;
12304
12305 /* This symbol has an entry in the global offset table. Set it
12306 up. */
12307 sgot = bfd_get_section_by_name (dynobj, ".got");
12308 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12309 BFD_ASSERT (sgot != NULL && srel != NULL);
12310
12311 offset = (h->got.offset & ~(bfd_vma) 1);
12312 rel.r_addend = 0;
12313 rel.r_offset = (sgot->output_section->vma
12314 + sgot->output_offset
12315 + offset);
12316
12317 /* If this is a static link, or it is a -Bsymbolic link and the
12318 symbol is defined locally or was forced to be local because
12319 of a version file, we just want to emit a RELATIVE reloc.
12320 The entry in the global offset table will already have been
12321 initialized in the relocate_section function. */
12322 if (info->shared
12323 && SYMBOL_REFERENCES_LOCAL (info, h))
12324 {
12325 BFD_ASSERT ((h->got.offset & 1) != 0);
12326 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12327 if (!htab->use_rel)
12328 {
12329 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12330 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12331 }
12332 }
12333 else
12334 {
12335 BFD_ASSERT ((h->got.offset & 1) == 0);
12336 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12337 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12338 }
12339
12340 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12341 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12342 }
12343
12344 if (h->needs_copy)
12345 {
12346 asection * s;
12347 Elf_Internal_Rela rel;
12348 bfd_byte *loc;
12349
12350 /* This symbol needs a copy reloc. Set it up. */
12351 BFD_ASSERT (h->dynindx != -1
12352 && (h->root.type == bfd_link_hash_defined
12353 || h->root.type == bfd_link_hash_defweak));
12354
12355 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12356 RELOC_SECTION (htab, ".bss"));
12357 BFD_ASSERT (s != NULL);
12358
12359 rel.r_addend = 0;
12360 rel.r_offset = (h->root.u.def.value
12361 + h->root.u.def.section->output_section->vma
12362 + h->root.u.def.section->output_offset);
12363 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12364 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12365 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12366 }
12367
12368 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12369 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12370 to the ".got" section. */
12371 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12372 || (!htab->vxworks_p && h == htab->root.hgot))
12373 sym->st_shndx = SHN_ABS;
12374
12375 return TRUE;
12376}
12377
12378/* Finish up the dynamic sections. */
12379
12380static bfd_boolean
12381elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12382{
12383 bfd * dynobj;
12384 asection * sgot;
12385 asection * sdyn;
12386 struct elf32_arm_link_hash_table *htab;
12387
12388 htab = elf32_arm_hash_table (info);
12389 if (htab == NULL)
12390 return FALSE;
12391
12392 dynobj = elf_hash_table (info)->dynobj;
12393
12394 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12395 BFD_ASSERT (htab->symbian_p || sgot != NULL);
12396 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12397
12398 if (elf_hash_table (info)->dynamic_sections_created)
12399 {
12400 asection *splt;
12401 Elf32_External_Dyn *dyncon, *dynconend;
12402
12403 splt = bfd_get_section_by_name (dynobj, ".plt");
12404 BFD_ASSERT (splt != NULL && sdyn != NULL);
12405
12406 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12407 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12408
12409 for (; dyncon < dynconend; dyncon++)
12410 {
12411 Elf_Internal_Dyn dyn;
12412 const char * name;
12413 asection * s;
12414
12415 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12416
12417 switch (dyn.d_tag)
12418 {
12419 unsigned int type;
12420
12421 default:
12422 if (htab->vxworks_p
12423 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12424 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12425 break;
12426
12427 case DT_HASH:
12428 name = ".hash";
12429 goto get_vma_if_bpabi;
12430 case DT_STRTAB:
12431 name = ".dynstr";
12432 goto get_vma_if_bpabi;
12433 case DT_SYMTAB:
12434 name = ".dynsym";
12435 goto get_vma_if_bpabi;
12436 case DT_VERSYM:
12437 name = ".gnu.version";
12438 goto get_vma_if_bpabi;
12439 case DT_VERDEF:
12440 name = ".gnu.version_d";
12441 goto get_vma_if_bpabi;
12442 case DT_VERNEED:
12443 name = ".gnu.version_r";
12444 goto get_vma_if_bpabi;
12445
12446 case DT_PLTGOT:
12447 name = ".got";
12448 goto get_vma;
12449 case DT_JMPREL:
12450 name = RELOC_SECTION (htab, ".plt");
12451 get_vma:
12452 s = bfd_get_section_by_name (output_bfd, name);
12453 BFD_ASSERT (s != NULL);
12454 if (!htab->symbian_p)
12455 dyn.d_un.d_ptr = s->vma;
12456 else
12457 /* In the BPABI, tags in the PT_DYNAMIC section point
12458 at the file offset, not the memory address, for the
12459 convenience of the post linker. */
12460 dyn.d_un.d_ptr = s->filepos;
12461 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12462 break;
12463
12464 get_vma_if_bpabi:
12465 if (htab->symbian_p)
12466 goto get_vma;
12467 break;
12468
12469 case DT_PLTRELSZ:
12470 s = bfd_get_section_by_name (output_bfd,
12471 RELOC_SECTION (htab, ".plt"));
12472 BFD_ASSERT (s != NULL);
12473 dyn.d_un.d_val = s->size;
12474 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12475 break;
12476
12477 case DT_RELSZ:
12478 case DT_RELASZ:
12479 if (!htab->symbian_p)
12480 {
12481 /* My reading of the SVR4 ABI indicates that the
12482 procedure linkage table relocs (DT_JMPREL) should be
12483 included in the overall relocs (DT_REL). This is
12484 what Solaris does. However, UnixWare can not handle
12485 that case. Therefore, we override the DT_RELSZ entry
12486 here to make it not include the JMPREL relocs. Since
12487 the linker script arranges for .rel(a).plt to follow all
12488 other relocation sections, we don't have to worry
12489 about changing the DT_REL entry. */
12490 s = bfd_get_section_by_name (output_bfd,
12491 RELOC_SECTION (htab, ".plt"));
12492 if (s != NULL)
12493 dyn.d_un.d_val -= s->size;
12494 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12495 break;
12496 }
12497 /* Fall through. */
12498
12499 case DT_REL:
12500 case DT_RELA:
12501 /* In the BPABI, the DT_REL tag must point at the file
12502 offset, not the VMA, of the first relocation
12503 section. So, we use code similar to that in
12504 elflink.c, but do not check for SHF_ALLOC on the
12505 relocation section, since relocation sections are
12506 never allocated under the BPABI. The comments above
12507 about UnixWare notwithstanding, we include all of the
12508 relocations here. */
12509 if (htab->symbian_p)
12510 {
12511 unsigned int i;
12512 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12513 ? SHT_REL : SHT_RELA);
12514 dyn.d_un.d_val = 0;
12515 for (i = 1; i < elf_numsections (output_bfd); i++)
12516 {
12517 Elf_Internal_Shdr *hdr
12518 = elf_elfsections (output_bfd)[i];
12519 if (hdr->sh_type == type)
12520 {
12521 if (dyn.d_tag == DT_RELSZ
12522 || dyn.d_tag == DT_RELASZ)
12523 dyn.d_un.d_val += hdr->sh_size;
12524 else if ((ufile_ptr) hdr->sh_offset
12525 <= dyn.d_un.d_val - 1)
12526 dyn.d_un.d_val = hdr->sh_offset;
12527 }
12528 }
12529 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12530 }
12531 break;
12532
12533 /* Set the bottom bit of DT_INIT/FINI if the
12534 corresponding function is Thumb. */
12535 case DT_INIT:
12536 name = info->init_function;
12537 goto get_sym;
12538 case DT_FINI:
12539 name = info->fini_function;
12540 get_sym:
12541 /* If it wasn't set by elf_bfd_final_link
12542 then there is nothing to adjust. */
12543 if (dyn.d_un.d_val != 0)
12544 {
12545 struct elf_link_hash_entry * eh;
12546
12547 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12548 FALSE, FALSE, TRUE);
12549 if (eh != NULL
12550 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12551 {
12552 dyn.d_un.d_val |= 1;
12553 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12554 }
12555 }
12556 break;
12557 }
12558 }
12559
12560 /* Fill in the first entry in the procedure linkage table. */
12561 if (splt->size > 0 && htab->plt_header_size)
12562 {
12563 const bfd_vma *plt0_entry;
12564 bfd_vma got_address, plt_address, got_displacement;
12565
12566 /* Calculate the addresses of the GOT and PLT. */
12567 got_address = sgot->output_section->vma + sgot->output_offset;
12568 plt_address = splt->output_section->vma + splt->output_offset;
12569
12570 if (htab->vxworks_p)
12571 {
12572 /* The VxWorks GOT is relocated by the dynamic linker.
12573 Therefore, we must emit relocations rather than simply
12574 computing the values now. */
12575 Elf_Internal_Rela rel;
12576
12577 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12578 put_arm_insn (htab, output_bfd, plt0_entry[0],
12579 splt->contents + 0);
12580 put_arm_insn (htab, output_bfd, plt0_entry[1],
12581 splt->contents + 4);
12582 put_arm_insn (htab, output_bfd, plt0_entry[2],
12583 splt->contents + 8);
12584 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12585
12586 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12587 rel.r_offset = plt_address + 12;
12588 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12589 rel.r_addend = 0;
12590 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12591 htab->srelplt2->contents);
12592 }
12593 else
12594 {
12595 got_displacement = got_address - (plt_address + 16);
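/* (Sketch of the 16-byte bias, assuming the default non-FOUR_WORD_PLT
   header: the displacement word written at offset 16 below is loaded
   by "ldr lr, [pc, #4]" at offset 4 and added to pc by "add lr, pc, lr"
   at offset 8, where pc reads as plt_address + 16, leaving lr equal to
   got_address.)  */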
12596
12597 plt0_entry = elf32_arm_plt0_entry;
12598 put_arm_insn (htab, output_bfd, plt0_entry[0],
12599 splt->contents + 0);
12600 put_arm_insn (htab, output_bfd, plt0_entry[1],
12601 splt->contents + 4);
12602 put_arm_insn (htab, output_bfd, plt0_entry[2],
12603 splt->contents + 8);
12604 put_arm_insn (htab, output_bfd, plt0_entry[3],
12605 splt->contents + 12);
12606
12607#ifdef FOUR_WORD_PLT
12608 /* The displacement value goes in the otherwise-unused
12609 last word of the second entry. */
12610 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12611#else
12612 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12613#endif
12614 }
12615 }
12616
12617 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12618 really seem like the right value. */
12619 if (splt->output_section->owner == output_bfd)
12620 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12621
12622 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12623 {
12624 /* Correct the .rel(a).plt.unloaded relocations. They will have
12625 incorrect symbol indexes. */
12626 int num_plts;
12627 unsigned char *p;
12628
12629 num_plts = ((htab->splt->size - htab->plt_header_size)
12630 / htab->plt_entry_size);
12631 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12632
12633 for (; num_plts; num_plts--)
12634 {
12635 Elf_Internal_Rela rel;
12636
12637 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12638 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12639 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12640 p += RELOC_SIZE (htab);
12641
12642 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12643 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12644 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12645 p += RELOC_SIZE (htab);
12646 }
12647 }
12648 }
12649
12650 /* Fill in the first three entries in the global offset table. */
12651 if (sgot)
12652 {
12653 if (sgot->size > 0)
12654 {
12655 if (sdyn == NULL)
12656 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12657 else
12658 bfd_put_32 (output_bfd,
12659 sdyn->output_section->vma + sdyn->output_offset,
12660 sgot->contents);
12661 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12662 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12663 }
12664
12665 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12666 }
12667
12668 return TRUE;
12669}
12670
12671static void
12672elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12673{
12674 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12675 struct elf32_arm_link_hash_table *globals;
12676
12677 i_ehdrp = elf_elfheader (abfd);
12678
12679 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12680 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12681 else
12682 i_ehdrp->e_ident[EI_OSABI] = 0;
12683 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12684
12685 if (link_info)
12686 {
12687 globals = elf32_arm_hash_table (link_info);
12688 if (globals != NULL && globals->byteswap_code)
12689 i_ehdrp->e_flags |= EF_ARM_BE8;
12690 }
12691}
12692
12693static enum elf_reloc_type_class
12694elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12695{
12696 switch ((int) ELF32_R_TYPE (rela->r_info))
12697 {
12698 case R_ARM_RELATIVE:
12699 return reloc_class_relative;
12700 case R_ARM_JUMP_SLOT:
12701 return reloc_class_plt;
12702 case R_ARM_COPY:
12703 return reloc_class_copy;
12704 default:
12705 return reloc_class_normal;
12706 }
12707}
12708
12709/* Set extra flags for a section: note sections are marked link-once so that duplicate copies with identical contents are merged. */
12710
12711static bfd_boolean
12712elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12713{
12714 if (hdr->sh_type == SHT_NOTE)
12715 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12716
12717 return TRUE;
12718}
12719
12720static void
12721elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12722{
12723 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12724}
12725
12726/* Return TRUE if this is an unwinding table entry. */
12727
12728static bfd_boolean
12729is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12730{
12731 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12732 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12733}
12734
12735
12736/* Set the type and flags for an ARM section. We do this by looking at
12737 the section name, which is a hack, but ought to work. */
12738
12739static bfd_boolean
12740elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12741{
12742 const char * name;
12743
12744 name = bfd_get_section_name (abfd, sec);
12745
12746 if (is_arm_elf_unwind_section_name (abfd, name))
12747 {
12748 hdr->sh_type = SHT_ARM_EXIDX;
12749 hdr->sh_flags |= SHF_LINK_ORDER;
12750 }
12751 return TRUE;
12752}
12753
12754/* Handle an ARM specific section when reading an object file. This is
12755 called when bfd_section_from_shdr finds a section with an unknown
12756 type. */
12757
12758static bfd_boolean
12759elf32_arm_section_from_shdr (bfd *abfd,
12760 Elf_Internal_Shdr * hdr,
12761 const char *name,
12762 int shindex)
12763{
12764 /* There ought to be a place to keep ELF backend specific flags, but
12765 at the moment there isn't one. We just keep track of the
12766 sections by their name, instead. Fortunately, the ABI gives
12767 names for all the ARM specific sections, so we will probably get
12768 away with this. */
12769 switch (hdr->sh_type)
12770 {
12771 case SHT_ARM_EXIDX:
12772 case SHT_ARM_PREEMPTMAP:
12773 case SHT_ARM_ATTRIBUTES:
12774 break;
12775
12776 default:
12777 return FALSE;
12778 }
12779
12780 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12781 return FALSE;
12782
12783 return TRUE;
12784}
12785
12786static _arm_elf_section_data *
12787get_arm_elf_section_data (asection * sec)
12788{
12789 if (sec && sec->owner && is_arm_elf (sec->owner))
12790 return elf32_arm_section_data (sec);
12791 else
12792 return NULL;
12793}
12794
12795typedef struct
12796{
12797 void *finfo;
12798 struct bfd_link_info *info;
12799 asection *sec;
12800 int sec_shndx;
12801 int (*func) (void *, const char *, Elf_Internal_Sym *,
12802 asection *, struct elf_link_hash_entry *);
12803} output_arch_syminfo;
12804
12805enum map_symbol_type
12806{
12807 ARM_MAP_ARM,
12808 ARM_MAP_THUMB,
12809 ARM_MAP_DATA
12810};
12811
12812
12813/* Output a single mapping symbol. */
12814
12815static bfd_boolean
12816elf32_arm_output_map_sym (output_arch_syminfo *osi,
12817 enum map_symbol_type type,
12818 bfd_vma offset)
12819{
12820 static const char *names[3] = {"$a", "$t", "$d"};
12821 Elf_Internal_Sym sym;
12822
12823 sym.st_value = osi->sec->output_section->vma
12824 + osi->sec->output_offset
12825 + offset;
12826 sym.st_size = 0;
12827 sym.st_other = 0;
12828 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12829 sym.st_shndx = osi->sec_shndx;
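/* names[type][1] passes just the letter ('a', 't' or 'd') to the section
   map; the full "$a"/"$t"/"$d" string is used as the symbol name below.  */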
12830 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
12831 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12832}
12833
12834
12835/* Output mapping symbols for PLT entries associated with H. */
12836
12837static bfd_boolean
12838elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12839{
12840 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12841 struct elf32_arm_link_hash_table *htab;
12842 struct elf32_arm_link_hash_entry *eh;
12843 bfd_vma addr;
12844
12845 if (h->root.type == bfd_link_hash_indirect)
12846 return TRUE;
12847
12848 if (h->root.type == bfd_link_hash_warning)
12849 /* When warning symbols are created, they **replace** the "real"
12850 entry in the hash table, thus we never get to see the real
12851 symbol in a hash traversal. So look at it now. */
12852 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12853
12854 if (h->plt.offset == (bfd_vma) -1)
12855 return TRUE;
12856
12857 htab = elf32_arm_hash_table (osi->info);
12858 if (htab == NULL)
12859 return FALSE;
12860
12861 eh = (struct elf32_arm_link_hash_entry *) h;
12862 addr = h->plt.offset;
12863 if (htab->symbian_p)
12864 {
12865 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12866 return FALSE;
12867 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12868 return FALSE;
12869 }
12870 else if (htab->vxworks_p)
12871 {
12872 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12873 return FALSE;
12874 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12875 return FALSE;
12876 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12877 return FALSE;
12878 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12879 return FALSE;
12880 }
12881 else
12882 {
12883 bfd_signed_vma thumb_refs;
12884
12885 thumb_refs = eh->plt_thumb_refcount;
12886 if (!htab->use_blx)
12887 thumb_refs += eh->plt_maybe_thumb_refcount;
12888
12889 if (thumb_refs > 0)
12890 {
12891 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12892 return FALSE;
12893 }
12894#ifdef FOUR_WORD_PLT
12895 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12896 return FALSE;
12897 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12898 return FALSE;
12899#else
12900 /* A three-word PLT with no Thumb thunk contains only ARM code, so we
12901 only need to output a mapping symbol for the first PLT entry and for
12902 entries with Thumb thunks. */
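/* (The addr == 20 test assumes the default 20-byte PLT header, so the
   first PLT entry is the one starting at offset 20.)  */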
12903 if (thumb_refs > 0 || addr == 20)
12904 {
12905 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12906 return FALSE;
12907 }
12908#endif
12909 }
12910
12911 return TRUE;
12912}
12913
12914/* Output a single local symbol for a generated stub. */
12915
12916static bfd_boolean
12917elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12918 bfd_vma offset, bfd_vma size)
12919{
12920 Elf_Internal_Sym sym;
12921
12922 sym.st_value = osi->sec->output_section->vma
12923 + osi->sec->output_offset
12924 + offset;
12925 sym.st_size = size;
12926 sym.st_other = 0;
12927 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12928 sym.st_shndx = osi->sec_shndx;
12929 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12930}
12931
12932static bfd_boolean
12933arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12934 void * in_arg)
12935{
12936 struct elf32_arm_stub_hash_entry *stub_entry;
12937 asection *stub_sec;
12938 bfd_vma addr;
12939 char *stub_name;
12940 output_arch_syminfo *osi;
12941 const insn_sequence *template_sequence;
12942 enum stub_insn_type prev_type;
12943 int size;
12944 int i;
12945 enum map_symbol_type sym_type;
12946
12947 /* Massage our args to the form they really have. */
12948 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12949 osi = (output_arch_syminfo *) in_arg;
12950
12951 stub_sec = stub_entry->stub_sec;
12952
12953 /* Ensure this stub is attached to the current section being
12954 processed. */
12955 if (stub_sec != osi->sec)
12956 return TRUE;
12957
12958 addr = (bfd_vma) stub_entry->stub_offset;
12959 stub_name = stub_entry->output_name;
12960
12961 template_sequence = stub_entry->stub_template;
12962 switch (template_sequence[0].type)
12963 {
12964 case ARM_TYPE:
12965 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12966 return FALSE;
12967 break;
12968 case THUMB16_TYPE:
12969 case THUMB32_TYPE:
12970 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12971 stub_entry->stub_size))
12972 return FALSE;
12973 break;
12974 default:
12975 BFD_FAIL ();
12976 return FALSE;
12977 }
12978
12979 prev_type = DATA_TYPE;
12980 size = 0;
12981 for (i = 0; i < stub_entry->stub_template_size; i++)
12982 {
12983 switch (template_sequence[i].type)
12984 {
12985 case ARM_TYPE:
12986 sym_type = ARM_MAP_ARM;
12987 break;
12988
12989 case THUMB16_TYPE:
12990 case THUMB32_TYPE:
12991 sym_type = ARM_MAP_THUMB;
12992 break;
12993
12994 case DATA_TYPE:
12995 sym_type = ARM_MAP_DATA;
12996 break;
12997
12998 default:
12999 BFD_FAIL ();
13000 return FALSE;
13001 }
13002
13003 if (template_sequence[i].type != prev_type)
13004 {
13005 prev_type = template_sequence[i].type;
13006 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
13007 return FALSE;
13008 }
13009
13010 switch (template_sequence[i].type)
13011 {
13012 case ARM_TYPE:
13013 case THUMB32_TYPE:
13014 size += 4;
13015 break;
13016
13017 case THUMB16_TYPE:
13018 size += 2;
13019 break;
13020
13021 case DATA_TYPE:
13022 size += 4;
13023 break;
13024
13025 default:
13026 BFD_FAIL ();
13027 return FALSE;
13028 }
13029 }
13030
13031 return TRUE;
13032}
13033
13034/* Output mapping symbols for linker generated sections,
13035 and for those data-only sections that do not have a
13036 $d. */
13037
13038static bfd_boolean
13039elf32_arm_output_arch_local_syms (bfd *output_bfd,
13040 struct bfd_link_info *info,
13041 void *finfo,
13042 int (*func) (void *, const char *,
13043 Elf_Internal_Sym *,
13044 asection *,
13045 struct elf_link_hash_entry *))
13046{
13047 output_arch_syminfo osi;
13048 struct elf32_arm_link_hash_table *htab;
13049 bfd_vma offset;
13050 bfd_size_type size;
13051 bfd *input_bfd;
13052
13053 htab = elf32_arm_hash_table (info);
13054 if (htab == NULL)
13055 return FALSE;
13056
13057 check_use_blx (htab);
13058
13059 osi.finfo = finfo;
13060 osi.info = info;
13061 osi.func = func;
13062
13063 /* Add a $d mapping symbol to data-only sections that
13064 don't have any mapping symbol. This may result in (harmless) redundant
13065 mapping symbols. */
13066 for (input_bfd = info->input_bfds;
13067 input_bfd != NULL;
13068 input_bfd = input_bfd->link_next)
13069 {
13070 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
13071 for (osi.sec = input_bfd->sections;
13072 osi.sec != NULL;
13073 osi.sec = osi.sec->next)
13074 {
13075 if (osi.sec->output_section != NULL
13076 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
13077 != 0)
13078 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
13079 == SEC_HAS_CONTENTS
13080 && get_arm_elf_section_data (osi.sec) != NULL
13081 && get_arm_elf_section_data (osi.sec)->mapcount == 0
13082 && osi.sec->size > 0)
13083 {
13084 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13085 (output_bfd, osi.sec->output_section);
13086 if (osi.sec_shndx != (int)SHN_BAD)
13087 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
13088 }
13089 }
13090 }
13091
13092 /* ARM->Thumb glue. */
13093 if (htab->arm_glue_size > 0)
13094 {
13095 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13096 ARM2THUMB_GLUE_SECTION_NAME);
13097
13098 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13099 (output_bfd, osi.sec->output_section);
13100 if (info->shared || htab->root.is_relocatable_executable
13101 || htab->pic_veneer)
13102 size = ARM2THUMB_PIC_GLUE_SIZE;
13103 else if (htab->use_blx)
13104 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
13105 else
13106 size = ARM2THUMB_STATIC_GLUE_SIZE;
13107
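/* Every ARM->Thumb veneer ends with a literal word holding the target
   address, hence a $d mapping symbol four bytes before the end of
   each entry.  */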
13108 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13109 {
13110 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13111 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13112 }
13113 }
13114
13115 /* Thumb->ARM glue. */
13116 if (htab->thumb_glue_size > 0)
13117 {
13118 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13119 THUMB2ARM_GLUE_SECTION_NAME);
13120
13121 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13122 (output_bfd, osi.sec->output_section);
13123 size = THUMB2ARM_GLUE_SIZE;
13124
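/* Each Thumb->ARM veneer starts with four bytes of Thumb code followed
   by ARM code, hence $t at the start and $a four bytes in.  */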
13125 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13126 {
13127 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13128 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13129 }
13130 }
13131
13132 /* ARMv4 BX veneers. */
13133 if (htab->bx_glue_size > 0)
13134 {
13135 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13136 ARM_BX_GLUE_SECTION_NAME);
13137
13138 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13139 (output_bfd, osi.sec->output_section);
13140
13141 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13142 }
13143
13144 /* Long call stubs. */
13145 if (htab->stub_bfd && htab->stub_bfd->sections)
13146 {
13147 asection* stub_sec;
13148
13149 for (stub_sec = htab->stub_bfd->sections;
13150 stub_sec != NULL;
13151 stub_sec = stub_sec->next)
13152 {
13153 /* Ignore non-stub sections. */
13154 if (!strstr (stub_sec->name, STUB_SUFFIX))
13155 continue;
13156
13157 osi.sec = stub_sec;
13158
13159 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13160 (output_bfd, osi.sec->output_section);
13161
13162 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13163 }
13164 }
13165
13166 /* Finally, output mapping symbols for the PLT. */
13167 if (!htab->splt || htab->splt->size == 0)
13168 return TRUE;
13169
13170 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13171 htab->splt->output_section);
13172 osi.sec = htab->splt;
13173 /* Output mapping symbols for the plt header. SymbianOS does not have a
13174 plt header. */
13175 if (htab->vxworks_p)
13176 {
13177 /* VxWorks shared libraries have no PLT header. */
13178 if (!info->shared)
13179 {
13180 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13181 return FALSE;
13182 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13183 return FALSE;
13184 }
13185 }
13186 else if (!htab->symbian_p)
13187 {
13188 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13189 return FALSE;
13190#ifndef FOUR_WORD_PLT
13191 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13192 return FALSE;
13193#endif
13194 }
13195
13196 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13197 return TRUE;
13198}
13199
13200/* Allocate target specific section data. */
13201
13202static bfd_boolean
13203elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13204{
13205 if (!sec->used_by_bfd)
13206 {
13207 _arm_elf_section_data *sdata;
13208 bfd_size_type amt = sizeof (*sdata);
13209
13210 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
13211 if (sdata == NULL)
13212 return FALSE;
13213 sec->used_by_bfd = sdata;
13214 }
13215
13216 return _bfd_elf_new_section_hook (abfd, sec);
13217}
13218
13219
13220/* Used to order a list of mapping symbols by address. */
13221
13222static int
13223elf32_arm_compare_mapping (const void * a, const void * b)
13224{
13225 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13226 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13227
13228 if (amap->vma > bmap->vma)
13229 return 1;
13230 else if (amap->vma < bmap->vma)
13231 return -1;
13232 else if (amap->type > bmap->type)
13233 /* Ensure results do not depend on the host qsort for objects with
13234 multiple mapping symbols at the same address by sorting on type
13235 after vma. */
13236 return 1;
13237 else if (amap->type < bmap->type)
13238 return -1;
13239 else
13240 return 0;
13241}
13242
13243/* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13244
13245static unsigned long
13246offset_prel31 (unsigned long addr, bfd_vma offset)
13247{
13248 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13249}
13250
13251/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13252 relocations. */
13253
13254static void
13255copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13256{
13257 unsigned long first_word = bfd_get_32 (output_bfd, from);
13258 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13259
13260 /* High bit of first word is supposed to be zero. */
13261 if ((first_word & 0x80000000ul) == 0)
13262 first_word = offset_prel31 (first_word, offset);
13263
13264 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
13265 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
13266 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13267 second_word = offset_prel31 (second_word, offset);
13268
13269 bfd_put_32 (output_bfd, first_word, to);
13270 bfd_put_32 (output_bfd, second_word, to + 4);
13271}
13272
13273/* Data for make_branch_to_a8_stub(). */
13274
13275struct a8_branch_to_stub_data {
13276 asection *writing_section;
13277 bfd_byte *contents;
13278};
13279
13280
13281/* Helper to insert branches to Cortex-A8 erratum stubs in the right
13282 places for a particular section. */
13283
13284static bfd_boolean
13285make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13286 void *in_arg)
13287{
13288 struct elf32_arm_stub_hash_entry *stub_entry;
13289 struct a8_branch_to_stub_data *data;
13290 bfd_byte *contents;
13291 unsigned long branch_insn;
13292 bfd_vma veneered_insn_loc, veneer_entry_loc;
13293 bfd_signed_vma branch_offset;
13294 bfd *abfd;
13295 unsigned int target;
13296
13297 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13298 data = (struct a8_branch_to_stub_data *) in_arg;
13299
13300 if (stub_entry->target_section != data->writing_section
13301 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
13302 return TRUE;
13303
13304 contents = data->contents;
13305
13306 veneered_insn_loc = stub_entry->target_section->output_section->vma
13307 + stub_entry->target_section->output_offset
13308 + stub_entry->target_value;
13309
13310 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13311 + stub_entry->stub_sec->output_offset
13312 + stub_entry->stub_offset;
13313
13314 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13315 veneered_insn_loc &= ~3u;
13316
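/* In Thumb state the PC reads as the address of the branch plus 4,
   hence the -4 adjustment below.  */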
13317 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13318
13319 abfd = stub_entry->target_section->owner;
13320 target = stub_entry->target_value;
13321
13322 /* We attempt to avoid this condition by setting stubs_always_after_branch
13323 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13324 This check is just to be on the safe side... */
13325 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13326 {
13327 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13328 "allocated in unsafe location"), abfd);
13329 return FALSE;
13330 }
13331
13332 switch (stub_entry->stub_type)
13333 {
13334 case arm_stub_a8_veneer_b:
13335 case arm_stub_a8_veneer_b_cond:
13336 branch_insn = 0xf0009000;
13337 goto jump24;
13338
13339 case arm_stub_a8_veneer_blx:
13340 branch_insn = 0xf000e800;
13341 goto jump24;
13342
13343 case arm_stub_a8_veneer_bl:
13344 {
13345 unsigned int i1, j1, i2, j2, s;
13346
13347 branch_insn = 0xf000d000;
13348
13349 jump24:
13350 if (branch_offset < -16777216 || branch_offset > 16777214)
13351 {
13352 /* There's not much we can do apart from complain if this
13353 happens. */
13354 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13355 "of range (input file too large)"), abfd);
13356 return FALSE;
13357 }
13358
13359 /* i1 = not(j1 eor s), so:
13360 not i1 = j1 eor s
13361 j1 = (not i1) eor s. */
13362
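/* branch_insn combines the two Thumb halfwords as (hw1 << 16) | hw2:
   imm11 occupies bits 0-10, imm10 bits 16-25, and J2, J1 and S go to
   bits 11, 13 and 26 respectively.  */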
13363 branch_insn |= (branch_offset >> 1) & 0x7ff;
13364 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13365 i2 = (branch_offset >> 22) & 1;
13366 i1 = (branch_offset >> 23) & 1;
13367 s = (branch_offset >> 24) & 1;
13368 j1 = (!i1) ^ s;
13369 j2 = (!i2) ^ s;
13370 branch_insn |= j2 << 11;
13371 branch_insn |= j1 << 13;
13372 branch_insn |= s << 26;
13373 }
13374 break;
13375
13376 default:
13377 BFD_FAIL ();
13378 return FALSE;
13379 }
13380
13381 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
13382 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
13383
13384 return TRUE;
13385}
13386
13387/* Do code byteswapping. Return FALSE afterwards so that the section is
13388 written out as normal. */
13389
13390static bfd_boolean
13391elf32_arm_write_section (bfd *output_bfd,
13392 struct bfd_link_info *link_info,
13393 asection *sec,
13394 bfd_byte *contents)
13395{
13396 unsigned int mapcount, errcount;
13397 _arm_elf_section_data *arm_data;
13398 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13399 elf32_arm_section_map *map;
13400 elf32_vfp11_erratum_list *errnode;
13401 bfd_vma ptr;
13402 bfd_vma end;
13403 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13404 bfd_byte tmp;
13405 unsigned int i;
13406
13407 if (globals == NULL)
13408 return FALSE;
13409
13410 /* If this section has not been allocated an _arm_elf_section_data
13411 structure then we cannot record anything. */
13412 arm_data = get_arm_elf_section_data (sec);
13413 if (arm_data == NULL)
13414 return FALSE;
13415
13416 mapcount = arm_data->mapcount;
13417 map = arm_data->map;
13418 errcount = arm_data->erratumcount;
13419
13420 if (errcount != 0)
13421 {
13422 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13423
13424 for (errnode = arm_data->erratumlist; errnode != 0;
13425 errnode = errnode->next)
13426 {
13427 bfd_vma target = errnode->vma - offset;
13428
13429 switch (errnode->type)
13430 {
13431 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13432 {
13433 bfd_vma branch_to_veneer;
13434 /* Original condition code of instruction, plus bit mask for
13435 ARM B instruction. */
13436 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13437 | 0x0a000000;
13438
13439 /* The instruction is before the label. */
13440 target -= 4;
13441
13442 /* Above offset included in -4 below. */
13443 branch_to_veneer = errnode->u.b.veneer->vma
13444 - errnode->vma - 4;
13445
13446 if ((signed) branch_to_veneer < -(1 << 25)
13447 || (signed) branch_to_veneer >= (1 << 25))
13448 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13449 "range"), output_bfd);
13450
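/* The B instruction's 24-bit immediate is a word offset, so drop the
   low two bits of the byte offset.  */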
13451 insn |= (branch_to_veneer >> 2) & 0xffffff;
13452 contents[endianflip ^ target] = insn & 0xff;
13453 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13454 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13455 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13456 }
13457 break;
13458
13459 case VFP11_ERRATUM_ARM_VENEER:
13460 {
13461 bfd_vma branch_from_veneer;
13462 unsigned int insn;
13463
13464 /* Take size of veneer into account. */
13465 branch_from_veneer = errnode->u.v.branch->vma
13466 - errnode->vma - 12;
13467
13468 if ((signed) branch_from_veneer < -(1 << 25)
13469 || (signed) branch_from_veneer >= (1 << 25))
13470 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13471 "range"), output_bfd);
13472
13473 /* Original instruction. */
13474 insn = errnode->u.v.branch->u.b.vfp_insn;
13475 contents[endianflip ^ target] = insn & 0xff;
13476 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13477 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13478 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13479
13480 /* Branch back to insn after original insn. */
13481 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13482 contents[endianflip ^ (target + 4)] = insn & 0xff;
13483 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
13484 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
13485 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
13486 }
13487 break;
13488
13489 default:
13490 abort ();
13491 }
13492 }
13493 }
13494
13495 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13496 {
13497 arm_unwind_table_edit *edit_node
13498 = arm_data->u.exidx.unwind_edit_list;
13499 /* Now, sec->size is the size of the section we will write. The original
13500 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13501 markers) was sec->rawsize. (If we performed no edits, rawsize will be
13502 zero and we should use size instead.) */
13503 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
13504 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13505 unsigned int in_index, out_index;
13506 bfd_vma add_to_offsets = 0;
13507
13508 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13509 {
13510 if (edit_node)
13511 {
13512 unsigned int edit_index = edit_node->index;
13513
13514 if (in_index < edit_index && in_index * 8 < input_size)
13515 {
13516 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13517 contents + in_index * 8, add_to_offsets);
13518 out_index++;
13519 in_index++;
13520 }
13521 else if (in_index == edit_index
13522 || (in_index * 8 >= input_size
13523 && edit_index == UINT_MAX))
13524 {
13525 switch (edit_node->type)
13526 {
13527 case DELETE_EXIDX_ENTRY:
13528 in_index++;
13529 add_to_offsets += 8;
13530 break;
13531
13532 case INSERT_EXIDX_CANTUNWIND_AT_END:
13533 {
13534 asection *text_sec = edit_node->linked_section;
13535 bfd_vma text_offset = text_sec->output_section->vma
13536 + text_sec->output_offset
13537 + text_sec->size;
13538 bfd_vma exidx_offset = offset + out_index * 8;
13539 unsigned long prel31_offset;
13540
13541 /* Note: this is meant to be equivalent to an
13542 R_ARM_PREL31 relocation. These synthetic
13543 EXIDX_CANTUNWIND markers are not relocated by the
13544 usual BFD method. */
13545 prel31_offset = (text_offset - exidx_offset)
13546 & 0x7ffffffful;
13547
13548 /* First address we can't unwind. */
13549 bfd_put_32 (output_bfd, prel31_offset,
13550 &edited_contents[out_index * 8]);
13551
13552 /* Code for EXIDX_CANTUNWIND. */
13553 bfd_put_32 (output_bfd, 0x1,
13554 &edited_contents[out_index * 8 + 4]);
13555
13556 out_index++;
13557 add_to_offsets -= 8;
13558 }
13559 break;
13560 }
13561
13562 edit_node = edit_node->next;
13563 }
13564 }
13565 else
13566 {
13567 /* No more edits, copy remaining entries verbatim. */
13568 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13569 contents + in_index * 8, add_to_offsets);
13570 out_index++;
13571 in_index++;
13572 }
13573 }
13574
13575 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13576 bfd_set_section_contents (output_bfd, sec->output_section,
13577 edited_contents,
13578 (file_ptr) sec->output_offset, sec->size);
13579
13580 return TRUE;
13581 }
13582
13583 /* Fix code to point to Cortex-A8 erratum stubs. */
13584 if (globals->fix_cortex_a8)
13585 {
13586 struct a8_branch_to_stub_data data;
13587
13588 data.writing_section = sec;
13589 data.contents = contents;
13590
13591 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13592 &data);
13593 }
13594
13595 if (mapcount == 0)
13596 return FALSE;
13597
13598 if (globals->byteswap_code)
13599 {
13600 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13601
13602 ptr = map[0].vma;
13603 for (i = 0; i < mapcount; i++)
13604 {
13605 if (i == mapcount - 1)
13606 end = sec->size;
13607 else
13608 end = map[i + 1].vma;
13609
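/* map[i].type holds the mapping symbol class recorded earlier:
   'a' for ARM code, 't' for Thumb code, 'd' for data.  */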
13610 switch (map[i].type)
13611 {
13612 case 'a':
13613 /* Byte swap code words. */
13614 while (ptr + 3 < end)
13615 {
13616 tmp = contents[ptr];
13617 contents[ptr] = contents[ptr + 3];
13618 contents[ptr + 3] = tmp;
13619 tmp = contents[ptr + 1];
13620 contents[ptr + 1] = contents[ptr + 2];
13621 contents[ptr + 2] = tmp;
13622 ptr += 4;
13623 }
13624 break;
13625
13626 case 't':
13627 /* Byte swap code halfwords. */
13628 while (ptr + 1 < end)
13629 {
13630 tmp = contents[ptr];
13631 contents[ptr] = contents[ptr + 1];
13632 contents[ptr + 1] = tmp;
13633 ptr += 2;
13634 }
13635 break;
13636
13637 case 'd':
13638 /* Leave data alone. */
13639 break;
13640 }
13641 ptr = end;
13642 }
13643 }
13644
13645 free (map);
13646 arm_data->mapcount = -1;
13647 arm_data->mapsize = 0;
13648 arm_data->map = NULL;
13649
13650 return FALSE;
13651}
13652
13653/* Display STT_ARM_TFUNC symbols as functions. */
13654
13655static void
13656elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13657 asymbol *asym)
13658{
13659 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13660
13661 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13662 elfsym->symbol.flags |= BSF_FUNCTION;
13663}
13664
13665
13666/* Mangle thumb function symbols as we read them in. */
13667
13668static bfd_boolean
13669elf32_arm_swap_symbol_in (bfd * abfd,
13670 const void *psrc,
13671 const void *pshn,
13672 Elf_Internal_Sym *dst)
13673{
13674 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13675 return FALSE;
13676
13677 /* New EABI objects mark thumb function symbols by setting the low bit of
13678 the address. Turn these into STT_ARM_TFUNC. */
13679 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13680 && (dst->st_value & 1))
13681 {
13682 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13683 dst->st_value &= ~(bfd_vma) 1;
13684 }
13685 return TRUE;
13686}
13687
13688
13689/* Mangle thumb function symbols as we write them out. */
13690
13691static void
13692elf32_arm_swap_symbol_out (bfd *abfd,
13693 const Elf_Internal_Sym *src,
13694 void *cdst,
13695 void *shndx)
13696{
13697 Elf_Internal_Sym newsym;
13698
13699 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13700 of the address set, as per the new EABI. We do this unconditionally
13701 because objcopy does not set the elf header flags until after
13702 it writes out the symbol table. */
13703 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13704 {
13705 newsym = *src;
13706 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13707 if (newsym.st_shndx != SHN_UNDEF)
13708 {
13709 /* Do this only for defined symbols. At link time, the static
13710 linker will simulate the work of the dynamic linker in resolving
13711 symbols and will carry over the thumbness of found symbols to
13712 the output symbol table. It's not clear how this happens, but
13713 the thumbness of undefined symbols may well be different at
13714 runtime, and writing '1' for them would be confusing for users
13715 and possibly for the dynamic linker itself. */
13717 newsym.st_value |= 1;
13718 }
13719
13720 src = &newsym;
13721 }
13722 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13723}
13724
13725/* Add the PT_ARM_EXIDX program header. */
13726
13727static bfd_boolean
13728elf32_arm_modify_segment_map (bfd *abfd,
13729 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13730{
13731 struct elf_segment_map *m;
13732 asection *sec;
13733
13734 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13735 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13736 {
13737 /* If there is already a PT_ARM_EXIDX header, then we do not
13738 want to add another one. This situation arises when running
13739 "strip"; the input binary already has the header. */
13740 m = elf_tdata (abfd)->segment_map;
13741 while (m && m->p_type != PT_ARM_EXIDX)
13742 m = m->next;
13743 if (!m)
13744 {
13745 m = (struct elf_segment_map *)
13746 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13747 if (m == NULL)
13748 return FALSE;
13749 m->p_type = PT_ARM_EXIDX;
13750 m->count = 1;
13751 m->sections[0] = sec;
13752
13753 m->next = elf_tdata (abfd)->segment_map;
13754 elf_tdata (abfd)->segment_map = m;
13755 }
13756 }
13757
13758 return TRUE;
13759}
13760
13761/* We may add a PT_ARM_EXIDX program header. */
13762
13763static int
13764elf32_arm_additional_program_headers (bfd *abfd,
13765 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13766{
13767 asection *sec;
13768
13769 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13770 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13771 return 1;
13772 else
13773 return 0;
13774}
13775
13776/* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13777
13778static bfd_boolean
13779elf32_arm_is_function_type (unsigned int type)
13780{
13781 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13782}
13783
13784/* We use this to override swap_symbol_in and swap_symbol_out. */
13785const struct elf_size_info elf32_arm_size_info =
13786{
13787 sizeof (Elf32_External_Ehdr),
13788 sizeof (Elf32_External_Phdr),
13789 sizeof (Elf32_External_Shdr),
13790 sizeof (Elf32_External_Rel),
13791 sizeof (Elf32_External_Rela),
13792 sizeof (Elf32_External_Sym),
13793 sizeof (Elf32_External_Dyn),
13794 sizeof (Elf_External_Note),
13795 4,
13796 1,
13797 32, 2,
13798 ELFCLASS32, EV_CURRENT,
13799 bfd_elf32_write_out_phdrs,
13800 bfd_elf32_write_shdrs_and_ehdr,
13801 bfd_elf32_checksum_contents,
13802 bfd_elf32_write_relocs,
13803 elf32_arm_swap_symbol_in,
13804 elf32_arm_swap_symbol_out,
13805 bfd_elf32_slurp_reloc_table,
13806 bfd_elf32_slurp_symbol_table,
13807 bfd_elf32_swap_dyn_in,
13808 bfd_elf32_swap_dyn_out,
13809 bfd_elf32_swap_reloc_in,
13810 bfd_elf32_swap_reloc_out,
13811 bfd_elf32_swap_reloca_in,
13812 bfd_elf32_swap_reloca_out
13813};
13814
13815#define ELF_ARCH bfd_arch_arm
13816#define ELF_TARGET_ID ARM_ELF_DATA
13817#define ELF_MACHINE_CODE EM_ARM
13818#ifdef __QNXTARGET__
13819#define ELF_MAXPAGESIZE 0x1000
13820#else
13821#define ELF_MAXPAGESIZE 0x8000
13822#endif
13823#define ELF_MINPAGESIZE 0x1000
13824#define ELF_COMMONPAGESIZE 0x1000
13825
13826#define bfd_elf32_mkobject elf32_arm_mkobject
13827
13828#define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13829#define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13830#define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13831#define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13832#define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13833#define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13834#define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13835#define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13836#define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13837#define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13838#define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13839#define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13840#define bfd_elf32_bfd_final_link elf32_arm_final_link
13841
13842#define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13843#define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13844#define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13845#define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13846#define elf_backend_check_relocs elf32_arm_check_relocs
13847#define elf_backend_relocate_section elf32_arm_relocate_section
13848#define elf_backend_write_section elf32_arm_write_section
13849#define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13850#define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13851#define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13852#define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13853#define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13854#define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13855#define elf_backend_post_process_headers elf32_arm_post_process_headers
13856#define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13857#define elf_backend_object_p elf32_arm_object_p
13858#define elf_backend_section_flags elf32_arm_section_flags
13859#define elf_backend_fake_sections elf32_arm_fake_sections
13860#define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13861#define elf_backend_final_write_processing elf32_arm_final_write_processing
13862#define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13863#define elf_backend_symbol_processing elf32_arm_symbol_processing
13864#define elf_backend_size_info elf32_arm_size_info
13865#define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13866#define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13867#define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13868#define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13869#define elf_backend_is_function_type elf32_arm_is_function_type
13870
13871#define elf_backend_can_refcount 1
13872#define elf_backend_can_gc_sections 1
13873#define elf_backend_plt_readonly 1
13874#define elf_backend_want_got_plt 1
13875#define elf_backend_want_plt_sym 0
13876#define elf_backend_may_use_rel_p 1
13877#define elf_backend_may_use_rela_p 0
13878#define elf_backend_default_use_rela_p 0
13879
13880#define elf_backend_got_header_size 12
13881
13882#undef elf_backend_obj_attrs_vendor
13883#define elf_backend_obj_attrs_vendor "aeabi"
13884#undef elf_backend_obj_attrs_section
13885#define elf_backend_obj_attrs_section ".ARM.attributes"
13886#undef elf_backend_obj_attrs_arg_type
13887#define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13888#undef elf_backend_obj_attrs_section_type
13889#define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13890#define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13891#define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
13892
13893#include "elf32-target.h"
13894
13895/* VxWorks Targets. */
13896
13897#undef TARGET_LITTLE_SYM
13898#define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13899#undef TARGET_LITTLE_NAME
13900#define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13901#undef TARGET_BIG_SYM
13902#define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13903#undef TARGET_BIG_NAME
13904#define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13905
13906/* Like elf32_arm_link_hash_table_create -- but overrides
13907 appropriately for VxWorks. */
13908
13909static struct bfd_link_hash_table *
13910elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13911{
13912 struct bfd_link_hash_table *ret;
13913
13914 ret = elf32_arm_link_hash_table_create (abfd);
13915 if (ret)
13916 {
13917 struct elf32_arm_link_hash_table *htab
13918 = (struct elf32_arm_link_hash_table *) ret;
13919 htab->use_rel = 0;
13920 htab->vxworks_p = 1;
13921 }
13922 return ret;
13923}
13924
13925static void
13926elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13927{
13928 elf32_arm_final_write_processing (abfd, linker);
13929 elf_vxworks_final_write_processing (abfd, linker);
13930}
13931
13932#undef elf32_bed
13933#define elf32_bed elf32_arm_vxworks_bed
13934
13935#undef bfd_elf32_bfd_link_hash_table_create
13936#define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
13937#undef elf_backend_add_symbol_hook
13938#define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
13939#undef elf_backend_final_write_processing
13940#define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
13941#undef elf_backend_emit_relocs
13942#define elf_backend_emit_relocs elf_vxworks_emit_relocs
13943
13944#undef elf_backend_may_use_rel_p
13945#define elf_backend_may_use_rel_p 0
13946#undef elf_backend_may_use_rela_p
13947#define elf_backend_may_use_rela_p 1
13948#undef elf_backend_default_use_rela_p
13949#define elf_backend_default_use_rela_p 1
13950#undef elf_backend_want_plt_sym
13951#define elf_backend_want_plt_sym 1
13952#undef ELF_MAXPAGESIZE
13953#define ELF_MAXPAGESIZE 0x1000
13954
13955#include "elf32-target.h"
13956
13957
13958/* Merge backend specific data from an object file to the output
13959 object file when linking. */
13960
13961static bfd_boolean
13962elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
13963{
13964 flagword out_flags;
13965 flagword in_flags;
13966 bfd_boolean flags_compatible = TRUE;
13967 asection *sec;
13968
13969 /* Check if we have the same endianness. */
13970 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
13971 return FALSE;
13972
13973 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13974 return TRUE;
13975
13976 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
13977 return FALSE;
13978
13979 /* The input BFD must have had its flags initialised. */
13980 /* The following seems bogus to me -- The flags are initialized in
13981 the assembler but I don't think an elf_flags_init field is
13982 written into the object. */
13983 /* BFD_ASSERT (elf_flags_init (ibfd)); */
13984
13985 in_flags = elf_elfheader (ibfd)->e_flags;
13986 out_flags = elf_elfheader (obfd)->e_flags;
13987
13988 /* In theory there is no reason why we couldn't handle this. However
13989 in practice it isn't even close to working and there is no real
13990 reason to want it. */
13991 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
13992 && !(ibfd->flags & DYNAMIC)
13993 && (in_flags & EF_ARM_BE8))
13994 {
13995 _bfd_error_handler (_("error: %B is already in final BE8 format"),
13996 ibfd);
13997 return FALSE;
13998 }
13999
14000 if (!elf_flags_init (obfd))
14001 {
14002 /* If the input is the default architecture and had the default
14003 flags then do not bother setting the flags for the output
14004 architecture, instead allow future merges to do this. If no
14005 future merges ever set these flags then they will retain their
14006 uninitialised values which, surprise surprise, correspond
14007 to the default values. */
14008 if (bfd_get_arch_info (ibfd)->the_default
14009 && elf_elfheader (ibfd)->e_flags == 0)
14010 return TRUE;
14011
14012 elf_flags_init (obfd) = TRUE;
14013 elf_elfheader (obfd)->e_flags = in_flags;
14014
14015 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
14016 && bfd_get_arch_info (obfd)->the_default)
14017 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
14018
14019 return TRUE;
14020 }
14021
14022 /* Determine what should happen if the input ARM architecture
14023 does not match the output ARM architecture. */
14024 if (! bfd_arm_merge_machines (ibfd, obfd))
14025 return FALSE;
14026
14027 /* Identical flags must be compatible. */
14028 if (in_flags == out_flags)
14029 return TRUE;
14030
14031 /* Check to see if the input BFD actually contains any sections. If
14032 not, its flags may not have been initialised either, but it
14033 cannot actually cause any incompatibility. Do not short-circuit
14034 dynamic objects; their section list may be emptied by
14035 elf_link_add_object_symbols.
14036
14037 Also check to see if there are no code sections in the input.
14038 In this case there is no need to check for code specific flags.
14039 XXX - do we need to worry about floating-point format compatibility
14040 in data sections ? */
14041 if (!(ibfd->flags & DYNAMIC))
14042 {
14043 bfd_boolean null_input_bfd = TRUE;
14044 bfd_boolean only_data_sections = TRUE;
14045
14046 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
14047 {
14048 /* Ignore synthetic glue sections. */
14049 if (strcmp (sec->name, ".glue_7")
14050 && strcmp (sec->name, ".glue_7t"))
14051 {
14052 if ((bfd_get_section_flags (ibfd, sec)
14053 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14054 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14055 only_data_sections = FALSE;
14056
14057 null_input_bfd = FALSE;
14058 break;
14059 }
14060 }
14061
14062 if (null_input_bfd || only_data_sections)
14063 return TRUE;
14064 }
14065
14066 /* Complain about various flag mismatches. */
14067 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
14068 EF_ARM_EABI_VERSION (out_flags)))
14069 {
14070 _bfd_error_handler
14071 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
14072 ibfd, obfd,
14073 (in_flags & EF_ARM_EABIMASK) >> 24,
14074 (out_flags & EF_ARM_EABIMASK) >> 24);
14075 return FALSE;
14076 }
14077
14078 /* Not sure what needs to be checked for EABI versions >= 1. */
14079 /* VxWorks libraries do not use these flags. */
14080 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
14081 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
14082 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
14083 {
14084 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
14085 {
14086 _bfd_error_handler
14087 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
14088 ibfd, obfd,
14089 in_flags & EF_ARM_APCS_26 ? 26 : 32,
14090 out_flags & EF_ARM_APCS_26 ? 26 : 32);
14091 flags_compatible = FALSE;
14092 }
14093
14094 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
14095 {
14096 if (in_flags & EF_ARM_APCS_FLOAT)
14097 _bfd_error_handler
14098 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
14099 ibfd, obfd);
14100 else
14101 _bfd_error_handler
14102 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
14103 ibfd, obfd);
14104
14105 flags_compatible = FALSE;
14106 }
14107
14108 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
14109 {
14110 if (in_flags & EF_ARM_VFP_FLOAT)
14111 _bfd_error_handler
14112 (_("error: %B uses VFP instructions, whereas %B does not"),
14113 ibfd, obfd);
14114 else
14115 _bfd_error_handler
14116 (_("error: %B uses FPA instructions, whereas %B does not"),
14117 ibfd, obfd);
14118
14119 flags_compatible = FALSE;
14120 }
14121
14122 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
14123 {
14124 if (in_flags & EF_ARM_MAVERICK_FLOAT)
14125 _bfd_error_handler
14126 (_("error: %B uses Maverick instructions, whereas %B does not"),
14127 ibfd, obfd);
14128 else
14129 _bfd_error_handler
14130 (_("error: %B does not use Maverick instructions, whereas %B does"),
14131 ibfd, obfd);
14132
14133 flags_compatible = FALSE;
14134 }
14135
14136#ifdef EF_ARM_SOFT_FLOAT
14137 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
14138 {
14139 /* We can allow interworking between code that is VFP format
14140 layout, and uses either soft float or integer regs for
14141 passing floating point arguments and results. We already
14142 know that the APCS_FLOAT flags match; similarly for VFP
14143 flags. */
14144 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
14145 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
14146 {
14147 if (in_flags & EF_ARM_SOFT_FLOAT)
14148 _bfd_error_handler
14149 (_("error: %B uses software FP, whereas %B uses hardware FP"),
14150 ibfd, obfd);
14151 else
14152 _bfd_error_handler
14153 (_("error: %B uses hardware FP, whereas %B uses software FP"),
14154 ibfd, obfd);
14155
14156 flags_compatible = FALSE;
14157 }
14158 }
14159#endif
14160
14161 /* Interworking mismatch is only a warning. */
14162 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
14163 {
14164 if (in_flags & EF_ARM_INTERWORK)
14165 {
14166 _bfd_error_handler
14167 (_("Warning: %B supports interworking, whereas %B does not"),
14168 ibfd, obfd);
14169 }
14170 else
14171 {
14172 _bfd_error_handler
14173 (_("Warning: %B does not support interworking, whereas %B does"),
14174 ibfd, obfd);
14175 }
14176 }
14177 }
14178
14179 return flags_compatible;
14180}
14181
14182
14183/* Symbian OS Targets. */
14184
14185#undef TARGET_LITTLE_SYM
14186#define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
14187#undef TARGET_LITTLE_NAME
14188#define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
14189#undef TARGET_BIG_SYM
14190#define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
14191#undef TARGET_BIG_NAME
14192#define TARGET_BIG_NAME "elf32-bigarm-symbian"
14193
14194/* Like elf32_arm_link_hash_table_create -- but overrides
14195 appropriately for Symbian OS. */
14196
14197static struct bfd_link_hash_table *
14198elf32_arm_symbian_link_hash_table_create (bfd *abfd)
14199{
14200 struct bfd_link_hash_table *ret;
14201
14202 ret = elf32_arm_link_hash_table_create (abfd);
14203 if (ret)
14204 {
14205 struct elf32_arm_link_hash_table *htab
14206 = (struct elf32_arm_link_hash_table *)ret;
14207 /* There is no PLT header for Symbian OS. */
14208 htab->plt_header_size = 0;
14209 /* The PLT entries are each one instruction and one word. */
14210 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
14211 htab->symbian_p = 1;
14212 /* Symbian uses armv5t or above, so use_blx is always true. */
14213 htab->use_blx = 1;
14214 htab->root.is_relocatable_executable = 1;
14215 }
14216 return ret;
14217}
14218
14219static const struct bfd_elf_special_section
14220elf32_arm_symbian_special_sections[] =
14221{
14222 /* In a BPABI executable, the dynamic linking sections do not go in
14223 the loadable read-only segment. The post-linker may wish to
14224 refer to these sections, but they are not part of the final
14225 program image. */
14226 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
14227 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
14228 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
14229 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
14230 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
14231 /* These sections do not need to be writable as the SymbianOS
14232 postlinker will arrange things so that no dynamic relocation is
14233 required. */
14234 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
14235 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
14236 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
14237 { NULL, 0, 0, 0, 0 }
14238};
14239
14240static void
14241elf32_arm_symbian_begin_write_processing (bfd *abfd,
14242 struct bfd_link_info *link_info)
14243{
14244 /* BPABI objects are never loaded directly by an OS kernel; they are
14245 processed by a postlinker first, into an OS-specific format. If
14246 the D_PAGED bit is set on the file, BFD will align segments on
14247 page boundaries, so that an OS can directly map the file. With
14248 BPABI objects, that just results in wasted space. In addition,
14249 because we clear the D_PAGED bit, map_sections_to_segments will
14250 recognize that the program headers should not be mapped into any
14251 loadable segment. */
14252 abfd->flags &= ~D_PAGED;
14253 elf32_arm_begin_write_processing (abfd, link_info);
14254}
14255
14256static bfd_boolean
14257elf32_arm_symbian_modify_segment_map (bfd *abfd,
14258 struct bfd_link_info *info)
14259{
14260 struct elf_segment_map *m;
14261 asection *dynsec;
14262
14263 /* BPABI shared libraries and executables should have a PT_DYNAMIC
14264 segment. However, because the .dynamic section is not marked
14265 with SEC_LOAD, the generic ELF code will not create such a
14266 segment. */
14267 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
14268 if (dynsec)
14269 {
14270 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
14271 if (m->p_type == PT_DYNAMIC)
14272 break;
14273
14274 if (m == NULL)
14275 {
14276 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
14277 m->next = elf_tdata (abfd)->segment_map;
14278 elf_tdata (abfd)->segment_map = m;
14279 }
14280 }
14281
14282 /* Also call the generic arm routine. */
14283 return elf32_arm_modify_segment_map (abfd, info);
14284}
14285
14286/* Return address for Ith PLT stub in section PLT, for relocation REL
14287 or (bfd_vma) -1 if it should not be included. */
14288
14289static bfd_vma
14290elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
14291 const arelent *rel ATTRIBUTE_UNUSED)
14292{
14293 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
14294}
14295
14296
14297#undef elf32_bed
14298#define elf32_bed elf32_arm_symbian_bed
14299
14300/* The dynamic sections are not allocated on SymbianOS; the postlinker
14301 will process them and then discard them. */
14302#undef ELF_DYNAMIC_SEC_FLAGS
14303#define ELF_DYNAMIC_SEC_FLAGS \
14304 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14305
14306#undef elf_backend_add_symbol_hook
14307#undef elf_backend_emit_relocs
14308
14309#undef bfd_elf32_bfd_link_hash_table_create
14310#define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14311#undef elf_backend_special_sections
14312#define elf_backend_special_sections elf32_arm_symbian_special_sections
14313#undef elf_backend_begin_write_processing
14314#define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14315#undef elf_backend_final_write_processing
14316#define elf_backend_final_write_processing elf32_arm_final_write_processing
14317
14318#undef elf_backend_modify_segment_map
14319#define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14320
14321/* There is no .got section for BPABI objects, and hence no header. */
14322#undef elf_backend_got_header_size
14323#define elf_backend_got_header_size 0
14324
14325/* Similarly, there is no .got.plt section. */
14326#undef elf_backend_want_got_plt
14327#define elf_backend_want_got_plt 0
14328
14329#undef elf_backend_plt_sym_val
14330#define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14331
14332#undef elf_backend_may_use_rel_p
14333#define elf_backend_may_use_rel_p 1
14334#undef elf_backend_may_use_rela_p
14335#define elf_backend_may_use_rela_p 0
14336#undef elf_backend_default_use_rela_p
14337#define elf_backend_default_use_rela_p 0
14338#undef elf_backend_want_plt_sym
14339#define elf_backend_want_plt_sym 0
14340#undef ELF_MAXPAGESIZE
14341#define ELF_MAXPAGESIZE 0x8000
14342
14343#include "elf32-target.h"