/* Extracted from the binutils-gdb repository: bfd/elf32-arm.c.  */
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2016 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32
/* Name of the relocation section that accompanies section NAME:
   ".rel" NAME when HTAB records REL-style relocations, and
   ".rela" NAME otherwise.  */
#define RELOC_SECTION(HTAB, NAME) \
  (!(HTAB)->use_rel ? ".rela" NAME : ".rel" NAME)
37
/* Size in bytes of one external relocation entry, REL or RELA
   flavoured according to HTAB->use_rel.  */
#define RELOC_SIZE(HTAB)			\
  (!(HTAB)->use_rel				\
   ? sizeof (Elf32_External_Rela)		\
   : sizeof (Elf32_External_Rel))
44
/* Select the routine that swaps a relocation in, matching the
   REL/RELA flavour recorded in HTAB->use_rel.  */
#define SWAP_RELOC_IN(HTAB)			\
  (!(HTAB)->use_rel				\
   ? bfd_elf32_swap_reloca_in			\
   : bfd_elf32_swap_reloc_in)
51
/* Select the routine that swaps a relocation out, matching the
   REL/RELA flavour recorded in HTAB->use_rel.  */
#define SWAP_RELOC_OUT(HTAB)			\
  (!(HTAB)->use_rel				\
   ? bfd_elf32_swap_reloca_out			\
   : bfd_elf32_swap_reloc_out)
58
/* Howto lookup goes through the REL-flavoured hook only; the
   RELA-flavoured hook is left unset (0).  */
#define elf_info_to_howto 0
#define elf_info_to_howto_rel elf32_arm_info_to_howto

/* ABI version numbers; presumably written into the ELF header's
   e_ident bytes (EI_ABIVERSION / EI_OSABI) — the use sites are
   outside this excerpt, confirm there.  */
#define ARM_ELF_ABI_VERSION 0
#define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64
/* The Adjusted Place, as defined by AAELF: the place with its two
   low-order bits masked off (word alignment).  */
#define Pa(X) ((X) & ~ 0x3u)
67
/* Forward declaration: the definition appears later in this file and
   is referenced before that point.  */
static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
					    struct bfd_link_info *link_info,
					    asection *sec,
					    bfd_byte *contents);
72
73 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75 in that slot. */
76
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79 /* No relocation. */
80 HOWTO (R_ARM_NONE, /* type */
81 0, /* rightshift */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
83 0, /* bitsize */
84 FALSE, /* pc_relative */
85 0, /* bitpos */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
90 0, /* src_mask */
91 0, /* dst_mask */
92 FALSE), /* pcrel_offset */
93
94 HOWTO (R_ARM_PC24, /* type */
95 2, /* rightshift */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
97 24, /* bitsize */
98 TRUE, /* pc_relative */
99 0, /* bitpos */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
107
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
110 0, /* rightshift */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
112 32, /* bitsize */
113 FALSE, /* pc_relative */
114 0, /* bitpos */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
122
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
125 0, /* rightshift */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
127 32, /* bitsize */
128 TRUE, /* pc_relative */
129 0, /* bitpos */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
137
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
140 0, /* rightshift */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
142 32, /* bitsize */
143 TRUE, /* pc_relative */
144 0, /* bitpos */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
152
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
155 0, /* rightshift */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
157 16, /* bitsize */
158 FALSE, /* pc_relative */
159 0, /* bitpos */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
167
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
170 0, /* rightshift */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
172 12, /* bitsize */
173 FALSE, /* pc_relative */
174 0, /* bitpos */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
182
183 HOWTO (R_ARM_THM_ABS5, /* type */
184 6, /* rightshift */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
186 5, /* bitsize */
187 FALSE, /* pc_relative */
188 0, /* bitpos */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
196
197 /* 8 bit absolute */
198 HOWTO (R_ARM_ABS8, /* type */
199 0, /* rightshift */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
201 8, /* bitsize */
202 FALSE, /* pc_relative */
203 0, /* bitpos */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
211
212 HOWTO (R_ARM_SBREL32, /* type */
213 0, /* rightshift */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
215 32, /* bitsize */
216 FALSE, /* pc_relative */
217 0, /* bitpos */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
225
226 HOWTO (R_ARM_THM_CALL, /* type */
227 1, /* rightshift */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
229 24, /* bitsize */
230 TRUE, /* pc_relative */
231 0, /* bitpos */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
239
240 HOWTO (R_ARM_THM_PC8, /* type */
241 1, /* rightshift */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
243 8, /* bitsize */
244 TRUE, /* pc_relative */
245 0, /* bitpos */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
253
254 HOWTO (R_ARM_BREL_ADJ, /* type */
255 1, /* rightshift */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
257 32, /* bitsize */
258 FALSE, /* pc_relative */
259 0, /* bitpos */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
267
268 HOWTO (R_ARM_TLS_DESC, /* type */
269 0, /* rightshift */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
271 32, /* bitsize */
272 FALSE, /* pc_relative */
273 0, /* bitpos */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
281
282 HOWTO (R_ARM_THM_SWI8, /* type */
283 0, /* rightshift */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
285 0, /* bitsize */
286 FALSE, /* pc_relative */
287 0, /* bitpos */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
295
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
298 2, /* rightshift */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
300 24, /* bitsize */
301 TRUE, /* pc_relative */
302 0, /* bitpos */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
310
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
313 2, /* rightshift */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
315 24, /* bitsize */
316 TRUE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
325
326 /* Dynamic TLS relocations. */
327
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
329 0, /* rightshift */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
331 32, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
343 0, /* rightshift */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
345 32, /* bitsize */
346 FALSE, /* pc_relative */
347 0, /* bitpos */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
355
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
357 0, /* rightshift */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
359 32, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
369
370 /* Relocs used in ARM Linux */
371
372 HOWTO (R_ARM_COPY, /* type */
373 0, /* rightshift */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
375 32, /* bitsize */
376 FALSE, /* pc_relative */
377 0, /* bitpos */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
385
386 HOWTO (R_ARM_GLOB_DAT, /* type */
387 0, /* rightshift */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
389 32, /* bitsize */
390 FALSE, /* pc_relative */
391 0, /* bitpos */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
399
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
401 0, /* rightshift */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
403 32, /* bitsize */
404 FALSE, /* pc_relative */
405 0, /* bitpos */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
413
414 HOWTO (R_ARM_RELATIVE, /* type */
415 0, /* rightshift */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
417 32, /* bitsize */
418 FALSE, /* pc_relative */
419 0, /* bitpos */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
427
428 HOWTO (R_ARM_GOTOFF32, /* type */
429 0, /* rightshift */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
431 32, /* bitsize */
432 FALSE, /* pc_relative */
433 0, /* bitpos */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
441
442 HOWTO (R_ARM_GOTPC, /* type */
443 0, /* rightshift */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
445 32, /* bitsize */
446 TRUE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
455
456 HOWTO (R_ARM_GOT32, /* type */
457 0, /* rightshift */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
459 32, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 HOWTO (R_ARM_PLT32, /* type */
471 2, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 24, /* bitsize */
474 TRUE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
483
484 HOWTO (R_ARM_CALL, /* type */
485 2, /* rightshift */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
487 24, /* bitsize */
488 TRUE, /* pc_relative */
489 0, /* bitpos */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
497
498 HOWTO (R_ARM_JUMP24, /* type */
499 2, /* rightshift */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
501 24, /* bitsize */
502 TRUE, /* pc_relative */
503 0, /* bitpos */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
511
512 HOWTO (R_ARM_THM_JUMP24, /* type */
513 1, /* rightshift */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
515 24, /* bitsize */
516 TRUE, /* pc_relative */
517 0, /* bitpos */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
525
526 HOWTO (R_ARM_BASE_ABS, /* type */
527 0, /* rightshift */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
529 32, /* bitsize */
530 FALSE, /* pc_relative */
531 0, /* bitpos */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
539
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
541 0, /* rightshift */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
543 12, /* bitsize */
544 TRUE, /* pc_relative */
545 0, /* bitpos */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
553
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
555 0, /* rightshift */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
557 12, /* bitsize */
558 TRUE, /* pc_relative */
559 8, /* bitpos */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
567
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
569 0, /* rightshift */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
571 12, /* bitsize */
572 TRUE, /* pc_relative */
573 16, /* bitpos */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
581
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
583 0, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 12, /* bitsize */
586 FALSE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
595
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
597 0, /* rightshift */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
599 8, /* bitsize */
600 FALSE, /* pc_relative */
601 12, /* bitpos */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
609
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
611 0, /* rightshift */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
613 8, /* bitsize */
614 FALSE, /* pc_relative */
615 20, /* bitpos */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
623
624 HOWTO (R_ARM_TARGET1, /* type */
625 0, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 32, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 HOWTO (R_ARM_ROSEGREL32, /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 32, /* bitsize */
642 FALSE, /* pc_relative */
643 0, /* bitpos */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 HOWTO (R_ARM_V4BX, /* type */
653 0, /* rightshift */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
655 32, /* bitsize */
656 FALSE, /* pc_relative */
657 0, /* bitpos */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
665
666 HOWTO (R_ARM_TARGET2, /* type */
667 0, /* rightshift */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
669 32, /* bitsize */
670 FALSE, /* pc_relative */
671 0, /* bitpos */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
679
680 HOWTO (R_ARM_PREL31, /* type */
681 0, /* rightshift */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
683 31, /* bitsize */
684 TRUE, /* pc_relative */
685 0, /* bitpos */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
693
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
695 0, /* rightshift */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
697 16, /* bitsize */
698 FALSE, /* pc_relative */
699 0, /* bitpos */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
707
708 HOWTO (R_ARM_MOVT_ABS, /* type */
709 0, /* rightshift */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
711 16, /* bitsize */
712 FALSE, /* pc_relative */
713 0, /* bitpos */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
721
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
723 0, /* rightshift */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
725 16, /* bitsize */
726 TRUE, /* pc_relative */
727 0, /* bitpos */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
735
736 HOWTO (R_ARM_MOVT_PREL, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 16, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
751 0, /* rightshift */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
753 16, /* bitsize */
754 FALSE, /* pc_relative */
755 0, /* bitpos */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
763
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
765 0, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 16, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 0, /* rightshift */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
781 16, /* bitsize */
782 TRUE, /* pc_relative */
783 0, /* bitpos */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
791
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
793 0, /* rightshift */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
795 16, /* bitsize */
796 TRUE, /* pc_relative */
797 0, /* bitpos */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
805
806 HOWTO (R_ARM_THM_JUMP19, /* type */
807 1, /* rightshift */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
809 19, /* bitsize */
810 TRUE, /* pc_relative */
811 0, /* bitpos */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
819
820 HOWTO (R_ARM_THM_JUMP6, /* type */
821 1, /* rightshift */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
823 6, /* bitsize */
824 TRUE, /* pc_relative */
825 0, /* bitpos */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
833
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836 versa. */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 0, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 13, /* bitsize */
841 TRUE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
850
851 HOWTO (R_ARM_THM_PC12, /* type */
852 0, /* rightshift */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
854 13, /* bitsize */
855 TRUE, /* pc_relative */
856 0, /* bitpos */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
864
865 HOWTO (R_ARM_ABS32_NOI, /* type */
866 0, /* rightshift */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
868 32, /* bitsize */
869 FALSE, /* pc_relative */
870 0, /* bitpos */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
878
879 HOWTO (R_ARM_REL32_NOI, /* type */
880 0, /* rightshift */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
882 32, /* bitsize */
883 TRUE, /* pc_relative */
884 0, /* bitpos */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
892
893 /* Group relocations. */
894
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
896 0, /* rightshift */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
898 32, /* bitsize */
899 TRUE, /* pc_relative */
900 0, /* bitpos */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
908
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
910 0, /* rightshift */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
912 32, /* bitsize */
913 TRUE, /* pc_relative */
914 0, /* bitpos */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
922
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
924 0, /* rightshift */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
926 32, /* bitsize */
927 TRUE, /* pc_relative */
928 0, /* bitpos */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
936
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
938 0, /* rightshift */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
940 32, /* bitsize */
941 TRUE, /* pc_relative */
942 0, /* bitpos */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
950
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
952 0, /* rightshift */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
954 32, /* bitsize */
955 TRUE, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
964
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
966 0, /* rightshift */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
968 32, /* bitsize */
969 TRUE, /* pc_relative */
970 0, /* bitpos */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
978
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
980 0, /* rightshift */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
982 32, /* bitsize */
983 TRUE, /* pc_relative */
984 0, /* bitpos */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
992
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
994 0, /* rightshift */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
996 32, /* bitsize */
997 TRUE, /* pc_relative */
998 0, /* bitpos */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1006
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1008 0, /* rightshift */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1010 32, /* bitsize */
1011 TRUE, /* pc_relative */
1012 0, /* bitpos */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1020
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1022 0, /* rightshift */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1024 32, /* bitsize */
1025 TRUE, /* pc_relative */
1026 0, /* bitpos */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1034
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1036 0, /* rightshift */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1038 32, /* bitsize */
1039 TRUE, /* pc_relative */
1040 0, /* bitpos */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1048
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1050 0, /* rightshift */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1052 32, /* bitsize */
1053 TRUE, /* pc_relative */
1054 0, /* bitpos */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1062
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1064 0, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 32, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 32, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1092 0, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 32, /* bitsize */
1095 TRUE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1104
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1106 0, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 32, /* bitsize */
1109 TRUE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1118
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1120 0, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 32, /* bitsize */
1123 TRUE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1132
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 32, /* bitsize */
1137 TRUE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1146
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 32, /* bitsize */
1151 TRUE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1160
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 32, /* bitsize */
1165 TRUE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1174
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 32, /* bitsize */
1179 TRUE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1188
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 32, /* bitsize */
1193 TRUE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1202
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1204 0, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 32, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 32, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1232 0, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 32, /* bitsize */
1235 TRUE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1244
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1246 0, /* rightshift */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1248 32, /* bitsize */
1249 TRUE, /* pc_relative */
1250 0, /* bitpos */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1258
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1260 0, /* rightshift */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1262 32, /* bitsize */
1263 TRUE, /* pc_relative */
1264 0, /* bitpos */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1272
1273 /* End of group relocations. */
1274
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1276 0, /* rightshift */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1278 16, /* bitsize */
1279 FALSE, /* pc_relative */
1280 0, /* bitpos */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1288
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1290 0, /* rightshift */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1292 16, /* bitsize */
1293 FALSE, /* pc_relative */
1294 0, /* bitpos */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1302
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1304 0, /* rightshift */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1306 16, /* bitsize */
1307 FALSE, /* pc_relative */
1308 0, /* bitpos */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1316
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 0, /* rightshift */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1320 16, /* bitsize */
1321 FALSE, /* pc_relative */
1322 0, /* bitpos */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1330
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1332 0, /* rightshift */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1334 16, /* bitsize */
1335 FALSE, /* pc_relative */
1336 0, /* bitpos */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1344
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1346 0, /* rightshift */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1348 16, /* bitsize */
1349 FALSE, /* pc_relative */
1350 0, /* bitpos */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1358
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1360 0, /* rightshift */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1362 32, /* bitsize */
1363 FALSE, /* pc_relative */
1364 0, /* bitpos */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1372
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1374 0, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 24, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 0, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1400
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1402 0, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 24, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1414
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1416 0, /* rightshift */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1418 32, /* bitsize */
1419 FALSE, /* pc_relative */
1420 0, /* bitpos */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1428
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1430 0, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 32, /* bitsize */
1433 FALSE, /* pc_relative */
1434 0, /* bitpos */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1444 0, /* rightshift */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1446 32, /* bitsize */
1447 TRUE, /* pc_relative */
1448 0, /* bitpos */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1456
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1458 0, /* rightshift */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1460 12, /* bitsize */
1461 FALSE, /* pc_relative */
1462 0, /* bitpos */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1470
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1472 0, /* rightshift */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1474 12, /* bitsize */
1475 FALSE, /* pc_relative */
1476 0, /* bitpos */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1484
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1486
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1489 0, /* rightshift */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1491 0, /* bitsize */
1492 FALSE, /* pc_relative */
1493 0, /* bitpos */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1498 0, /* src_mask */
1499 0, /* dst_mask */
1500 FALSE), /* pcrel_offset */
1501
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 0, /* rightshift */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1506 0, /* bitsize */
1507 FALSE, /* pc_relative */
1508 0, /* bitpos */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1513 0, /* src_mask */
1514 0, /* dst_mask */
1515 FALSE), /* pcrel_offset */
1516
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1518 1, /* rightshift */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1520 11, /* bitsize */
1521 TRUE, /* pc_relative */
1522 0, /* bitpos */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1530
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1532 1, /* rightshift */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1534 8, /* bitsize */
1535 TRUE, /* pc_relative */
1536 0, /* bitpos */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1544
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1547 0, /* rightshift */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1549 32, /* bitsize */
1550 FALSE, /* pc_relative */
1551 0, /* bitpos */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1559
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1561 0, /* rightshift */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1563 32, /* bitsize */
1564 FALSE, /* pc_relative */
1565 0, /* bitpos */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1573
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1575 0, /* rightshift */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1577 32, /* bitsize */
1578 FALSE, /* pc_relative */
1579 0, /* bitpos */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1587
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1589 0, /* rightshift */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1591 32, /* bitsize */
1592 FALSE, /* pc_relative */
1593 0, /* bitpos */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1601
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1603 0, /* rightshift */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1605 32, /* bitsize */
1606 FALSE, /* pc_relative */
1607 0, /* bitpos */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 NULL, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1615
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1617 0, /* rightshift */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1619 12, /* bitsize */
1620 FALSE, /* pc_relative */
1621 0, /* bitpos */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1629
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1631 0, /* rightshift */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1633 12, /* bitsize */
1634 FALSE, /* pc_relative */
1635 0, /* bitpos */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1643
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1645 0, /* rightshift */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1647 12, /* bitsize */
1648 FALSE, /* pc_relative */
1649 0, /* bitpos */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1657
1658 /* 112-127 private relocations. */
1659 EMPTY_HOWTO (112),
1660 EMPTY_HOWTO (113),
1661 EMPTY_HOWTO (114),
1662 EMPTY_HOWTO (115),
1663 EMPTY_HOWTO (116),
1664 EMPTY_HOWTO (117),
1665 EMPTY_HOWTO (118),
1666 EMPTY_HOWTO (119),
1667 EMPTY_HOWTO (120),
1668 EMPTY_HOWTO (121),
1669 EMPTY_HOWTO (122),
1670 EMPTY_HOWTO (123),
1671 EMPTY_HOWTO (124),
1672 EMPTY_HOWTO (125),
1673 EMPTY_HOWTO (126),
1674 EMPTY_HOWTO (127),
1675
1676 /* R_ARM_ME_TOO, obsolete. */
1677 EMPTY_HOWTO (128),
1678
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1680 0, /* rightshift */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1682 0, /* bitsize */
1683 FALSE, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1692 EMPTY_HOWTO (130),
1693 EMPTY_HOWTO (131),
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1697 16, /* bitsize. */
1698 FALSE, /* pc_relative. */
1699 0, /* bitpos. */
1700 complain_overflow_bitfield,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1710 16, /* bitsize. */
1711 FALSE, /* pc_relative. */
1712 0, /* bitpos. */
1713 complain_overflow_bitfield,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1723 16, /* bitsize. */
1724 FALSE, /* pc_relative. */
1725 0, /* bitpos. */
1726 complain_overflow_bitfield,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1736 16, /* bitsize. */
1737 FALSE, /* pc_relative. */
1738 0, /* bitpos. */
1739 complain_overflow_bitfield,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE), /* pcrel_offset. */
1746 };
1747
1748 /* 160 onwards: */
static reloc_howto_type elf32_arm_howto_table_2[1] =
{
  /* R_ARM_IRELATIVE (160) is the only entry; elf32_arm_howto_from_type
     maps exactly that type to index 0 of this table.  */
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1765
/* 252-255 extended, currently unused, relocations: */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  /* Placeholder howtos: every size, bitsize and mask is zero, so these
     entries name the relocation types without modifying any bits.
     elf32_arm_howto_from_type indexes this table by
     r_type - R_ARM_RREL32.  */
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1825
1826 static reloc_howto_type *
1827 elf32_arm_howto_from_type (unsigned int r_type)
1828 {
1829 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1830 return &elf32_arm_howto_table_1[r_type];
1831
1832 if (r_type == R_ARM_IRELATIVE)
1833 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1834
1835 if (r_type >= R_ARM_RREL32
1836 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1837 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1838
1839 return NULL;
1840 }
1841
1842 static void
1843 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1844 Elf_Internal_Rela * elf_reloc)
1845 {
1846 unsigned int r_type;
1847
1848 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1849 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1850 }
1851
/* One entry of the table mapping BFD's generic relocation codes onto
   ELF32/ARM relocation numbers (see elf32_arm_reloc_map below).  */
struct elf32_arm_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* Generic BFD code.  */
  unsigned char elf_reloc_val;			/* R_ARM_* number; fits in a byte.  */
};
1857
1858 /* All entries in this list must also be present in elf32_arm_howto_table. */
1859 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1860 {
1861 {BFD_RELOC_NONE, R_ARM_NONE},
1862 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1863 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1864 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1865 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1866 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1867 {BFD_RELOC_32, R_ARM_ABS32},
1868 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1869 {BFD_RELOC_8, R_ARM_ABS8},
1870 {BFD_RELOC_16, R_ARM_ABS16},
1871 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1872 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1873 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1874 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1875 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1876 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1877 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1878 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1879 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1880 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1881 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1882 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1883 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1884 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1885 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1886 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1887 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1888 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1889 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1890 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1891 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1892 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1893 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1894 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1895 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1896 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1897 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1898 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1899 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1900 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1901 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1902 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1903 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1904 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1905 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1906 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1907 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1908 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1909 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1910 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1911 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1912 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1913 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1914 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1915 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1916 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1917 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1918 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1919 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1920 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1921 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1922 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1923 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1924 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1925 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1926 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1927 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1928 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1929 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1930 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1931 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1932 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1933 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1934 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1935 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1936 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1937 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1938 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1939 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1940 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1941 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1942 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1943 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1944 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1945 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1946 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
1947 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
1948 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
1949 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
1950 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
1951 };
1952
1953 static reloc_howto_type *
1954 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1955 bfd_reloc_code_real_type code)
1956 {
1957 unsigned int i;
1958
1959 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1960 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1961 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1962
1963 return NULL;
1964 }
1965
1966 static reloc_howto_type *
1967 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1968 const char *r_name)
1969 {
1970 unsigned int i;
1971
1972 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1973 if (elf32_arm_howto_table_1[i].name != NULL
1974 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1975 return &elf32_arm_howto_table_1[i];
1976
1977 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1978 if (elf32_arm_howto_table_2[i].name != NULL
1979 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1980 return &elf32_arm_howto_table_2[i];
1981
1982 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1983 if (elf32_arm_howto_table_3[i].name != NULL
1984 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1985 return &elf32_arm_howto_table_3[i];
1986
1987 return NULL;
1988 }
1989
1990 /* Support for core dump NOTE sections. */
1991
1992 static bfd_boolean
1993 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1994 {
1995 int offset;
1996 size_t size;
1997
1998 switch (note->descsz)
1999 {
2000 default:
2001 return FALSE;
2002
2003 case 148: /* Linux/ARM 32-bit. */
2004 /* pr_cursig */
2005 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2006
2007 /* pr_pid */
2008 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2009
2010 /* pr_reg */
2011 offset = 72;
2012 size = 72;
2013
2014 break;
2015 }
2016
2017 /* Make a ".reg/999" section. */
2018 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2019 size, note->descpos + offset);
2020 }
2021
2022 static bfd_boolean
2023 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2024 {
2025 switch (note->descsz)
2026 {
2027 default:
2028 return FALSE;
2029
2030 case 124: /* Linux/ARM elf_prpsinfo. */
2031 elf_tdata (abfd)->core->pid
2032 = bfd_get_32 (abfd, note->descdata + 12);
2033 elf_tdata (abfd)->core->program
2034 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2035 elf_tdata (abfd)->core->command
2036 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2037 }
2038
2039 /* Note that for some reason, a spurious space is tacked
2040 onto the end of the args in some (at least one anyway)
2041 implementations, so strip it off if it exists. */
2042 {
2043 char *command = elf_tdata (abfd)->core->command;
2044 int n = strlen (command);
2045
2046 if (0 < n && command[n - 1] == ' ')
2047 command[n - 1] = '\0';
2048 }
2049
2050 return TRUE;
2051 }
2052
/* Write an NT_PRPSINFO or NT_PRSTATUS note for a Linux/ARM core file
   into BUF.  The variadic arguments depend on NOTE_TYPE; the field
   offsets mirror those read back by the grok_prstatus/grok_psinfo
   functions above.  Returns the updated note buffer, or NULL for
   unsupported note types.  */
static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124];		/* Linux/ARM elf_prpsinfo size (see grok_psinfo).  */
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* Arguments: program name (16 bytes at pr_fname offset 28),
	   command line (80 bytes at pr_psargs offset 44).  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
	strncpy (data + 44, va_arg (ap, const char *), 80);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];		/* Linux/ARM elf_prstatus size (see grok_prstatus).  */
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* Arguments, in order: pid, current signal, register block.  */
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);	/* pr_pid.  */
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);	/* pr_cursig.  */
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);		/* pr_reg.  */
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2100
#define TARGET_LITTLE_SYM               arm_elf32_le_vec
#define TARGET_LITTLE_NAME              "elf32-littlearm"
#define TARGET_BIG_SYM                  arm_elf32_be_vec
#define TARGET_BIG_NAME                 "elf32-bigarm"

#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

/* Containers for 32-bit (ARM) and 16-bit (Thumb) instruction words.  */
typedef unsigned long int	insn32;
typedef unsigned short int	insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
  || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
  || ((abfd)->flags & BFD_LINKER_CREATED))

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found.  The
   name can be changed.  The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME   "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
2144
/* TLS trampoline: tail-calls the resolver found one word past the
   address computed in r0.  Each element is one ARM instruction.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};

/* Trampoline for lazy TLS-descriptor resolution; the two trailing data
   words are GOT-relative offsets (see the .word comments below).  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
  0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1: ldr r2, [pc, r2] */
  0xe081100f, /* 2: add r1, pc */
  0xe12fff12, /* bx r2 */
  0x00000014, /* 3: .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
			   + dl_tlsdesc_lazy_resolver(GOT)   */
  0x00000018, /* 4: .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
2164
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe010,		/* ldr   lr, [pc, #16]  */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
};

/* Subsequent entries in a procedure linkage table look like
   this.  The #NN immediates are filled in when each entry is
   written out.  */
static const bfd_vma elf32_arm_plt_entry [] =
{
  0xe28fc600,		/* add   ip, pc, #NN	*/
  0xe28cca00,		/* add	 ip, ip, #NN	*/
  0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
  0x00000000,		/* unused		*/
};

#else /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe004,		/* ldr   lr, [pc, #4]   */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
  0x00000000,		/* &GOT[0] - .          */
};

/* By default subsequent entries in a procedure linkage table look like
   this.  Offsets that don't fit into 28 bits will cause link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
{
  0xe28fc600,		/* add   ip, pc, #0xNN00000 */
  0xe28cca00,		/* add	 ip, ip, #0xNN000   */
  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
};

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
{
  0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
  0xe28cc600,		/* add	 ip, ip, #0xNN00000  */
  0xe28cca00,		/* add	 ip, ip, #0xNN000    */
  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
};

/* Whether the long PLT entry format should be used; set from the
   command line.  */
static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;

#endif /* not FOUR_WORD_PLT */
2226
/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0xf8dfb500,		/* push	   {lr}		 */
  0x44fee008,		/* ldr.w   lr, [pc, #8]	 */
			/* add	   lr, pc	 */
  0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
  0x00000000,		/* &GOT[0] - .		 */
};

/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  The #0xNNNN immediates are filled in when each entry
   is written out.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0x0c00f240,		/* movw	   ip, #0xNNNN	  */
  0x0c00f2c0,		/* movt	   ip, #0xNNNN	  */
  0xf8dc44fc,		/* add	   ip, pc	  */
  0xbf00f000		/* ldr.w   pc, [ip]	  */
			/* nop			  */
};
2253
/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
{
  0xe52dc008,		/* str	  ip,[sp,#-8]!			*/
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe59cf008,		/* ldr	  pc,[ip,#8]			*/
  0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
};

/* The format of subsequent entries in a VxWorks executable.
   Each entry is two bundles: a direct GOT load, then a fallback
   branch to the PLT header for lazy resolution.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
{
  0xe59fc000,	      /* ldr	ip,[pc]			*/
  0xe59cf000,	      /* ldr	pc,[ip]			*/
  0x00000000,	      /* .long	@got			*/
  0xe59fc000,	      /* ldr	ip,[pc]			*/
  0xea000000,	      /* b	_PLT			*/
  0x00000000,	      /* .long	@pltindex*sizeof(Elf32_Rela) */
};

/* The format of entries in a VxWorks shared library.
   GOT accesses are made relative to the GOT pointer in r9.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
{
  0xe59fc000,	      /* ldr	ip,[pc]			*/
  0xe79cf009,	      /* ldr	pc,[ip,r9]		*/
  0x00000000,	      /* .long	@got			*/
  0xe59fc000,	      /* ldr	ip,[pc]			*/
  0xe599f008,	      /* ldr	pc,[r9,#8]		*/
  0x00000000,	      /* .long	@pltindex*sizeof(Elf32_Rela) */
};

/* An initial stub used if the PLT entry is referenced from Thumb code.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778,		/* bx pc */
  0x46c0		/* nop   */
};

/* The entries in a PLT when using a DLL-based target with multiple
   address spaces.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
{
  0xe51ff004,	      /* ldr   pc, [pc, #-4] */
  0x00000000,	      /* dcd   R_ARM_GLOB_DAT(X) */
};
2301
/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  NaCl requires 16-byte instruction bundles and
   masking of computed branch targets (the bic instructions).  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
{
  /* First bundle: */
  0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xe52dc008,		/* str	ip, [sp, #-8]!			*/
  /* Second bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
  /* Third bundle: */
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  /* .Lplt_tail: */
  0xe50dc004,		/* str	ip, [sp, #-4]			*/
  /* Fourth bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
};
/* Byte offset of the .Lplt_tail label within elf32_arm_nacl_plt0_entry.  */
#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)

/* Subsequent entries in a procedure linkage table look like this.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
{
  0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xea000000,		/* b	.Lplt_tail			*/
};
2340
/* Maximum reachable displacements for the various ARM/Thumb branch
   encodings, including the PC-relative bias (+8 for ARM, +4 for
   Thumb).  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)

/* Classification of the elements of a stub template: 16-bit Thumb,
   32-bit Thumb, ARM instruction, or a literal data word.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

/* Convenience constructors for insn_sequence entries; the last field is
   the relocation addend (see struct below).  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template: an instruction or data word, plus the
   relocation type and addend to apply when the stub is emitted.  */
typedef struct
{
  bfd_vma data;
  enum stub_insn_type type;
  unsigned int r_type;
  int reloc_addend;
}  insn_sequence;
2375
/* Arm/Thumb -> Arm/Thumb long branch stub.  On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
{
  ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* V4T Arm -> Thumb long branch stub.  Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
{
  ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	    /* bx    ip */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub.  Used on M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
{
  THUMB16_INSN (0xb401),	     /* push {r0} */
  THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x4684),	     /* mov  ip, r0 */
  THUMB16_INSN (0xbc01),	     /* pop  {r0} */
  THUMB16_INSN (0x4760),	     /* bx   ip */
  THUMB16_INSN (0xbf00),	     /* nop */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> Thumb long branch stub.  Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	     /* bx   ip */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM long branch stub.  Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe51ff004),	     /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd   R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM short branch stub.  Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_REL_INSN (0xea000000, -8),     /* b    (X-8) */
};
2434
/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
{
  ARM_INSN (0xe59fc000),	     /* ldr   ip, [pc] */
  ARM_INSN (0xe08ff00c),	     /* add   pc, pc, ip */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
};

/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We can not add into pc;
   it is not guaranteed to mode switch (different in ARMv6 and
   ARMv7).  */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
};

/* V4T ARM -> Thumb long branch stub, PIC.  (The bx permits the mode
   switch implied by the _arm_thumb name.)  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
};

/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};

/* Thumb -> Thumb long branch stub, PIC.  Used on M-profile
   architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
{
  THUMB16_INSN (0xb401),	     /* push {r0} */
  THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x46fc),	     /* mov  ip, pc */
  THUMB16_INSN (0x4484),	     /* add  ip, r0 */
  THUMB16_INSN (0xbc01),	     /* pop  {r0} */
  THUMB16_INSN (0x4760),	     /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 4),     /* dcd  R_ARM_REL32(X+4) */
};

/* V4T Thumb -> Thumb long branch stub, PIC.  Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc004),	     /* ldr  ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	     /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd  R_ARM_REL32(X) */
};
2499
/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
{
  ARM_INSN (0xe59f1000),	     /* ldr   r1, [pc] */
  ARM_INSN (0xe08ff001),	     /* add   pc, pc, r1 */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
};

/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
  ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};

/* NaCl ARM -> ARM long branch stub.  Padded to a 16-byte bundle with
   a bkpt and trailing zero words.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
{
  ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
  ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),		/* bx	ip */
  ARM_INSN (0xe320f000),		/* nop */
  ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd	R_ARM_ABS32(X) */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
};

/* NaCl ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
{
  ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
  ARM_INSN (0xe08cc00f),		/* add	ip, ip, pc */
  ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),		/* bx	ip */
  ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_REL32, 8),	/* dcd	R_ARM_REL32(X+8) */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
};
2545
2546
/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */

static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
{
  THUMB16_BCOND_INSN (0xd001),		/* b<cond>.n true.  */
  THUMB32_B_INSN (0xf000b800, -4),	/* b.w insn_after_original_branch.  */
  THUMB32_B_INSN (0xf000b800, -4)	/* true: b.w original_branch_dest.  */
};

/* Stub used for b.w and bl.w instructions.  */

static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to the
   real destination using an ARM-mode branch.  */

static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
{
  ARM_REL_INSN (0xea000000, -8)		/* b original_branch_dest.  */
};
2579
/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   section called:

     .data.rel.local.stubborn_problems

   This then causes problems in elf32_arm_build_stubs() as it triggers:

      // Ignore non-stub sections.
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   C identifier.  */
#define STUB_SUFFIX ".__stub"

/* One entry per long/short branch stub defined above.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx)

/* Generate an enumerator arm_stub_<name> for every stub above, bracketed
   by the arm_stub_none / max_stub_type sentinels.  */
#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB

/* Note the first a8_veneer type.  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;

/* A stub template together with its length in insn_sequence entries.  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

/* Table indexed by enum elf32_arm_stub_type; slot 0 is arm_stub_none.  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
2654
/* An entry in the stub hash table: describes one branch veneer to be
   emitted into a stub section.  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Same as above but for the source of the branch to the stub.  Used for
     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
     such, source section does not need to be recorded since Cortex-A8 erratum
     workaround stubs are only generated when both source and target are in the
     same section.  */
  bfd_vma source_value;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
2706
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */

typedef struct elf32_elf_section_map
{
  /* Address at which this mapping takes effect.  */
  bfd_vma vma;
  /* Mapping symbol class character — presumably $a/$t/$d style; verify
     against the code that fills these in.  */
  char type;
}
elf32_arm_section_map;

/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;

typedef struct elf32_vfp11_erratum_list
{
  struct elf32_vfp11_erratum_list *next;
  bfd_vma vma;
  union
  {
    /* Used when TYPE is one of the BRANCH_TO_* values.  */
    struct
    {
      struct elf32_vfp11_erratum_list *veneer;
      unsigned int vfp_insn;
    } b;
    /* Used when TYPE is one of the *_VENEER values.  */
    struct
    {
      struct elf32_vfp11_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;
2748
/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */
typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;

typedef struct elf32_stm32l4xx_erratum_list
{
  struct elf32_stm32l4xx_erratum_list *next;
  bfd_vma vma;
  union
  {
    /* Used when TYPE is STM32L4XX_ERRATUM_BRANCH_TO_VENEER.  */
    struct
    {
      struct elf32_stm32l4xx_erratum_list *veneer;
      unsigned int insn;
    } b;
    /* Used when TYPE is STM32L4XX_ERRATUM_VENEER.  */
    struct
    {
      struct elf32_stm32l4xx_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_stm32l4xx_erratum_type type;
}
elf32_stm32l4xx_erratum_list;

/* The kinds of edit that can be applied to an unwind (.ARM.exidx)
   table.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;
2798
/* ARM-specific per-section data, extending the generic ELF section
   data.  Retrieved via the elf32_arm_section_data macro below.  */
typedef struct _arm_elf_section_data
{
  /* Information about mapping symbols.  */
  struct bfd_elf_section_data elf;	/* Generic part; must come first so
					   elf32_arm_section_data's cast of
					   elf_section_data() is valid.  */
  unsigned int mapcount;
  unsigned int mapsize;
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  unsigned int stm32l4xx_erratumcount;
  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
  unsigned int additional_reloc_count;
  /* Information about unwind tables.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

/* Access the ARM-specific section data of SEC.  */
#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
2833
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  bfd *input_bfd;
  asection *section;
  bfd_vma offset;
  bfd_vma target_offset;
  /* The instruction being patched (cf. orig_insn in the stub hash
     entry).  */
  unsigned long orig_insn;
  char *stub_name;
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};

/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  bfd_vma from;
  bfd_vma destination;
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  /* TRUE when a non-A8 stub has already been created for this branch.  */
  bfd_boolean non_a8_stub;
};
2865
/* The size of the thread control block, in bytes.  */
#define TCB_SIZE 8

/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  */
  bfd_signed_vma got_offset;
};
2893
/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.
   Local symbols have no hash table entry, so this mirrors the fields
   that would otherwise live there.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};
2908
/* ARM-specific per-object (per-bfd) data, extending the generic ELF
   tdata.  */
struct elf_arm_obj_tdata
{
  /* The generic per-object data; placed first so the generic and ARM
     views of the tdata alias (see elf_arm_tdata below).  */
  struct elf_obj_tdata root;

  /* tls_type (GOT_* value) for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;
};

/* Retrieve the ARM tdata attached to BFD.  */
#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

/* Accessors for the per-local-symbol arrays; all are carved out of a
   single allocation made by elf32_arm_allocate_local_sym_info.  */
#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

/* Nonzero if BFD is an ELF bfd carrying ARM-specific tdata.  */
#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
2945
2946 static bfd_boolean
2947 elf32_arm_mkobject (bfd *abfd)
2948 {
2949 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2950 ARM_ELF_DATA);
2951 }
2952
/* Cast a generic ELF linker hash entry to an ARM-specific one.  */
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  /* The generic hash entry this structure extends.  */
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

  /* Flag values for TLS_TYPE below; GOT_TLS_GD and GOT_TLS_GDESC may be
     combined, hence the _ANY_P predicate.  */
#define GOT_UNKNOWN 0
#define GOT_NORMAL 1
#define GOT_TLS_GD 2
#define GOT_TLS_IE 4
#define GOT_TLS_GDESC 8
#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  /* Padding; currently unused bits.  */
  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;
};
2991
/* Traverse an arm ELF linker hash table, calling FUNC on every entry
   with INFO as its second argument.  */
#define elf32_arm_link_hash_traverse(table, func, info) \
  (elf_link_hash_traverse \
   (&(table)->root, \
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Evaluates to NULL if the hash table is not an ARM one.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

/* Look up STRING in the stub hash table, with bfd_hash_lookup
   semantics for CREATE and COPY.  */
#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))

/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};

/* Size, in bytes, of the GOTPLT jump table: one 4-byte slot per
   R_ARM_TLS_DESC index allocated so far.  */
#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
3021
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  int use_rel;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *sdynbss;
  asection *srelbss;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32 relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;
};
3183
/* Return the number of trailing (least-significant) zero bits in MASK.
   With the GCC builtin the result is undefined for MASK == 0; the
   portable fallback returns the bit width of MASK in that case.  */

static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int count = 0;

  while (count < 8 * sizeof (mask) && (mask & 0x1) == 0)
    {
      mask >>= 1;
      count++;
    }
  return count;
#endif
}
3201
/* Return the number of set bits in MASK.  */

static inline int
popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int bits = 0;

  while (mask != 0)
    {
      bits += mask & 0x1;
      mask >>= 1;
    }
  return bits;
#endif
}
3219
3220 /* Create an entry in an ARM ELF linker hash table. */
3221
3222 static struct bfd_hash_entry *
3223 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3224 struct bfd_hash_table * table,
3225 const char * string)
3226 {
3227 struct elf32_arm_link_hash_entry * ret =
3228 (struct elf32_arm_link_hash_entry *) entry;
3229
3230 /* Allocate the structure if it has not already been allocated by a
3231 subclass. */
3232 if (ret == NULL)
3233 ret = (struct elf32_arm_link_hash_entry *)
3234 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3235 if (ret == NULL)
3236 return (struct bfd_hash_entry *) ret;
3237
3238 /* Call the allocation method of the superclass. */
3239 ret = ((struct elf32_arm_link_hash_entry *)
3240 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3241 table, string));
3242 if (ret != NULL)
3243 {
3244 ret->dyn_relocs = NULL;
3245 ret->tls_type = GOT_UNKNOWN;
3246 ret->tlsdesc_got = (bfd_vma) -1;
3247 ret->plt.thumb_refcount = 0;
3248 ret->plt.maybe_thumb_refcount = 0;
3249 ret->plt.noncall_refcount = 0;
3250 ret->plt.got_offset = -1;
3251 ret->is_iplt = FALSE;
3252 ret->export_glue = NULL;
3253
3254 ret->stub_cache = NULL;
3255 }
3256
3257 return (struct bfd_hash_entry *) ret;
3258 }
3259
/* Ensure that we have allocated bookkeeping structures for ABFD's local
   symbols.  Returns FALSE only on allocation failure.  */

static bfd_boolean
elf32_arm_allocate_local_sym_info (bfd *abfd)
{
  /* The local GOT refcount array doubles as the "already allocated"
     flag for all four per-symbol arrays.  */
  if (elf_local_got_refcounts (abfd) == NULL)
    {
      bfd_size_type num_syms;
      bfd_size_type size;
      char *data;

      /* One slot per local symbol, for each of the four arrays, all
	 carved out of a single zeroed allocation.  The char array is
	 deliberately placed last, after the pointer-sized arrays.  */
      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
      size = num_syms * (sizeof (bfd_signed_vma)
			 + sizeof (struct arm_local_iplt_info *)
			 + sizeof (bfd_vma)
			 + sizeof (char));
      data = bfd_zalloc (abfd, size);
      if (data == NULL)
	return FALSE;

      elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
      data += num_syms * sizeof (bfd_signed_vma);

      elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
      data += num_syms * sizeof (struct arm_local_iplt_info *);

      elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
      data += num_syms * sizeof (bfd_vma);

      elf32_arm_local_got_tls_type (abfd) = data;
    }
  return TRUE;
}
3294
3295 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3296 to input bfd ABFD. Create the information if it doesn't already exist.
3297 Return null if an allocation fails. */
3298
3299 static struct arm_local_iplt_info *
3300 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3301 {
3302 struct arm_local_iplt_info **ptr;
3303
3304 if (!elf32_arm_allocate_local_sym_info (abfd))
3305 return NULL;
3306
3307 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3308 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3309 if (*ptr == NULL)
3310 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3311 return *ptr;
3312 }
3313
3314 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3315 in ABFD's symbol table. If the symbol is global, H points to its
3316 hash table entry, otherwise H is null.
3317
3318 Return true if the symbol does have PLT information. When returning
3319 true, point *ROOT_PLT at the target-independent reference count/offset
3320 union and *ARM_PLT at the ARM-specific information. */
3321
3322 static bfd_boolean
3323 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3324 unsigned long r_symndx, union gotplt_union **root_plt,
3325 struct arm_plt_info **arm_plt)
3326 {
3327 struct arm_local_iplt_info *local_iplt;
3328
3329 if (h != NULL)
3330 {
3331 *root_plt = &h->root.plt;
3332 *arm_plt = &h->plt;
3333 return TRUE;
3334 }
3335
3336 if (elf32_arm_local_iplt (abfd) == NULL)
3337 return FALSE;
3338
3339 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3340 if (local_iplt == NULL)
3341 return FALSE;
3342
3343 *root_plt = &local_iplt->root;
3344 *arm_plt = &local_iplt->arm;
3345 return TRUE;
3346 }
3347
3348 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3349 before it. */
3350
3351 static bfd_boolean
3352 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3353 struct arm_plt_info *arm_plt)
3354 {
3355 struct elf32_arm_link_hash_table *htab;
3356
3357 htab = elf32_arm_hash_table (info);
3358 return (arm_plt->thumb_refcount != 0
3359 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3360 }
3361
3362 /* Return a pointer to the head of the dynamic reloc list that should
3363 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3364 ABFD's symbol table. Return null if an error occurs. */
3365
3366 static struct elf_dyn_relocs **
3367 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3368 Elf_Internal_Sym *isym)
3369 {
3370 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3371 {
3372 struct arm_local_iplt_info *local_iplt;
3373
3374 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3375 if (local_iplt == NULL)
3376 return NULL;
3377 return &local_iplt->dyn_relocs;
3378 }
3379 else
3380 {
3381 /* Track dynamic relocs needed for local syms too.
3382 We really need local syms available to do this
3383 easily. Oh well. */
3384 asection *s;
3385 void *vpp;
3386
3387 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3388 if (s == NULL)
3389 abort ();
3390
3391 vpp = &elf_section_data (s)->local_dynrel;
3392 return (struct elf_dyn_relocs **) vpp;
3393 }
3394 }
3395
3396 /* Initialize an entry in the stub hash table. */
3397
3398 static struct bfd_hash_entry *
3399 stub_hash_newfunc (struct bfd_hash_entry *entry,
3400 struct bfd_hash_table *table,
3401 const char *string)
3402 {
3403 /* Allocate the structure if it has not already been allocated by a
3404 subclass. */
3405 if (entry == NULL)
3406 {
3407 entry = (struct bfd_hash_entry *)
3408 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3409 if (entry == NULL)
3410 return entry;
3411 }
3412
3413 /* Call the allocation method of the superclass. */
3414 entry = bfd_hash_newfunc (entry, table, string);
3415 if (entry != NULL)
3416 {
3417 struct elf32_arm_stub_hash_entry *eh;
3418
3419 /* Initialize the local fields. */
3420 eh = (struct elf32_arm_stub_hash_entry *) entry;
3421 eh->stub_sec = NULL;
3422 eh->stub_offset = 0;
3423 eh->source_value = 0;
3424 eh->target_value = 0;
3425 eh->target_section = NULL;
3426 eh->orig_insn = 0;
3427 eh->stub_type = arm_stub_none;
3428 eh->stub_size = 0;
3429 eh->stub_template = NULL;
3430 eh->stub_template_size = 0;
3431 eh->h = NULL;
3432 eh->id_sec = NULL;
3433 eh->output_name = NULL;
3434 }
3435
3436 return entry;
3437 }
3438
3439 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3440 shortcuts to them in our hash table. */
3441
3442 static bfd_boolean
3443 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3444 {
3445 struct elf32_arm_link_hash_table *htab;
3446
3447 htab = elf32_arm_hash_table (info);
3448 if (htab == NULL)
3449 return FALSE;
3450
3451 /* BPABI objects never have a GOT, or associated sections. */
3452 if (htab->symbian_p)
3453 return TRUE;
3454
3455 if (! _bfd_elf_create_got_section (dynobj, info))
3456 return FALSE;
3457
3458 return TRUE;
3459 }
3460
3461 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3462
3463 static bfd_boolean
3464 create_ifunc_sections (struct bfd_link_info *info)
3465 {
3466 struct elf32_arm_link_hash_table *htab;
3467 const struct elf_backend_data *bed;
3468 bfd *dynobj;
3469 asection *s;
3470 flagword flags;
3471
3472 htab = elf32_arm_hash_table (info);
3473 dynobj = htab->root.dynobj;
3474 bed = get_elf_backend_data (dynobj);
3475 flags = bed->dynamic_sec_flags;
3476
3477 if (htab->root.iplt == NULL)
3478 {
3479 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3480 flags | SEC_READONLY | SEC_CODE);
3481 if (s == NULL
3482 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3483 return FALSE;
3484 htab->root.iplt = s;
3485 }
3486
3487 if (htab->root.irelplt == NULL)
3488 {
3489 s = bfd_make_section_anyway_with_flags (dynobj,
3490 RELOC_SECTION (htab, ".iplt"),
3491 flags | SEC_READONLY);
3492 if (s == NULL
3493 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3494 return FALSE;
3495 htab->root.irelplt = s;
3496 }
3497
3498 if (htab->root.igotplt == NULL)
3499 {
3500 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3501 if (s == NULL
3502 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3503 return FALSE;
3504 htab->root.igotplt = s;
3505 }
3506 return TRUE;
3507 }
3508
3509 /* Determine if we're dealing with a Thumb only architecture. */
3510
3511 static bfd_boolean
3512 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3513 {
3514 int arch;
3515 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3516 Tag_CPU_arch_profile);
3517
3518 if (profile)
3519 return profile == 'M';
3520
3521 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3522
3523 /* Force return logic to be reviewed for each new architecture. */
3524 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3525 || arch == TAG_CPU_ARCH_V8M_BASE
3526 || arch == TAG_CPU_ARCH_V8M_MAIN);
3527
3528 if (arch == TAG_CPU_ARCH_V6_M
3529 || arch == TAG_CPU_ARCH_V6S_M
3530 || arch == TAG_CPU_ARCH_V7E_M
3531 || arch == TAG_CPU_ARCH_V8M_BASE
3532 || arch == TAG_CPU_ARCH_V8M_MAIN)
3533 return TRUE;
3534
3535 return FALSE;
3536 }
3537
3538 /* Determine if we're dealing with a Thumb-2 object. */
3539
3540 static bfd_boolean
3541 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3542 {
3543 int arch;
3544 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3545 Tag_THUMB_ISA_use);
3546
3547 if (thumb_isa)
3548 return thumb_isa == 2;
3549
3550 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3551
3552 /* Force return logic to be reviewed for each new architecture. */
3553 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3554 || arch == TAG_CPU_ARCH_V8M_BASE
3555 || arch == TAG_CPU_ARCH_V8M_MAIN);
3556
3557 return (arch == TAG_CPU_ARCH_V6T2
3558 || arch == TAG_CPU_ARCH_V7
3559 || arch == TAG_CPU_ARCH_V7E_M
3560 || arch == TAG_CPU_ARCH_V8
3561 || arch == TAG_CPU_ARCH_V8M_MAIN);
3562 }
3563
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* Make sure the GOT sections exist before the rest.  */
  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  /* Record short-cuts to the copy-relocation sections; the .rel(a).bss
     short-cut is only set for non-PIC links.  */
  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
  if (!bfd_link_pic (info))
    htab->srelbss = bfd_get_linker_section (dynobj,
					    RELOC_SECTION (htab, ".bss"));

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      /* VxWorks shared links use a headerless PLT with differently
	 sized entries from executable links.  */
      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.
	 OBFD is temporarily swapped for DYNOBJ so that using_thumb_only()
	 inspects the input bfd's attributes, then restored.  */
      bfd * saved_obfd = htab->obfd;

      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* By this point all of the dynamic sections we rely on must exist.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->sdynbss
      || (!bfd_link_pic (info) && !htab->srelbss))
    abort ();

  return TRUE;
}
3635
/* Copy the extra info we tack onto an elf_link_hash_entry.  Called when
   symbol IND is made indirect to (or a weak alias of) DIR; moves IND's
   ARM-specific bookkeeping onto DIR.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    /* Unlink the merged entry from EIND's list.  */
		    *pp = p->next;
		    break;
		  }
	      /* Only advance PP when P was not unlinked.  */
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Append EDIR's old list to what remains of EIND's.  */
	  *pp = edir->dyn_relocs;
	}

      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* Only take the indirect symbol's TLS type when the direct symbol
	 has no GOT references of its own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
3702
3703 /* Destroy an ARM elf linker hash table. */
3704
3705 static void
3706 elf32_arm_link_hash_table_free (bfd *obfd)
3707 {
3708 struct elf32_arm_link_hash_table *ret
3709 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
3710
3711 bfd_hash_table_free (&ret->stub_hash_table);
3712 _bfd_elf_link_hash_table_free (obfd);
3713 }
3714
/* Create an ARM elf linker hash table.  Returns NULL on allocation
   failure.  */

static struct bfd_link_hash_table *
elf32_arm_link_hash_table_create (bfd *abfd)
{
  struct elf32_arm_link_hash_table *ret;
  bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);

  /* bfd_zmalloc zeroes the structure, so every field not explicitly
     set below starts out as 0/NULL.  */
  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
				      elf32_arm_link_hash_newfunc,
				      sizeof (struct elf32_arm_link_hash_entry),
				      ARM_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
  /* Default PLT layout sizes, in bytes; may be overridden later
     (e.g. for VxWorks or Thumb-only targets).  */
#ifdef FOUR_WORD_PLT
  ret->plt_header_size = 16;
  ret->plt_entry_size = 16;
#else
  ret->plt_header_size = 20;
  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
#endif
  ret->use_rel = 1;
  ret->obfd = abfd;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf32_arm_stub_hash_entry)))
    {
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }
  /* Install our destructor so the stub hash table is freed along with
     the rest of the table.  */
  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;

  return &ret->root.root;
}
3758
3759 /* Determine what kind of NOPs are available. */
3760
3761 static bfd_boolean
3762 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3763 {
3764 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3765 Tag_CPU_arch);
3766
3767 /* Force return logic to be reviewed for each new architecture. */
3768 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3769 || arch == TAG_CPU_ARCH_V8M_BASE
3770 || arch == TAG_CPU_ARCH_V8M_MAIN);
3771
3772 return (arch == TAG_CPU_ARCH_V6T2
3773 || arch == TAG_CPU_ARCH_V6K
3774 || arch == TAG_CPU_ARCH_V7
3775 || arch == TAG_CPU_ARCH_V8);
3776 }
3777
3778 static bfd_boolean
3779 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3780 {
3781 switch (stub_type)
3782 {
3783 case arm_stub_long_branch_thumb_only:
3784 case arm_stub_long_branch_v4t_thumb_arm:
3785 case arm_stub_short_branch_v4t_thumb_arm:
3786 case arm_stub_long_branch_v4t_thumb_arm_pic:
3787 case arm_stub_long_branch_v4t_thumb_tls_pic:
3788 case arm_stub_long_branch_thumb_only_pic:
3789 return TRUE;
3790 case arm_stub_none:
3791 BFD_FAIL ();
3792 return FALSE;
3793 break;
3794 default:
3795 return FALSE;
3796 }
3797 }
3798
3799 /* Determine the type of stub needed, if any, for a call. */
3800
3801 static enum elf32_arm_stub_type
3802 arm_type_of_stub (struct bfd_link_info *info,
3803 asection *input_sec,
3804 const Elf_Internal_Rela *rel,
3805 unsigned char st_type,
3806 enum arm_st_branch_type *actual_branch_type,
3807 struct elf32_arm_link_hash_entry *hash,
3808 bfd_vma destination,
3809 asection *sym_sec,
3810 bfd *input_bfd,
3811 const char *name)
3812 {
3813 bfd_vma location;
3814 bfd_signed_vma branch_offset;
3815 unsigned int r_type;
3816 struct elf32_arm_link_hash_table * globals;
3817 int thumb2;
3818 int thumb_only;
3819 enum elf32_arm_stub_type stub_type = arm_stub_none;
3820 int use_plt = 0;
3821 enum arm_st_branch_type branch_type = *actual_branch_type;
3822 union gotplt_union *root_plt;
3823 struct arm_plt_info *arm_plt;
3824
3825 if (branch_type == ST_BRANCH_LONG)
3826 return stub_type;
3827
3828 globals = elf32_arm_hash_table (info);
3829 if (globals == NULL)
3830 return stub_type;
3831
3832 thumb_only = using_thumb_only (globals);
3833
3834 thumb2 = using_thumb2 (globals);
3835
3836 /* Determine where the call point is. */
3837 location = (input_sec->output_offset
3838 + input_sec->output_section->vma
3839 + rel->r_offset);
3840
3841 r_type = ELF32_R_TYPE (rel->r_info);
3842
3843 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
3844 are considering a function call relocation. */
3845 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3846 || r_type == R_ARM_THM_JUMP19)
3847 && branch_type == ST_BRANCH_TO_ARM)
3848 branch_type = ST_BRANCH_TO_THUMB;
3849
3850 /* For TLS call relocs, it is the caller's responsibility to provide
3851 the address of the appropriate trampoline. */
3852 if (r_type != R_ARM_TLS_CALL
3853 && r_type != R_ARM_THM_TLS_CALL
3854 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3855 &root_plt, &arm_plt)
3856 && root_plt->offset != (bfd_vma) -1)
3857 {
3858 asection *splt;
3859
3860 if (hash == NULL || hash->is_iplt)
3861 splt = globals->root.iplt;
3862 else
3863 splt = globals->root.splt;
3864 if (splt != NULL)
3865 {
3866 use_plt = 1;
3867
3868 /* Note when dealing with PLT entries: the main PLT stub is in
3869 ARM mode, so if the branch is in Thumb mode, another
3870 Thumb->ARM stub will be inserted later just before the ARM
3871 PLT stub. We don't take this extra distance into account
3872 here, because if a long branch stub is needed, we'll add a
3873 Thumb->Arm one and branch directly to the ARM PLT entry
3874 because it avoids spreading offset corrections in several
3875 places. */
3876
3877 destination = (splt->output_section->vma
3878 + splt->output_offset
3879 + root_plt->offset);
3880 st_type = STT_FUNC;
3881 branch_type = ST_BRANCH_TO_ARM;
3882 }
3883 }
3884 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3885 BFD_ASSERT (st_type != STT_GNU_IFUNC);
3886
3887 branch_offset = (bfd_signed_vma)(destination - location);
3888
3889 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3890 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
3891 {
3892 /* Handle cases where:
3893 - this call goes too far (different Thumb/Thumb2 max
3894 distance)
3895 - it's a Thumb->Arm call and blx is not available, or it's a
3896 Thumb->Arm branch (not bl). A stub is needed in this case,
3897 but only if this call is not through a PLT entry. Indeed,
3898 PLT stubs handle mode switching already.
3899 */
3900 if ((!thumb2
3901 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3902 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3903 || (thumb2
3904 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3905 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3906 || (thumb2
3907 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
3908 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
3909 && (r_type == R_ARM_THM_JUMP19))
3910 || (branch_type == ST_BRANCH_TO_ARM
3911 && (((r_type == R_ARM_THM_CALL
3912 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3913 || (r_type == R_ARM_THM_JUMP24)
3914 || (r_type == R_ARM_THM_JUMP19))
3915 && !use_plt))
3916 {
3917 if (branch_type == ST_BRANCH_TO_THUMB)
3918 {
3919 /* Thumb to thumb. */
3920 if (!thumb_only)
3921 {
3922 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3923 /* PIC stubs. */
3924 ? ((globals->use_blx
3925 && (r_type == R_ARM_THM_CALL))
3926 /* V5T and above. Stub starts with ARM code, so
3927 we must be able to switch mode before
3928 reaching it, which is only possible for 'bl'
3929 (ie R_ARM_THM_CALL relocation). */
3930 ? arm_stub_long_branch_any_thumb_pic
3931 /* On V4T, use Thumb code only. */
3932 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3933
3934 /* non-PIC stubs. */
3935 : ((globals->use_blx
3936 && (r_type == R_ARM_THM_CALL))
3937 /* V5T and above. */
3938 ? arm_stub_long_branch_any_any
3939 /* V4T. */
3940 : arm_stub_long_branch_v4t_thumb_thumb);
3941 }
3942 else
3943 {
3944 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3945 /* PIC stub. */
3946 ? arm_stub_long_branch_thumb_only_pic
3947 /* non-PIC stub. */
3948 : arm_stub_long_branch_thumb_only;
3949 }
3950 }
3951 else
3952 {
3953 /* Thumb to arm. */
3954 if (sym_sec != NULL
3955 && sym_sec->owner != NULL
3956 && !INTERWORK_FLAG (sym_sec->owner))
3957 {
3958 (*_bfd_error_handler)
3959 (_("%B(%s): warning: interworking not enabled.\n"
3960 " first occurrence: %B: Thumb call to ARM"),
3961 sym_sec->owner, input_bfd, name);
3962 }
3963
3964 stub_type =
3965 (bfd_link_pic (info) | globals->pic_veneer)
3966 /* PIC stubs. */
3967 ? (r_type == R_ARM_THM_TLS_CALL
3968 /* TLS PIC stubs. */
3969 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3970 : arm_stub_long_branch_v4t_thumb_tls_pic)
3971 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3972 /* V5T PIC and above. */
3973 ? arm_stub_long_branch_any_arm_pic
3974 /* V4T PIC stub. */
3975 : arm_stub_long_branch_v4t_thumb_arm_pic))
3976
3977 /* non-PIC stubs. */
3978 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3979 /* V5T and above. */
3980 ? arm_stub_long_branch_any_any
3981 /* V4T. */
3982 : arm_stub_long_branch_v4t_thumb_arm);
3983
3984 /* Handle v4t short branches. */
3985 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3986 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3987 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3988 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3989 }
3990 }
3991 }
3992 else if (r_type == R_ARM_CALL
3993 || r_type == R_ARM_JUMP24
3994 || r_type == R_ARM_PLT32
3995 || r_type == R_ARM_TLS_CALL)
3996 {
3997 if (branch_type == ST_BRANCH_TO_THUMB)
3998 {
3999 /* Arm to thumb. */
4000
4001 if (sym_sec != NULL
4002 && sym_sec->owner != NULL
4003 && !INTERWORK_FLAG (sym_sec->owner))
4004 {
4005 (*_bfd_error_handler)
4006 (_("%B(%s): warning: interworking not enabled.\n"
4007 " first occurrence: %B: ARM call to Thumb"),
4008 sym_sec->owner, input_bfd, name);
4009 }
4010
4011 /* We have an extra 2-bytes reach because of
4012 the mode change (bit 24 (H) of BLX encoding). */
4013 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4014 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4015 || (r_type == R_ARM_CALL && !globals->use_blx)
4016 || (r_type == R_ARM_JUMP24)
4017 || (r_type == R_ARM_PLT32))
4018 {
4019 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4020 /* PIC stubs. */
4021 ? ((globals->use_blx)
4022 /* V5T and above. */
4023 ? arm_stub_long_branch_any_thumb_pic
4024 /* V4T stub. */
4025 : arm_stub_long_branch_v4t_arm_thumb_pic)
4026
4027 /* non-PIC stubs. */
4028 : ((globals->use_blx)
4029 /* V5T and above. */
4030 ? arm_stub_long_branch_any_any
4031 /* V4T. */
4032 : arm_stub_long_branch_v4t_arm_thumb);
4033 }
4034 }
4035 else
4036 {
4037 /* Arm to arm. */
4038 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4039 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4040 {
4041 stub_type =
4042 (bfd_link_pic (info) | globals->pic_veneer)
4043 /* PIC stubs. */
4044 ? (r_type == R_ARM_TLS_CALL
4045 /* TLS PIC Stub. */
4046 ? arm_stub_long_branch_any_tls_pic
4047 : (globals->nacl_p
4048 ? arm_stub_long_branch_arm_nacl_pic
4049 : arm_stub_long_branch_any_arm_pic))
4050 /* non-PIC stubs. */
4051 : (globals->nacl_p
4052 ? arm_stub_long_branch_arm_nacl
4053 : arm_stub_long_branch_any_any);
4054 }
4055 }
4056 }
4057
4058 /* If a stub is needed, record the actual destination type. */
4059 if (stub_type != arm_stub_none)
4060 *actual_branch_type = branch_type;
4061
4062 return stub_type;
4063 }
4064
4065 /* Build a name for an entry in the stub hash table. */
4066
4067 static char *
4068 elf32_arm_stub_name (const asection *input_section,
4069 const asection *sym_sec,
4070 const struct elf32_arm_link_hash_entry *hash,
4071 const Elf_Internal_Rela *rel,
4072 enum elf32_arm_stub_type stub_type)
4073 {
4074 char *stub_name;
4075 bfd_size_type len;
4076
4077 if (hash)
4078 {
4079 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4080 stub_name = (char *) bfd_malloc (len);
4081 if (stub_name != NULL)
4082 sprintf (stub_name, "%08x_%s+%x_%d",
4083 input_section->id & 0xffffffff,
4084 hash->root.root.root.string,
4085 (int) rel->r_addend & 0xffffffff,
4086 (int) stub_type);
4087 }
4088 else
4089 {
4090 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4091 stub_name = (char *) bfd_malloc (len);
4092 if (stub_name != NULL)
4093 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4094 input_section->id & 0xffffffff,
4095 sym_sec->id & 0xffffffff,
4096 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4097 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4098 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4099 (int) rel->r_addend & 0xffffffff,
4100 (int) stub_type);
4101 }
4102
4103 return stub_name;
4104 }
4105
4106 /* Look up an entry in the stub hash. Stub entries are cached because
4107 creating the stub name takes a bit of time. */
4108
4109 static struct elf32_arm_stub_hash_entry *
4110 elf32_arm_get_stub_entry (const asection *input_section,
4111 const asection *sym_sec,
4112 struct elf_link_hash_entry *hash,
4113 const Elf_Internal_Rela *rel,
4114 struct elf32_arm_link_hash_table *htab,
4115 enum elf32_arm_stub_type stub_type)
4116 {
4117 struct elf32_arm_stub_hash_entry *stub_entry;
4118 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4119 const asection *id_sec;
4120
4121 if ((input_section->flags & SEC_CODE) == 0)
4122 return NULL;
4123
4124 /* If this input section is part of a group of sections sharing one
4125 stub section, then use the id of the first section in the group.
4126 Stub names need to include a section id, as there may well be
4127 more than one stub used to reach say, printf, and we need to
4128 distinguish between them. */
4129 id_sec = htab->stub_group[input_section->id].link_sec;
4130
4131 if (h != NULL && h->stub_cache != NULL
4132 && h->stub_cache->h == h
4133 && h->stub_cache->id_sec == id_sec
4134 && h->stub_cache->stub_type == stub_type)
4135 {
4136 stub_entry = h->stub_cache;
4137 }
4138 else
4139 {
4140 char *stub_name;
4141
4142 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4143 if (stub_name == NULL)
4144 return NULL;
4145
4146 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4147 stub_name, FALSE, FALSE);
4148 if (h != NULL)
4149 h->stub_cache = stub_entry;
4150
4151 free (stub_name);
4152 }
4153
4154 return stub_entry;
4155 }
4156
4157 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4158 section. */
4159
4160 static bfd_boolean
4161 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4162 {
4163 if (stub_type >= max_stub_type)
4164 abort (); /* Should be unreachable. */
4165
4166 return FALSE;
4167 }
4168
4169 /* Required alignment (as a power of 2) for the dedicated section holding
4170 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4171 with input sections. */
4172
4173 static int
4174 arm_dedicated_stub_output_section_required_alignment
4175 (enum elf32_arm_stub_type stub_type)
4176 {
4177 if (stub_type >= max_stub_type)
4178 abort (); /* Should be unreachable. */
4179
4180 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4181 return 0;
4182 }
4183
4184 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4185 NULL if veneers of this type are interspersed with input sections. */
4186
4187 static const char *
4188 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4189 {
4190 if (stub_type >= max_stub_type)
4191 abort (); /* Should be unreachable. */
4192
4193 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4194 return NULL;
4195 }
4196
4197 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4198 returns the address of the hash table field in HTAB holding a pointer to the
4199 corresponding input section. Otherwise, returns NULL. */
4200
4201 static asection **
4202 arm_dedicated_stub_input_section_ptr
4203 (struct elf32_arm_link_hash_table *htab ATTRIBUTE_UNUSED,
4204 enum elf32_arm_stub_type stub_type)
4205 {
4206 if (stub_type >= max_stub_type)
4207 abort (); /* Should be unreachable. */
4208
4209 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4210 return NULL;
4211 }
4212
/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
   is the section that branches into the veneer, and can be NULL if the stub
   should go in a dedicated output section.  Returns a pointer to the stub
   section, and the section to which the stub section will be attached (in
   *LINK_SEC_P).  LINK_SEC_P may be NULL.  Returns NULL on failure, after
   reporting an error for a missing dedicated output section.  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      /* Veneers of this type go in their own output section, which must
	 already exist in the output BFD.  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  (*_bfd_error_handler) (_("No address assigned to the veneers output "
				   "section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Interspersed veneers: the stub section is shared by the whole
	 stub group, keyed on the group leader (link_sec).  Fall back to
	 the leader's slot when this section has no stub section yet.  */
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      align = htab->nacl_p ? 4 : 3;
    }

  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      /* Name the new stub section "<prefix><STUB_SUFFIX>".  */
      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Also cache the stub section under the branching section's own id,
     so later lookups for this section hit directly.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
4291
4292 /* Add a new stub entry to the stub hash. Not all fields of the new
4293 stub entry are initialised. */
4294
4295 static struct elf32_arm_stub_hash_entry *
4296 elf32_arm_add_stub (const char *stub_name, asection *section,
4297 struct elf32_arm_link_hash_table *htab,
4298 enum elf32_arm_stub_type stub_type)
4299 {
4300 asection *link_sec;
4301 asection *stub_sec;
4302 struct elf32_arm_stub_hash_entry *stub_entry;
4303
4304 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4305 stub_type);
4306 if (stub_sec == NULL)
4307 return NULL;
4308
4309 /* Enter this entry into the linker stub hash table. */
4310 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4311 TRUE, FALSE);
4312 if (stub_entry == NULL)
4313 {
4314 if (section == NULL)
4315 section = stub_sec;
4316 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4317 section->owner,
4318 stub_name);
4319 return NULL;
4320 }
4321
4322 stub_entry->stub_sec = stub_sec;
4323 stub_entry->stub_offset = 0;
4324 stub_entry->id_sec = link_sec;
4325
4326 return stub_entry;
4327 }
4328
4329 /* Store an Arm insn into an output section not processed by
4330 elf32_arm_write_section. */
4331
4332 static void
4333 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4334 bfd * output_bfd, bfd_vma val, void * ptr)
4335 {
4336 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4337 bfd_putl32 (val, ptr);
4338 else
4339 bfd_putb32 (val, ptr);
4340 }
4341
4342 /* Store a 16-bit Thumb insn into an output section not processed by
4343 elf32_arm_write_section. */
4344
4345 static void
4346 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4347 bfd * output_bfd, bfd_vma val, void * ptr)
4348 {
4349 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4350 bfd_putl16 (val, ptr);
4351 else
4352 bfd_putb16 (val, ptr);
4353 }
4354
4355 /* Store a Thumb2 insn into an output section not processed by
4356 elf32_arm_write_section. */
4357
4358 static void
4359 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4360 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4361 {
4362 /* T2 instructions are 16-bit streamed. */
4363 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4364 {
4365 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4366 bfd_putl16 ((val & 0xffff), ptr + 2);
4367 }
4368 else
4369 {
4370 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4371 bfd_putb16 ((val & 0xffff), ptr + 2);
4372 }
4373 }
4374
4375 /* If it's possible to change R_TYPE to a more efficient access
4376 model, return the new reloc type. */
4377
4378 static unsigned
4379 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4380 struct elf_link_hash_entry *h)
4381 {
4382 int is_local = (h == NULL);
4383
4384 if (bfd_link_pic (info)
4385 || (h && h->root.type == bfd_link_hash_undefweak))
4386 return r_type;
4387
4388 /* We do not support relaxations for Old TLS models. */
4389 switch (r_type)
4390 {
4391 case R_ARM_TLS_GOTDESC:
4392 case R_ARM_TLS_CALL:
4393 case R_ARM_THM_TLS_CALL:
4394 case R_ARM_TLS_DESCSEQ:
4395 case R_ARM_THM_TLS_DESCSEQ:
4396 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4397 }
4398
4399 return r_type;
4400 }
4401
4402 static bfd_reloc_status_type elf32_arm_final_link_relocate
4403 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4404 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4405 const char *, unsigned char, enum arm_st_branch_type,
4406 struct elf_link_hash_entry *, bfd_boolean *, char **);
4407
/* Return the alignment in bytes required at the start of a stub of type
   STUB_TYPE.  Aborts on an unknown stub type.  */

static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    /* Cortex-A8 erratum veneers only need 2-byte alignment.
       arm_build_one_stub relies on this value to order A8 fixes after
       the other (4-byte aligned) stubs.  */
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    /* All other long/short branch stubs need 4-byte alignment.  */
    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_a8_veneer_blx:
      return 4;

    /* NaCl stubs use 16-byte alignment.  */
    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
4443
4444 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4445 veneering (TRUE) or have their own symbol (FALSE). */
4446
4447 static bfd_boolean
4448 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4449 {
4450 if (stub_type >= max_stub_type)
4451 abort (); /* Should be unreachable. */
4452
4453 return FALSE;
4454 }
4455
4456 /* Returns the padding needed for the dedicated section used stubs of type
4457 STUB_TYPE. */
4458
4459 static int
4460 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4461 {
4462 if (stub_type >= max_stub_type)
4463 abort (); /* Should be unreachable. */
4464
4465 return 0;
4466 }
4467
4468 static bfd_boolean
4469 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4470 void * in_arg)
4471 {
4472 #define MAXRELOCS 3
4473 struct elf32_arm_stub_hash_entry *stub_entry;
4474 struct elf32_arm_link_hash_table *globals;
4475 struct bfd_link_info *info;
4476 asection *stub_sec;
4477 bfd *stub_bfd;
4478 bfd_byte *loc;
4479 bfd_vma sym_value;
4480 int template_size;
4481 int size;
4482 const insn_sequence *template_sequence;
4483 int i;
4484 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4485 int stub_reloc_offset[MAXRELOCS] = {0, 0};
4486 int nrelocs = 0;
4487
4488 /* Massage our args to the form they really have. */
4489 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4490 info = (struct bfd_link_info *) in_arg;
4491
4492 globals = elf32_arm_hash_table (info);
4493 if (globals == NULL)
4494 return FALSE;
4495
4496 stub_sec = stub_entry->stub_sec;
4497
4498 if ((globals->fix_cortex_a8 < 0)
4499 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4500 /* We have to do less-strictly-aligned fixes last. */
4501 return TRUE;
4502
4503 /* Make a note of the offset within the stubs for this entry. */
4504 stub_entry->stub_offset = stub_sec->size;
4505 loc = stub_sec->contents + stub_entry->stub_offset;
4506
4507 stub_bfd = stub_sec->owner;
4508
4509 /* This is the address of the stub destination. */
4510 sym_value = (stub_entry->target_value
4511 + stub_entry->target_section->output_offset
4512 + stub_entry->target_section->output_section->vma);
4513
4514 template_sequence = stub_entry->stub_template;
4515 template_size = stub_entry->stub_template_size;
4516
4517 size = 0;
4518 for (i = 0; i < template_size; i++)
4519 {
4520 switch (template_sequence[i].type)
4521 {
4522 case THUMB16_TYPE:
4523 {
4524 bfd_vma data = (bfd_vma) template_sequence[i].data;
4525 if (template_sequence[i].reloc_addend != 0)
4526 {
4527 /* We've borrowed the reloc_addend field to mean we should
4528 insert a condition code into this (Thumb-1 branch)
4529 instruction. See THUMB16_BCOND_INSN. */
4530 BFD_ASSERT ((data & 0xff00) == 0xd000);
4531 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4532 }
4533 bfd_put_16 (stub_bfd, data, loc + size);
4534 size += 2;
4535 }
4536 break;
4537
4538 case THUMB32_TYPE:
4539 bfd_put_16 (stub_bfd,
4540 (template_sequence[i].data >> 16) & 0xffff,
4541 loc + size);
4542 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4543 loc + size + 2);
4544 if (template_sequence[i].r_type != R_ARM_NONE)
4545 {
4546 stub_reloc_idx[nrelocs] = i;
4547 stub_reloc_offset[nrelocs++] = size;
4548 }
4549 size += 4;
4550 break;
4551
4552 case ARM_TYPE:
4553 bfd_put_32 (stub_bfd, template_sequence[i].data,
4554 loc + size);
4555 /* Handle cases where the target is encoded within the
4556 instruction. */
4557 if (template_sequence[i].r_type == R_ARM_JUMP24)
4558 {
4559 stub_reloc_idx[nrelocs] = i;
4560 stub_reloc_offset[nrelocs++] = size;
4561 }
4562 size += 4;
4563 break;
4564
4565 case DATA_TYPE:
4566 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4567 stub_reloc_idx[nrelocs] = i;
4568 stub_reloc_offset[nrelocs++] = size;
4569 size += 4;
4570 break;
4571
4572 default:
4573 BFD_FAIL ();
4574 return FALSE;
4575 }
4576 }
4577
4578 stub_sec->size += size;
4579
4580 /* Stub size has already been computed in arm_size_one_stub. Check
4581 consistency. */
4582 BFD_ASSERT (size == stub_entry->stub_size);
4583
4584 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4585 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4586 sym_value |= 1;
4587
4588 /* Assume there is at least one and at most MAXRELOCS entries to relocate
4589 in each stub. */
4590 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
4591
4592 for (i = 0; i < nrelocs; i++)
4593 {
4594 Elf_Internal_Rela rel;
4595 bfd_boolean unresolved_reloc;
4596 char *error_message;
4597 bfd_vma points_to =
4598 sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
4599
4600 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4601 rel.r_info = ELF32_R_INFO (0,
4602 template_sequence[stub_reloc_idx[i]].r_type);
4603 rel.r_addend = 0;
4604
4605 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4606 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4607 template should refer back to the instruction after the original
4608 branch. We use target_section as Cortex-A8 erratum workaround stubs
4609 are only generated when both source and target are in the same
4610 section. */
4611 points_to = stub_entry->target_section->output_section->vma
4612 + stub_entry->target_section->output_offset
4613 + stub_entry->source_value;
4614
4615 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4616 (template_sequence[stub_reloc_idx[i]].r_type),
4617 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4618 points_to, info, stub_entry->target_section, "", STT_FUNC,
4619 stub_entry->branch_type,
4620 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
4621 &error_message);
4622 }
4623
4624 return TRUE;
4625 #undef MAXRELOCS
4626 }
4627
4628 /* Calculate the template, template size and instruction size for a stub.
4629 Return value is the instruction size. */
4630
4631 static unsigned int
4632 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4633 const insn_sequence **stub_template,
4634 int *stub_template_size)
4635 {
4636 const insn_sequence *template_sequence = NULL;
4637 int template_size = 0, i;
4638 unsigned int size;
4639
4640 template_sequence = stub_definitions[stub_type].template_sequence;
4641 if (stub_template)
4642 *stub_template = template_sequence;
4643
4644 template_size = stub_definitions[stub_type].template_size;
4645 if (stub_template_size)
4646 *stub_template_size = template_size;
4647
4648 size = 0;
4649 for (i = 0; i < template_size; i++)
4650 {
4651 switch (template_sequence[i].type)
4652 {
4653 case THUMB16_TYPE:
4654 size += 2;
4655 break;
4656
4657 case ARM_TYPE:
4658 case THUMB32_TYPE:
4659 case DATA_TYPE:
4660 size += 4;
4661 break;
4662
4663 default:
4664 BFD_FAIL ();
4665 return 0;
4666 }
4667 }
4668
4669 return size;
4670 }
4671
4672 /* As above, but don't actually build the stub. Just bump offset so
4673 we know stub section sizes. */
4674
4675 static bfd_boolean
4676 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4677 void *in_arg ATTRIBUTE_UNUSED)
4678 {
4679 struct elf32_arm_stub_hash_entry *stub_entry;
4680 const insn_sequence *template_sequence;
4681 int template_size, size;
4682
4683 /* Massage our args to the form they really have. */
4684 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4685
4686 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4687 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4688
4689 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4690 &template_size);
4691
4692 stub_entry->stub_size = size;
4693 stub_entry->stub_template = template_sequence;
4694 stub_entry->stub_template_size = template_size;
4695
4696 size = (size + 7) & ~7;
4697 stub_entry->stub_sec->size += size;
4698
4699 return TRUE;
4700 }
4701
4702 /* External entry points for sizing and building linker stubs. */
4703
/* Set up various things so that we can make a list of input sections
   for each output section included in the link.  Returns -1 on error,
   0 when no stubs will be needed, and 1 on success.  */

int
elf32_arm_setup_section_lists (bfd *output_bfd,
			       struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  unsigned int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  bfd_size_type amt;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return 0;
  if (! is_elf_hash_table (htab))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* Allocate the stub-group table, indexed by input section id
     (zero-filled, so link_sec/stub_sec start out NULL).  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;
  htab->top_id = top_id;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  /* Allocate one input-section list head per output section index.  */
  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = (asection **) bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Code output sections get a NULL list head; their input sections
     are chained on by elf32_arm_next_input_section.  */
  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
4782
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.  */

void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* Entries left as bfd_abs_section_ptr were marked uninteresting
	 (non-code) by elf32_arm_setup_section_lists.  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later in group_sections.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
4812
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.

   Walks the per-output-section lists built by
   elf32_arm_next_input_section and records, for every input section,
   the group leader (the section after which the group's stubs will be
   placed) in htab->stub_group[id].link_sec.  Frees htab->input_list
   when done.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      /* bfd_abs_section_ptr marks output sections with no code.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Greedily extend the group while the distance from the group
	     start to the end of the next section stays within
	     stub_group_size.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
4913
4914 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4915 erratum fix. */
4916
4917 static int
4918 a8_reloc_compare (const void *a, const void *b)
4919 {
4920 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4921 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4922
4923 if (ra->from < rb->from)
4924 return -1;
4925 else if (ra->from > rb->from)
4926 return 1;
4927 else
4928 return 0;
4929 }
4930
4931 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4932 const char *, char **);
4933
4934 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4935 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4936 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4937 otherwise. */
4938
4939 static bfd_boolean
4940 cortex_a8_erratum_scan (bfd *input_bfd,
4941 struct bfd_link_info *info,
4942 struct a8_erratum_fix **a8_fixes_p,
4943 unsigned int *num_a8_fixes_p,
4944 unsigned int *a8_fix_table_size_p,
4945 struct a8_erratum_reloc *a8_relocs,
4946 unsigned int num_a8_relocs,
4947 unsigned prev_num_a8_fixes,
4948 bfd_boolean *stub_changed_p)
4949 {
4950 asection *section;
4951 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4952 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4953 unsigned int num_a8_fixes = *num_a8_fixes_p;
4954 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4955
4956 if (htab == NULL)
4957 return FALSE;
4958
4959 for (section = input_bfd->sections;
4960 section != NULL;
4961 section = section->next)
4962 {
4963 bfd_byte *contents = NULL;
4964 struct _arm_elf_section_data *sec_data;
4965 unsigned int span;
4966 bfd_vma base_vma;
4967
4968 if (elf_section_type (section) != SHT_PROGBITS
4969 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4970 || (section->flags & SEC_EXCLUDE) != 0
4971 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4972 || (section->output_section == bfd_abs_section_ptr))
4973 continue;
4974
4975 base_vma = section->output_section->vma + section->output_offset;
4976
4977 if (elf_section_data (section)->this_hdr.contents != NULL)
4978 contents = elf_section_data (section)->this_hdr.contents;
4979 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4980 return TRUE;
4981
4982 sec_data = elf32_arm_section_data (section);
4983
4984 for (span = 0; span < sec_data->mapcount; span++)
4985 {
4986 unsigned int span_start = sec_data->map[span].vma;
4987 unsigned int span_end = (span == sec_data->mapcount - 1)
4988 ? section->size : sec_data->map[span + 1].vma;
4989 unsigned int i;
4990 char span_type = sec_data->map[span].type;
4991 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4992
4993 if (span_type != 't')
4994 continue;
4995
4996 /* Span is entirely within a single 4KB region: skip scanning. */
4997 if (((base_vma + span_start) & ~0xfff)
4998 == ((base_vma + span_end) & ~0xfff))
4999 continue;
5000
5001 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5002
5003 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5004 * The branch target is in the same 4KB region as the
5005 first half of the branch.
5006 * The instruction before the branch is a 32-bit
5007 length non-branch instruction. */
5008 for (i = span_start; i < span_end;)
5009 {
5010 unsigned int insn = bfd_getl16 (&contents[i]);
5011 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
5012 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
5013
5014 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5015 insn_32bit = TRUE;
5016
5017 if (insn_32bit)
5018 {
5019 /* Load the rest of the insn (in manual-friendly order). */
5020 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5021
5022 /* Encoding T4: B<c>.W. */
5023 is_b = (insn & 0xf800d000) == 0xf0009000;
5024 /* Encoding T1: BL<c>.W. */
5025 is_bl = (insn & 0xf800d000) == 0xf000d000;
5026 /* Encoding T2: BLX<c>.W. */
5027 is_blx = (insn & 0xf800d000) == 0xf000c000;
5028 /* Encoding T3: B<c>.W (not permitted in IT block). */
5029 is_bcc = (insn & 0xf800d000) == 0xf0008000
5030 && (insn & 0x07f00000) != 0x03800000;
5031 }
5032
5033 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5034
5035 if (((base_vma + i) & 0xfff) == 0xffe
5036 && insn_32bit
5037 && is_32bit_branch
5038 && last_was_32bit
5039 && ! last_was_branch)
5040 {
5041 bfd_signed_vma offset = 0;
5042 bfd_boolean force_target_arm = FALSE;
5043 bfd_boolean force_target_thumb = FALSE;
5044 bfd_vma target;
5045 enum elf32_arm_stub_type stub_type = arm_stub_none;
5046 struct a8_erratum_reloc key, *found;
5047 bfd_boolean use_plt = FALSE;
5048
5049 key.from = base_vma + i;
5050 found = (struct a8_erratum_reloc *)
5051 bsearch (&key, a8_relocs, num_a8_relocs,
5052 sizeof (struct a8_erratum_reloc),
5053 &a8_reloc_compare);
5054
5055 if (found)
5056 {
5057 char *error_message = NULL;
5058 struct elf_link_hash_entry *entry;
5059
5060 /* We don't care about the error returned from this
5061 function, only if there is glue or not. */
5062 entry = find_thumb_glue (info, found->sym_name,
5063 &error_message);
5064
5065 if (entry)
5066 found->non_a8_stub = TRUE;
5067
5068 /* Keep a simpler condition, for the sake of clarity. */
5069 if (htab->root.splt != NULL && found->hash != NULL
5070 && found->hash->root.plt.offset != (bfd_vma) -1)
5071 use_plt = TRUE;
5072
5073 if (found->r_type == R_ARM_THM_CALL)
5074 {
5075 if (found->branch_type == ST_BRANCH_TO_ARM
5076 || use_plt)
5077 force_target_arm = TRUE;
5078 else
5079 force_target_thumb = TRUE;
5080 }
5081 }
5082
5083 /* Check if we have an offending branch instruction. */
5084
5085 if (found && found->non_a8_stub)
5086 /* We've already made a stub for this instruction, e.g.
5087 it's a long branch or a Thumb->ARM stub. Assume that
5088 stub will suffice to work around the A8 erratum (see
5089 setting of always_after_branch above). */
5090 ;
5091 else if (is_bcc)
5092 {
5093 offset = (insn & 0x7ff) << 1;
5094 offset |= (insn & 0x3f0000) >> 4;
5095 offset |= (insn & 0x2000) ? 0x40000 : 0;
5096 offset |= (insn & 0x800) ? 0x80000 : 0;
5097 offset |= (insn & 0x4000000) ? 0x100000 : 0;
5098 if (offset & 0x100000)
5099 offset |= ~ ((bfd_signed_vma) 0xfffff);
5100 stub_type = arm_stub_a8_veneer_b_cond;
5101 }
5102 else if (is_b || is_bl || is_blx)
5103 {
5104 int s = (insn & 0x4000000) != 0;
5105 int j1 = (insn & 0x2000) != 0;
5106 int j2 = (insn & 0x800) != 0;
5107 int i1 = !(j1 ^ s);
5108 int i2 = !(j2 ^ s);
5109
5110 offset = (insn & 0x7ff) << 1;
5111 offset |= (insn & 0x3ff0000) >> 4;
5112 offset |= i2 << 22;
5113 offset |= i1 << 23;
5114 offset |= s << 24;
5115 if (offset & 0x1000000)
5116 offset |= ~ ((bfd_signed_vma) 0xffffff);
5117
5118 if (is_blx)
5119 offset &= ~ ((bfd_signed_vma) 3);
5120
5121 stub_type = is_blx ? arm_stub_a8_veneer_blx :
5122 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5123 }
5124
5125 if (stub_type != arm_stub_none)
5126 {
5127 bfd_vma pc_for_insn = base_vma + i + 4;
5128
5129 /* The original instruction is a BL, but the target is
5130 an ARM instruction. If we were not making a stub,
5131 the BL would have been converted to a BLX. Use the
5132 BLX stub instead in that case. */
5133 if (htab->use_blx && force_target_arm
5134 && stub_type == arm_stub_a8_veneer_bl)
5135 {
5136 stub_type = arm_stub_a8_veneer_blx;
5137 is_blx = TRUE;
5138 is_bl = FALSE;
5139 }
5140 /* Conversely, if the original instruction was
5141 BLX but the target is Thumb mode, use the BL
5142 stub. */
5143 else if (force_target_thumb
5144 && stub_type == arm_stub_a8_veneer_blx)
5145 {
5146 stub_type = arm_stub_a8_veneer_bl;
5147 is_blx = FALSE;
5148 is_bl = TRUE;
5149 }
5150
5151 if (is_blx)
5152 pc_for_insn &= ~ ((bfd_vma) 3);
5153
5154 /* If we found a relocation, use the proper destination,
5155 not the offset in the (unrelocated) instruction.
5156 Note this is always done if we switched the stub type
5157 above. */
5158 if (found)
5159 offset =
5160 (bfd_signed_vma) (found->destination - pc_for_insn);
5161
5162 /* If the stub will use a Thumb-mode branch to a
5163 PLT target, redirect it to the preceding Thumb
5164 entry point. */
5165 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5166 offset -= PLT_THUMB_STUB_SIZE;
5167
5168 target = pc_for_insn + offset;
5169
5170 /* The BLX stub is ARM-mode code. Adjust the offset to
5171 take the different PC value (+8 instead of +4) into
5172 account. */
5173 if (stub_type == arm_stub_a8_veneer_blx)
5174 offset += 4;
5175
5176 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5177 {
5178 char *stub_name = NULL;
5179
5180 if (num_a8_fixes == a8_fix_table_size)
5181 {
5182 a8_fix_table_size *= 2;
5183 a8_fixes = (struct a8_erratum_fix *)
5184 bfd_realloc (a8_fixes,
5185 sizeof (struct a8_erratum_fix)
5186 * a8_fix_table_size);
5187 }
5188
5189 if (num_a8_fixes < prev_num_a8_fixes)
5190 {
5191 /* If we're doing a subsequent scan,
5192 check if we've found the same fix as
5193 before, and try and reuse the stub
5194 name. */
5195 stub_name = a8_fixes[num_a8_fixes].stub_name;
5196 if ((a8_fixes[num_a8_fixes].section != section)
5197 || (a8_fixes[num_a8_fixes].offset != i))
5198 {
5199 free (stub_name);
5200 stub_name = NULL;
5201 *stub_changed_p = TRUE;
5202 }
5203 }
5204
5205 if (!stub_name)
5206 {
5207 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5208 if (stub_name != NULL)
5209 sprintf (stub_name, "%x:%x", section->id, i);
5210 }
5211
5212 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5213 a8_fixes[num_a8_fixes].section = section;
5214 a8_fixes[num_a8_fixes].offset = i;
5215 a8_fixes[num_a8_fixes].target_offset =
5216 target - base_vma;
5217 a8_fixes[num_a8_fixes].orig_insn = insn;
5218 a8_fixes[num_a8_fixes].stub_name = stub_name;
5219 a8_fixes[num_a8_fixes].stub_type = stub_type;
5220 a8_fixes[num_a8_fixes].branch_type =
5221 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5222
5223 num_a8_fixes++;
5224 }
5225 }
5226 }
5227
5228 i += insn_32bit ? 4 : 2;
5229 last_was_32bit = insn_32bit;
5230 last_was_branch = is_32bit_branch;
5231 }
5232 }
5233
5234 if (elf_section_data (section)->this_hdr.contents == NULL)
5235 free (contents);
5236 }
5237
5238 *a8_fixes_p = a8_fixes;
5239 *num_a8_fixes_p = num_a8_fixes;
5240 *a8_fix_table_size_p = a8_fix_table_size;
5241
5242 return FALSE;
5243 }
5244
5245 /* Create or update a stub entry depending on whether the stub can already be
5246 found in HTAB. The stub is identified by:
5247 - its type STUB_TYPE
5248 - its source branch (note that several can share the same stub) whose
5249 section and relocation (if any) are given by SECTION and IRELA
5250 respectively
5251 - its target symbol whose input section, hash, name, value and branch type
5252 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5253 respectively
5254
5255 If found, the value of the stub's target symbol is updated from SYM_VALUE
5256 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5257 TRUE and the stub entry is initialized.
5258
5259 Returns whether the stub could be successfully created or updated, or FALSE
5260 if an error occured. */
5261
5262 static bfd_boolean
5263 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5264 enum elf32_arm_stub_type stub_type, asection *section,
5265 Elf_Internal_Rela *irela, asection *sym_sec,
5266 struct elf32_arm_link_hash_entry *hash, char *sym_name,
5267 bfd_vma sym_value, enum arm_st_branch_type branch_type,
5268 bfd_boolean *new_stub)
5269 {
5270 const asection *id_sec;
5271 char *stub_name;
5272 struct elf32_arm_stub_hash_entry *stub_entry;
5273 unsigned int r_type;
5274 bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);
5275
5276 BFD_ASSERT (stub_type != arm_stub_none);
5277 *new_stub = FALSE;
5278
5279 if (sym_claimed)
5280 stub_name = sym_name;
5281 else
5282 {
5283 BFD_ASSERT (irela);
5284 BFD_ASSERT (section);
5285
5286 /* Support for grouping stub sections. */
5287 id_sec = htab->stub_group[section->id].link_sec;
5288
5289 /* Get the name of this stub. */
5290 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5291 stub_type);
5292 if (!stub_name)
5293 return FALSE;
5294 }
5295
5296 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
5297 FALSE);
5298 /* The proper stub has already been created, just update its value. */
5299 if (stub_entry != NULL)
5300 {
5301 if (!sym_claimed)
5302 free (stub_name);
5303 stub_entry->target_value = sym_value;
5304 return TRUE;
5305 }
5306
5307 stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5308 if (stub_entry == NULL)
5309 {
5310 if (!sym_claimed)
5311 free (stub_name);
5312 return FALSE;
5313 }
5314
5315 stub_entry->target_value = sym_value;
5316 stub_entry->target_section = sym_sec;
5317 stub_entry->stub_type = stub_type;
5318 stub_entry->h = hash;
5319 stub_entry->branch_type = branch_type;
5320
5321 if (sym_claimed)
5322 stub_entry->output_name = sym_name;
5323 else
5324 {
5325 if (sym_name == NULL)
5326 sym_name = "unnamed";
5327 stub_entry->output_name = (char *)
5328 bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5329 + strlen (sym_name));
5330 if (stub_entry->output_name == NULL)
5331 {
5332 free (stub_name);
5333 return FALSE;
5334 }
5335
5336 /* For historical reasons, use the existing names for ARM-to-Thumb and
5337 Thumb-to-ARM stubs. */
5338 r_type = ELF32_R_TYPE (irela->r_info);
5339 if ((r_type == (unsigned int) R_ARM_THM_CALL
5340 || r_type == (unsigned int) R_ARM_THM_JUMP24
5341 || r_type == (unsigned int) R_ARM_THM_JUMP19)
5342 && branch_type == ST_BRANCH_TO_ARM)
5343 sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5344 else if ((r_type == (unsigned int) R_ARM_CALL
5345 || r_type == (unsigned int) R_ARM_JUMP24)
5346 && branch_type == ST_BRANCH_TO_THUMB)
5347 sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5348 else
5349 sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5350 }
5351
5352 *new_stub = TRUE;
5353 return TRUE;
5354 }
5355
5356 /* Determine and set the size of the stub section for a final link.
5357
5358 The basic idea here is to examine all the relocations looking for
5359 PC-relative calls to a target that is unreachable with a "bl"
5360 instruction. */
5361
5362 bfd_boolean
5363 elf32_arm_size_stubs (bfd *output_bfd,
5364 bfd *stub_bfd,
5365 struct bfd_link_info *info,
5366 bfd_signed_vma group_size,
5367 asection * (*add_stub_section) (const char *, asection *,
5368 asection *,
5369 unsigned int),
5370 void (*layout_sections_again) (void))
5371 {
5372 bfd_size_type stub_group_size;
5373 bfd_boolean stubs_always_after_branch;
5374 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5375 struct a8_erratum_fix *a8_fixes = NULL;
5376 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
5377 struct a8_erratum_reloc *a8_relocs = NULL;
5378 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
5379
5380 if (htab == NULL)
5381 return FALSE;
5382
5383 if (htab->fix_cortex_a8)
5384 {
5385 a8_fixes = (struct a8_erratum_fix *)
5386 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
5387 a8_relocs = (struct a8_erratum_reloc *)
5388 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
5389 }
5390
5391 /* Propagate mach to stub bfd, because it may not have been
5392 finalized when we created stub_bfd. */
5393 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
5394 bfd_get_mach (output_bfd));
5395
5396 /* Stash our params away. */
5397 htab->stub_bfd = stub_bfd;
5398 htab->add_stub_section = add_stub_section;
5399 htab->layout_sections_again = layout_sections_again;
5400 stubs_always_after_branch = group_size < 0;
5401
5402 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
5403 as the first half of a 32-bit branch straddling two 4K pages. This is a
5404 crude way of enforcing that. */
5405 if (htab->fix_cortex_a8)
5406 stubs_always_after_branch = 1;
5407
5408 if (group_size < 0)
5409 stub_group_size = -group_size;
5410 else
5411 stub_group_size = group_size;
5412
5413 if (stub_group_size == 1)
5414 {
5415 /* Default values. */
5416 /* Thumb branch range is +-4MB has to be used as the default
5417 maximum size (a given section can contain both ARM and Thumb
5418 code, so the worst case has to be taken into account).
5419
5420 This value is 24K less than that, which allows for 2025
5421 12-byte stubs. If we exceed that, then we will fail to link.
5422 The user will have to relink with an explicit group size
5423 option. */
5424 stub_group_size = 4170000;
5425 }
5426
5427 group_sections (htab, stub_group_size, stubs_always_after_branch);
5428
5429 /* If we're applying the cortex A8 fix, we need to determine the
5430 program header size now, because we cannot change it later --
5431 that could alter section placements. Notice the A8 erratum fix
5432 ends up requiring the section addresses to remain unchanged
5433 modulo the page size. That's something we cannot represent
5434 inside BFD, and we don't want to force the section alignment to
5435 be the page size. */
5436 if (htab->fix_cortex_a8)
5437 (*htab->layout_sections_again) ();
5438
5439 while (1)
5440 {
5441 bfd *input_bfd;
5442 unsigned int bfd_indx;
5443 asection *stub_sec;
5444 enum elf32_arm_stub_type stub_type;
5445 bfd_boolean stub_changed = FALSE;
5446 unsigned prev_num_a8_fixes = num_a8_fixes;
5447
5448 num_a8_fixes = 0;
5449 for (input_bfd = info->input_bfds, bfd_indx = 0;
5450 input_bfd != NULL;
5451 input_bfd = input_bfd->link.next, bfd_indx++)
5452 {
5453 Elf_Internal_Shdr *symtab_hdr;
5454 asection *section;
5455 Elf_Internal_Sym *local_syms = NULL;
5456
5457 if (!is_arm_elf (input_bfd))
5458 continue;
5459
5460 num_a8_relocs = 0;
5461
5462 /* We'll need the symbol table in a second. */
5463 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5464 if (symtab_hdr->sh_info == 0)
5465 continue;
5466
5467 /* Walk over each section attached to the input bfd. */
5468 for (section = input_bfd->sections;
5469 section != NULL;
5470 section = section->next)
5471 {
5472 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5473
5474 /* If there aren't any relocs, then there's nothing more
5475 to do. */
5476 if ((section->flags & SEC_RELOC) == 0
5477 || section->reloc_count == 0
5478 || (section->flags & SEC_CODE) == 0)
5479 continue;
5480
5481 /* If this section is a link-once section that will be
5482 discarded, then don't create any stubs. */
5483 if (section->output_section == NULL
5484 || section->output_section->owner != output_bfd)
5485 continue;
5486
5487 /* Get the relocs. */
5488 internal_relocs
5489 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5490 NULL, info->keep_memory);
5491 if (internal_relocs == NULL)
5492 goto error_ret_free_local;
5493
5494 /* Now examine each relocation. */
5495 irela = internal_relocs;
5496 irelaend = irela + section->reloc_count;
5497 for (; irela < irelaend; irela++)
5498 {
5499 unsigned int r_type, r_indx;
5500 asection *sym_sec;
5501 bfd_vma sym_value;
5502 bfd_vma destination;
5503 struct elf32_arm_link_hash_entry *hash;
5504 const char *sym_name;
5505 unsigned char st_type;
5506 enum arm_st_branch_type branch_type;
5507 bfd_boolean created_stub = FALSE;
5508
5509 r_type = ELF32_R_TYPE (irela->r_info);
5510 r_indx = ELF32_R_SYM (irela->r_info);
5511
5512 if (r_type >= (unsigned int) R_ARM_max)
5513 {
5514 bfd_set_error (bfd_error_bad_value);
5515 error_ret_free_internal:
5516 if (elf_section_data (section)->relocs == NULL)
5517 free (internal_relocs);
5518 /* Fall through. */
5519 error_ret_free_local:
5520 if (local_syms != NULL
5521 && (symtab_hdr->contents
5522 != (unsigned char *) local_syms))
5523 free (local_syms);
5524 return FALSE;
5525 }
5526
5527 hash = NULL;
5528 if (r_indx >= symtab_hdr->sh_info)
5529 hash = elf32_arm_hash_entry
5530 (elf_sym_hashes (input_bfd)
5531 [r_indx - symtab_hdr->sh_info]);
5532
5533 /* Only look for stubs on branch instructions, or
5534 non-relaxed TLSCALL */
5535 if ((r_type != (unsigned int) R_ARM_CALL)
5536 && (r_type != (unsigned int) R_ARM_THM_CALL)
5537 && (r_type != (unsigned int) R_ARM_JUMP24)
5538 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5539 && (r_type != (unsigned int) R_ARM_THM_XPC22)
5540 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5541 && (r_type != (unsigned int) R_ARM_PLT32)
5542 && !((r_type == (unsigned int) R_ARM_TLS_CALL
5543 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5544 && r_type == elf32_arm_tls_transition
5545 (info, r_type, &hash->root)
5546 && ((hash ? hash->tls_type
5547 : (elf32_arm_local_got_tls_type
5548 (input_bfd)[r_indx]))
5549 & GOT_TLS_GDESC) != 0))
5550 continue;
5551
5552 /* Now determine the call target, its name, value,
5553 section. */
5554 sym_sec = NULL;
5555 sym_value = 0;
5556 destination = 0;
5557 sym_name = NULL;
5558
5559 if (r_type == (unsigned int) R_ARM_TLS_CALL
5560 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5561 {
5562 /* A non-relaxed TLS call. The target is the
5563 plt-resident trampoline and nothing to do
5564 with the symbol. */
5565 BFD_ASSERT (htab->tls_trampoline > 0);
5566 sym_sec = htab->root.splt;
5567 sym_value = htab->tls_trampoline;
5568 hash = 0;
5569 st_type = STT_FUNC;
5570 branch_type = ST_BRANCH_TO_ARM;
5571 }
5572 else if (!hash)
5573 {
5574 /* It's a local symbol. */
5575 Elf_Internal_Sym *sym;
5576
5577 if (local_syms == NULL)
5578 {
5579 local_syms
5580 = (Elf_Internal_Sym *) symtab_hdr->contents;
5581 if (local_syms == NULL)
5582 local_syms
5583 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5584 symtab_hdr->sh_info, 0,
5585 NULL, NULL, NULL);
5586 if (local_syms == NULL)
5587 goto error_ret_free_internal;
5588 }
5589
5590 sym = local_syms + r_indx;
5591 if (sym->st_shndx == SHN_UNDEF)
5592 sym_sec = bfd_und_section_ptr;
5593 else if (sym->st_shndx == SHN_ABS)
5594 sym_sec = bfd_abs_section_ptr;
5595 else if (sym->st_shndx == SHN_COMMON)
5596 sym_sec = bfd_com_section_ptr;
5597 else
5598 sym_sec =
5599 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5600
5601 if (!sym_sec)
5602 /* This is an undefined symbol. It can never
5603 be resolved. */
5604 continue;
5605
5606 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5607 sym_value = sym->st_value;
5608 destination = (sym_value + irela->r_addend
5609 + sym_sec->output_offset
5610 + sym_sec->output_section->vma);
5611 st_type = ELF_ST_TYPE (sym->st_info);
5612 branch_type =
5613 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
5614 sym_name
5615 = bfd_elf_string_from_elf_section (input_bfd,
5616 symtab_hdr->sh_link,
5617 sym->st_name);
5618 }
5619 else
5620 {
5621 /* It's an external symbol. */
5622 while (hash->root.root.type == bfd_link_hash_indirect
5623 || hash->root.root.type == bfd_link_hash_warning)
5624 hash = ((struct elf32_arm_link_hash_entry *)
5625 hash->root.root.u.i.link);
5626
5627 if (hash->root.root.type == bfd_link_hash_defined
5628 || hash->root.root.type == bfd_link_hash_defweak)
5629 {
5630 sym_sec = hash->root.root.u.def.section;
5631 sym_value = hash->root.root.u.def.value;
5632
5633 struct elf32_arm_link_hash_table *globals =
5634 elf32_arm_hash_table (info);
5635
5636 /* For a destination in a shared library,
5637 use the PLT stub as target address to
5638 decide whether a branch stub is
5639 needed. */
5640 if (globals != NULL
5641 && globals->root.splt != NULL
5642 && hash != NULL
5643 && hash->root.plt.offset != (bfd_vma) -1)
5644 {
5645 sym_sec = globals->root.splt;
5646 sym_value = hash->root.plt.offset;
5647 if (sym_sec->output_section != NULL)
5648 destination = (sym_value
5649 + sym_sec->output_offset
5650 + sym_sec->output_section->vma);
5651 }
5652 else if (sym_sec->output_section != NULL)
5653 destination = (sym_value + irela->r_addend
5654 + sym_sec->output_offset
5655 + sym_sec->output_section->vma);
5656 }
5657 else if ((hash->root.root.type == bfd_link_hash_undefined)
5658 || (hash->root.root.type == bfd_link_hash_undefweak))
5659 {
5660 /* For a shared library, use the PLT stub as
5661 target address to decide whether a long
5662 branch stub is needed.
5663 For absolute code, they cannot be handled. */
5664 struct elf32_arm_link_hash_table *globals =
5665 elf32_arm_hash_table (info);
5666
5667 if (globals != NULL
5668 && globals->root.splt != NULL
5669 && hash != NULL
5670 && hash->root.plt.offset != (bfd_vma) -1)
5671 {
5672 sym_sec = globals->root.splt;
5673 sym_value = hash->root.plt.offset;
5674 if (sym_sec->output_section != NULL)
5675 destination = (sym_value
5676 + sym_sec->output_offset
5677 + sym_sec->output_section->vma);
5678 }
5679 else
5680 continue;
5681 }
5682 else
5683 {
5684 bfd_set_error (bfd_error_bad_value);
5685 goto error_ret_free_internal;
5686 }
5687 st_type = hash->root.type;
5688 branch_type =
5689 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
5690 sym_name = hash->root.root.root.string;
5691 }
5692
5693 do
5694 {
5695 bfd_boolean new_stub;
5696
5697 /* Determine what (if any) linker stub is needed. */
5698 stub_type = arm_type_of_stub (info, section, irela,
5699 st_type, &branch_type,
5700 hash, destination, sym_sec,
5701 input_bfd, sym_name);
5702 if (stub_type == arm_stub_none)
5703 break;
5704
5705 /* We've either created a stub for this reloc already,
5706 or we are about to. */
5707 created_stub =
5708 elf32_arm_create_stub (htab, stub_type, section, irela,
5709 sym_sec, hash,
5710 (char *) sym_name, sym_value,
5711 branch_type, &new_stub);
5712
5713 if (!created_stub)
5714 goto error_ret_free_internal;
5715 else if (!new_stub)
5716 break;
5717 else
5718 stub_changed = TRUE;
5719 }
5720 while (0);
5721
5722 /* Look for relocations which might trigger Cortex-A8
5723 erratum. */
5724 if (htab->fix_cortex_a8
5725 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5726 || r_type == (unsigned int) R_ARM_THM_JUMP19
5727 || r_type == (unsigned int) R_ARM_THM_CALL
5728 || r_type == (unsigned int) R_ARM_THM_XPC22))
5729 {
5730 bfd_vma from = section->output_section->vma
5731 + section->output_offset
5732 + irela->r_offset;
5733
5734 if ((from & 0xfff) == 0xffe)
5735 {
5736 /* Found a candidate. Note we haven't checked the
5737 destination is within 4K here: if we do so (and
5738 don't create an entry in a8_relocs) we can't tell
5739 that a branch should have been relocated when
5740 scanning later. */
5741 if (num_a8_relocs == a8_reloc_table_size)
5742 {
5743 a8_reloc_table_size *= 2;
5744 a8_relocs = (struct a8_erratum_reloc *)
5745 bfd_realloc (a8_relocs,
5746 sizeof (struct a8_erratum_reloc)
5747 * a8_reloc_table_size);
5748 }
5749
5750 a8_relocs[num_a8_relocs].from = from;
5751 a8_relocs[num_a8_relocs].destination = destination;
5752 a8_relocs[num_a8_relocs].r_type = r_type;
5753 a8_relocs[num_a8_relocs].branch_type = branch_type;
5754 a8_relocs[num_a8_relocs].sym_name = sym_name;
5755 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5756 a8_relocs[num_a8_relocs].hash = hash;
5757
5758 num_a8_relocs++;
5759 }
5760 }
5761 }
5762
5763 /* We're done with the internal relocs, free them. */
5764 if (elf_section_data (section)->relocs == NULL)
5765 free (internal_relocs);
5766 }
5767
5768 if (htab->fix_cortex_a8)
5769 {
5770 /* Sort relocs which might apply to Cortex-A8 erratum. */
5771 qsort (a8_relocs, num_a8_relocs,
5772 sizeof (struct a8_erratum_reloc),
5773 &a8_reloc_compare);
5774
5775 /* Scan for branches which might trigger Cortex-A8 erratum. */
5776 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5777 &num_a8_fixes, &a8_fix_table_size,
5778 a8_relocs, num_a8_relocs,
5779 prev_num_a8_fixes, &stub_changed)
5780 != 0)
5781 goto error_ret_free_local;
5782 }
5783
5784 if (local_syms != NULL
5785 && symtab_hdr->contents != (unsigned char *) local_syms)
5786 {
5787 if (!info->keep_memory)
5788 free (local_syms);
5789 else
5790 symtab_hdr->contents = (unsigned char *) local_syms;
5791 }
5792 }
5793
5794 if (prev_num_a8_fixes != num_a8_fixes)
5795 stub_changed = TRUE;
5796
5797 if (!stub_changed)
5798 break;
5799
5800 /* OK, we've added some stubs. Find out the new size of the
5801 stub sections. */
5802 for (stub_sec = htab->stub_bfd->sections;
5803 stub_sec != NULL;
5804 stub_sec = stub_sec->next)
5805 {
5806 /* Ignore non-stub sections. */
5807 if (!strstr (stub_sec->name, STUB_SUFFIX))
5808 continue;
5809
5810 stub_sec->size = 0;
5811 }
5812
5813 /* Compute stub section size, considering padding. */
5814 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5815 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
5816 stub_type++)
5817 {
5818 int size, padding;
5819 asection **stub_sec_p;
5820
5821 padding = arm_dedicated_stub_section_padding (stub_type);
5822 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
5823 /* Skip if no stub input section or no stub section padding
5824 required. */
5825 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
5826 continue;
5827 /* Stub section padding required but no dedicated section. */
5828 BFD_ASSERT (stub_sec_p);
5829
5830 size = (*stub_sec_p)->size;
5831 size = (size + padding - 1) & ~(padding - 1);
5832 (*stub_sec_p)->size = size;
5833 }
5834
5835 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5836 if (htab->fix_cortex_a8)
5837 for (i = 0; i < num_a8_fixes; i++)
5838 {
5839 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5840 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
5841
5842 if (stub_sec == NULL)
5843 return FALSE;
5844
5845 stub_sec->size
5846 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5847 NULL);
5848 }
5849
5850
5851 /* Ask the linker to do its stuff. */
5852 (*htab->layout_sections_again) ();
5853 }
5854
5855 /* Add stubs for Cortex-A8 erratum fixes now. */
5856 if (htab->fix_cortex_a8)
5857 {
5858 for (i = 0; i < num_a8_fixes; i++)
5859 {
5860 struct elf32_arm_stub_hash_entry *stub_entry;
5861 char *stub_name = a8_fixes[i].stub_name;
5862 asection *section = a8_fixes[i].section;
5863 unsigned int section_id = a8_fixes[i].section->id;
5864 asection *link_sec = htab->stub_group[section_id].link_sec;
5865 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5866 const insn_sequence *template_sequence;
5867 int template_size, size = 0;
5868
5869 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5870 TRUE, FALSE);
5871 if (stub_entry == NULL)
5872 {
5873 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
5874 section->owner,
5875 stub_name);
5876 return FALSE;
5877 }
5878
5879 stub_entry->stub_sec = stub_sec;
5880 stub_entry->stub_offset = 0;
5881 stub_entry->id_sec = link_sec;
5882 stub_entry->stub_type = a8_fixes[i].stub_type;
5883 stub_entry->source_value = a8_fixes[i].offset;
5884 stub_entry->target_section = a8_fixes[i].section;
5885 stub_entry->target_value = a8_fixes[i].target_offset;
5886 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5887 stub_entry->branch_type = a8_fixes[i].branch_type;
5888
5889 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5890 &template_sequence,
5891 &template_size);
5892
5893 stub_entry->stub_size = size;
5894 stub_entry->stub_template = template_sequence;
5895 stub_entry->stub_template_size = template_size;
5896 }
5897
5898 /* Stash the Cortex-A8 erratum fix array for use later in
5899 elf32_arm_write_section(). */
5900 htab->a8_erratum_fixes = a8_fixes;
5901 htab->num_a8_erratum_fixes = num_a8_fixes;
5902 }
5903 else
5904 {
5905 htab->a8_erratum_fixes = NULL;
5906 htab->num_a8_erratum_fixes = 0;
5907 }
5908 return TRUE;
5909 }
5910
5911 /* Build all the stubs associated with the current output file. The
5912 stubs are kept in a hash table attached to the main linker hash
5913 table. We also set up the .plt entries for statically linked PIC
5914 functions here. This function is called via arm_elf_finish in the
5915 linker. */
5916
5917 bfd_boolean
5918 elf32_arm_build_stubs (struct bfd_link_info *info)
5919 {
5920 asection *stub_sec;
5921 struct bfd_hash_table *table;
5922 struct elf32_arm_link_hash_table *htab;
5923
5924 htab = elf32_arm_hash_table (info);
5925 if (htab == NULL)
5926 return FALSE;
5927
5928 for (stub_sec = htab->stub_bfd->sections;
5929 stub_sec != NULL;
5930 stub_sec = stub_sec->next)
5931 {
5932 bfd_size_type size;
5933
5934 /* Ignore non-stub sections. */
5935 if (!strstr (stub_sec->name, STUB_SUFFIX))
5936 continue;
5937
5938 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
5939 must at least be done for stub section requiring padding. */
5940 size = stub_sec->size;
5941 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5942 if (stub_sec->contents == NULL && size != 0)
5943 return FALSE;
5944 stub_sec->size = 0;
5945 }
5946
5947 /* Build the stubs as directed by the stub hash table. */
5948 table = &htab->stub_hash_table;
5949 bfd_hash_traverse (table, arm_build_one_stub, info);
5950 if (htab->fix_cortex_a8)
5951 {
5952 /* Place the cortex a8 stubs last. */
5953 htab->fix_cortex_a8 = -1;
5954 bfd_hash_traverse (table, arm_build_one_stub, info);
5955 }
5956
5957 return TRUE;
5958 }
5959
5960 /* Locate the Thumb encoded calling stub for NAME. */
5961
5962 static struct elf_link_hash_entry *
5963 find_thumb_glue (struct bfd_link_info *link_info,
5964 const char *name,
5965 char **error_message)
5966 {
5967 char *tmp_name;
5968 struct elf_link_hash_entry *hash;
5969 struct elf32_arm_link_hash_table *hash_table;
5970
5971 /* We need a pointer to the armelf specific hash table. */
5972 hash_table = elf32_arm_hash_table (link_info);
5973 if (hash_table == NULL)
5974 return NULL;
5975
5976 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5977 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5978
5979 BFD_ASSERT (tmp_name);
5980
5981 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5982
5983 hash = elf_link_hash_lookup
5984 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5985
5986 if (hash == NULL
5987 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5988 tmp_name, name) == -1)
5989 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5990
5991 free (tmp_name);
5992
5993 return hash;
5994 }
5995
5996 /* Locate the ARM encoded calling stub for NAME. */
5997
5998 static struct elf_link_hash_entry *
5999 find_arm_glue (struct bfd_link_info *link_info,
6000 const char *name,
6001 char **error_message)
6002 {
6003 char *tmp_name;
6004 struct elf_link_hash_entry *myh;
6005 struct elf32_arm_link_hash_table *hash_table;
6006
6007 /* We need a pointer to the elfarm specific hash table. */
6008 hash_table = elf32_arm_hash_table (link_info);
6009 if (hash_table == NULL)
6010 return NULL;
6011
6012 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6013 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
6014
6015 BFD_ASSERT (tmp_name);
6016
6017 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
6018
6019 myh = elf_link_hash_lookup
6020 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
6021
6022 if (myh == NULL
6023 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
6024 tmp_name, name) == -1)
6025 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
6026
6027 free (tmp_name);
6028
6029 return myh;
6030 }
6031
6032 /* ARM->Thumb glue (static images):
6033
6034 .arm
6035 __func_from_arm:
6036 ldr r12, __func_addr
6037 bx r12
6038 __func_addr:
6039 .word func @ behave as if you saw a ARM_32 reloc.
6040
6041 (v5t static images)
6042 .arm
6043 __func_from_arm:
6044 ldr pc, __func_addr
6045 __func_addr:
6046 .word func @ behave as if you saw a ARM_32 reloc.
6047
6048 (relocatable images)
6049 .arm
6050 __func_from_arm:
6051 ldr r12, __func_offset
6052 add r12, r12, pc
6053 bx r12
6054 __func_offset:
6055 .word func - . */
6056
#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;		/* ldr r12, [pc]  */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx r12  */
static const insn32 a2t3_func_addr_insn = 0x00000001;	/* .word placeholder — NOTE(review): presumably overwritten with the target address when the glue is written out.  */

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr pc, [pc, #-4]  */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;	/* .word placeholder — see note above.  */

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr r12, [pc, #4]  */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add r12, r12, pc  */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx r12  */

/* Thumb->ARM: Thumb->(non-interworking aware) ARM

     .thumb                          .thumb
     .align 2                        .align 2
  __func_from_thumb:              __func_from_thumb:
     bx pc                           push {r6, lr}
     nop                             ldr  r6, __func_addr
     .arm                            mov  lr, pc
     b func                          bx   r6
				     .arm
				 ;; back_to_thumb
				     ldmia r13! {r6, lr}
				     bx    lr
				  __func_addr:
				     .word func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;	/* bx pc  */
static const insn16 t2a2_noop_insn = 0x46c0;	/* nop (mov r8, r8)  */
static const insn32 t2a3_b_insn = 0xea000000;	/* b <offset>  */

/* Sizes, in bytes, of the veneers emitted for the erratum workarounds.  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;	/* tst r0, #1 — NOTE(review): register field presumably rewritten per veneer; confirm in elf32_arm_write_section.  */
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq pc, r0  */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx r0  */
6100
6101 #ifndef ELFARM_NABI_C_INCLUDED
6102 static void
6103 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
6104 {
6105 asection * s;
6106 bfd_byte * contents;
6107
6108 if (size == 0)
6109 {
6110 /* Do not include empty glue sections in the output. */
6111 if (abfd != NULL)
6112 {
6113 s = bfd_get_linker_section (abfd, name);
6114 if (s != NULL)
6115 s->flags |= SEC_EXCLUDE;
6116 }
6117 return;
6118 }
6119
6120 BFD_ASSERT (abfd != NULL);
6121
6122 s = bfd_get_linker_section (abfd, name);
6123 BFD_ASSERT (s != NULL);
6124
6125 contents = (bfd_byte *) bfd_alloc (abfd, size);
6126
6127 BFD_ASSERT (s->size == size);
6128 s->contents = contents;
6129 }
6130
6131 bfd_boolean
6132 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
6133 {
6134 struct elf32_arm_link_hash_table * globals;
6135
6136 globals = elf32_arm_hash_table (info);
6137 BFD_ASSERT (globals != NULL);
6138
6139 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6140 globals->arm_glue_size,
6141 ARM2THUMB_GLUE_SECTION_NAME);
6142
6143 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6144 globals->thumb_glue_size,
6145 THUMB2ARM_GLUE_SECTION_NAME);
6146
6147 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6148 globals->vfp11_erratum_glue_size,
6149 VFP11_ERRATUM_VENEER_SECTION_NAME);
6150
6151 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6152 globals->stm32l4xx_erratum_glue_size,
6153 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6154
6155 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6156 globals->bx_glue_size,
6157 ARM_BX_GLUE_SECTION_NAME);
6158
6159 return TRUE;
6160 }
6161
6162 /* Allocate space and symbols for calling a Thumb function from Arm mode.
6163 returns the symbol identifying the stub. */
6164
static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Build the glue symbol name; ARM2THUMB_GLUE_ENTRY_NAME is a printf
     format with a single %s for the target function name.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using globals->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Pick the glue flavour: PIC glue for shared/relocatable output,
     otherwise the shorter BLX-capable static glue when available.  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  /* Reserve room for this stub in the glue section.  */
  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
6234
6235 /* Allocate space for ARMv4 BX veneers. */
6236
static void
record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
{
  asection * s;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;

  /* BX PC does not need a veneer.  */
  if (reg == 15)
    return;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* Check if this veneer has already been allocated.  */
  if (globals->bx_glue_offset[reg])
    return;

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Add symbol for veneer.  NOTE(review): the buffer assumes the
     formatted register number is no longer than the "%d" in
     ARM_BX_GLUE_ENTRY_NAME — true for r0-r14.  */
  tmp_name = (char *)
    bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);

  /* Since bx_glue_offset[reg] was zero, the symbol cannot exist yet.  */
  BFD_ASSERT (myh == NULL);

  /* The symbol's value is the veneer's offset within the glue section.  */
  bh = NULL;
  val = globals->bx_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  s->size += ARM_BX_VENEER_SIZE;
  /* Record the offset with bit 1 set so that a veneer at offset zero
     still reads as allocated by the check above; NOTE(review):
     consumers presumably mask this bit off — confirm at use sites.  */
  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
}
6291
6292
6293 /* Add an entry to the code/data map for section SEC. */
6294
6295 static void
6296 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
6297 {
6298 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6299 unsigned int newidx;
6300
6301 if (sec_data->map == NULL)
6302 {
6303 sec_data->map = (elf32_arm_section_map *)
6304 bfd_malloc (sizeof (elf32_arm_section_map));
6305 sec_data->mapcount = 0;
6306 sec_data->mapsize = 1;
6307 }
6308
6309 newidx = sec_data->mapcount++;
6310
6311 if (sec_data->mapcount > sec_data->mapsize)
6312 {
6313 sec_data->mapsize *= 2;
6314 sec_data->map = (elf32_arm_section_map *)
6315 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
6316 * sizeof (elf32_arm_section_map));
6317 }
6318
6319 if (sec_data->map)
6320 {
6321 sec_data->map[newidx].vma = vma;
6322 sec_data->map[newidx].type = type;
6323 }
6324 }
6325
6326
6327 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
6328 veneers are handled for now. */
6329
6330 static bfd_vma
6331 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
6332 elf32_vfp11_erratum_list *branch,
6333 bfd *branch_bfd,
6334 asection *branch_sec,
6335 unsigned int offset)
6336 {
6337 asection *s;
6338 struct elf32_arm_link_hash_table *hash_table;
6339 char *tmp_name;
6340 struct elf_link_hash_entry *myh;
6341 struct bfd_link_hash_entry *bh;
6342 bfd_vma val;
6343 struct _arm_elf_section_data *sec_data;
6344 elf32_vfp11_erratum_list *newerr;
6345
6346 hash_table = elf32_arm_hash_table (link_info);
6347 BFD_ASSERT (hash_table != NULL);
6348 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
6349
6350 s = bfd_get_linker_section
6351 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
6352
6353 sec_data = elf32_arm_section_data (s);
6354
6355 BFD_ASSERT (s != NULL);
6356
6357 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6358 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6359
6360 BFD_ASSERT (tmp_name);
6361
6362 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6363 hash_table->num_vfp11_fixes);
6364
6365 myh = elf_link_hash_lookup
6366 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6367
6368 BFD_ASSERT (myh == NULL);
6369
6370 bh = NULL;
6371 val = hash_table->vfp11_erratum_glue_size;
6372 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
6373 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6374 NULL, TRUE, FALSE, &bh);
6375
6376 myh = (struct elf_link_hash_entry *) bh;
6377 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6378 myh->forced_local = 1;
6379
6380 /* Link veneer back to calling location. */
6381 sec_data->erratumcount += 1;
6382 newerr = (elf32_vfp11_erratum_list *)
6383 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6384
6385 newerr->type = VFP11_ERRATUM_ARM_VENEER;
6386 newerr->vma = -1;
6387 newerr->u.v.branch = branch;
6388 newerr->u.v.id = hash_table->num_vfp11_fixes;
6389 branch->u.b.veneer = newerr;
6390
6391 newerr->next = sec_data->erratumlist;
6392 sec_data->erratumlist = newerr;
6393
6394 /* A symbol for the return from the veneer. */
6395 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6396 hash_table->num_vfp11_fixes);
6397
6398 myh = elf_link_hash_lookup
6399 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6400
6401 if (myh != NULL)
6402 abort ();
6403
6404 bh = NULL;
6405 val = offset + 4;
6406 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6407 branch_sec, val, NULL, TRUE, FALSE, &bh);
6408
6409 myh = (struct elf_link_hash_entry *) bh;
6410 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6411 myh->forced_local = 1;
6412
6413 free (tmp_name);
6414
6415 /* Generate a mapping symbol for the veneer section, and explicitly add an
6416 entry for that symbol to the code/data map for the section. */
6417 if (hash_table->vfp11_erratum_glue_size == 0)
6418 {
6419 bh = NULL;
6420 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
6421 ever requires this erratum fix. */
6422 _bfd_generic_link_add_one_symbol (link_info,
6423 hash_table->bfd_of_glue_owner, "$a",
6424 BSF_LOCAL, s, 0, NULL,
6425 TRUE, FALSE, &bh);
6426
6427 myh = (struct elf_link_hash_entry *) bh;
6428 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6429 myh->forced_local = 1;
6430
6431 /* The elf32_arm_init_maps function only cares about symbols from input
6432 BFDs. We must make a note of this generated mapping symbol
6433 ourselves so that code byteswapping works properly in
6434 elf32_arm_write_section. */
6435 elf32_arm_section_map_add (s, 'a', 0);
6436 }
6437
6438 s->size += VFP11_ERRATUM_VENEER_SIZE;
6439 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
6440 hash_table->num_vfp11_fixes++;
6441
6442 /* The offset of the veneer. */
6443 return val;
6444 }
6445
6446 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
6447 veneers need to be handled because used only in Cortex-M. */
6448
static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* The veneer symbol name embeds a sequence number; 10 extra bytes
     leave room for the formatted number and the terminating NUL.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  /* Each veneer gets a fresh sequence number, so the symbol is new.  */
  BFD_ASSERT (myh == NULL);

  /* The symbol's value is the veneer's offset within the glue section.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer, placed just past the
     branch location in the calling section.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* NOTE(review): at this point VAL holds the return-address offset
     (OFFSET + 4) assigned above, not the veneer offset it held
     earlier — confirm callers expect this.  */
  return val;
}
6564
/* Flags shared by every linker-created glue/veneer section.  */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)

/* Create a fake section for use by the ARM backend of the linker.  */

static bfd_boolean
arm_make_glue_section (bfd * abfd, const char * name)
{
  asection * sec;

  sec = bfd_get_linker_section (abfd, name);
  if (sec != NULL)
    /* Already made.  */
    return TRUE;

  sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);

  /* 2**2 == 4-byte alignment, enough for the 32-bit glue insns.  */
  if (sec == NULL
      || !bfd_set_section_alignment (abfd, sec, 2))
    return FALSE;

  /* Set the gc mark to prevent the section from being removed by garbage
     collection, despite the fact that no relocs refer to this section.  */
  sec->gc_mark = 1;

  return TRUE;
}
6593
6594 /* Set size of .plt entries. This function is called from the
6595 linker scripts in ld/emultempl/{armelf}.em. */
6596
void
bfd_elf32_arm_use_long_plt (void)
{
  /* Record the request in a global flag; consulted when PLT entries
     are sized and written.  */
  elf32_arm_use_long_plt_entry = TRUE;
}
6602
6603 /* Add the glue sections to ABFD. This function is called from the
6604 linker scripts in ld/emultempl/{armelf}.em. */
6605
6606 bfd_boolean
6607 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6608 struct bfd_link_info *info)
6609 {
6610 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
6611 bfd_boolean dostm32l4xx = globals
6612 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
6613 bfd_boolean addglue;
6614
6615 /* If we are only performing a partial
6616 link do not bother adding the glue. */
6617 if (bfd_link_relocatable (info))
6618 return TRUE;
6619
6620 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6621 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6622 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6623 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6624
6625 if (!dostm32l4xx)
6626 return addglue;
6627
6628 return addglue
6629 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6630 }
6631
6632 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
6633 ensures they are not marked for deletion by
6634 strip_excluded_output_sections () when veneers are going to be created
6635 later. Not doing so would trigger assert on empty section size in
6636 lang_size_sections_1 (). */
6637
6638 void
6639 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
6640 {
6641 enum elf32_arm_stub_type stub_type;
6642
6643 /* If we are only performing a partial
6644 link do not bother adding the glue. */
6645 if (bfd_link_relocatable (info))
6646 return;
6647
6648 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
6649 {
6650 asection *out_sec;
6651 const char *out_sec_name;
6652
6653 if (!arm_dedicated_stub_output_section_required (stub_type))
6654 continue;
6655
6656 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
6657 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
6658 if (out_sec != NULL)
6659 out_sec->flags |= SEC_KEEP;
6660 }
6661 }
6662
6663 /* Select a BFD to be used to hold the sections used by the glue code.
6664 This function is called from the linker scripts in ld/emultempl/
6665 {armelf/pe}.em. */
6666
6667 bfd_boolean
6668 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6669 {
6670 struct elf32_arm_link_hash_table *globals;
6671
6672 /* If we are only performing a partial link
6673 do not bother getting a bfd to hold the glue. */
6674 if (bfd_link_relocatable (info))
6675 return TRUE;
6676
6677 /* Make sure we don't attach the glue sections to a dynamic object. */
6678 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6679
6680 globals = elf32_arm_hash_table (info);
6681 BFD_ASSERT (globals != NULL);
6682
6683 if (globals->bfd_of_glue_owner != NULL)
6684 return TRUE;
6685
6686 /* Save the bfd for later use. */
6687 globals->bfd_of_glue_owner = abfd;
6688
6689 return TRUE;
6690 }
6691
6692 static void
6693 check_use_blx (struct elf32_arm_link_hash_table *globals)
6694 {
6695 int cpu_arch;
6696
6697 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6698 Tag_CPU_arch);
6699
6700 if (globals->fix_arm1176)
6701 {
6702 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6703 globals->use_blx = 1;
6704 }
6705 else
6706 {
6707 if (cpu_arch > TAG_CPU_ARCH_V4T)
6708 globals->use_blx = 1;
6709 }
6710 }
6711
bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.
	     R_ARM_V4BX is only interesting when veneered BX rewriting
	     (fix_v4bx >= 2) was requested.  */
	  if (   r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The BX target register is the low nibble of the insn.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      /* The filter above only lets R_ARM_PC24 reach here.  */
	      abort ();
	    }
	}

      /* Only free buffers we own, not ones cached on the section.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
6872 #endif
6873
6874
6875 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6876
6877 void
6878 bfd_elf32_arm_init_maps (bfd *abfd)
6879 {
6880 Elf_Internal_Sym *isymbuf;
6881 Elf_Internal_Shdr *hdr;
6882 unsigned int i, localsyms;
6883
6884 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6885 if (! is_arm_elf (abfd))
6886 return;
6887
6888 if ((abfd->flags & DYNAMIC) != 0)
6889 return;
6890
6891 hdr = & elf_symtab_hdr (abfd);
6892 localsyms = hdr->sh_info;
6893
6894 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6895 should contain the number of local symbols, which should come before any
6896 global symbols. Mapping symbols are always local. */
6897 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6898 NULL);
6899
6900 /* No internal symbols read? Skip this BFD. */
6901 if (isymbuf == NULL)
6902 return;
6903
6904 for (i = 0; i < localsyms; i++)
6905 {
6906 Elf_Internal_Sym *isym = &isymbuf[i];
6907 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6908 const char *name;
6909
6910 if (sec != NULL
6911 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6912 {
6913 name = bfd_elf_string_from_elf_section (abfd,
6914 hdr->sh_link, isym->st_name);
6915
6916 if (bfd_is_arm_special_symbol_name (name,
6917 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6918 elf32_arm_section_map_add (sec, name[1], isym->st_value);
6919 }
6920 }
6921 }
6922
6923
6924 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6925 say what they wanted. */
6926
6927 void
6928 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6929 {
6930 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6931 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6932
6933 if (globals == NULL)
6934 return;
6935
6936 if (globals->fix_cortex_a8 == -1)
6937 {
6938 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6939 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6940 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6941 || out_attr[Tag_CPU_arch_profile].i == 0))
6942 globals->fix_cortex_a8 = 1;
6943 else
6944 globals->fix_cortex_a8 = 0;
6945 }
6946 }
6947
6948
6949 void
6950 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6951 {
6952 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6953 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6954
6955 if (globals == NULL)
6956 return;
6957 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6958 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6959 {
6960 switch (globals->vfp11_fix)
6961 {
6962 case BFD_ARM_VFP11_FIX_DEFAULT:
6963 case BFD_ARM_VFP11_FIX_NONE:
6964 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6965 break;
6966
6967 default:
6968 /* Give a warning, but do as the user requests anyway. */
6969 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6970 "workaround is not necessary for target architecture"), obfd);
6971 }
6972 }
6973 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
6974 /* For earlier architectures, we might need the workaround, but do not
6975 enable it by default. If users is running with broken hardware, they
6976 must enable the erratum fix explicitly. */
6977 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6978 }
6979
6980 void
6981 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
6982 {
6983 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6984 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6985
6986 if (globals == NULL)
6987 return;
6988
6989 /* We assume only Cortex-M4 may require the fix. */
6990 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
6991 || out_attr[Tag_CPU_arch_profile].i != 'M')
6992 {
6993 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
6994 /* Give a warning, but do as the user requests anyway. */
6995 (*_bfd_error_handler)
6996 (_("%B: warning: selected STM32L4XX erratum "
6997 "workaround is not necessary for target architecture"), obfd);
6998 }
6999 }
7000
/* Classification of the VFP11 pipeline that executes an instruction; used
   by the erratum scanner below to track anti-dependencies.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipeline.  */
  VFP11_LS,	/* Load/store pipeline.  */
  VFP11_DS,	/* Divide/square-root pipeline.  */
  VFP11_BAD	/* Not a decodable VFP11 instruction.  */
};
7008
7009 /* Return a VFP register number. This is encoded as RX:X for single-precision
7010 registers, or X:RX for double-precision registers, where RX is the group of
7011 four bits in the instruction encoding and X is the single extension bit.
7012 RX and X fields are specified using their lowest (starting) bit. The return
7013 value is:
7014
7015 0...31: single-precision registers s0...s31
7016 32...63: double-precision registers d0...d31.
7017
7018 Although X should be zero for VFP11 (encoding d0...d15 only), we might
7019 encounter VFP3 instructions, so we allow the full range for DP registers. */
7020
7021 static unsigned int
7022 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
7023 unsigned int x)
7024 {
7025 if (is_double)
7026 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
7027 else
7028 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
7029 }
7030
7031 /* Set bits in *WMASK according to a register number REG as encoded by
7032 bfd_arm_vfp11_regno(). Ignore d16-d31. */
7033
static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    {
      /* Single-precision register s0..s31: one bit.  */
      *wmask |= 1u << reg;
    }
  else if (reg < 48)
    {
      /* Double-precision d0..d15: mark both overlapping SP slots.
	 d16-d31 are deliberately ignored.  */
      unsigned int dreg = reg - 32;

      *wmask |= 3u << (dreg * 2);
    }
}
7042
7043 /* Return TRUE if WMASK overwrites anything in REGS. */
7044
7045 static bfd_boolean
7046 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
7047 {
7048 int i;
7049
7050 for (i = 0; i < numregs; i++)
7051 {
7052 unsigned int reg = regs[i];
7053
7054 if (reg < 32 && (wmask & (1 << reg)) != 0)
7055 return TRUE;
7056
7057 reg -= 32;
7058
7059 if (reg >= 16)
7060 continue;
7061
7062 if ((wmask & (3 << (reg * 2))) != 0)
7063 return TRUE;
7064 }
7065
7066 return FALSE;
7067 }
7068
7069 /* In this function, we're interested in two things: finding input registers
7070 for VFP data-processing instructions, and finding the set of registers which
7071 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
7072 hold the written set, so FLDM etc. are easy to deal with (we're only
7073 interested in 32 SP registers or 16 dp registers, due to the VFP version
7074 implemented by the chip in question). DP registers are marked by setting
7075 both SP registers in the write mask). */
7076
/* Decode INSN as a VFP11 instruction.  On return, *DESTMASK holds the set
   of registers the instruction writes (encoded as by
   bfd_arm_vfp11_write_mask), REGS[0 .. *NUMREGS-1] holds the input
   registers of data-processing instructions, and the return value names
   the pipeline the instruction executes on (VFP11_BAD if INSN is not a
   decodable VFP instruction).  */
static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 11:8 == 0xb selects the double-precision encodings.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the p, q, r, s opcode bits into one selector.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  /* Three-input multiply-accumulate: Fd is both read and written.  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  /* Two-input operations: Fd written, Fn and Fm read.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 (L) clear: the transfer writes the VFP register(s).  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      /* An SP pair Fm, Fm+1 is written.  */
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      /* P, U, W addressing-mode bits select the load variant.  */
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    unsigned int i, offset = insn & 0xff;

	    /* The immediate counts words; halve it for doubles.  */
	    if (is_double)
	      offset >>= 1;

	    /* All registers in the transfer list are written.  */
	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer.  Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
7260
7261
7262 static int elf32_arm_compare_mapping (const void * a, const void * b);
7263
7264
7265 /* Look for potentially-troublesome code sequences which might trigger the
7266 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
7267 (available from ARM) for details of the erratum. A short version is
7268 described in ld.texinfo. */
7269
7270 bfd_boolean
7271 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
7272 {
7273 asection *sec;
7274 bfd_byte *contents = NULL;
7275 int state = 0;
7276 int regs[3], numregs = 0;
7277 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7278 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
7279
7280 if (globals == NULL)
7281 return FALSE;
7282
7283 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
7284 The states transition as follows:
7285
7286 0 -> 1 (vector) or 0 -> 2 (scalar)
7287 A VFP FMAC-pipeline instruction has been seen. Fill
7288 regs[0]..regs[numregs-1] with its input operands. Remember this
7289 instruction in 'first_fmac'.
7290
7291 1 -> 2
7292 Any instruction, except for a VFP instruction which overwrites
7293 regs[*].
7294
7295 1 -> 3 [ -> 0 ] or
7296 2 -> 3 [ -> 0 ]
7297 A VFP instruction has been seen which overwrites any of regs[*].
7298 We must make a veneer! Reset state to 0 before examining next
7299 instruction.
7300
7301 2 -> 0
7302 If we fail to match anything in state 2, reset to state 0 and reset
7303 the instruction pointer to the instruction after 'first_fmac'.
7304
7305 If the VFP11 vector mode is in use, there must be at least two unrelated
7306 instructions between anti-dependent VFP11 instructions to properly avoid
7307 triggering the erratum, hence the use of the extra state 1. */
7308
7309 /* If we are only performing a partial link do not bother
7310 to construct any glue. */
7311 if (bfd_link_relocatable (link_info))
7312 return TRUE;
7313
7314 /* Skip if this bfd does not correspond to an ELF image. */
7315 if (! is_arm_elf (abfd))
7316 return TRUE;
7317
7318 /* We should have chosen a fix type by the time we get here. */
7319 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
7320
7321 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
7322 return TRUE;
7323
7324 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7325 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7326 return TRUE;
7327
7328 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7329 {
7330 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
7331 struct _arm_elf_section_data *sec_data;
7332
7333 /* If we don't have executable progbits, we're not interested in this
7334 section. Also skip if section is to be excluded. */
7335 if (elf_section_type (sec) != SHT_PROGBITS
7336 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7337 || (sec->flags & SEC_EXCLUDE) != 0
7338 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7339 || sec->output_section == bfd_abs_section_ptr
7340 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
7341 continue;
7342
7343 sec_data = elf32_arm_section_data (sec);
7344
7345 if (sec_data->mapcount == 0)
7346 continue;
7347
7348 if (elf_section_data (sec)->this_hdr.contents != NULL)
7349 contents = elf_section_data (sec)->this_hdr.contents;
7350 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7351 goto error_return;
7352
7353 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7354 elf32_arm_compare_mapping);
7355
7356 for (span = 0; span < sec_data->mapcount; span++)
7357 {
7358 unsigned int span_start = sec_data->map[span].vma;
7359 unsigned int span_end = (span == sec_data->mapcount - 1)
7360 ? sec->size : sec_data->map[span + 1].vma;
7361 char span_type = sec_data->map[span].type;
7362
7363 /* FIXME: Only ARM mode is supported at present. We may need to
7364 support Thumb-2 mode also at some point. */
7365 if (span_type != 'a')
7366 continue;
7367
7368 for (i = span_start; i < span_end;)
7369 {
7370 unsigned int next_i = i + 4;
7371 unsigned int insn = bfd_big_endian (abfd)
7372 ? (contents[i] << 24)
7373 | (contents[i + 1] << 16)
7374 | (contents[i + 2] << 8)
7375 | contents[i + 3]
7376 : (contents[i + 3] << 24)
7377 | (contents[i + 2] << 16)
7378 | (contents[i + 1] << 8)
7379 | contents[i];
7380 unsigned int writemask = 0;
7381 enum bfd_arm_vfp11_pipe vpipe;
7382
7383 switch (state)
7384 {
7385 case 0:
7386 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
7387 &numregs);
7388 /* I'm assuming the VFP11 erratum can trigger with denorm
7389 operands on either the FMAC or the DS pipeline. This might
7390 lead to slightly overenthusiastic veneer insertion. */
7391 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
7392 {
7393 state = use_vector ? 1 : 2;
7394 first_fmac = i;
7395 veneer_of_insn = insn;
7396 }
7397 break;
7398
7399 case 1:
7400 {
7401 int other_regs[3], other_numregs;
7402 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7403 other_regs,
7404 &other_numregs);
7405 if (vpipe != VFP11_BAD
7406 && bfd_arm_vfp11_antidependency (writemask, regs,
7407 numregs))
7408 state = 3;
7409 else
7410 state = 2;
7411 }
7412 break;
7413
7414 case 2:
7415 {
7416 int other_regs[3], other_numregs;
7417 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7418 other_regs,
7419 &other_numregs);
7420 if (vpipe != VFP11_BAD
7421 && bfd_arm_vfp11_antidependency (writemask, regs,
7422 numregs))
7423 state = 3;
7424 else
7425 {
7426 state = 0;
7427 next_i = first_fmac + 4;
7428 }
7429 }
7430 break;
7431
7432 case 3:
7433 abort (); /* Should be unreachable. */
7434 }
7435
7436 if (state == 3)
7437 {
7438 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
7439 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7440
7441 elf32_arm_section_data (sec)->erratumcount += 1;
7442
7443 newerr->u.b.vfp_insn = veneer_of_insn;
7444
7445 switch (span_type)
7446 {
7447 case 'a':
7448 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
7449 break;
7450
7451 default:
7452 abort ();
7453 }
7454
7455 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
7456 first_fmac);
7457
7458 newerr->vma = -1;
7459
7460 newerr->next = sec_data->erratumlist;
7461 sec_data->erratumlist = newerr;
7462
7463 state = 0;
7464 }
7465
7466 i = next_i;
7467 }
7468 }
7469
7470 if (contents != NULL
7471 && elf_section_data (sec)->this_hdr.contents != contents)
7472 free (contents);
7473 contents = NULL;
7474 }
7475
7476 return TRUE;
7477
7478 error_return:
7479 if (contents != NULL
7480 && elf_section_data (sec)->this_hdr.contents != contents)
7481 free (contents);
7482
7483 return FALSE;
7484 }
7485
7486 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
7487 after sections have been laid out, using specially-named symbols. */
7488
7489 void
7490 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
7491 struct bfd_link_info *link_info)
7492 {
7493 asection *sec;
7494 struct elf32_arm_link_hash_table *globals;
7495 char *tmp_name;
7496
7497 if (bfd_link_relocatable (link_info))
7498 return;
7499
7500 /* Skip if this bfd does not correspond to an ELF image. */
7501 if (! is_arm_elf (abfd))
7502 return;
7503
7504 globals = elf32_arm_hash_table (link_info);
7505 if (globals == NULL)
7506 return;
7507
7508 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7509 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7510
7511 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7512 {
7513 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7514 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
7515
7516 for (; errnode != NULL; errnode = errnode->next)
7517 {
7518 struct elf_link_hash_entry *myh;
7519 bfd_vma vma;
7520
7521 switch (errnode->type)
7522 {
7523 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
7524 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
7525 /* Find veneer symbol. */
7526 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7527 errnode->u.b.veneer->u.v.id);
7528
7529 myh = elf_link_hash_lookup
7530 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7531
7532 if (myh == NULL)
7533 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7534 "`%s'"), abfd, tmp_name);
7535
7536 vma = myh->root.u.def.section->output_section->vma
7537 + myh->root.u.def.section->output_offset
7538 + myh->root.u.def.value;
7539
7540 errnode->u.b.veneer->vma = vma;
7541 break;
7542
7543 case VFP11_ERRATUM_ARM_VENEER:
7544 case VFP11_ERRATUM_THUMB_VENEER:
7545 /* Find return location. */
7546 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7547 errnode->u.v.id);
7548
7549 myh = elf_link_hash_lookup
7550 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7551
7552 if (myh == NULL)
7553 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7554 "`%s'"), abfd, tmp_name);
7555
7556 vma = myh->root.u.def.section->output_section->vma
7557 + myh->root.u.def.section->output_offset
7558 + myh->root.u.def.value;
7559
7560 errnode->u.v.branch->vma = vma;
7561 break;
7562
7563 default:
7564 abort ();
7565 }
7566 }
7567 }
7568
7569 free (tmp_name);
7570 }
7571
7572 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
7573 return locations after sections have been laid out, using
7574 specially-named symbols. */
7575
7576 void
7577 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
7578 struct bfd_link_info *link_info)
7579 {
7580 asection *sec;
7581 struct elf32_arm_link_hash_table *globals;
7582 char *tmp_name;
7583
7584 if (bfd_link_relocatable (link_info))
7585 return;
7586
7587 /* Skip if this bfd does not correspond to an ELF image. */
7588 if (! is_arm_elf (abfd))
7589 return;
7590
7591 globals = elf32_arm_hash_table (link_info);
7592 if (globals == NULL)
7593 return;
7594
7595 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7596 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7597
7598 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7599 {
7600 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7601 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
7602
7603 for (; errnode != NULL; errnode = errnode->next)
7604 {
7605 struct elf_link_hash_entry *myh;
7606 bfd_vma vma;
7607
7608 switch (errnode->type)
7609 {
7610 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
7611 /* Find veneer symbol. */
7612 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7613 errnode->u.b.veneer->u.v.id);
7614
7615 myh = elf_link_hash_lookup
7616 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7617
7618 if (myh == NULL)
7619 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7620 "`%s'"), abfd, tmp_name);
7621
7622 vma = myh->root.u.def.section->output_section->vma
7623 + myh->root.u.def.section->output_offset
7624 + myh->root.u.def.value;
7625
7626 errnode->u.b.veneer->vma = vma;
7627 break;
7628
7629 case STM32L4XX_ERRATUM_VENEER:
7630 /* Find return location. */
7631 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7632 errnode->u.v.id);
7633
7634 myh = elf_link_hash_lookup
7635 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7636
7637 if (myh == NULL)
7638 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7639 "`%s'"), abfd, tmp_name);
7640
7641 vma = myh->root.u.def.section->output_section->vma
7642 + myh->root.u.def.section->output_offset
7643 + myh->root.u.def.value;
7644
7645 errnode->u.v.branch->vma = vma;
7646 break;
7647
7648 default:
7649 abort ();
7650 }
7651 }
7652 }
7653
7654 free (tmp_name);
7655 }
7656
7657 static inline bfd_boolean
7658 is_thumb2_ldmia (const insn32 insn)
7659 {
7660 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
7661 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
7662 return (insn & 0xffd02000) == 0xe8900000;
7663 }
7664
7665 static inline bfd_boolean
7666 is_thumb2_ldmdb (const insn32 insn)
7667 {
7668 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
7669 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
7670 return (insn & 0xffd02000) == 0xe9100000;
7671 }
7672
7673 static inline bfd_boolean
7674 is_thumb2_vldm (const insn32 insn)
7675 {
7676 /* A6.5 Extension register load or store instruction
7677 A7.7.229
7678 We look for SP 32-bit and DP 64-bit registers.
7679 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
7680 <list> is consecutive 64-bit registers
7681 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
7682 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
7683 <list> is consecutive 32-bit registers
7684 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
7685 if P==0 && U==1 && W==1 && Rn=1101 VPOP
7686 if PUW=010 || PUW=011 || PUW=101 VLDM. */
7687 return
7688 (((insn & 0xfe100f00) == 0xec100b00) ||
7689 ((insn & 0xfe100f00) == 0xec100a00))
7690 && /* (IA without !). */
7691 (((((insn << 7) >> 28) & 0xd) == 0x4)
7692 /* (IA with !), includes VPOP (when reg number is SP). */
7693 || ((((insn << 7) >> 28) & 0xd) == 0x5)
7694 /* (DB with !). */
7695 || ((((insn << 7) >> 28) & 0xd) == 0x9));
7696 }
7697
7698 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
7699 VLDM opcode and:
7700 - computes the number and the mode of memory accesses
7701 - decides if the replacement should be done:
7702 . replaces only if > 8-word accesses
7703 . or (testing purposes only) replaces all accesses. */
7704
7705 static bfd_boolean
7706 stm32l4xx_need_create_replacing_stub (const insn32 insn,
7707 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
7708 {
7709 int nb_words = 0;
7710
7711 /* The field encoding the register list is the same for both LDMIA
7712 and LDMDB encodings. */
7713 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
7714 nb_words = popcount (insn & 0x0000ffff);
7715 else if (is_thumb2_vldm (insn))
7716 nb_words = (insn & 0xff);
7717
7718 /* DEFAULT mode accounts for the real bug condition situation,
7719 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
7720 return
7721 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
7722 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
7723 }
7724
7725 /* Look for potentially-troublesome code sequences which might trigger
7726 the STM STM32L4XX erratum. */
7727
/* Scan the executable Thumb code in ABFD for LDM/VLDM instructions that
   may trigger the STM32L4XX erratum, recording the veneers that will be
   needed.  Returns FALSE only on hard failure (no hash table, or section
   contents could not be read); TRUE otherwise, including when no scanning
   is required.  */
bfd_boolean
bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
				      struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);

  if (globals == NULL)
    return FALSE;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return TRUE;

  if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
    return TRUE;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return TRUE;

  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
	continue;

      sec_data = elf32_arm_section_data (sec);

      /* No mapping symbols means no code spans to scan.  */
      if (sec_data->mapcount == 0)
	continue;

      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;

      /* Sort the mapping symbols by address so spans are contiguous.  */
      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;
	  /* Number of remaining instructions in the current IT block;
	     zero when outside an IT block.  */
	  int itblock_current_pos = 0;

	  /* Only Thumb2 mode need be supported with this CM4 specific
	     code, we should not encounter any arm mode eg span_type
	     != 'a'.  */
	  if (span_type != 't')
	    continue;

	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
	      bfd_boolean insn_32bit = FALSE;
	      bfd_boolean is_ldm = FALSE;
	      bfd_boolean is_vldm = FALSE;
	      bfd_boolean is_not_last_in_it_block = FALSE;

	      /* The first 16-bits of all 32-bit thumb2 instructions start
		 with opcode[15..13]=0b111 and the encoded op1 can be anything
		 except opcode[12..11]!=0b00.
		 See 32-bit Thumb instruction encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      /* Compute the predicate that tells if the instruction
		 is concerned by the IT block
		 - Creates an error if there is a ldm that is not
		   last in the IT block thus cannot be replaced
		 - Otherwise we can create a branch at the end of the
		   IT block, it will be controlled naturally by IT
		   with the proper pseudo-predicate
		 - So the only interesting predicate is the one that
		   tells that we are not on the last item of an IT
		   block.  */
	      if (itblock_current_pos != 0)
		is_not_last_in_it_block = !!--itblock_current_pos;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
		  is_vldm = is_thumb2_vldm (insn);

		  /* Veneers are created for (v)ldm depending on
		     option flags and memory accesses conditions; but
		     if the instruction is not the last instruction of
		     an IT block, we cannot create a jump there, so we
		     bail out.  */
		  if ((is_ldm || is_vldm) &&
		      stm32l4xx_need_create_replacing_stub
		      (insn, globals->stm32l4xx_fix))
		    {
		      if (is_not_last_in_it_block)
			{
			  (*_bfd_error_handler)
			    /* Note - overlong line used here to allow for translation.  */
			    (_("\
%B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
			       "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
			     abfd, sec, (long)i);
			}
		      else
			{
			  elf32_stm32l4xx_erratum_list *newerr =
			    (elf32_stm32l4xx_erratum_list *)
			    bfd_zmalloc
			    (sizeof (elf32_stm32l4xx_erratum_list));

			  elf32_arm_section_data (sec)
			    ->stm32l4xx_erratumcount += 1;
			  newerr->u.b.insn = insn;
			  /* We create only thumb branches.  */
			  newerr->type =
			    STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
			  record_stm32l4xx_erratum_veneer
			    (link_info, newerr, abfd, sec,
			     i,
			     is_ldm ?
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
			  /* Final VMA is filled in by
			     bfd_elf32_arm_stm32l4xx_fix_veneer_locations.  */
			  newerr->vma = -1;
			  newerr->next = sec_data->stm32l4xx_erratumlist;
			  sec_data->stm32l4xx_erratumlist = newerr;
			}
		    }
		}
	      else
		{
		  /* A7.7.37 IT p208
		     IT blocks are only encoded in T1
		     Encoding T1: IT{x{y{z}}} <firstcond>
		     1 0 1 1 - 1 1 1 1 - firstcond - mask
		     if mask = '0000' then see 'related encodings'
		     We don't deal with UNPREDICTABLE, just ignore these.
		     There can be no nested IT blocks so an IT block
		     is naturally a new one for which it is worth
		     computing its size.  */
		  bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) &&
		    ((insn & 0x000f) != 0x0000);
		  /* If we have a new IT block we compute its size.  */
		  if (is_newitblock)
		    {
		      /* Compute the number of instructions controlled
			 by the IT block, it will be used to decide
			 whether we are inside an IT block or not.  */
		      unsigned int mask = insn & 0x000f;
		      itblock_current_pos = 4 - ctz (mask);
		    }
		}

	      i += insn_32bit ? 4 : 2;
	    }
	}

      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);

  return FALSE;
}
7918
7919 /* Set target relocation values needed during linking. */
7920
void
bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
				 struct bfd_link_info *link_info,
				 int target1_is_rel,
				 char * target2_type,
				 int fix_v4bx,
				 int use_blx,
				 bfd_arm_vfp11_fix vfp11_fix,
				 bfd_arm_stm32l4xx_fix stm32l4xx_fix,
				 int no_enum_warn, int no_wchar_warn,
				 int pic_veneer, int fix_cortex_a8,
				 int fix_arm1176)
{
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)
    return;

  globals->target1_is_rel = target1_is_rel;
  /* Map the TARGET2 style name ("rel", "abs", "got-rel") onto the
     corresponding relocation code; anything else is diagnosed.  */
  if (strcmp (target2_type, "rel") == 0)
    globals->target2_reloc = R_ARM_REL32;
  else if (strcmp (target2_type, "abs") == 0)
    globals->target2_reloc = R_ARM_ABS32;
  else if (strcmp (target2_type, "got-rel") == 0)
    globals->target2_reloc = R_ARM_GOT_PREL;
  else
    {
      _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
			  target2_type);
    }
  globals->fix_v4bx = fix_v4bx;
  /* ORed in, so an earlier request for BLX stubs is never cleared.  */
  globals->use_blx |= use_blx;
  globals->vfp11_fix = vfp11_fix;
  globals->stm32l4xx_fix = stm32l4xx_fix;
  globals->pic_veneer = pic_veneer;
  globals->fix_cortex_a8 = fix_cortex_a8;
  globals->fix_arm1176 = fix_arm1176;

  BFD_ASSERT (is_arm_elf (output_bfd));
  /* The size-warning suppressions live in the output bfd's tdata, not in
     the link hash table.  */
  elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
  elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
}
7964
7965 /* Replace the target offset of a Thumb bl or b.w instruction. */
7966
/* Rewrite the two halfwords at INSN (a Thumb BL/B.W in ABFD) so that the
   branch targets OFFSET bytes away.  OFFSET must be halfword-aligned; the
   condition/opcode bits of the original instruction are preserved.  */
static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;	/* First (high) halfword of the instruction.  */
  bfd_vma lower;	/* Second (low) halfword.  */
  int reloc_sign;	/* Sign bit S of the encoded offset.  */

  /* Branch targets are halfword aligned.  */
  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  /* First halfword: S in bit 10, imm10 (offset bits 21..12) in bits 9..0.  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Second halfword: J1 = (~I1) ^ S in bit 13, J2 = (~I2) ^ S in bit 11,
     imm11 (offset bits 11..1) in bits 10..0; bits 15, 14 and 12 of the
     original opcode are preserved.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
7989
7990 /* Thumb code calling an ARM function. */
7991
/* Create (on first use) the Thumb-to-ARM interworking stub for the glue
   symbol NAME and redirect the Thumb BL at OFFSET within INPUT_SECTION
   (instruction bytes at HIT_DATA) to it.  VAL is the address of the real
   ARM destination; ADDEND is the relocation addend.  Returns TRUE on
   success, FALSE when the glue entry is missing (*ERROR_MESSAGE set by
   find_thumb_glue) or interworking is not enabled for SYM_SEC's owner.  */
static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char *           name,
			 bfd *                  input_bfd,
			 bfd *                  output_bfd,
			 asection *             input_section,
			 bfd_byte *             hit_data,
			 asection *             sym_sec,
			 bfd_vma                offset,
			 bfd_signed_vma         addend,
			 bfd_vma                val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* The low bit of the glue symbol's value marks a stub that has not yet
     been populated; clear it and emit the stub instructions.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       "  first occurrence: %B: Thumb call to ARM"),
	     sym_sec->owner, input_bfd, name);

	  return FALSE;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Stub body: BX PC then a NOP, followed by an ARM-mode branch.  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
8090
8091 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
8092
static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Look up the glue symbol created earlier for NAME; its value field
     records the stub's offset into the glue section S.  */
  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* Bit 0 of the recorded offset is set while the stub contents have
     not yet been written; clear it and emit the stub exactly once.  */
  if ((my_offset & 0x01) == 0x01)
    {
      /* Warn (but continue) when the destination object was not
	 compiled for interworking.  */
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       "  first occurrence: %B: arm call to thumb"),
	     sym_sec->owner, input_bfd, name);
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  /* With BLX available a single load of the (Thumb-tagged)
	     target address suffices.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  /* Pre-v5: load the address into r12 and BX through it.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
8185
8186 /* Arm code calling a Thumb function. */
8187
static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Make sure the glue stub for NAME has been emitted; MYH gives its
     location within the glue section.  */
  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  /* Keep only the condition code and opcode bits of the original
     branch instruction; the 24-bit offset field is recomputed below.  */
  tmp = bfd_get_32 (input_bfd, hit_data);
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  /* Merge the word-aligned displacement back into the branch.  */
  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
8242
8243 /* Populate Arm stub for an exported Thumb function. */
8244
static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  /* Traversal callback (see elf32_arm_begin_write_processing): INF is
     really the struct bfd_link_info for this link.  */
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Output VMA of the glue symbol's definition.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return TRUE;
}
8286
/* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */
8288
static bfd_vma
elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
{
  bfd_byte *p;
  bfd_vma glue_addr;
  asection *s;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM_BX_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* bx_glue_offset[reg] packs flags into the low two bits: bit 1 means
     a veneer for REG was requested, bit 0 means it has already been
     written out.  */
  BFD_ASSERT (globals->bx_glue_offset[reg] & 2);

  glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;

  /* Emit the veneer on first use only.  */
  if ((globals->bx_glue_offset[reg] & 1) == 0)
    {
      p = s->contents + glue_addr;
      /* tst/moveq/bx sequence on the requested register (see the
	 armbx*_insn templates).  */
      bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
      bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
      bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
      globals->bx_glue_offset[reg] |= 1;
    }

  return glue_addr + s->output_section->vma + s->output_offset;
}
8322
8323 /* Generate Arm stubs for exported Thumb symbols. */
8324 static void
8325 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
8326 struct bfd_link_info *link_info)
8327 {
8328 struct elf32_arm_link_hash_table * globals;
8329
8330 if (link_info == NULL)
8331 /* Ignore this if we are not called by the ELF backend linker. */
8332 return;
8333
8334 globals = elf32_arm_hash_table (link_info);
8335 if (globals == NULL)
8336 return;
8337
8338 /* If blx is available then exported Thumb symbols are OK and there is
8339 nothing to do. */
8340 if (globals->use_blx)
8341 return;
8342
8343 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
8344 link_info);
8345 }
8346
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.  */
8349
8350 static void
8351 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
8352 bfd_size_type count)
8353 {
8354 struct elf32_arm_link_hash_table *htab;
8355
8356 htab = elf32_arm_hash_table (info);
8357 BFD_ASSERT (htab->root.dynamic_sections_created);
8358 if (sreloc == NULL)
8359 abort ();
8360 sreloc->size += RELOC_SIZE (htab) * count;
8361 }
8362
8363 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
8364 dynamic, the relocations should go in SRELOC, otherwise they should
8365 go in the special .rel.iplt section. */
8366
8367 static void
8368 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
8369 bfd_size_type count)
8370 {
8371 struct elf32_arm_link_hash_table *htab;
8372
8373 htab = elf32_arm_hash_table (info);
8374 if (!htab->root.dynamic_sections_created)
8375 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
8376 else
8377 {
8378 BFD_ASSERT (sreloc != NULL);
8379 sreloc->size += RELOC_SIZE (htab) * count;
8380 }
8381 }
8382
8383 /* Add relocation REL to the end of relocation section SRELOC. */
8384
static void
elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
			asection *sreloc, Elf_Internal_Rela *rel)
{
  bfd_byte *loc;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  /* In a static link, R_ARM_IRELATIVE relocations always go into the
     special .rel.iplt section, whatever the caller passed.  This
     mirrors the allocation done in elf32_arm_allocate_irelocs.  */
  if (!htab->root.dynamic_sections_created
      && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
    sreloc = htab->root.irelplt;
  if (sreloc == NULL)
    abort ();
  /* Append at the next free slot; reloc_count tracks how many entries
     have been emitted so far.  */
  loc = sreloc->contents;
  loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
  /* Guard against overrunning the space reserved earlier.  */
  if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
    abort ();
  SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
}
8404
8405 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
8406 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
8407 to .plt. */
8408
static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
      elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  /* Record where this symbol's PLT entry starts within .plt/.iplt.  */
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.  */
      if (is_iplt_entry)
	arm_plt->got_offset = sgotplt->size;
      else
	/* NOTE(review): the 8 * num_tls_desc adjustment appears to skip
	   8-byte TLS descriptor slots accounted in sgotplt->size —
	   confirm against the TLSDESC allocation code elsewhere.  */
	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      sgotplt->size += 4;
    }
}
8466
8467 static bfd_vma
8468 arm_movw_immediate (bfd_vma value)
8469 {
8470 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
8471 }
8472
8473 static bfd_vma
8474 arm_movt_immediate (bfd_vma value)
8475 {
8476 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
8477 }
8478
8479 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
8480 the entry lives in .iplt and resolves to (*SYM_VALUE)().
8481 Otherwise, DYNINDX is the index of the symbol in the dynamic
8482 symbol table and SYM_VALUE is undefined.
8483
8484 ROOT_PLT points to the offset of the PLT entry from the start of its
8485 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
8486 bookkeeping information.
8487
8488 Returns FALSE if there was a problem. */
8489
static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      /* DYNINDX == -1 selects the ifunc flavour: .iplt/.igot.plt.  */
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  (Bit 0 of got_offset is used
	 as a flag elsewhere, hence the & -2.)  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.  */
      plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && bfd_link_pic (info))
	{
	  /* VxWorks shared-library PLT: patch the GOT offset into word
	     2 and the .rel.plt byte offset into word 5 of the template.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  /* VxWorks executable PLT: words 2, 4 and 5 carry the GOT
	     address, a branch displacement and the .rel.plt offset.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.  */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...  */
	      _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);

	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  The bit-scattering below matches
	     the Thumb-2 32-bit immediate field layout.  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000) )
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);

	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      /* The Thumb stub sits immediately before the PLT entry
		 proper (hence ptr - 4 / ptr - 2).  */
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      /* The short form can only reach a GOT within 256 MiB.  */
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      /* Ordinary PLT relocations are written at their fixed index.  */
      loc = srel->contents + plt_index * RELOC_SIZE (htab);
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }

  return TRUE;
}
8810
8811 /* Some relocations map to different relocations depending on the
8812 target. Return the real relocation. */
8813
8814 static int
8815 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
8816 int r_type)
8817 {
8818 switch (r_type)
8819 {
8820 case R_ARM_TARGET1:
8821 if (globals->target1_is_rel)
8822 return R_ARM_REL32;
8823 else
8824 return R_ARM_ABS32;
8825
8826 case R_ARM_TARGET2:
8827 return globals->target2_reloc;
8828
8829 default:
8830 return r_type;
8831 }
8832 }
8833
8834 /* Return the base VMA address which should be subtracted from real addresses
8835 when resolving @dtpoff relocation.
8836 This is PT_TLS segment p_vaddr. */
8837
8838 static bfd_vma
8839 dtpoff_base (struct bfd_link_info *info)
8840 {
8841 /* If tls_sec is NULL, we should have signalled an error already. */
8842 if (elf_hash_table (info)->tls_sec == NULL)
8843 return 0;
8844 return elf_hash_table (info)->tls_sec->vma;
8845 }
8846
8847 /* Return the relocation value for @tpoff relocation
8848 if STT_TLS virtual address is ADDRESS. */
8849
8850 static bfd_vma
8851 tpoff (struct bfd_link_info *info, bfd_vma address)
8852 {
8853 struct elf_link_hash_table *htab = elf_hash_table (info);
8854 bfd_vma base;
8855
8856 /* If tls_sec is NULL, we should have signalled an error already. */
8857 if (htab->tls_sec == NULL)
8858 return 0;
8859 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
8860 return address - htab->tls_sec->vma + base;
8861 }
8862
8863 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
8864 VALUE is the relocation value. */
8865
8866 static bfd_reloc_status_type
8867 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
8868 {
8869 if (value > 0xfff)
8870 return bfd_reloc_overflow;
8871
8872 value |= bfd_get_32 (abfd, data) & 0xfffff000;
8873 bfd_put_32 (abfd, value, data);
8874 return bfd_reloc_ok;
8875 }
8876
8877 /* Handle TLS relaxations. Relaxing is possible for symbols that use
8878 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
8879 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
8880
8881 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
8882 is to then call final_link_relocate. Return other values in the
8883 case of error.
8884
   FIXME: When --emit-relocs is in effect, we'll emit relocs describing
8886 the pre-relaxed code. It would be nice if the relocs were updated
8887 to match the optimization. */
8888
static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* Rewrite the stored addend: zero for local symbols, otherwise
	 remove the PC bias the original sequence applied (5 when bit 0
	 marks a Thumb address, 8 for ARM).  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      /* The caller still needs to apply the relocation proper.  */
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Recognize each instruction of the TLS descriptor
	 call sequence and rewrite it in place.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same idea as the Thumb case above, for the ARM
	 encoding of the descriptor sequence.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]'  */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0]  */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      /* Write as two halfwords to respect Thumb instruction order.  */
      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
9021
9022 /* For a given value of n, calculate the value of G_n as required to
9023 deal with group relocations. We return it in the form of an
9024 encoded constant-and-rotation, together with the final residual. If n is
9025 specified as less than zero, then final_residual is filled with the
9026 input value and no further action is performed. */
9027
static bfd_vma
calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
{
  int current_n;
  bfd_vma g_n;
  bfd_vma encoded_g_n = 0;
  bfd_vma residual = value;  /* Also known as Y_n.  */

  /* For n < 0 the loop body never runs, so ENCODED_G_N stays 0 and
     FINAL_RESIDUAL receives VALUE unchanged.  */
  for (current_n = 0; current_n <= n; current_n++)
    {
      int shift;

      /* Calculate which part of the value to mask.  */
      if (residual == 0)
	shift = 0;
      else
	{
	  int msb;

	  /* Determine the most significant bit in the residual and
	     align the resulting value to a 2-bit boundary.  */
	  for (msb = 30; msb >= 0; msb -= 2)
	    if (residual & (3 << msb))
	      break;

	  /* The desired shift is now (msb - 6), or zero, whichever
	     is the greater.  */
	  shift = msb - 6;
	  if (shift < 0)
	    shift = 0;
	}

      /* Calculate g_n in 32-bit as well as encoded constant+rotation form.
	 The encoding puts the 8-bit constant in bits 0-7 and the
	 rotation amount (in steps of two bits) in bits 8 upward.  */
      g_n = residual & (0xff << shift);
      encoded_g_n = (g_n >> shift)
		    | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);

      /* Calculate the residual for the next time around.  */
      residual &= ~g_n;
    }

  *final_residual = residual;

  return encoded_g_n;
}
9073
9074 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
9075 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
9076
9077 static int
9078 identify_add_or_sub (bfd_vma insn)
9079 {
9080 int opcode = insn & 0x1e00000;
9081
9082 if (opcode == 1 << 23) /* ADD */
9083 return 1;
9084
9085 if (opcode == 1 << 22) /* SUB */
9086 return -1;
9087
9088 return 0;
9089 }
9090
9091 /* Perform a relocation as part of a final link. */
9092
9093 static bfd_reloc_status_type
9094 elf32_arm_final_link_relocate (reloc_howto_type * howto,
9095 bfd * input_bfd,
9096 bfd * output_bfd,
9097 asection * input_section,
9098 bfd_byte * contents,
9099 Elf_Internal_Rela * rel,
9100 bfd_vma value,
9101 struct bfd_link_info * info,
9102 asection * sym_sec,
9103 const char * sym_name,
9104 unsigned char st_type,
9105 enum arm_st_branch_type branch_type,
9106 struct elf_link_hash_entry * h,
9107 bfd_boolean * unresolved_reloc_p,
9108 char ** error_message)
9109 {
9110 unsigned long r_type = howto->type;
9111 unsigned long r_symndx;
9112 bfd_byte * hit_data = contents + rel->r_offset;
9113 bfd_vma * local_got_offsets;
9114 bfd_vma * local_tlsdesc_gotents;
9115 asection * sgot;
9116 asection * splt;
9117 asection * sreloc = NULL;
9118 asection * srelgot;
9119 bfd_vma addend;
9120 bfd_signed_vma signed_addend;
9121 unsigned char dynreloc_st_type;
9122 bfd_vma dynreloc_value;
9123 struct elf32_arm_link_hash_table * globals;
9124 struct elf32_arm_link_hash_entry *eh;
9125 union gotplt_union *root_plt;
9126 struct arm_plt_info *arm_plt;
9127 bfd_vma plt_offset;
9128 bfd_vma gotplt_offset;
9129 bfd_boolean has_iplt_entry;
9130
9131 globals = elf32_arm_hash_table (info);
9132 if (globals == NULL)
9133 return bfd_reloc_notsupported;
9134
9135 BFD_ASSERT (is_arm_elf (input_bfd));
9136
9137 /* Some relocation types map to different relocations depending on the
9138 target. We pick the right one here. */
9139 r_type = arm_real_reloc_type (globals, r_type);
9140
9141 /* It is possible to have linker relaxations on some TLS access
9142 models. Update our information here. */
9143 r_type = elf32_arm_tls_transition (info, r_type, h);
9144
9145 if (r_type != howto->type)
9146 howto = elf32_arm_howto_from_type (r_type);
9147
9148 eh = (struct elf32_arm_link_hash_entry *) h;
9149 sgot = globals->root.sgot;
9150 local_got_offsets = elf_local_got_offsets (input_bfd);
9151 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
9152
9153 if (globals->root.dynamic_sections_created)
9154 srelgot = globals->root.srelgot;
9155 else
9156 srelgot = NULL;
9157
9158 r_symndx = ELF32_R_SYM (rel->r_info);
9159
9160 if (globals->use_rel)
9161 {
9162 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
9163
9164 if (addend & ((howto->src_mask + 1) >> 1))
9165 {
9166 signed_addend = -1;
9167 signed_addend &= ~ howto->src_mask;
9168 signed_addend |= addend;
9169 }
9170 else
9171 signed_addend = addend;
9172 }
9173 else
9174 addend = signed_addend = rel->r_addend;
9175
9176 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
9177 are resolving a function call relocation. */
9178 if (using_thumb_only (globals)
9179 && (r_type == R_ARM_THM_CALL
9180 || r_type == R_ARM_THM_JUMP24)
9181 && branch_type == ST_BRANCH_TO_ARM)
9182 branch_type = ST_BRANCH_TO_THUMB;
9183
9184 /* Record the symbol information that should be used in dynamic
9185 relocations. */
9186 dynreloc_st_type = st_type;
9187 dynreloc_value = value;
9188 if (branch_type == ST_BRANCH_TO_THUMB)
9189 dynreloc_value |= 1;
9190
9191 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
9192 VALUE appropriately for relocations that we resolve at link time. */
9193 has_iplt_entry = FALSE;
9194 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
9195 && root_plt->offset != (bfd_vma) -1)
9196 {
9197 plt_offset = root_plt->offset;
9198 gotplt_offset = arm_plt->got_offset;
9199
9200 if (h == NULL || eh->is_iplt)
9201 {
9202 has_iplt_entry = TRUE;
9203 splt = globals->root.iplt;
9204
9205 /* Populate .iplt entries here, because not all of them will
9206 be seen by finish_dynamic_symbol. The lower bit is set if
9207 we have already populated the entry. */
9208 if (plt_offset & 1)
9209 plt_offset--;
9210 else
9211 {
9212 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
9213 -1, dynreloc_value))
9214 root_plt->offset |= 1;
9215 else
9216 return bfd_reloc_notsupported;
9217 }
9218
9219 /* Static relocations always resolve to the .iplt entry. */
9220 st_type = STT_FUNC;
9221 value = (splt->output_section->vma
9222 + splt->output_offset
9223 + plt_offset);
9224 branch_type = ST_BRANCH_TO_ARM;
9225
9226 /* If there are non-call relocations that resolve to the .iplt
9227 entry, then all dynamic ones must too. */
9228 if (arm_plt->noncall_refcount != 0)
9229 {
9230 dynreloc_st_type = st_type;
9231 dynreloc_value = value;
9232 }
9233 }
9234 else
9235 /* We populate the .plt entry in finish_dynamic_symbol. */
9236 splt = globals->root.splt;
9237 }
9238 else
9239 {
9240 splt = NULL;
9241 plt_offset = (bfd_vma) -1;
9242 gotplt_offset = (bfd_vma) -1;
9243 }
9244
9245 switch (r_type)
9246 {
9247 case R_ARM_NONE:
9248 /* We don't need to find a value for this symbol. It's just a
9249 marker. */
9250 *unresolved_reloc_p = FALSE;
9251 return bfd_reloc_ok;
9252
9253 case R_ARM_ABS12:
9254 if (!globals->vxworks_p)
9255 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9256
9257 case R_ARM_PC24:
9258 case R_ARM_ABS32:
9259 case R_ARM_ABS32_NOI:
9260 case R_ARM_REL32:
9261 case R_ARM_REL32_NOI:
9262 case R_ARM_CALL:
9263 case R_ARM_JUMP24:
9264 case R_ARM_XPC25:
9265 case R_ARM_PREL31:
9266 case R_ARM_PLT32:
9267 /* Handle relocations which should use the PLT entry. ABS32/REL32
9268 will use the symbol's value, which may point to a PLT entry, but we
9269 don't need to handle that here. If we created a PLT entry, all
9270 branches in this object should go to it, except if the PLT is too
9271 far away, in which case a long branch stub should be inserted. */
9272 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
9273 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
9274 && r_type != R_ARM_CALL
9275 && r_type != R_ARM_JUMP24
9276 && r_type != R_ARM_PLT32)
9277 && plt_offset != (bfd_vma) -1)
9278 {
9279 /* If we've created a .plt section, and assigned a PLT entry
9280 to this function, it must either be a STT_GNU_IFUNC reference
9281 or not be known to bind locally. In other cases, we should
9282 have cleared the PLT entry by now. */
9283 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
9284
9285 value = (splt->output_section->vma
9286 + splt->output_offset
9287 + plt_offset);
9288 *unresolved_reloc_p = FALSE;
9289 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9290 contents, rel->r_offset, value,
9291 rel->r_addend);
9292 }
9293
9294 /* When generating a shared object or relocatable executable, these
9295 relocations are copied into the output file to be resolved at
9296 run time. */
9297 if ((bfd_link_pic (info)
9298 || globals->root.is_relocatable_executable)
9299 && (input_section->flags & SEC_ALLOC)
9300 && !(globals->vxworks_p
9301 && strcmp (input_section->output_section->name,
9302 ".tls_vars") == 0)
9303 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
9304 || !SYMBOL_CALLS_LOCAL (info, h))
9305 && !(input_bfd == globals->stub_bfd
9306 && strstr (input_section->name, STUB_SUFFIX))
9307 && (h == NULL
9308 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9309 || h->root.type != bfd_link_hash_undefweak)
9310 && r_type != R_ARM_PC24
9311 && r_type != R_ARM_CALL
9312 && r_type != R_ARM_JUMP24
9313 && r_type != R_ARM_PREL31
9314 && r_type != R_ARM_PLT32)
9315 {
9316 Elf_Internal_Rela outrel;
9317 bfd_boolean skip, relocate;
9318
9319 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
9320 && !h->def_regular)
9321 {
9322 char *v = _("shared object");
9323
9324 if (bfd_link_executable (info))
9325 v = _("PIE executable");
9326
9327 (*_bfd_error_handler)
9328 (_("%B: relocation %s against external or undefined symbol `%s'"
9329 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
9330 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
9331 return bfd_reloc_notsupported;
9332 }
9333
9334 *unresolved_reloc_p = FALSE;
9335
9336 if (sreloc == NULL && globals->root.dynamic_sections_created)
9337 {
9338 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
9339 ! globals->use_rel);
9340
9341 if (sreloc == NULL)
9342 return bfd_reloc_notsupported;
9343 }
9344
9345 skip = FALSE;
9346 relocate = FALSE;
9347
9348 outrel.r_addend = addend;
9349 outrel.r_offset =
9350 _bfd_elf_section_offset (output_bfd, info, input_section,
9351 rel->r_offset);
9352 if (outrel.r_offset == (bfd_vma) -1)
9353 skip = TRUE;
9354 else if (outrel.r_offset == (bfd_vma) -2)
9355 skip = TRUE, relocate = TRUE;
9356 outrel.r_offset += (input_section->output_section->vma
9357 + input_section->output_offset);
9358
9359 if (skip)
9360 memset (&outrel, 0, sizeof outrel);
9361 else if (h != NULL
9362 && h->dynindx != -1
9363 && (!bfd_link_pic (info)
9364 || !SYMBOLIC_BIND (info, h)
9365 || !h->def_regular))
9366 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
9367 else
9368 {
9369 int symbol;
9370
9371 /* This symbol is local, or marked to become local. */
9372 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
9373 if (globals->symbian_p)
9374 {
9375 asection *osec;
9376
9377 /* On Symbian OS, the data segment and text segement
9378 can be relocated independently. Therefore, we
9379 must indicate the segment to which this
9380 relocation is relative. The BPABI allows us to
9381 use any symbol in the right segment; we just use
9382 the section symbol as it is convenient. (We
9383 cannot use the symbol given by "h" directly as it
9384 will not appear in the dynamic symbol table.)
9385
9386 Note that the dynamic linker ignores the section
9387 symbol value, so we don't subtract osec->vma
9388 from the emitted reloc addend. */
9389 if (sym_sec)
9390 osec = sym_sec->output_section;
9391 else
9392 osec = input_section->output_section;
9393 symbol = elf_section_data (osec)->dynindx;
9394 if (symbol == 0)
9395 {
9396 struct elf_link_hash_table *htab = elf_hash_table (info);
9397
9398 if ((osec->flags & SEC_READONLY) == 0
9399 && htab->data_index_section != NULL)
9400 osec = htab->data_index_section;
9401 else
9402 osec = htab->text_index_section;
9403 symbol = elf_section_data (osec)->dynindx;
9404 }
9405 BFD_ASSERT (symbol != 0);
9406 }
9407 else
9408 /* On SVR4-ish systems, the dynamic loader cannot
9409 relocate the text and data segments independently,
9410 so the symbol does not matter. */
9411 symbol = 0;
9412 if (dynreloc_st_type == STT_GNU_IFUNC)
9413 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
9414 to the .iplt entry. Instead, every non-call reference
9415 must use an R_ARM_IRELATIVE relocation to obtain the
9416 correct run-time address. */
9417 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
9418 else
9419 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
9420 if (globals->use_rel)
9421 relocate = TRUE;
9422 else
9423 outrel.r_addend += dynreloc_value;
9424 }
9425
9426 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
9427
9428 /* If this reloc is against an external symbol, we do not want to
9429 fiddle with the addend. Otherwise, we need to include the symbol
9430 value so that it becomes an addend for the dynamic reloc. */
9431 if (! relocate)
9432 return bfd_reloc_ok;
9433
9434 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9435 contents, rel->r_offset,
9436 dynreloc_value, (bfd_vma) 0);
9437 }
9438 else switch (r_type)
9439 {
9440 case R_ARM_ABS12:
9441 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9442
9443 case R_ARM_XPC25: /* Arm BLX instruction. */
9444 case R_ARM_CALL:
9445 case R_ARM_JUMP24:
9446 case R_ARM_PC24: /* Arm B/BL instruction. */
9447 case R_ARM_PLT32:
9448 {
9449 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
9450
9451 if (r_type == R_ARM_XPC25)
9452 {
9453 /* Check for Arm calling Arm function. */
9454 /* FIXME: Should we translate the instruction into a BL
9455 instruction instead ? */
9456 if (branch_type != ST_BRANCH_TO_THUMB)
9457 (*_bfd_error_handler)
9458 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
9459 input_bfd,
9460 h ? h->root.root.string : "(local)");
9461 }
9462 else if (r_type == R_ARM_PC24)
9463 {
9464 /* Check for Arm calling Thumb function. */
9465 if (branch_type == ST_BRANCH_TO_THUMB)
9466 {
9467 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
9468 output_bfd, input_section,
9469 hit_data, sym_sec, rel->r_offset,
9470 signed_addend, value,
9471 error_message))
9472 return bfd_reloc_ok;
9473 else
9474 return bfd_reloc_dangerous;
9475 }
9476 }
9477
9478 /* Check if a stub has to be inserted because the
9479 destination is too far or we are changing mode. */
9480 if ( r_type == R_ARM_CALL
9481 || r_type == R_ARM_JUMP24
9482 || r_type == R_ARM_PLT32)
9483 {
9484 enum elf32_arm_stub_type stub_type = arm_stub_none;
9485 struct elf32_arm_link_hash_entry *hash;
9486
9487 hash = (struct elf32_arm_link_hash_entry *) h;
9488 stub_type = arm_type_of_stub (info, input_section, rel,
9489 st_type, &branch_type,
9490 hash, value, sym_sec,
9491 input_bfd, sym_name);
9492
9493 if (stub_type != arm_stub_none)
9494 {
9495 /* The target is out of reach, so redirect the
9496 branch to the local stub for this function. */
9497 stub_entry = elf32_arm_get_stub_entry (input_section,
9498 sym_sec, h,
9499 rel, globals,
9500 stub_type);
9501 {
9502 if (stub_entry != NULL)
9503 value = (stub_entry->stub_offset
9504 + stub_entry->stub_sec->output_offset
9505 + stub_entry->stub_sec->output_section->vma);
9506
9507 if (plt_offset != (bfd_vma) -1)
9508 *unresolved_reloc_p = FALSE;
9509 }
9510 }
9511 else
9512 {
9513 /* If the call goes through a PLT entry, make sure to
9514 check distance to the right destination address. */
9515 if (plt_offset != (bfd_vma) -1)
9516 {
9517 value = (splt->output_section->vma
9518 + splt->output_offset
9519 + plt_offset);
9520 *unresolved_reloc_p = FALSE;
9521 /* The PLT entry is in ARM mode, regardless of the
9522 target function. */
9523 branch_type = ST_BRANCH_TO_ARM;
9524 }
9525 }
9526 }
9527
9528 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
9529 where:
9530 S is the address of the symbol in the relocation.
9531 P is address of the instruction being relocated.
9532 A is the addend (extracted from the instruction) in bytes.
9533
9534 S is held in 'value'.
9535 P is the base address of the section containing the
9536 instruction plus the offset of the reloc into that
9537 section, ie:
9538 (input_section->output_section->vma +
9539 input_section->output_offset +
9540 rel->r_offset).
9541 A is the addend, converted into bytes, ie:
9542 (signed_addend * 4)
9543
9544 Note: None of these operations have knowledge of the pipeline
9545 size of the processor, thus it is up to the assembler to
9546 encode this information into the addend. */
9547 value -= (input_section->output_section->vma
9548 + input_section->output_offset);
9549 value -= rel->r_offset;
9550 if (globals->use_rel)
9551 value += (signed_addend << howto->size);
9552 else
9553 /* RELA addends do not have to be adjusted by howto->size. */
9554 value += signed_addend;
9555
9556 signed_addend = value;
9557 signed_addend >>= howto->rightshift;
9558
9559 /* A branch to an undefined weak symbol is turned into a jump to
9560 the next instruction unless a PLT entry will be created.
9561 Do the same for local undefined symbols (but not for STN_UNDEF).
9562 The jump to the next instruction is optimized as a NOP depending
9563 on the architecture. */
9564 if (h ? (h->root.type == bfd_link_hash_undefweak
9565 && plt_offset == (bfd_vma) -1)
9566 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
9567 {
9568 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
9569
9570 if (arch_has_arm_nop (globals))
9571 value |= 0x0320f000;
9572 else
9573 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
9574 }
9575 else
9576 {
9577 /* Perform a signed range check. */
9578 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
9579 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
9580 return bfd_reloc_overflow;
9581
9582 addend = (value & 2);
9583
9584 value = (signed_addend & howto->dst_mask)
9585 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
9586
9587 if (r_type == R_ARM_CALL)
9588 {
9589 /* Set the H bit in the BLX instruction. */
9590 if (branch_type == ST_BRANCH_TO_THUMB)
9591 {
9592 if (addend)
9593 value |= (1 << 24);
9594 else
9595 value &= ~(bfd_vma)(1 << 24);
9596 }
9597
9598 /* Select the correct instruction (BL or BLX). */
9599 /* Only if we are not handling a BL to a stub. In this
9600 case, mode switching is performed by the stub. */
9601 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
9602 value |= (1 << 28);
9603 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
9604 {
9605 value &= ~(bfd_vma)(1 << 28);
9606 value |= (1 << 24);
9607 }
9608 }
9609 }
9610 }
9611 break;
9612
9613 case R_ARM_ABS32:
9614 value += addend;
9615 if (branch_type == ST_BRANCH_TO_THUMB)
9616 value |= 1;
9617 break;
9618
9619 case R_ARM_ABS32_NOI:
9620 value += addend;
9621 break;
9622
9623 case R_ARM_REL32:
9624 value += addend;
9625 if (branch_type == ST_BRANCH_TO_THUMB)
9626 value |= 1;
9627 value -= (input_section->output_section->vma
9628 + input_section->output_offset + rel->r_offset);
9629 break;
9630
9631 case R_ARM_REL32_NOI:
9632 value += addend;
9633 value -= (input_section->output_section->vma
9634 + input_section->output_offset + rel->r_offset);
9635 break;
9636
9637 case R_ARM_PREL31:
9638 value -= (input_section->output_section->vma
9639 + input_section->output_offset + rel->r_offset);
9640 value += signed_addend;
9641 if (! h || h->root.type != bfd_link_hash_undefweak)
9642 {
9643 /* Check for overflow. */
9644 if ((value ^ (value >> 1)) & (1 << 30))
9645 return bfd_reloc_overflow;
9646 }
9647 value &= 0x7fffffff;
9648 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
9649 if (branch_type == ST_BRANCH_TO_THUMB)
9650 value |= 1;
9651 break;
9652 }
9653
9654 bfd_put_32 (input_bfd, value, hit_data);
9655 return bfd_reloc_ok;
9656
9657 case R_ARM_ABS8:
9658 /* PR 16202: Refectch the addend using the correct size. */
9659 if (globals->use_rel)
9660 addend = bfd_get_8 (input_bfd, hit_data);
9661 value += addend;
9662
9663 /* There is no way to tell whether the user intended to use a signed or
9664 unsigned addend. When checking for overflow we accept either,
9665 as specified by the AAELF. */
9666 if ((long) value > 0xff || (long) value < -0x80)
9667 return bfd_reloc_overflow;
9668
9669 bfd_put_8 (input_bfd, value, hit_data);
9670 return bfd_reloc_ok;
9671
9672 case R_ARM_ABS16:
9673 /* PR 16202: Refectch the addend using the correct size. */
9674 if (globals->use_rel)
9675 addend = bfd_get_16 (input_bfd, hit_data);
9676 value += addend;
9677
9678 /* See comment for R_ARM_ABS8. */
9679 if ((long) value > 0xffff || (long) value < -0x8000)
9680 return bfd_reloc_overflow;
9681
9682 bfd_put_16 (input_bfd, value, hit_data);
9683 return bfd_reloc_ok;
9684
9685 case R_ARM_THM_ABS5:
9686 /* Support ldr and str instructions for the thumb. */
9687 if (globals->use_rel)
9688 {
9689 /* Need to refetch addend. */
9690 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9691 /* ??? Need to determine shift amount from operand size. */
9692 addend >>= howto->rightshift;
9693 }
9694 value += addend;
9695
9696 /* ??? Isn't value unsigned? */
9697 if ((long) value > 0x1f || (long) value < -0x10)
9698 return bfd_reloc_overflow;
9699
9700 /* ??? Value needs to be properly shifted into place first. */
9701 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
9702 bfd_put_16 (input_bfd, value, hit_data);
9703 return bfd_reloc_ok;
9704
9705 case R_ARM_THM_ALU_PREL_11_0:
9706 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
9707 {
9708 bfd_vma insn;
9709 bfd_signed_vma relocation;
9710
9711 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9712 | bfd_get_16 (input_bfd, hit_data + 2);
9713
9714 if (globals->use_rel)
9715 {
9716 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
9717 | ((insn & (1 << 26)) >> 15);
9718 if (insn & 0xf00000)
9719 signed_addend = -signed_addend;
9720 }
9721
9722 relocation = value + signed_addend;
9723 relocation -= Pa (input_section->output_section->vma
9724 + input_section->output_offset
9725 + rel->r_offset);
9726
9727 value = relocation;
9728
9729 if (value >= 0x1000)
9730 return bfd_reloc_overflow;
9731
9732 insn = (insn & 0xfb0f8f00) | (value & 0xff)
9733 | ((value & 0x700) << 4)
9734 | ((value & 0x800) << 15);
9735 if (relocation < 0)
9736 insn |= 0xa00000;
9737
9738 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9739 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9740
9741 return bfd_reloc_ok;
9742 }
9743
9744 case R_ARM_THM_PC8:
9745 /* PR 10073: This reloc is not generated by the GNU toolchain,
9746 but it is supported for compatibility with third party libraries
9747 generated by other compilers, specifically the ARM/IAR. */
9748 {
9749 bfd_vma insn;
9750 bfd_signed_vma relocation;
9751
9752 insn = bfd_get_16 (input_bfd, hit_data);
9753
9754 if (globals->use_rel)
9755 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
9756
9757 relocation = value + addend;
9758 relocation -= Pa (input_section->output_section->vma
9759 + input_section->output_offset
9760 + rel->r_offset);
9761
9762 value = relocation;
9763
9764 /* We do not check for overflow of this reloc. Although strictly
9765 speaking this is incorrect, it appears to be necessary in order
9766 to work with IAR generated relocs. Since GCC and GAS do not
9767 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
9768 a problem for them. */
9769 value &= 0x3fc;
9770
9771 insn = (insn & 0xff00) | (value >> 2);
9772
9773 bfd_put_16 (input_bfd, insn, hit_data);
9774
9775 return bfd_reloc_ok;
9776 }
9777
9778 case R_ARM_THM_PC12:
9779 /* Corresponds to: ldr.w reg, [pc, #offset]. */
9780 {
9781 bfd_vma insn;
9782 bfd_signed_vma relocation;
9783
9784 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9785 | bfd_get_16 (input_bfd, hit_data + 2);
9786
9787 if (globals->use_rel)
9788 {
9789 signed_addend = insn & 0xfff;
9790 if (!(insn & (1 << 23)))
9791 signed_addend = -signed_addend;
9792 }
9793
9794 relocation = value + signed_addend;
9795 relocation -= Pa (input_section->output_section->vma
9796 + input_section->output_offset
9797 + rel->r_offset);
9798
9799 value = relocation;
9800
9801 if (value >= 0x1000)
9802 return bfd_reloc_overflow;
9803
9804 insn = (insn & 0xff7ff000) | value;
9805 if (relocation >= 0)
9806 insn |= (1 << 23);
9807
9808 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9809 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9810
9811 return bfd_reloc_ok;
9812 }
9813
9814 case R_ARM_THM_XPC22:
9815 case R_ARM_THM_CALL:
9816 case R_ARM_THM_JUMP24:
9817 /* Thumb BL (branch long instruction). */
9818 {
9819 bfd_vma relocation;
9820 bfd_vma reloc_sign;
9821 bfd_boolean overflow = FALSE;
9822 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9823 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9824 bfd_signed_vma reloc_signed_max;
9825 bfd_signed_vma reloc_signed_min;
9826 bfd_vma check;
9827 bfd_signed_vma signed_check;
9828 int bitsize;
9829 const int thumb2 = using_thumb2 (globals);
9830
9831 /* A branch to an undefined weak symbol is turned into a jump to
9832 the next instruction unless a PLT entry will be created.
9833 The jump to the next instruction is optimized as a NOP.W for
9834 Thumb-2 enabled architectures. */
9835 if (h && h->root.type == bfd_link_hash_undefweak
9836 && plt_offset == (bfd_vma) -1)
9837 {
9838 if (thumb2)
9839 {
9840 bfd_put_16 (input_bfd, 0xf3af, hit_data);
9841 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
9842 }
9843 else
9844 {
9845 bfd_put_16 (input_bfd, 0xe000, hit_data);
9846 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
9847 }
9848 return bfd_reloc_ok;
9849 }
9850
9851 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
9852 with Thumb-1) involving the J1 and J2 bits. */
9853 if (globals->use_rel)
9854 {
9855 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
9856 bfd_vma upper = upper_insn & 0x3ff;
9857 bfd_vma lower = lower_insn & 0x7ff;
9858 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
9859 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
9860 bfd_vma i1 = j1 ^ s ? 0 : 1;
9861 bfd_vma i2 = j2 ^ s ? 0 : 1;
9862
9863 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
9864 /* Sign extend. */
9865 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
9866
9867 signed_addend = addend;
9868 }
9869
9870 if (r_type == R_ARM_THM_XPC22)
9871 {
9872 /* Check for Thumb to Thumb call. */
9873 /* FIXME: Should we translate the instruction into a BL
9874 instruction instead ? */
9875 if (branch_type == ST_BRANCH_TO_THUMB)
9876 (*_bfd_error_handler)
9877 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
9878 input_bfd,
9879 h ? h->root.root.string : "(local)");
9880 }
9881 else
9882 {
9883 /* If it is not a call to Thumb, assume call to Arm.
9884 If it is a call relative to a section name, then it is not a
9885 function call at all, but rather a long jump. Calls through
9886 the PLT do not require stubs. */
9887 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
9888 {
9889 if (globals->use_blx && r_type == R_ARM_THM_CALL)
9890 {
9891 /* Convert BL to BLX. */
9892 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9893 }
9894 else if (( r_type != R_ARM_THM_CALL)
9895 && (r_type != R_ARM_THM_JUMP24))
9896 {
9897 if (elf32_thumb_to_arm_stub
9898 (info, sym_name, input_bfd, output_bfd, input_section,
9899 hit_data, sym_sec, rel->r_offset, signed_addend, value,
9900 error_message))
9901 return bfd_reloc_ok;
9902 else
9903 return bfd_reloc_dangerous;
9904 }
9905 }
9906 else if (branch_type == ST_BRANCH_TO_THUMB
9907 && globals->use_blx
9908 && r_type == R_ARM_THM_CALL)
9909 {
9910 /* Make sure this is a BL. */
9911 lower_insn |= 0x1800;
9912 }
9913 }
9914
9915 enum elf32_arm_stub_type stub_type = arm_stub_none;
9916 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
9917 {
9918 /* Check if a stub has to be inserted because the destination
9919 is too far. */
9920 struct elf32_arm_stub_hash_entry *stub_entry;
9921 struct elf32_arm_link_hash_entry *hash;
9922
9923 hash = (struct elf32_arm_link_hash_entry *) h;
9924
9925 stub_type = arm_type_of_stub (info, input_section, rel,
9926 st_type, &branch_type,
9927 hash, value, sym_sec,
9928 input_bfd, sym_name);
9929
9930 if (stub_type != arm_stub_none)
9931 {
9932 /* The target is out of reach or we are changing modes, so
9933 redirect the branch to the local stub for this
9934 function. */
9935 stub_entry = elf32_arm_get_stub_entry (input_section,
9936 sym_sec, h,
9937 rel, globals,
9938 stub_type);
9939 if (stub_entry != NULL)
9940 {
9941 value = (stub_entry->stub_offset
9942 + stub_entry->stub_sec->output_offset
9943 + stub_entry->stub_sec->output_section->vma);
9944
9945 if (plt_offset != (bfd_vma) -1)
9946 *unresolved_reloc_p = FALSE;
9947 }
9948
9949 /* If this call becomes a call to Arm, force BLX. */
9950 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
9951 {
9952 if ((stub_entry
9953 && !arm_stub_is_thumb (stub_entry->stub_type))
9954 || branch_type != ST_BRANCH_TO_THUMB)
9955 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9956 }
9957 }
9958 }
9959
9960 /* Handle calls via the PLT. */
9961 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
9962 {
9963 value = (splt->output_section->vma
9964 + splt->output_offset
9965 + plt_offset);
9966
9967 if (globals->use_blx
9968 && r_type == R_ARM_THM_CALL
9969 && ! using_thumb_only (globals))
9970 {
9971 /* If the Thumb BLX instruction is available, convert
9972 the BL to a BLX instruction to call the ARM-mode
9973 PLT entry. */
9974 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9975 branch_type = ST_BRANCH_TO_ARM;
9976 }
9977 else
9978 {
9979 if (! using_thumb_only (globals))
9980 /* Target the Thumb stub before the ARM PLT entry. */
9981 value -= PLT_THUMB_STUB_SIZE;
9982 branch_type = ST_BRANCH_TO_THUMB;
9983 }
9984 *unresolved_reloc_p = FALSE;
9985 }
9986
9987 relocation = value + signed_addend;
9988
9989 relocation -= (input_section->output_section->vma
9990 + input_section->output_offset
9991 + rel->r_offset);
9992
9993 check = relocation >> howto->rightshift;
9994
9995 /* If this is a signed value, the rightshift just dropped
9996 leading 1 bits (assuming twos complement). */
9997 if ((bfd_signed_vma) relocation >= 0)
9998 signed_check = check;
9999 else
10000 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
10001
10002 /* Calculate the permissable maximum and minimum values for
10003 this relocation according to whether we're relocating for
10004 Thumb-2 or not. */
10005 bitsize = howto->bitsize;
10006 if (!thumb2)
10007 bitsize -= 2;
10008 reloc_signed_max = (1 << (bitsize - 1)) - 1;
10009 reloc_signed_min = ~reloc_signed_max;
10010
10011 /* Assumes two's complement. */
10012 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
10013 overflow = TRUE;
10014
10015 if ((lower_insn & 0x5000) == 0x4000)
10016 /* For a BLX instruction, make sure that the relocation is rounded up
10017 to a word boundary. This follows the semantics of the instruction
10018 which specifies that bit 1 of the target address will come from bit
10019 1 of the base address. */
10020 relocation = (relocation + 2) & ~ 3;
10021
10022 /* Put RELOCATION back into the insn. Assumes two's complement.
10023 We use the Thumb-2 encoding, which is safe even if dealing with
10024 a Thumb-1 instruction by virtue of our overflow check above. */
10025 reloc_sign = (signed_check < 0) ? 1 : 0;
10026 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
10027 | ((relocation >> 12) & 0x3ff)
10028 | (reloc_sign << 10);
10029 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
10030 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
10031 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
10032 | ((relocation >> 1) & 0x7ff);
10033
10034 /* Put the relocated value back in the object file: */
10035 bfd_put_16 (input_bfd, upper_insn, hit_data);
10036 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10037
10038 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
10039 }
10040 break;
10041
    case R_ARM_THM_JUMP19:
      /* Thumb32 conditional branch instruction (B<cond>.W).  The branch
	 offset is split between the two 16-bit halfwords of the
	 instruction: S and imm6 in the upper halfword, J1, J2 and imm11
	 in the lower one, giving a +/-1MB range.  */
      {
	bfd_vma relocation;
	bfd_boolean overflow = FALSE;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	/* Range of the 21-bit signed byte offset encodable in the insn.  */
	bfd_signed_vma reloc_signed_max = 0xffffe;
	bfd_signed_vma reloc_signed_min = -0x100000;
	bfd_signed_vma signed_check;
	enum elf32_arm_stub_type stub_type = arm_stub_none;
	struct elf32_arm_stub_hash_entry *stub_entry;
	struct elf32_arm_link_hash_entry *hash;

	/* Need to refetch the addend, reconstruct the top three bits,
	   and squish the two 11 bit pieces together.  */
	if (globals->use_rel)
	  {
	    bfd_vma S = (upper_insn & 0x0400) >> 10;
	    bfd_vma upper = (upper_insn & 0x003f);
	    bfd_vma J1 = (lower_insn & 0x2000) >> 13;
	    bfd_vma J2 = (lower_insn & 0x0800) >> 11;
	    bfd_vma lower = (lower_insn & 0x07ff);

	    upper |= J1 << 6;
	    upper |= J2 << 7;
	    upper |= (!S) << 8;
	    upper -= 0x0100; /* Sign extend.  */

	    addend = (upper << 12) | (lower << 1);
	    signed_addend = addend;
	  }

	/* Handle calls via the PLT.  */
	if (plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);
	    /* Target the Thumb stub before the ARM PLT entry.  */
	    value -= PLT_THUMB_STUB_SIZE;
	    *unresolved_reloc_p = FALSE;
	  }

	hash = (struct elf32_arm_link_hash_entry *)h;

	/* A stub may be needed e.g. when the destination is out of
	   range or requires a mode change the branch cannot perform.  */
	stub_type = arm_type_of_stub (info, input_section, rel,
				      st_type, &branch_type,
				      hash, value, sym_sec,
				      input_bfd, sym_name);
	if (stub_type != arm_stub_none)
	  {
	    stub_entry = elf32_arm_get_stub_entry (input_section,
						   sym_sec, h,
						   rel, globals,
						   stub_type);
	    if (stub_entry != NULL)
	      {
		/* Redirect the branch to the stub instead of the
		   final destination.  */
		value = (stub_entry->stub_offset
			 + stub_entry->stub_sec->output_offset
			 + stub_entry->stub_sec->output_section->vma);
	      }
	  }

	relocation = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);
	signed_check = (bfd_signed_vma) relocation;

	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma S = (relocation & 0x00100000) >> 20;
	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
	  bfd_vma lo = (relocation & 0x00000ffe) >> 1;

	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction.  The encodable field widths and
	 shifts are taken from the HOWTO entry rather than hard-coded,
	 so this one case handles all three relocation types.  */
      {
	bfd_signed_vma relocation;
	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
	bfd_signed_vma signed_check;

	/* CZB cannot jump backward.  */
	if (r_type == R_ARM_THM_JUMP6)
	  reloc_signed_min = 0;

	if (globals->use_rel)
	  {
	    /* Need to refetch addend.  */
	    addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
	    /* Manually sign-extend: if the top bit of the field is set,
	       fill all bits above the field with ones.  */
	    if (addend & ((howto->src_mask + 1) >> 1))
	      {
		signed_addend = -1;
		signed_addend &= ~ howto->src_mask;
		signed_addend |= addend;
	      }
	    else
	      signed_addend = addend;
	    /* The value in the insn has been right shifted.  We need to
	       undo this, so that we can perform the address calculation
	       in terms of bytes.  */
	    signed_addend <<= howto->rightshift;
	  }
	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	relocation >>= howto->rightshift;
	signed_check = relocation;

	if (r_type == R_ARM_THM_JUMP6)
	  /* CB{N}Z splits its 6-bit immediate into i:imm5 fields.  */
	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
	else
	  relocation &= howto->dst_mask;
	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));

	bfd_put_16 (input_bfd, relocation, hit_data);

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  return bfd_reloc_overflow;

	return bfd_reloc_ok;
      }
10188
10189 case R_ARM_ALU_PCREL7_0:
10190 case R_ARM_ALU_PCREL15_8:
10191 case R_ARM_ALU_PCREL23_15:
10192 {
10193 bfd_vma insn;
10194 bfd_vma relocation;
10195
10196 insn = bfd_get_32 (input_bfd, hit_data);
10197 if (globals->use_rel)
10198 {
10199 /* Extract the addend. */
10200 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
10201 signed_addend = addend;
10202 }
10203 relocation = value + signed_addend;
10204
10205 relocation -= (input_section->output_section->vma
10206 + input_section->output_offset
10207 + rel->r_offset);
10208 insn = (insn & ~0xfff)
10209 | ((howto->bitpos << 7) & 0xf00)
10210 | ((relocation >> howto->bitpos) & 0xff);
10211 bfd_put_32 (input_bfd, value, hit_data);
10212 }
10213 return bfd_reloc_ok;
10214
    case R_ARM_GNU_VTINHERIT:
    case R_ARM_GNU_VTENTRY:
      /* These relocations exist only to drive vtable garbage
	 collection; nothing is written to the output section.  */
      return bfd_reloc_ok;
10218
    case R_ARM_GOTOFF32:
      /* Relocation is relative to the start of the
	 global offset table.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* If we are addressing a Thumb function, we need to adjust the
	 address by one, so that attempts to call the function pointer will
	 correctly interpret it as Thumb code.  */
      if (branch_type == ST_BRANCH_TO_THUMB)
	value += 1;

      /* Note that sgot->output_offset is not involved in this
	 calculation.  We always want the start of .got.  If we
	 define _GLOBAL_OFFSET_TABLE in a different way, as is
	 permitted by the ABI, we might have to change this
	 calculation.  */
      value -= sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10242
    case R_ARM_GOTPC:
      /* Use global offset table as symbol value.  The resulting value
	 is the PC-relative address of the start of the .got output
	 section.  */
      BFD_ASSERT (sgot != NULL);

      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* The GOT base is known at final link time, so the relocation
	 is fully resolved here.  */
      *unresolved_reloc_p = FALSE;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10255
    case R_ARM_GOT32:
    case R_ARM_GOT_PREL:
      /* Relocation is to the entry for this symbol in the
	 global offset table.  The low bit of a recorded GOT offset is
	 used throughout as a "this entry has already been emitted"
	 flag and is masked off before use.  */
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      if (dynreloc_st_type == STT_GNU_IFUNC
	  && plt_offset != (bfd_vma) -1
	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
	{
	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
	     symbol, and the relocation resolves directly to the runtime
	     target rather than to the .iplt entry.  This means that any
	     .got entry would be the same value as the .igot.plt entry,
	     so there's no point creating both.  */
	  sgot = globals->root.igotplt;
	  value = sgot->output_offset + gotplt_offset;
	}
      else if (h != NULL)
	{
	  /* Global symbol: use (and possibly initialize) the GOT slot
	     recorded in the hash entry.  */
	  bfd_vma off;

	  off = h->got.offset;
	  BFD_ASSERT (off != (bfd_vma) -1);
	  if ((off & 1) != 0)
	    {
	      /* We have already processsed one GOT relocation against
		 this symbol.  */
	      off &= ~1;
	      if (globals->root.dynamic_sections_created
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		*unresolved_reloc_p = FALSE;
	    }
	  else
	    {
	      Elf_Internal_Rela outrel;

	      if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
		{
		  /* If the symbol doesn't resolve locally in a static
		     object, we have an undefined reference.  If the
		     symbol doesn't resolve locally in a dynamic object,
		     it should be resolved by the dynamic linker.  */
		  if (globals->root.dynamic_sections_created)
		    {
		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
		      *unresolved_reloc_p = FALSE;
		    }
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = 0;
		}
	      else
		{
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else if (bfd_link_pic (info) &&
			   (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			    || h->root.type != bfd_link_hash_undefweak))
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = dynreloc_value;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (outrel.r_addend != 0
		  && (outrel.r_info == 0 || globals->use_rel))
		{
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);
		  outrel.r_addend = 0;
		}

	      if (outrel.r_info != 0)
		{
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}
	      /* Mark this GOT entry as done.  */
	      h->got.offset |= 1;
	    }
	  value = sgot->output_offset + off;
	}
      else
	{
	  /* Local symbol: the GOT slot comes from the per-bfd
	     local_got_offsets array.  */
	  bfd_vma off;

	  BFD_ASSERT (local_got_offsets != NULL &&
		      local_got_offsets[r_symndx] != (bfd_vma) -1);

	  off = local_got_offsets[r_symndx];

	  /* The offset must always be a multiple of 4.  We use the
	     least significant bit to record whether we have already
	     generated the necessary reloc.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      if (globals->use_rel)
		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);

	      /* In PIC links (or for ifuncs) the runtime value is not
		 known, so emit a dynamic relocation for the slot.  */
	      if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
		{
		  Elf_Internal_Rela outrel;

		  outrel.r_addend = addend + dynreloc_value;
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      local_got_offsets[r_symndx] |= 1;
	    }

	  value = sgot->output_offset + off;
	}
      /* R_ARM_GOT32 is GOT-relative; the other types here want the
	 absolute address of the GOT entry.  */
      if (r_type != R_ARM_GOT32)
	value += sgot->output_section->vma;

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10388
    case R_ARM_TLS_LDO32:
      /* TLS local-dynamic offset: the symbol's offset from the start
	 of the module's TLS block (dtpoff).  */
      value = value - dtpoff_base (info);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10395
    case R_ARM_TLS_LDM32:
      /* TLS local-dynamic: GOT-relative address of the single shared
	 module-index GOT entry used by all LD accesses in the link.  */
      {
	bfd_vma off;

	if (sgot == NULL)
	  abort ();

	off = globals->tls_ldm_got.offset;

	/* Low bit set means the entry has already been initialized.  */
	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    /* If we don't know the module number, create a relocation
	       for it.  */
	    if (bfd_link_pic (info))
	      {
		Elf_Internal_Rela outrel;

		if (srelgot == NULL)
		  abort ();

		outrel.r_addend = 0;
		outrel.r_offset = (sgot->output_section->vma
				   + sgot->output_offset + off);
		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);

		if (globals->use_rel)
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);

		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	    else
	      /* Static/executable link: the module index of the
		 executable is always 1.  */
	      bfd_put_32 (output_bfd, 1, sgot->contents + off);

	    globals->tls_ldm_got.offset |= 1;
	  }

	/* PC-relative offset to the GOT entry.  */
	value = sgot->output_section->vma + sgot->output_offset + off
	  - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);

	return _bfd_final_link_relocate (howto, input_bfd, input_section,
					 contents, rel->r_offset, value,
					 rel->r_addend);
      }
10442
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_GD32:
    case R_ARM_TLS_IE32:
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      /* General-dynamic, initial-exec and TLS-descriptor accesses.
	 These share GOT-slot bookkeeping and may have been relaxed to
	 a cheaper model (note R_TYPE here is the possibly-relaxed
	 type, while rel->r_info holds the original one).  */
      {
	bfd_vma off, offplt;
	int indx = 0;
	char tls_type;

	BFD_ASSERT (sgot != NULL);

	if (h != NULL)
	  {
	    /* Global symbol: decide whether the dynamic linker must
	       resolve it, and fetch its GOT/tlsdesc offsets.  */
	    bfd_boolean dyn;
	    dyn = globals->root.dynamic_sections_created;
	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
						 bfd_link_pic (info),
						 h)
		&& (!bfd_link_pic (info)
		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
	      {
		*unresolved_reloc_p = FALSE;
		indx = h->dynindx;
	      }
	    off = h->got.offset;
	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
	  }
	else
	  {
	    /* Local symbol: offsets come from the per-bfd arrays.  */
	    BFD_ASSERT (local_got_offsets != NULL);
	    off = local_got_offsets[r_symndx];
	    offplt = local_tlsdesc_gotents[r_symndx];
	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
	  }

	/* Linker relaxations happens from one of the
	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
	if (ELF32_R_TYPE(rel->r_info) != r_type)
	  tls_type = GOT_TLS_IE;

	BFD_ASSERT (tls_type != GOT_UNKNOWN);

	/* Low bit set means the GOT entries were already emitted.  */
	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    bfd_boolean need_relocs = FALSE;
	    Elf_Internal_Rela outrel;
	    int cur_off = off;

	    /* The GOT entries have not been initialized yet.  Do it
	       now, and emit any relocations.  If both an IE GOT and a
	       GD GOT are necessary, we emit the GD first.  */

	    if ((bfd_link_pic (info) || indx != 0)
		&& (h == NULL
		    || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		    || h->root.type != bfd_link_hash_undefweak))
	      {
		need_relocs = TRUE;
		BFD_ASSERT (srelgot != NULL);
	      }

	    if (tls_type & GOT_TLS_GDESC)
	      {
		bfd_byte *loc;

		/* We should have relaxed, unless this is an undefined
		   weak symbol.  */
		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
			    || bfd_link_pic (info));
		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
			    <= globals->root.sgotplt->size);

		outrel.r_addend = 0;
		outrel.r_offset = (globals->root.sgotplt->output_section->vma
				   + globals->root.sgotplt->output_offset
				   + offplt
				   + globals->sgotplt_jump_table_size);

		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
		sreloc = globals->root.srelplt;
		loc = sreloc->contents;
		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
		BFD_ASSERT (loc + RELOC_SIZE (globals)
			    <= sreloc->contents + sreloc->size);

		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);

		/* For globals, the first word in the relocation gets
		   the relocation index and the top bit set, or zero,
		   if we're binding now.  For locals, it gets the
		   symbol's offset in the tls section.  */
		bfd_put_32 (output_bfd,
			    !h ? value - elf_hash_table (info)->tls_sec->vma
			    : info->flags & DF_BIND_NOW ? 0
			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size);

		/* Second word in the relocation is always zero.  */
		bfd_put_32 (output_bfd, 0,
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size + 4);
	      }
	    if (tls_type & GOT_TLS_GD)
	      {
		/* General-dynamic needs a (module, dtpoff) pair of
		   GOT words.  */
		if (need_relocs)
		  {
		    outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);

		    if (indx == 0)
		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
				  sgot->contents + cur_off + 4);
		    else
		      {
			outrel.r_addend = 0;
			outrel.r_info = ELF32_R_INFO (indx,
						      R_ARM_TLS_DTPOFF32);
			outrel.r_offset += 4;

			if (globals->use_rel)
			  bfd_put_32 (output_bfd, outrel.r_addend,
				      sgot->contents + cur_off + 4);

			elf32_arm_add_dynreloc (output_bfd, info,
						srelgot, &outrel);
		      }
		  }
		else
		  {
		    /* If we are not emitting relocations for a
		       general dynamic reference, then we must be in a
		       static link or an executable link with the
		       symbol binding locally.  Mark it as belonging
		       to module 1, the executable.  */
		    bfd_put_32 (output_bfd, 1,
				sgot->contents + cur_off);
		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
				sgot->contents + cur_off + 4);
		  }

		cur_off += 8;
	      }

	    if (tls_type & GOT_TLS_IE)
	      {
		/* Initial-exec needs a single tpoff GOT word.  */
		if (need_relocs)
		  {
		    if (indx == 0)
		      outrel.r_addend = value - dtpoff_base (info);
		    else
		      outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		  }
		else
		  bfd_put_32 (output_bfd, tpoff (info, value),
			      sgot->contents + cur_off);
		cur_off += 4;
	      }

	    /* Remember that the entries have been emitted.  */
	    if (h != NULL)
	      h->got.offset |= 1;
	    else
	      local_got_offsets[r_symndx] |= 1;
	  }

	/* When both GD and IE entries exist but this reference is not
	   GD, skip past the 8-byte GD pair to the IE word.  */
	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
	  off += 8;
	else if (tls_type & GOT_TLS_GDESC)
	  off = offplt;

	if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
	    || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
	  {
	    bfd_signed_vma offset;
	    /* TLS stubs are arm mode.  The original symbol is a
	       data object, so branch_type is bogus.  */
	    branch_type = ST_BRANCH_TO_ARM;
	    enum elf32_arm_stub_type stub_type
	      = arm_type_of_stub (info, input_section, rel,
				  st_type, &branch_type,
				  (struct elf32_arm_link_hash_entry *)h,
				  globals->tls_trampoline, globals->root.splt,
				  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		struct elf32_arm_stub_hash_entry *stub_entry
		  = elf32_arm_get_stub_entry
		  (input_section, globals->root.splt, 0, rel,
		   globals, stub_type);
		offset = (stub_entry->stub_offset
			  + stub_entry->stub_sec->output_offset
			  + stub_entry->stub_sec->output_section->vma);
	      }
	    else
	      offset = (globals->root.splt->output_section->vma
			+ globals->root.splt->output_offset
			+ globals->tls_trampoline);

	    if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
	      {
		/* ARM-mode BL/BLX to the trampoline: 24-bit word
		   offset, PC reads as insn address + 8.  */
		unsigned long inst;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8);

		inst = offset >> 2;
		inst &= 0x00ffffff;
		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
	      }
	    else
	      {
		/* Thumb blx encodes the offset in a complicated
		   fashion.  */
		unsigned upper_insn, lower_insn;
		unsigned neg;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 4);

		if (stub_type != arm_stub_none
		    && arm_stub_is_thumb (stub_type))
		  {
		    lower_insn = 0xd000;
		  }
		else
		  {
		    lower_insn = 0xc000;
		    /* Round up the offset to a word boundary.  */
		    offset = (offset + 2) & ~2;
		  }

		neg = offset < 0;
		upper_insn = (0xf000
			      | ((offset >> 12) & 0x3ff)
			      | (neg << 10));
		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
			      | ((offset >> 1) & 0x7ff);
		bfd_put_16 (input_bfd, upper_insn, hit_data);
		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
		return bfd_reloc_ok;
	      }
	  }
	/* These relocations needs special care, as besides the fact
	   they point somewhere in .gotplt, the addend must be
	   adjusted accordingly depending on the type of instruction
	   we refer to.  */
	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
	  {
	    unsigned long data, insn;
	    unsigned thumb;

	    /* The in-place data is a (mode-tagged) back-offset to the
	       instruction that consumes the descriptor address.  */
	    data = bfd_get_32 (input_bfd, hit_data);
	    thumb = data & 1;
	    data &= ~1u;

	    if (thumb)
	      {
		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
		/* Fetch the second halfword of a 32-bit Thumb insn.  */
		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
		  insn = (insn << 16)
		    | bfd_get_16 (input_bfd,
				  contents + rel->r_offset - data + 2);
		if ((insn & 0xf800c000) == 0xf000c000)
		  /* bl/blx */
		  value = -6;
		else if ((insn & 0xffffff00) == 0x4400)
		  /* add */
		  value = -5;
		else
		  {
		    (*_bfd_error_handler)
		      (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
		       input_bfd, input_section,
		       (unsigned long)rel->r_offset, insn);
		    return bfd_reloc_notsupported;
		  }
	      }
	    else
	      {
		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);

		switch (insn >> 24)
		  {
		  case 0xeb:  /* bl */
		  case 0xfa:  /* blx */
		    value = -4;
		    break;

		  case 0xe0:	/* add */
		    value = -8;
		    break;

		  default:
		    (*_bfd_error_handler)
		      (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
		       input_bfd, input_section,
		       (unsigned long)rel->r_offset, insn);
		    return bfd_reloc_notsupported;
		  }
	      }

	    /* PC-relative address of the tlsdesc slot in .gotplt,
	       biased by the pipeline adjustment computed above.  */
	    value += ((globals->root.sgotplt->output_section->vma
		       + globals->root.sgotplt->output_offset + off)
		      - (input_section->output_section->vma
			 + input_section->output_offset
			 + rel->r_offset)
		      + globals->sgotplt_jump_table_size);
	  }
	else
	  /* GD/IE: PC-relative address of the .got entry.  */
	  value = ((globals->root.sgot->output_section->vma
		    + globals->root.sgot->output_offset + off)
		   - (input_section->output_section->vma
		      + input_section->output_offset + rel->r_offset));

	return _bfd_final_link_relocate (howto, input_bfd, input_section,
					 contents, rel->r_offset, value,
					 rel->r_addend);
      }
10790
10791 case R_ARM_TLS_LE32:
10792 if (bfd_link_dll (info))
10793 {
10794 (*_bfd_error_handler)
10795 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
10796 input_bfd, input_section,
10797 (long) rel->r_offset, howto->name);
10798 return bfd_reloc_notsupported;
10799 }
10800 else
10801 value = tpoff (info, value);
10802
10803 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10804 contents, rel->r_offset, value,
10805 rel->r_addend);
10806
    case R_ARM_V4BX:
      /* Marker relocation for a BX instruction, used to rewrite BX for
	 ARMv4 targets that lack it.  fix_v4bx == 1 rewrites in place;
	 fix_v4bx == 2 redirects through a veneer.  */
      if (globals->fix_v4bx)
	{
	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	  /* Ensure that we have a BX instruction.  */
	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);

	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
	    {
	      /* Branch to veneer.  */
	      bfd_vma glue_addr;
	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
	      /* PC-relative branch offset; PC reads as insn + 8.  */
	      glue_addr -= input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8;
	      insn = (insn & 0xf0000000) | 0x0a000000
		     | ((glue_addr >> 2) & 0x00ffffff);
	    }
	  else
	    {
	      /* Preserve Rm (lowest four bits) and the condition code
		 (highest four bits).  Other bits encode MOV PC,Rm.  */
	      insn = (insn & 0xf000000f) | 0x01a0f000;
	    }

	  bfd_put_32 (input_bfd, insn, hit_data);
	}
      return bfd_reloc_ok;
10836
    case R_ARM_MOVW_ABS_NC:
    case R_ARM_MOVT_ABS:
    case R_ARM_MOVW_PREL_NC:
    case R_ARM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the group relocations.
       Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
       and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
    case R_ARM_MOVW_BREL_NC:
    case R_ARM_MOVW_BREL:
    case R_ARM_MOVT_BREL:
      /* ARM MOVW/MOVT: a 16-bit immediate split into imm4 (bits 16-19)
	 and imm12 (bits 0-11) of the instruction.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	if (globals->use_rel)
	  {
	    /* Reassemble the 16-bit addend from the split fields and
	       sign extend it.  */
	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	/* Only the non-NC (checked) MOVW flavour diagnoses overflow.  */
	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	/* Set the Thumb bit so the address calls Thumb code correctly.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* MOVT variants take the high half of the value.  */
	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
	    || r_type == R_ARM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfff0f000;
	insn |= value & 0xfff;
	insn |= (value & 0xf000) << 4;
	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
10879
    case R_ARM_THM_MOVW_ABS_NC:
    case R_ARM_THM_MOVT_ABS:
    case R_ARM_THM_MOVW_PREL_NC:
    case R_ARM_THM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the above relocations.
       Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
       R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
       as R_ARM_THM_MOVT_ABS.  */
    case R_ARM_THM_MOVW_BREL_NC:
    case R_ARM_THM_MOVW_BREL:
    case R_ARM_THM_MOVT_BREL:
      /* Thumb MOVW/MOVT: a 16-bit immediate split into imm4, i, imm3
	 and imm8 fields across the two halfwords.  */
      {
	bfd_vma insn;

	insn = bfd_get_16 (input_bfd, hit_data) << 16;
	insn |= bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    /* Reassemble the 16-bit addend from the imm4:i:imm3:imm8
	       fields and sign extend it.  */
	    addend = ((insn >> 4)  & 0xf000)
		   | ((insn >> 15) & 0x0800)
		   | ((insn >> 4)  & 0x0700)
		   | (insn	       & 0x00ff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	/* Only the non-NC (checked) MOVW flavour diagnoses overflow.  */
	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	/* Set the Thumb bit so the address calls Thumb code correctly.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* MOVT variants take the high half of the value.  */
	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
	    || r_type == R_ARM_THM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfbf08f00;
	insn |= (value & 0xf000) << 4;
	insn |= (value & 0x0800) << 15;
	insn |= (value & 0x0700) << 4;
	insn |= (value & 0x00ff);

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
      }
      return bfd_reloc_ok;
10933
    case R_ARM_ALU_PC_G0_NC:
    case R_ARM_ALU_PC_G1_NC:
    case R_ARM_ALU_PC_G0:
    case R_ARM_ALU_PC_G1:
    case R_ARM_ALU_PC_G2:
    case R_ARM_ALU_SB_G0_NC:
    case R_ARM_ALU_SB_G1_NC:
    case R_ARM_ALU_SB_G0:
    case R_ARM_ALU_SB_G1:
    case R_ARM_ALU_SB_G2:
      /* Group relocations for ADD/SUB instructions: the PC- or
	 SB-relative offset is split into successive rotated-immediate
	 "groups" (G0, G1, G2), one per instruction in the sequence.
	 The _NC variants do not check that the residual is zero.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_vma g_n;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which group of bits to select.  */
	switch (r_type)
	  {
	  case R_ARM_ALU_PC_G0_NC:
	  case R_ARM_ALU_PC_G0:
	  case R_ARM_ALU_SB_G0_NC:
	  case R_ARM_ALU_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_ALU_PC_G1_NC:
	  case R_ARM_ALU_PC_G1:
	  case R_ARM_ALU_SB_G1_NC:
	  case R_ARM_ALU_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_ALU_PC_G2:
	  case R_ARM_ALU_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    int negative;
	    bfd_vma constant = insn & 0xff;
	    bfd_vma rotation = (insn & 0xf00) >> 8;

	    if (rotation == 0)
	      signed_addend = constant;
	    else
	      {
		/* Compensate for the fact that in the instruction, the
		   rotation is stored in multiples of 2 bits.  */
		rotation *= 2;

		/* Rotate "constant" right by "rotation" bits.  */
		signed_addend = (constant >> rotation) |
				(constant << (8 * sizeof (bfd_vma) - rotation));
	      }

	    /* Determine if the instruction is an ADD or a SUB.
	       (For REL, this determines the sign of the addend.)  */
	    negative = identify_add_or_sub (insn);
	    if (negative == 0)
	      {
		(*_bfd_error_handler)
		  (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
		   input_bfd, input_section,
		   (long) rel->r_offset, howto->name);
		return bfd_reloc_overflow;
	      }

	    signed_addend *= negative;
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_ALU_PC_G0_NC
	    || r_type == R_ARM_ALU_PC_G1_NC
	    || r_type == R_ARM_ALU_PC_G0
	    || r_type == R_ARM_ALU_PC_G1
	    || r_type == R_ARM_ALU_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* If the target symbol is a Thumb function, then set the
	   Thumb bit in the address.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  signed_value |= 1;

	/* Calculate the value of the relevant G_n, in encoded
	   constant-with-rotation format.  */
	g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
					  group, &residual);

	/* Check for overflow if required.  */
	if ((r_type == R_ARM_ALU_PC_G0
	     || r_type == R_ARM_ALU_PC_G1
	     || r_type == R_ARM_ALU_PC_G2
	     || r_type == R_ARM_ALU_SB_G0
	     || r_type == R_ARM_ALU_SB_G1
	     || r_type == R_ARM_ALU_SB_G2) && residual != 0)
	  {
	    (*_bfd_error_handler)
	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
	       input_bfd, input_section,
	       (long) rel->r_offset, signed_value < 0 ? - signed_value : signed_value,
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and the ADD/SUB part of the opcode; take care
	   not to destroy the S bit.  */
	insn &= 0xff1ff000;

	/* Set the opcode according to whether the value to go in the
	   place is negative.  */
	if (signed_value < 0)
	  insn |= 1 << 22;
	else
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= g_n;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
11072
    case R_ARM_LDR_PC_G0:
    case R_ARM_LDR_PC_G1:
    case R_ARM_LDR_PC_G2:
    case R_ARM_LDR_SB_G0:
    case R_ARM_LDR_SB_G1:
    case R_ARM_LDR_SB_G2:
      /* Group relocations for LDR/STR (word/byte): the instruction
	 takes the residual left after the preceding groups, as a
	 12-bit unsigned offset with a separate U (add/subtract) bit.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDR_PC_G0:
	  case R_ARM_LDR_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDR_PC_G1:
	  case R_ARM_LDR_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDR_PC_G2:
	  case R_ARM_LDR_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    /* The U bit (23) gives the sign of the 12-bit offset.  */
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * (insn & 0xfff);
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDR_PC_G0
	    || r_type == R_ARM_LDR_PC_G1
	    || r_type == R_ARM_LDR_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  */
	if (residual >= 0x1000)
	  {
	    (*_bfd_error_handler)
	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
	       input_bfd, input_section,
	       (long) rel->r_offset, labs (signed_value), howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7ff000;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= residual;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
11157
11158 case R_ARM_LDRS_PC_G0:
11159 case R_ARM_LDRS_PC_G1:
11160 case R_ARM_LDRS_PC_G2:
11161 case R_ARM_LDRS_SB_G0:
11162 case R_ARM_LDRS_SB_G1:
11163 case R_ARM_LDRS_SB_G2:
11164 {
11165 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11166 bfd_vma pc = input_section->output_section->vma
11167 + input_section->output_offset + rel->r_offset;
11168 /* sb is the origin of the *segment* containing the symbol. */
11169 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11170 bfd_vma residual;
11171 bfd_signed_vma signed_value;
11172 int group = 0;
11173
11174 /* Determine which groups of bits to calculate. */
11175 switch (r_type)
11176 {
11177 case R_ARM_LDRS_PC_G0:
11178 case R_ARM_LDRS_SB_G0:
11179 group = 0;
11180 break;
11181
11182 case R_ARM_LDRS_PC_G1:
11183 case R_ARM_LDRS_SB_G1:
11184 group = 1;
11185 break;
11186
11187 case R_ARM_LDRS_PC_G2:
11188 case R_ARM_LDRS_SB_G2:
11189 group = 2;
11190 break;
11191
11192 default:
11193 abort ();
11194 }
11195
11196 /* If REL, extract the addend from the insn. If RELA, it will
11197 have already been fetched for us. */
11198 if (globals->use_rel)
11199 {
11200 int negative = (insn & (1 << 23)) ? 1 : -1;
11201 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
11202 }
11203
11204 /* Compute the value (X) to go in the place. */
11205 if (r_type == R_ARM_LDRS_PC_G0
11206 || r_type == R_ARM_LDRS_PC_G1
11207 || r_type == R_ARM_LDRS_PC_G2)
11208 /* PC relative. */
11209 signed_value = value - pc + signed_addend;
11210 else
11211 /* Section base relative. */
11212 signed_value = value - sb + signed_addend;
11213
11214 /* Calculate the value of the relevant G_{n-1} to obtain
11215 the residual at that stage. */
11216 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11217 group - 1, &residual);
11218
11219 /* Check for overflow. */
11220 if (residual >= 0x100)
11221 {
11222 (*_bfd_error_handler)
11223 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11224 input_bfd, input_section,
11225 (long) rel->r_offset, labs (signed_value), howto->name);
11226 return bfd_reloc_overflow;
11227 }
11228
11229 /* Mask out the value and U bit. */
11230 insn &= 0xff7ff0f0;
11231
11232 /* Set the U bit if the value to go in the place is non-negative. */
11233 if (signed_value >= 0)
11234 insn |= 1 << 23;
11235
11236 /* Encode the offset. */
11237 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
11238
11239 bfd_put_32 (input_bfd, insn, hit_data);
11240 }
11241 return bfd_reloc_ok;
11242
11243 case R_ARM_LDC_PC_G0:
11244 case R_ARM_LDC_PC_G1:
11245 case R_ARM_LDC_PC_G2:
11246 case R_ARM_LDC_SB_G0:
11247 case R_ARM_LDC_SB_G1:
11248 case R_ARM_LDC_SB_G2:
11249 {
11250 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11251 bfd_vma pc = input_section->output_section->vma
11252 + input_section->output_offset + rel->r_offset;
11253 /* sb is the origin of the *segment* containing the symbol. */
11254 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11255 bfd_vma residual;
11256 bfd_signed_vma signed_value;
11257 int group = 0;
11258
11259 /* Determine which groups of bits to calculate. */
11260 switch (r_type)
11261 {
11262 case R_ARM_LDC_PC_G0:
11263 case R_ARM_LDC_SB_G0:
11264 group = 0;
11265 break;
11266
11267 case R_ARM_LDC_PC_G1:
11268 case R_ARM_LDC_SB_G1:
11269 group = 1;
11270 break;
11271
11272 case R_ARM_LDC_PC_G2:
11273 case R_ARM_LDC_SB_G2:
11274 group = 2;
11275 break;
11276
11277 default:
11278 abort ();
11279 }
11280
11281 /* If REL, extract the addend from the insn. If RELA, it will
11282 have already been fetched for us. */
11283 if (globals->use_rel)
11284 {
11285 int negative = (insn & (1 << 23)) ? 1 : -1;
11286 signed_addend = negative * ((insn & 0xff) << 2);
11287 }
11288
11289 /* Compute the value (X) to go in the place. */
11290 if (r_type == R_ARM_LDC_PC_G0
11291 || r_type == R_ARM_LDC_PC_G1
11292 || r_type == R_ARM_LDC_PC_G2)
11293 /* PC relative. */
11294 signed_value = value - pc + signed_addend;
11295 else
11296 /* Section base relative. */
11297 signed_value = value - sb + signed_addend;
11298
11299 /* Calculate the value of the relevant G_{n-1} to obtain
11300 the residual at that stage. */
11301 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11302 group - 1, &residual);
11303
11304 /* Check for overflow. (The absolute value to go in the place must be
11305 divisible by four and, after having been divided by four, must
11306 fit in eight bits.) */
11307 if ((residual & 0x3) != 0 || residual >= 0x400)
11308 {
11309 (*_bfd_error_handler)
11310 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11311 input_bfd, input_section,
11312 (long) rel->r_offset, labs (signed_value), howto->name);
11313 return bfd_reloc_overflow;
11314 }
11315
11316 /* Mask out the value and U bit. */
11317 insn &= 0xff7fff00;
11318
11319 /* Set the U bit if the value to go in the place is non-negative. */
11320 if (signed_value >= 0)
11321 insn |= 1 << 23;
11322
11323 /* Encode the offset. */
11324 insn |= residual >> 2;
11325
11326 bfd_put_32 (input_bfd, insn, hit_data);
11327 }
11328 return bfd_reloc_ok;
11329
11330 case R_ARM_THM_ALU_ABS_G0_NC:
11331 case R_ARM_THM_ALU_ABS_G1_NC:
11332 case R_ARM_THM_ALU_ABS_G2_NC:
11333 case R_ARM_THM_ALU_ABS_G3_NC:
11334 {
11335 const int shift_array[4] = {0, 8, 16, 24};
11336 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
11337 bfd_vma addr = value;
11338 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
11339
11340 /* Compute address. */
11341 if (globals->use_rel)
11342 signed_addend = insn & 0xff;
11343 addr += signed_addend;
11344 if (branch_type == ST_BRANCH_TO_THUMB)
11345 addr |= 1;
11346 /* Clean imm8 insn. */
11347 insn &= 0xff00;
11348 /* And update with correct part of address. */
11349 insn |= (addr >> shift) & 0xff;
11350 /* Update insn. */
11351 bfd_put_16 (input_bfd, insn, hit_data);
11352 }
11353
11354 *unresolved_reloc_p = FALSE;
11355 return bfd_reloc_ok;
11356
11357 default:
11358 return bfd_reloc_notsupported;
11359 }
11360 }
11361
11362 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
11363 static void
11364 arm_add_to_rel (bfd * abfd,
11365 bfd_byte * address,
11366 reloc_howto_type * howto,
11367 bfd_signed_vma increment)
11368 {
11369 bfd_signed_vma addend;
11370
11371 if (howto->type == R_ARM_THM_CALL
11372 || howto->type == R_ARM_THM_JUMP24)
11373 {
11374 int upper_insn, lower_insn;
11375 int upper, lower;
11376
11377 upper_insn = bfd_get_16 (abfd, address);
11378 lower_insn = bfd_get_16 (abfd, address + 2);
11379 upper = upper_insn & 0x7ff;
11380 lower = lower_insn & 0x7ff;
11381
11382 addend = (upper << 12) | (lower << 1);
11383 addend += increment;
11384 addend >>= 1;
11385
11386 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
11387 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
11388
11389 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
11390 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
11391 }
11392 else
11393 {
11394 bfd_vma contents;
11395
11396 contents = bfd_get_32 (abfd, address);
11397
11398 /* Get the (signed) value from the instruction. */
11399 addend = contents & howto->src_mask;
11400 if (addend & ((howto->src_mask + 1) >> 1))
11401 {
11402 bfd_signed_vma mask;
11403
11404 mask = -1;
11405 mask &= ~ howto->src_mask;
11406 addend |= mask;
11407 }
11408
11409 /* Add in the increment, (which is a byte value). */
11410 switch (howto->type)
11411 {
11412 default:
11413 addend += increment;
11414 break;
11415
11416 case R_ARM_PC24:
11417 case R_ARM_PLT32:
11418 case R_ARM_CALL:
11419 case R_ARM_JUMP24:
11420 addend <<= howto->size;
11421 addend += increment;
11422
11423 /* Should we check for overflow here ? */
11424
11425 /* Drop any undesired bits. */
11426 addend >>= howto->rightshift;
11427 break;
11428 }
11429
11430 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
11431
11432 bfd_put_32 (abfd, contents, address);
11433 }
11434 }
11435
/* Nonzero if R_TYPE is any of the ARM TLS relocations, including the
   GNU descriptor-based dialect tested by IS_ARM_TLS_GNU_RELOC.  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
11454
/* Relocate an ARM ELF section.

   OUTPUT_BFD is the linker output.  INPUT_SECTION, from INPUT_BFD, is
   the section being relocated; CONTENTS is its byte image.  RELOCS,
   LOCAL_SYMS and LOCAL_SECTIONS describe INPUT_BFD's relocations and
   local symbols.  Returns FALSE on unrecoverable error.  */

static bfd_boolean
elf32_arm_relocate_section (bfd *                  output_bfd,
			    struct bfd_link_info * info,
			    bfd *                  input_bfd,
			    asection *             input_section,
			    bfd_byte *             contents,
			    Elf_Internal_Rela *    relocs,
			    Elf_Internal_Sym *     local_syms,
			    asection **            local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type * howto;
      unsigned long r_symndx;
      Elf_Internal_Sym * sym;
      asection * sec;
      struct elf_link_hash_entry * h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bfd_boolean unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);

      /* VTENTRY/VTINHERIT are garbage-collection markers only; they
	 produce no output.  */
      if (   r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      /* Look up the howto for this relocation type.  */
      bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
      howto = bfd_reloc.howto;

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Relocation against a local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    (*info->callbacks->undefined_symbol)
	      (info, bfd_elf_string_from_elf_section
	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
	       input_bfd, input_section,
	       rel->r_offset, TRUE);

	  if (globals->use_rel)
	    {
	      /* REL target: the addend lives in the section contents.
		 For SEC_MERGE section symbols, decode the in-place
		 addend, let _bfd_elf_rel_local_sym remap the symbol
		 through the merge, and re-encode the adjusted addend.  */
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  asection *msec;
		  bfd_vma addend, value;

		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      /* Only simple (unshifted, contiguous-mask) howtos
			 can be decoded generically below.  */
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  (*_bfd_error_handler)
			    (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (long) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Relocation against a global symbol.  */
	  bfd_boolean warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      /* Pick a symbol name for diagnostics, falling back to the
	 section name for unnamed local symbols.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (input_bfd, sec);
	}

      /* Diagnose a TLS reloc used with a non-TLS symbol, or vice versa.  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  (*_bfd_error_handler)
	    ((sym_type == STT_TLS
	      ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
	      : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if (!h || h->root.type != bfd_link_hash_undefined)
		(*info->callbacks->reloc_overflow)
		  (info, (h ? &h->root : NULL), name, howto->name,
		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, name, input_bfd, input_section, rel->r_offset, TRUE);
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      (*info->callbacks->reloc_dangerous)
		(info, error_message, input_bfd, input_section, rel->r_offset);
	      break;
	    }
	}
    }

  return TRUE;
}
11792
11793 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
11794 adds the edit to the start of the list. (The list must be built in order of
11795 ascending TINDEX: the function's callers are primarily responsible for
11796 maintaining that condition). */
11797
11798 static void
11799 add_unwind_table_edit (arm_unwind_table_edit **head,
11800 arm_unwind_table_edit **tail,
11801 arm_unwind_edit_type type,
11802 asection *linked_section,
11803 unsigned int tindex)
11804 {
11805 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
11806 xmalloc (sizeof (arm_unwind_table_edit));
11807
11808 new_edit->type = type;
11809 new_edit->linked_section = linked_section;
11810 new_edit->index = tindex;
11811
11812 if (tindex > 0)
11813 {
11814 new_edit->next = NULL;
11815
11816 if (*tail)
11817 (*tail)->next = new_edit;
11818
11819 (*tail) = new_edit;
11820
11821 if (!*head)
11822 (*head) = new_edit;
11823 }
11824 else
11825 {
11826 new_edit->next = *head;
11827
11828 if (!*tail)
11829 *tail = new_edit;
11830
11831 *head = new_edit;
11832 }
11833 }
11834
11835 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
11836
11837 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
11838 static void
11839 adjust_exidx_size(asection *exidx_sec, int adjust)
11840 {
11841 asection *out_sec;
11842
11843 if (!exidx_sec->rawsize)
11844 exidx_sec->rawsize = exidx_sec->size;
11845
11846 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
11847 out_sec = exidx_sec->output_section;
11848 /* Adjust size of output section. */
11849 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
11850 }
11851
11852 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
11853 static void
11854 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
11855 {
11856 struct _arm_elf_section_data *exidx_arm_data;
11857
11858 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
11859 add_unwind_table_edit (
11860 &exidx_arm_data->u.exidx.unwind_edit_list,
11861 &exidx_arm_data->u.exidx.unwind_edit_tail,
11862 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
11863
11864 exidx_arm_data->additional_reloc_count++;
11865
11866 adjust_exidx_size(exidx_sec, 8);
11867 }
11868
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

     1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
     2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
	codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   TEXT_SECTION_ORDER holds NUM_TEXT_SECTIONS text sections sorted by
   ascending VMA.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  int last_unwind_type = -1;	/* -1: none seen, 0: CANTUNWIND, 1: inlined,
				   2: normal table entry.  */

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Scan the table two words (one entry) at a time, classifying each
	 entry by its second word and recording deletions of redundant
	 entries.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
12053
12054 static bfd_boolean
12055 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
12056 bfd *ibfd, const char *name)
12057 {
12058 asection *sec, *osec;
12059
12060 sec = bfd_get_linker_section (ibfd, name);
12061 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
12062 return TRUE;
12063
12064 osec = sec->output_section;
12065 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
12066 return TRUE;
12067
12068 if (! bfd_set_section_contents (obfd, osec, sec->contents,
12069 sec->output_offset, sec->size))
12070 return FALSE;
12071
12072 return TRUE;
12073 }
12074
12075 static bfd_boolean
12076 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
12077 {
12078 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
12079 asection *sec, *osec;
12080
12081 if (globals == NULL)
12082 return FALSE;
12083
12084 /* Invoke the regular ELF backend linker to do all the work. */
12085 if (!bfd_elf_final_link (abfd, info))
12086 return FALSE;
12087
12088 /* Process stub sections (eg BE8 encoding, ...). */
12089 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
12090 unsigned int i;
12091 for (i=0; i<htab->top_id; i++)
12092 {
12093 sec = htab->stub_group[i].stub_sec;
12094 /* Only process it once, in its link_sec slot. */
12095 if (sec && i == htab->stub_group[i].link_sec->id)
12096 {
12097 osec = sec->output_section;
12098 elf32_arm_write_section (abfd, info, sec, sec->contents);
12099 if (! bfd_set_section_contents (abfd, osec, sec->contents,
12100 sec->output_offset, sec->size))
12101 return FALSE;
12102 }
12103 }
12104
12105 /* Write out any glue sections now that we have created all the
12106 stubs. */
12107 if (globals->bfd_of_glue_owner != NULL)
12108 {
12109 if (! elf32_arm_output_glue_section (info, abfd,
12110 globals->bfd_of_glue_owner,
12111 ARM2THUMB_GLUE_SECTION_NAME))
12112 return FALSE;
12113
12114 if (! elf32_arm_output_glue_section (info, abfd,
12115 globals->bfd_of_glue_owner,
12116 THUMB2ARM_GLUE_SECTION_NAME))
12117 return FALSE;
12118
12119 if (! elf32_arm_output_glue_section (info, abfd,
12120 globals->bfd_of_glue_owner,
12121 VFP11_ERRATUM_VENEER_SECTION_NAME))
12122 return FALSE;
12123
12124 if (! elf32_arm_output_glue_section (info, abfd,
12125 globals->bfd_of_glue_owner,
12126 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
12127 return FALSE;
12128
12129 if (! elf32_arm_output_glue_section (info, abfd,
12130 globals->bfd_of_glue_owner,
12131 ARM_BX_GLUE_SECTION_NAME))
12132 return FALSE;
12133 }
12134
12135 return TRUE;
12136 }
12137
12138 /* Return a best guess for the machine number based on the attributes. */
12139
12140 static unsigned int
12141 bfd_arm_get_mach_from_attributes (bfd * abfd)
12142 {
12143 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
12144
12145 switch (arch)
12146 {
12147 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
12148 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
12149 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
12150
12151 case TAG_CPU_ARCH_V5TE:
12152 {
12153 char * name;
12154
12155 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
12156 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
12157
12158 if (name)
12159 {
12160 if (strcmp (name, "IWMMXT2") == 0)
12161 return bfd_mach_arm_iWMMXt2;
12162
12163 if (strcmp (name, "IWMMXT") == 0)
12164 return bfd_mach_arm_iWMMXt;
12165
12166 if (strcmp (name, "XSCALE") == 0)
12167 {
12168 int wmmx;
12169
12170 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
12171 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
12172 switch (wmmx)
12173 {
12174 case 1: return bfd_mach_arm_iWMMXt;
12175 case 2: return bfd_mach_arm_iWMMXt2;
12176 default: return bfd_mach_arm_XScale;
12177 }
12178 }
12179 }
12180
12181 return bfd_mach_arm_5TE;
12182 }
12183
12184 default:
12185 return bfd_mach_arm_unknown;
12186 }
12187 }
12188
12189 /* Set the right machine number. */
12190
12191 static bfd_boolean
12192 elf32_arm_object_p (bfd *abfd)
12193 {
12194 unsigned int mach;
12195
12196 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
12197
12198 if (mach == bfd_mach_arm_unknown)
12199 {
12200 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
12201 mach = bfd_mach_arm_ep9312;
12202 else
12203 mach = bfd_arm_get_mach_from_attributes (abfd);
12204 }
12205
12206 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
12207 return TRUE;
12208 }
12209
12210 /* Function to keep ARM specific flags in the ELF header. */
12211
12212 static bfd_boolean
12213 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
12214 {
12215 if (elf_flags_init (abfd)
12216 && elf_elfheader (abfd)->e_flags != flags)
12217 {
12218 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
12219 {
12220 if (flags & EF_ARM_INTERWORK)
12221 (*_bfd_error_handler)
12222 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
12223 abfd);
12224 else
12225 _bfd_error_handler
12226 (_("Warning: Clearing the interworking flag of %B due to outside request"),
12227 abfd);
12228 }
12229 }
12230 else
12231 {
12232 elf_elfheader (abfd)->e_flags = flags;
12233 elf_flags_init (abfd) = TRUE;
12234 }
12235
12236 return TRUE;
12237 }
12238
12239 /* Copy backend specific data from one object module to another. */
12240
12241 static bfd_boolean
12242 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
12243 {
12244 flagword in_flags;
12245 flagword out_flags;
12246
12247 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
12248 return TRUE;
12249
12250 in_flags = elf_elfheader (ibfd)->e_flags;
12251 out_flags = elf_elfheader (obfd)->e_flags;
12252
12253 if (elf_flags_init (obfd)
12254 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
12255 && in_flags != out_flags)
12256 {
12257 /* Cannot mix APCS26 and APCS32 code. */
12258 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
12259 return FALSE;
12260
12261 /* Cannot mix float APCS and non-float APCS code. */
12262 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
12263 return FALSE;
12264
12265 /* If the src and dest have different interworking flags
12266 then turn off the interworking bit. */
12267 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
12268 {
12269 if (out_flags & EF_ARM_INTERWORK)
12270 _bfd_error_handler
12271 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
12272 obfd, ibfd);
12273
12274 in_flags &= ~EF_ARM_INTERWORK;
12275 }
12276
12277 /* Likewise for PIC, though don't warn for this case. */
12278 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
12279 in_flags &= ~EF_ARM_PIC;
12280 }
12281
12282 elf_elfheader (obfd)->e_flags = in_flags;
12283 elf_flags_init (obfd) = TRUE;
12284
12285 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
12286 }
12287
/* Values for Tag_ABI_PCS_R9_use.  */
enum
{
  AEABI_R9_V6,		/* R9 used as an ordinary callee-saved register.  */
  AEABI_R9_SB,		/* R9 used as SB (static base).  */
  AEABI_R9_TLS,		/* R9 used as the thread-local storage pointer.  */
  AEABI_R9_unused	/* Producer makes no use of R9.  */
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,	/* RW static data addressed absolutely.  */
  AEABI_PCS_RW_data_PCrel,	/* RW static data addressed PC-relative.  */
  AEABI_PCS_RW_data_SBrel,	/* RW static data addressed SB-relative
				   (requires R9 as SB, see above).  */
  AEABI_PCS_RW_data_unused	/* Producer uses no RW static data.  */
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,		/* Producer uses no enums.  */
  AEABI_enum_short,		/* Enums stored in smallest container.  */
  AEABI_enum_wide,		/* Enums stored in 32-bit containers.  */
  AEABI_enum_forced_wide	/* As wide, and interoperable with either.  */
};
12314
12315 /* Determine whether an object attribute tag takes an integer, a
12316 string or both. */
12317
12318 static int
12319 elf32_arm_obj_attrs_arg_type (int tag)
12320 {
12321 if (tag == Tag_compatibility)
12322 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
12323 else if (tag == Tag_nodefaults)
12324 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
12325 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
12326 return ATTR_TYPE_FLAG_STR_VAL;
12327 else if (tag < 32)
12328 return ATTR_TYPE_FLAG_INT_VAL;
12329 else
12330 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
12331 }
12332
12333 /* The ABI defines that Tag_conformance should be emitted first, and that
12334 Tag_nodefaults should be second (if either is defined). This sets those
12335 two positions, and bumps up the position of all the remaining tags to
12336 compensate. */
12337 static int
12338 elf32_arm_obj_attrs_order (int num)
12339 {
12340 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
12341 return Tag_conformance;
12342 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
12343 return Tag_nodefaults;
12344 if ((num - 2) < Tag_nodefaults)
12345 return num - 2;
12346 if ((num - 1) < Tag_conformance)
12347 return num - 1;
12348 return num;
12349 }
12350
12351 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
12352 static bfd_boolean
12353 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
12354 {
12355 if ((tag & 127) < 64)
12356 {
12357 _bfd_error_handler
12358 (_("%B: Unknown mandatory EABI object attribute %d"),
12359 abfd, tag);
12360 bfd_set_error (bfd_error_bad_value);
12361 return FALSE;
12362 }
12363 else
12364 {
12365 _bfd_error_handler
12366 (_("Warning: %B: Unknown EABI object attribute %d"),
12367 abfd, tag);
12368 return TRUE;
12369 }
12370 }
12371
12372 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
12373 Returns -1 if no architecture could be read. */
12374
12375 static int
12376 get_secondary_compatible_arch (bfd *abfd)
12377 {
12378 obj_attribute *attr =
12379 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12380
12381 /* Note: the tag and its argument below are uleb128 values, though
12382 currently-defined values fit in one byte for each. */
12383 if (attr->s
12384 && attr->s[0] == Tag_CPU_arch
12385 && (attr->s[1] & 128) != 128
12386 && attr->s[2] == 0)
12387 return attr->s[1];
12388
12389 /* This tag is "safely ignorable", so don't complain if it looks funny. */
12390 return -1;
12391 }
12392
12393 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
12394 The tag is removed if ARCH is -1. */
12395
12396 static void
12397 set_secondary_compatible_arch (bfd *abfd, int arch)
12398 {
12399 obj_attribute *attr =
12400 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12401
12402 if (arch == -1)
12403 {
12404 attr->s = NULL;
12405 return;
12406 }
12407
12408 /* Note: the tag and its argument below are uleb128 values, though
12409 currently-defined values fit in one byte for each. */
12410 if (!attr->s)
12411 attr->s = (char *) bfd_alloc (abfd, 3);
12412 attr->s[0] = Tag_CPU_arch;
12413 attr->s[1] = arch;
12414 attr->s[2] = '\0';
12415 }
12416
12417 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
12418 into account. */
12419
12420 static int
12421 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
12422 int newtag, int secondary_compat)
12423 {
12424 #define T(X) TAG_CPU_ARCH_##X
12425 int tagl, tagh, result;
12426 const int v6t2[] =
12427 {
12428 T(V6T2), /* PRE_V4. */
12429 T(V6T2), /* V4. */
12430 T(V6T2), /* V4T. */
12431 T(V6T2), /* V5T. */
12432 T(V6T2), /* V5TE. */
12433 T(V6T2), /* V5TEJ. */
12434 T(V6T2), /* V6. */
12435 T(V7), /* V6KZ. */
12436 T(V6T2) /* V6T2. */
12437 };
12438 const int v6k[] =
12439 {
12440 T(V6K), /* PRE_V4. */
12441 T(V6K), /* V4. */
12442 T(V6K), /* V4T. */
12443 T(V6K), /* V5T. */
12444 T(V6K), /* V5TE. */
12445 T(V6K), /* V5TEJ. */
12446 T(V6K), /* V6. */
12447 T(V6KZ), /* V6KZ. */
12448 T(V7), /* V6T2. */
12449 T(V6K) /* V6K. */
12450 };
12451 const int v7[] =
12452 {
12453 T(V7), /* PRE_V4. */
12454 T(V7), /* V4. */
12455 T(V7), /* V4T. */
12456 T(V7), /* V5T. */
12457 T(V7), /* V5TE. */
12458 T(V7), /* V5TEJ. */
12459 T(V7), /* V6. */
12460 T(V7), /* V6KZ. */
12461 T(V7), /* V6T2. */
12462 T(V7), /* V6K. */
12463 T(V7) /* V7. */
12464 };
12465 const int v6_m[] =
12466 {
12467 -1, /* PRE_V4. */
12468 -1, /* V4. */
12469 T(V6K), /* V4T. */
12470 T(V6K), /* V5T. */
12471 T(V6K), /* V5TE. */
12472 T(V6K), /* V5TEJ. */
12473 T(V6K), /* V6. */
12474 T(V6KZ), /* V6KZ. */
12475 T(V7), /* V6T2. */
12476 T(V6K), /* V6K. */
12477 T(V7), /* V7. */
12478 T(V6_M) /* V6_M. */
12479 };
12480 const int v6s_m[] =
12481 {
12482 -1, /* PRE_V4. */
12483 -1, /* V4. */
12484 T(V6K), /* V4T. */
12485 T(V6K), /* V5T. */
12486 T(V6K), /* V5TE. */
12487 T(V6K), /* V5TEJ. */
12488 T(V6K), /* V6. */
12489 T(V6KZ), /* V6KZ. */
12490 T(V7), /* V6T2. */
12491 T(V6K), /* V6K. */
12492 T(V7), /* V7. */
12493 T(V6S_M), /* V6_M. */
12494 T(V6S_M) /* V6S_M. */
12495 };
12496 const int v7e_m[] =
12497 {
12498 -1, /* PRE_V4. */
12499 -1, /* V4. */
12500 T(V7E_M), /* V4T. */
12501 T(V7E_M), /* V5T. */
12502 T(V7E_M), /* V5TE. */
12503 T(V7E_M), /* V5TEJ. */
12504 T(V7E_M), /* V6. */
12505 T(V7E_M), /* V6KZ. */
12506 T(V7E_M), /* V6T2. */
12507 T(V7E_M), /* V6K. */
12508 T(V7E_M), /* V7. */
12509 T(V7E_M), /* V6_M. */
12510 T(V7E_M), /* V6S_M. */
12511 T(V7E_M) /* V7E_M. */
12512 };
12513 const int v8[] =
12514 {
12515 T(V8), /* PRE_V4. */
12516 T(V8), /* V4. */
12517 T(V8), /* V4T. */
12518 T(V8), /* V5T. */
12519 T(V8), /* V5TE. */
12520 T(V8), /* V5TEJ. */
12521 T(V8), /* V6. */
12522 T(V8), /* V6KZ. */
12523 T(V8), /* V6T2. */
12524 T(V8), /* V6K. */
12525 T(V8), /* V7. */
12526 T(V8), /* V6_M. */
12527 T(V8), /* V6S_M. */
12528 T(V8), /* V7E_M. */
12529 T(V8) /* V8. */
12530 };
12531 const int v8m_baseline[] =
12532 {
12533 -1, /* PRE_V4. */
12534 -1, /* V4. */
12535 -1, /* V4T. */
12536 -1, /* V5T. */
12537 -1, /* V5TE. */
12538 -1, /* V5TEJ. */
12539 -1, /* V6. */
12540 -1, /* V6KZ. */
12541 -1, /* V6T2. */
12542 -1, /* V6K. */
12543 -1, /* V7. */
12544 T(V8M_BASE), /* V6_M. */
12545 T(V8M_BASE), /* V6S_M. */
12546 -1, /* V7E_M. */
12547 -1, /* V8. */
12548 -1,
12549 T(V8M_BASE) /* V8-M BASELINE. */
12550 };
12551 const int v8m_mainline[] =
12552 {
12553 -1, /* PRE_V4. */
12554 -1, /* V4. */
12555 -1, /* V4T. */
12556 -1, /* V5T. */
12557 -1, /* V5TE. */
12558 -1, /* V5TEJ. */
12559 -1, /* V6. */
12560 -1, /* V6KZ. */
12561 -1, /* V6T2. */
12562 -1, /* V6K. */
12563 T(V8M_MAIN), /* V7. */
12564 T(V8M_MAIN), /* V6_M. */
12565 T(V8M_MAIN), /* V6S_M. */
12566 T(V8M_MAIN), /* V7E_M. */
12567 -1, /* V8. */
12568 -1,
12569 T(V8M_MAIN), /* V8-M BASELINE. */
12570 T(V8M_MAIN) /* V8-M MAINLINE. */
12571 };
12572 const int v4t_plus_v6_m[] =
12573 {
12574 -1, /* PRE_V4. */
12575 -1, /* V4. */
12576 T(V4T), /* V4T. */
12577 T(V5T), /* V5T. */
12578 T(V5TE), /* V5TE. */
12579 T(V5TEJ), /* V5TEJ. */
12580 T(V6), /* V6. */
12581 T(V6KZ), /* V6KZ. */
12582 T(V6T2), /* V6T2. */
12583 T(V6K), /* V6K. */
12584 T(V7), /* V7. */
12585 T(V6_M), /* V6_M. */
12586 T(V6S_M), /* V6S_M. */
12587 T(V7E_M), /* V7E_M. */
12588 T(V8), /* V8. */
12589 -1, /* Unused. */
12590 T(V8M_BASE), /* V8-M BASELINE. */
12591 T(V8M_MAIN), /* V8-M MAINLINE. */
12592 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
12593 };
12594 const int *comb[] =
12595 {
12596 v6t2,
12597 v6k,
12598 v7,
12599 v6_m,
12600 v6s_m,
12601 v7e_m,
12602 v8,
12603 NULL,
12604 v8m_baseline,
12605 v8m_mainline,
12606 /* Pseudo-architecture. */
12607 v4t_plus_v6_m
12608 };
12609
12610 /* Check we've not got a higher architecture than we know about. */
12611
12612 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
12613 {
12614 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
12615 return -1;
12616 }
12617
12618 /* Override old tag if we have a Tag_also_compatible_with on the output. */
12619
12620 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
12621 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
12622 oldtag = T(V4T_PLUS_V6_M);
12623
12624 /* And override the new tag if we have a Tag_also_compatible_with on the
12625 input. */
12626
12627 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
12628 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
12629 newtag = T(V4T_PLUS_V6_M);
12630
12631 tagl = (oldtag < newtag) ? oldtag : newtag;
12632 result = tagh = (oldtag > newtag) ? oldtag : newtag;
12633
12634 /* Architectures before V6KZ add features monotonically. */
12635 if (tagh <= TAG_CPU_ARCH_V6KZ)
12636 return result;
12637
12638 result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
12639
12640 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
12641 as the canonical version. */
12642 if (result == T(V4T_PLUS_V6_M))
12643 {
12644 result = T(V4T);
12645 *secondary_compat_out = T(V6_M);
12646 }
12647 else
12648 *secondary_compat_out = -1;
12649
12650 if (result == -1)
12651 {
12652 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
12653 ibfd, oldtag, newtag);
12654 return -1;
12655 }
12656
12657 return result;
12658 #undef T
12659 }
12660
12661 /* Query attributes object to see if integer divide instructions may be
12662 present in an object. */
12663 static bfd_boolean
12664 elf32_arm_attributes_accept_div (const obj_attribute *attr)
12665 {
12666 int arch = attr[Tag_CPU_arch].i;
12667 int profile = attr[Tag_CPU_arch_profile].i;
12668
12669 switch (attr[Tag_DIV_use].i)
12670 {
12671 case 0:
12672 /* Integer divide allowed if instruction contained in archetecture. */
12673 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
12674 return TRUE;
12675 else if (arch >= TAG_CPU_ARCH_V7E_M)
12676 return TRUE;
12677 else
12678 return FALSE;
12679
12680 case 1:
12681 /* Integer divide explicitly prohibited. */
12682 return FALSE;
12683
12684 default:
12685 /* Unrecognised case - treat as allowing divide everywhere. */
12686 case 2:
12687 /* Integer divide allowed in ARM state. */
12688 return TRUE;
12689 }
12690 }
12691
12692 /* Query attributes object to see if integer divide instructions are
12693 forbidden to be in the object. This is not the inverse of
12694 elf32_arm_attributes_accept_div. */
12695 static bfd_boolean
12696 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
12697 {
12698 return attr[Tag_DIV_use].i == 1;
12699 }
12700
12701 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
12702 are conflicting attributes. */
12703
12704 static bfd_boolean
12705 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
12706 {
12707 obj_attribute *in_attr;
12708 obj_attribute *out_attr;
12709 /* Some tags have 0 = don't care, 1 = strong requirement,
12710 2 = weak requirement. */
12711 static const int order_021[3] = {0, 2, 1};
12712 int i;
12713 bfd_boolean result = TRUE;
12714 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
12715
12716 /* Skip the linker stubs file. This preserves previous behavior
12717 of accepting unknown attributes in the first input file - but
12718 is that a bug? */
12719 if (ibfd->flags & BFD_LINKER_CREATED)
12720 return TRUE;
12721
12722 /* Skip any input that hasn't attribute section.
12723 This enables to link object files without attribute section with
12724 any others. */
12725 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
12726 return TRUE;
12727
12728 if (!elf_known_obj_attributes_proc (obfd)[0].i)
12729 {
12730 /* This is the first object. Copy the attributes. */
12731 _bfd_elf_copy_obj_attributes (ibfd, obfd);
12732
12733 out_attr = elf_known_obj_attributes_proc (obfd);
12734
12735 /* Use the Tag_null value to indicate the attributes have been
12736 initialized. */
12737 out_attr[0].i = 1;
12738
12739 /* We do not output objects with Tag_MPextension_use_legacy - we move
12740 the attribute's value to Tag_MPextension_use. */
12741 if (out_attr[Tag_MPextension_use_legacy].i != 0)
12742 {
12743 if (out_attr[Tag_MPextension_use].i != 0
12744 && out_attr[Tag_MPextension_use_legacy].i
12745 != out_attr[Tag_MPextension_use].i)
12746 {
12747 _bfd_error_handler
12748 (_("Error: %B has both the current and legacy "
12749 "Tag_MPextension_use attributes"), ibfd);
12750 result = FALSE;
12751 }
12752
12753 out_attr[Tag_MPextension_use] =
12754 out_attr[Tag_MPextension_use_legacy];
12755 out_attr[Tag_MPextension_use_legacy].type = 0;
12756 out_attr[Tag_MPextension_use_legacy].i = 0;
12757 }
12758
12759 return result;
12760 }
12761
12762 in_attr = elf_known_obj_attributes_proc (ibfd);
12763 out_attr = elf_known_obj_attributes_proc (obfd);
12764 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
12765 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
12766 {
12767 /* Ignore mismatches if the object doesn't use floating point or is
12768 floating point ABI independent. */
12769 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
12770 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12771 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
12772 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
12773 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12774 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
12775 {
12776 _bfd_error_handler
12777 (_("error: %B uses VFP register arguments, %B does not"),
12778 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
12779 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
12780 result = FALSE;
12781 }
12782 }
12783
12784 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
12785 {
12786 /* Merge this attribute with existing attributes. */
12787 switch (i)
12788 {
12789 case Tag_CPU_raw_name:
12790 case Tag_CPU_name:
12791 /* These are merged after Tag_CPU_arch. */
12792 break;
12793
12794 case Tag_ABI_optimization_goals:
12795 case Tag_ABI_FP_optimization_goals:
12796 /* Use the first value seen. */
12797 break;
12798
12799 case Tag_CPU_arch:
12800 {
12801 int secondary_compat = -1, secondary_compat_out = -1;
12802 unsigned int saved_out_attr = out_attr[i].i;
12803 int arch_attr;
12804 static const char *name_table[] =
12805 {
12806 /* These aren't real CPU names, but we can't guess
12807 that from the architecture version alone. */
12808 "Pre v4",
12809 "ARM v4",
12810 "ARM v4T",
12811 "ARM v5T",
12812 "ARM v5TE",
12813 "ARM v5TEJ",
12814 "ARM v6",
12815 "ARM v6KZ",
12816 "ARM v6T2",
12817 "ARM v6K",
12818 "ARM v7",
12819 "ARM v6-M",
12820 "ARM v6S-M",
12821 "ARM v8",
12822 "",
12823 "ARM v8-M.baseline",
12824 "ARM v8-M.mainline",
12825 };
12826
12827 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
12828 secondary_compat = get_secondary_compatible_arch (ibfd);
12829 secondary_compat_out = get_secondary_compatible_arch (obfd);
12830 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
12831 &secondary_compat_out,
12832 in_attr[i].i,
12833 secondary_compat);
12834
12835 /* Return with error if failed to merge. */
12836 if (arch_attr == -1)
12837 return FALSE;
12838
12839 out_attr[i].i = arch_attr;
12840
12841 set_secondary_compatible_arch (obfd, secondary_compat_out);
12842
12843 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
12844 if (out_attr[i].i == saved_out_attr)
12845 ; /* Leave the names alone. */
12846 else if (out_attr[i].i == in_attr[i].i)
12847 {
12848 /* The output architecture has been changed to match the
12849 input architecture. Use the input names. */
12850 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
12851 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
12852 : NULL;
12853 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
12854 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
12855 : NULL;
12856 }
12857 else
12858 {
12859 out_attr[Tag_CPU_name].s = NULL;
12860 out_attr[Tag_CPU_raw_name].s = NULL;
12861 }
12862
12863 /* If we still don't have a value for Tag_CPU_name,
12864 make one up now. Tag_CPU_raw_name remains blank. */
12865 if (out_attr[Tag_CPU_name].s == NULL
12866 && out_attr[i].i < ARRAY_SIZE (name_table))
12867 out_attr[Tag_CPU_name].s =
12868 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
12869 }
12870 break;
12871
12872 case Tag_ARM_ISA_use:
12873 case Tag_THUMB_ISA_use:
12874 case Tag_WMMX_arch:
12875 case Tag_Advanced_SIMD_arch:
12876 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
12877 case Tag_ABI_FP_rounding:
12878 case Tag_ABI_FP_exceptions:
12879 case Tag_ABI_FP_user_exceptions:
12880 case Tag_ABI_FP_number_model:
12881 case Tag_FP_HP_extension:
12882 case Tag_CPU_unaligned_access:
12883 case Tag_T2EE_use:
12884 case Tag_MPextension_use:
12885 /* Use the largest value specified. */
12886 if (in_attr[i].i > out_attr[i].i)
12887 out_attr[i].i = in_attr[i].i;
12888 break;
12889
12890 case Tag_ABI_align_preserved:
12891 case Tag_ABI_PCS_RO_data:
12892 /* Use the smallest value specified. */
12893 if (in_attr[i].i < out_attr[i].i)
12894 out_attr[i].i = in_attr[i].i;
12895 break;
12896
12897 case Tag_ABI_align_needed:
12898 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
12899 && (in_attr[Tag_ABI_align_preserved].i == 0
12900 || out_attr[Tag_ABI_align_preserved].i == 0))
12901 {
12902 /* This error message should be enabled once all non-conformant
12903 binaries in the toolchain have had the attributes set
12904 properly.
12905 _bfd_error_handler
12906 (_("error: %B: 8-byte data alignment conflicts with %B"),
12907 obfd, ibfd);
12908 result = FALSE; */
12909 }
12910 /* Fall through. */
12911 case Tag_ABI_FP_denormal:
12912 case Tag_ABI_PCS_GOT_use:
12913 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
12914 value if greater than 2 (for future-proofing). */
12915 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
12916 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
12917 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
12918 out_attr[i].i = in_attr[i].i;
12919 break;
12920
12921 case Tag_Virtualization_use:
12922 /* The virtualization tag effectively stores two bits of
12923 information: the intended use of TrustZone (in bit 0), and the
12924 intended use of Virtualization (in bit 1). */
12925 if (out_attr[i].i == 0)
12926 out_attr[i].i = in_attr[i].i;
12927 else if (in_attr[i].i != 0
12928 && in_attr[i].i != out_attr[i].i)
12929 {
12930 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
12931 out_attr[i].i = 3;
12932 else
12933 {
12934 _bfd_error_handler
12935 (_("error: %B: unable to merge virtualization attributes "
12936 "with %B"),
12937 obfd, ibfd);
12938 result = FALSE;
12939 }
12940 }
12941 break;
12942
12943 case Tag_CPU_arch_profile:
12944 if (out_attr[i].i != in_attr[i].i)
12945 {
12946 /* 0 will merge with anything.
12947 'A' and 'S' merge to 'A'.
12948 'R' and 'S' merge to 'R'.
12949 'M' and 'A|R|S' is an error. */
12950 if (out_attr[i].i == 0
12951 || (out_attr[i].i == 'S'
12952 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
12953 out_attr[i].i = in_attr[i].i;
12954 else if (in_attr[i].i == 0
12955 || (in_attr[i].i == 'S'
12956 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
12957 ; /* Do nothing. */
12958 else
12959 {
12960 _bfd_error_handler
12961 (_("error: %B: Conflicting architecture profiles %c/%c"),
12962 ibfd,
12963 in_attr[i].i ? in_attr[i].i : '0',
12964 out_attr[i].i ? out_attr[i].i : '0');
12965 result = FALSE;
12966 }
12967 }
12968 break;
12969
12970 case Tag_DSP_extension:
12971 /* No need to change output value if any of:
12972 - pre (<=) ARMv5T input architecture (do not have DSP)
12973 - M input profile not ARMv7E-M and do not have DSP. */
12974 if (in_attr[Tag_CPU_arch].i <= 3
12975 || (in_attr[Tag_CPU_arch_profile].i == 'M'
12976 && in_attr[Tag_CPU_arch].i != 13
12977 && in_attr[i].i == 0))
12978 ; /* Do nothing. */
12979 /* Output value should be 0 if DSP part of architecture, ie.
12980 - post (>=) ARMv5te architecture output
12981 - A, R or S profile output or ARMv7E-M output architecture. */
12982 else if (out_attr[Tag_CPU_arch].i >= 4
12983 && (out_attr[Tag_CPU_arch_profile].i == 'A'
12984 || out_attr[Tag_CPU_arch_profile].i == 'R'
12985 || out_attr[Tag_CPU_arch_profile].i == 'S'
12986 || out_attr[Tag_CPU_arch].i == 13))
12987 out_attr[i].i = 0;
12988 /* Otherwise, DSP instructions are added and not part of output
12989 architecture. */
12990 else
12991 out_attr[i].i = 1;
12992 break;
12993
12994 case Tag_FP_arch:
12995 {
12996 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
12997 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
12998 when it's 0. It might mean absence of FP hardware if
12999 Tag_FP_arch is zero. */
13000
13001 #define VFP_VERSION_COUNT 9
13002 static const struct
13003 {
13004 int ver;
13005 int regs;
13006 } vfp_versions[VFP_VERSION_COUNT] =
13007 {
13008 {0, 0},
13009 {1, 16},
13010 {2, 16},
13011 {3, 32},
13012 {3, 16},
13013 {4, 32},
13014 {4, 16},
13015 {8, 32},
13016 {8, 16}
13017 };
13018 int ver;
13019 int regs;
13020 int newval;
13021
13022 /* If the output has no requirement about FP hardware,
13023 follow the requirement of the input. */
13024 if (out_attr[i].i == 0)
13025 {
13026 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
13027 out_attr[i].i = in_attr[i].i;
13028 out_attr[Tag_ABI_HardFP_use].i
13029 = in_attr[Tag_ABI_HardFP_use].i;
13030 break;
13031 }
13032 /* If the input has no requirement about FP hardware, do
13033 nothing. */
13034 else if (in_attr[i].i == 0)
13035 {
13036 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
13037 break;
13038 }
13039
13040 /* Both the input and the output have nonzero Tag_FP_arch.
13041 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
13042
13043 /* If both the input and the output have zero Tag_ABI_HardFP_use,
13044 do nothing. */
13045 if (in_attr[Tag_ABI_HardFP_use].i == 0
13046 && out_attr[Tag_ABI_HardFP_use].i == 0)
13047 ;
13048 /* If the input and the output have different Tag_ABI_HardFP_use,
13049 the combination of them is 0 (implied by Tag_FP_arch). */
13050 else if (in_attr[Tag_ABI_HardFP_use].i
13051 != out_attr[Tag_ABI_HardFP_use].i)
13052 out_attr[Tag_ABI_HardFP_use].i = 0;
13053
13054 /* Now we can handle Tag_FP_arch. */
13055
13056 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
13057 pick the biggest. */
13058 if (in_attr[i].i >= VFP_VERSION_COUNT
13059 && in_attr[i].i > out_attr[i].i)
13060 {
13061 out_attr[i] = in_attr[i];
13062 break;
13063 }
13064 /* The output uses the superset of input features
13065 (ISA version) and registers. */
13066 ver = vfp_versions[in_attr[i].i].ver;
13067 if (ver < vfp_versions[out_attr[i].i].ver)
13068 ver = vfp_versions[out_attr[i].i].ver;
13069 regs = vfp_versions[in_attr[i].i].regs;
13070 if (regs < vfp_versions[out_attr[i].i].regs)
13071 regs = vfp_versions[out_attr[i].i].regs;
13072 /* This assumes all possible supersets are also a valid
13073 options. */
13074 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
13075 {
13076 if (regs == vfp_versions[newval].regs
13077 && ver == vfp_versions[newval].ver)
13078 break;
13079 }
13080 out_attr[i].i = newval;
13081 }
13082 break;
13083 case Tag_PCS_config:
13084 if (out_attr[i].i == 0)
13085 out_attr[i].i = in_attr[i].i;
13086 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
13087 {
13088 /* It's sometimes ok to mix different configs, so this is only
13089 a warning. */
13090 _bfd_error_handler
13091 (_("Warning: %B: Conflicting platform configuration"), ibfd);
13092 }
13093 break;
13094 case Tag_ABI_PCS_R9_use:
13095 if (in_attr[i].i != out_attr[i].i
13096 && out_attr[i].i != AEABI_R9_unused
13097 && in_attr[i].i != AEABI_R9_unused)
13098 {
13099 _bfd_error_handler
13100 (_("error: %B: Conflicting use of R9"), ibfd);
13101 result = FALSE;
13102 }
13103 if (out_attr[i].i == AEABI_R9_unused)
13104 out_attr[i].i = in_attr[i].i;
13105 break;
13106 case Tag_ABI_PCS_RW_data:
13107 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
13108 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
13109 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
13110 {
13111 _bfd_error_handler
13112 (_("error: %B: SB relative addressing conflicts with use of R9"),
13113 ibfd);
13114 result = FALSE;
13115 }
13116 /* Use the smallest value specified. */
13117 if (in_attr[i].i < out_attr[i].i)
13118 out_attr[i].i = in_attr[i].i;
13119 break;
13120 case Tag_ABI_PCS_wchar_t:
13121 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
13122 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
13123 {
13124 _bfd_error_handler
13125 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
13126 ibfd, in_attr[i].i, out_attr[i].i);
13127 }
13128 else if (in_attr[i].i && !out_attr[i].i)
13129 out_attr[i].i = in_attr[i].i;
13130 break;
13131 case Tag_ABI_enum_size:
13132 if (in_attr[i].i != AEABI_enum_unused)
13133 {
13134 if (out_attr[i].i == AEABI_enum_unused
13135 || out_attr[i].i == AEABI_enum_forced_wide)
13136 {
13137 /* The existing object is compatible with anything.
13138 Use whatever requirements the new object has. */
13139 out_attr[i].i = in_attr[i].i;
13140 }
13141 else if (in_attr[i].i != AEABI_enum_forced_wide
13142 && out_attr[i].i != in_attr[i].i
13143 && !elf_arm_tdata (obfd)->no_enum_size_warning)
13144 {
13145 static const char *aeabi_enum_names[] =
13146 { "", "variable-size", "32-bit", "" };
13147 const char *in_name =
13148 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
13149 ? aeabi_enum_names[in_attr[i].i]
13150 : "<unknown>";
13151 const char *out_name =
13152 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
13153 ? aeabi_enum_names[out_attr[i].i]
13154 : "<unknown>";
13155 _bfd_error_handler
13156 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
13157 ibfd, in_name, out_name);
13158 }
13159 }
13160 break;
13161 case Tag_ABI_VFP_args:
13162 /* Aready done. */
13163 break;
13164 case Tag_ABI_WMMX_args:
13165 if (in_attr[i].i != out_attr[i].i)
13166 {
13167 _bfd_error_handler
13168 (_("error: %B uses iWMMXt register arguments, %B does not"),
13169 ibfd, obfd);
13170 result = FALSE;
13171 }
13172 break;
13173 case Tag_compatibility:
13174 /* Merged in target-independent code. */
13175 break;
13176 case Tag_ABI_HardFP_use:
13177 /* This is handled along with Tag_FP_arch. */
13178 break;
13179 case Tag_ABI_FP_16bit_format:
13180 if (in_attr[i].i != 0 && out_attr[i].i != 0)
13181 {
13182 if (in_attr[i].i != out_attr[i].i)
13183 {
13184 _bfd_error_handler
13185 (_("error: fp16 format mismatch between %B and %B"),
13186 ibfd, obfd);
13187 result = FALSE;
13188 }
13189 }
13190 if (in_attr[i].i != 0)
13191 out_attr[i].i = in_attr[i].i;
13192 break;
13193
13194 case Tag_DIV_use:
13195 /* A value of zero on input means that the divide instruction may
13196 be used if available in the base architecture as specified via
13197 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
13198 the user did not want divide instructions. A value of 2
13199 explicitly means that divide instructions were allowed in ARM
13200 and Thumb state. */
13201 if (in_attr[i].i == out_attr[i].i)
13202 /* Do nothing. */ ;
13203 else if (elf32_arm_attributes_forbid_div (in_attr)
13204 && !elf32_arm_attributes_accept_div (out_attr))
13205 out_attr[i].i = 1;
13206 else if (elf32_arm_attributes_forbid_div (out_attr)
13207 && elf32_arm_attributes_accept_div (in_attr))
13208 out_attr[i].i = in_attr[i].i;
13209 else if (in_attr[i].i == 2)
13210 out_attr[i].i = in_attr[i].i;
13211 break;
13212
13213 case Tag_MPextension_use_legacy:
13214 /* We don't output objects with Tag_MPextension_use_legacy - we
13215 move the value to Tag_MPextension_use. */
13216 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
13217 {
13218 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
13219 {
13220 _bfd_error_handler
13221 (_("%B has has both the current and legacy "
13222 "Tag_MPextension_use attributes"),
13223 ibfd);
13224 result = FALSE;
13225 }
13226 }
13227
13228 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
13229 out_attr[Tag_MPextension_use] = in_attr[i];
13230
13231 break;
13232
13233 case Tag_nodefaults:
13234 /* This tag is set if it exists, but the value is unused (and is
13235 typically zero). We don't actually need to do anything here -
13236 the merge happens automatically when the type flags are merged
13237 below. */
13238 break;
13239 case Tag_also_compatible_with:
13240 /* Already done in Tag_CPU_arch. */
13241 break;
13242 case Tag_conformance:
13243 /* Keep the attribute if it matches. Throw it away otherwise.
13244 No attribute means no claim to conform. */
13245 if (!in_attr[i].s || !out_attr[i].s
13246 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
13247 out_attr[i].s = NULL;
13248 break;
13249
13250 default:
13251 result
13252 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
13253 }
13254
13255 /* If out_attr was copied from in_attr then it won't have a type yet. */
13256 if (in_attr[i].type && !out_attr[i].type)
13257 out_attr[i].type = in_attr[i].type;
13258 }
13259
13260 /* Merge Tag_compatibility attributes and any common GNU ones. */
13261 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
13262 return FALSE;
13263
13264 /* Check for any attributes not known on ARM. */
13265 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
13266
13267 return result;
13268 }
13269
13270
13271 /* Return TRUE if the two EABI versions are incompatible. */
13272
13273 static bfd_boolean
13274 elf32_arm_versions_compatible (unsigned iver, unsigned over)
13275 {
13276 /* v4 and v5 are the same spec before and after it was released,
13277 so allow mixing them. */
13278 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
13279 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
13280 return TRUE;
13281
13282 return (iver == over);
13283 }
13284
13285 /* Merge backend specific data from an object file to the output
13286 object file when linking. */
13287
13288 static bfd_boolean
13289 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
13290
/* Display the flags field of the ELF header for objdump -p.  ABFD is the
   BFD being dumped and PTR is really a FILE* to print to.  First prints
   the generic ELF private data, then decodes the ARM-specific e_flags
   bits according to which EABI version the object declares.  Any bits
   left over after decoding are reported as unrecognised.  Always
   returns TRUE.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data. */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data. */

  /* xgettext:c-format */
  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* Each case below prints the flag bits it understands and then clears
     them from FLAGS, so the leftover check at the bottom can spot
     anything unrecognised.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set. */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      /* NOTE(review): unlike VER4/VER5, this case does not decode the
	 BE8/LE8 bits at the "eabi" label below — presumably they were
	 introduced after v3; confirm against the AAELF spec before
	 changing.  */
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      /* Shared tail for v4 and v5: byte-order annotation bits.  */
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  flags &= ~EF_ARM_RELEXEC;

  /* Anything still set was not decoded above.  */
  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
13430
13431 static int
13432 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
13433 {
13434 switch (ELF_ST_TYPE (elf_sym->st_info))
13435 {
13436 case STT_ARM_TFUNC:
13437 return ELF_ST_TYPE (elf_sym->st_info);
13438
13439 case STT_ARM_16BIT:
13440 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
13441 This allows us to distinguish between data used by Thumb instructions
13442 and non-data (which is probably code) inside Thumb regions of an
13443 executable. */
13444 if (type != STT_OBJECT && type != STT_TLS)
13445 return ELF_ST_TYPE (elf_sym->st_info);
13446 break;
13447
13448 default:
13449 break;
13450 }
13451
13452 return type;
13453 }
13454
13455 static asection *
13456 elf32_arm_gc_mark_hook (asection *sec,
13457 struct bfd_link_info *info,
13458 Elf_Internal_Rela *rel,
13459 struct elf_link_hash_entry *h,
13460 Elf_Internal_Sym *sym)
13461 {
13462 if (h != NULL)
13463 switch (ELF32_R_TYPE (rel->r_info))
13464 {
13465 case R_ARM_GNU_VTINHERIT:
13466 case R_ARM_GNU_VTENTRY:
13467 return NULL;
13468 }
13469
13470 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
13471 }
13472
/* Update the got entry reference counts for the section being removed.
   SEC (in ABFD) is being swept away by the garbage collector, so undo
   the GOT/PLT/dynamic-reloc reference counting that was performed for
   RELOCS when the section was first scanned.  Returns FALSE only on
   internal errors (missing hash table or local symbol lookup failure).  */

static bfd_boolean
elf32_arm_gc_sweep_hook (bfd * abfd,
			 struct bfd_link_info * info,
			 asection * sec,
			 const Elf_Internal_Rela * relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  bfd_signed_vma *local_got_refcounts;
  const Elf_Internal_Rela *rel, *relend;
  struct elf32_arm_link_hash_table * globals;

  /* Nothing was counted for relocatable links, so nothing to undo.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  /* The section is going away; drop any per-section dynamic reloc
     bookkeeping attached to it.  */
  elf_section_data (sec)->local_dynrel = NULL;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  local_got_refcounts = elf_local_got_refcounts (abfd);

  check_use_blx (globals);

  /* Walk every relocation in SEC and reverse the accounting done by
     elf32_arm_check_relocs for the matching reloc kinds.  */
  relend = relocs + sec->reloc_count;
  for (rel = relocs; rel < relend; rel++)
    {
      unsigned long r_symndx;
      struct elf_link_hash_entry *h = NULL;
      struct elf32_arm_link_hash_entry *eh;
      int r_type;
      bfd_boolean call_reloc_p;
      bfd_boolean may_become_dynamic_p;
      bfd_boolean may_need_local_target_p;
      union gotplt_union *root_plt;
      struct arm_plt_info *arm_plt;

      r_symndx = ELF32_R_SYM (rel->r_info);
      if (r_symndx >= symtab_hdr->sh_info)
	{
	  /* Global symbol: follow indirection/warning links to the
	     real hash entry.  */
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}
      /* EH is the ARM-specific view of H (NULL for local symbols).  */
      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);
      /* Classify the reloc exactly as check_relocs did, so the flags
	 below mirror the ones used when the counts were taken.  */
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	  if (h != NULL)
	    {
	      if (h->got.refcount > 0)
		h->got.refcount -= 1;
	    }
	  else if (local_got_refcounts != NULL)
	    {
	      if (local_got_refcounts[r_symndx] > 0)
		local_got_refcounts[r_symndx] -= 1;
	    }
	  break;

	case R_ARM_TLS_LDM32:
	  globals->tls_ldm_got.refcount -= 1;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* On VxWorks, R_ARM_ABS12 falls through to the dynamic-reloc
	     handling below; elsewhere it is a plain local-target reloc.  */
	  if (!globals->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* Fall through. */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:
	  /* Should the interworking branches be here also? */
	  if ((bfd_link_pic (info) || globals->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* Local PC-relative references were treated as
		     calls; see the matching code in check_relocs.  */
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	default:
	  break;
	}

      if (may_need_local_target_p
	  && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
	{
	  /* If PLT refcount book-keeping is wrong and too low, we'll
	     see a zero value (going to -1) for the root PLT reference
	     count. */
	  if (root_plt->refcount >= 0)
	    {
	      BFD_ASSERT (root_plt->refcount != 0);
	      root_plt->refcount -= 1;
	    }
	  else
	    /* A value of -1 means the symbol has become local, forced
	       or seeing a hidden definition.  Any other negative value
	       is an error. */
	    BFD_ASSERT (root_plt->refcount == -1);

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount--;

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount--;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount--;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Find the dynamic-reloc list this reloc was counted on:
	     the hash entry's list for globals, the per-local-symbol
	     list otherwise.  */
	  if (h != NULL)
	    pp = &(eh->dyn_relocs);
	  else
	    {
	      Elf_Internal_Sym *isym;

	      isym = bfd_sym_from_r_symndx (&globals->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	      pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (pp == NULL)
		return FALSE;
	    }
	  /* Unlink the record for SEC; since the whole section is
	     being discarded, its entire count goes with it.  */
	  for (; (p = *pp) != NULL; pp = &p->next)
	    if (p->sec == sec)
	      {
		/* Everything must go for SEC. */
		*pp = p->next;
		break;
	      }
	}
    }

  return TRUE;
}
13664
/* Look through the relocs for a section during the first phase.
   For each relocation in RELOCS (applying to SEC in ABFD), record the
   linker-side resources it will need: GOT slots (including TLS
   variants), PLT entries, and space for dynamic relocations copied to
   the output.  Returns FALSE on malformed input or allocation
   failure.  */

static bfd_boolean
elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  bfd *dynobj;
  asection *sreloc;
  struct elf32_arm_link_hash_table *htab;
  bfd_boolean call_reloc_p;
  bfd_boolean may_become_dynamic_p;
  bfd_boolean may_need_local_target_p;
  unsigned long nsyms;

  /* Relocatable links keep relocs as-is; nothing to account for.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  BFD_ASSERT (is_arm_elf (abfd));

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  sreloc = NULL;

  /* Create dynamic sections for relocatable executables so that we can
     copy relocations. */
  if (htab->root.is_relocatable_executable
      && ! htab->root.dynamic_sections_created)
    {
      if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
	return FALSE;
    }

  if (htab->root.dynobj == NULL)
    htab->root.dynobj = abfd;
  if (!create_ifunc_sections (info))
    return FALSE;

  dynobj = htab->root.dynobj;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);

  /* Walk every relocation in SEC.  */
  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      Elf_Internal_Sym *isym;
      struct elf_link_hash_entry *h;
      struct elf32_arm_link_hash_entry *eh;
      unsigned long r_symndx;
      int r_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (htab, r_type);

      if (r_symndx >= nsyms
	  /* PR 9934: It is possible to have relocations that do not
	     refer to symbols, thus it is also possible to have an
	     object file containing relocations but no symbol table. */
	  && (r_symndx > STN_UNDEF || nsyms > 0))
	{
	  (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
				 r_symndx);
	  return FALSE;
	}

      /* Resolve the reloc's symbol: ISYM for locals, H (with
	 indirection/warning links followed) for globals.  */
      h = NULL;
      isym = NULL;
      if (nsyms > 0)
	{
	  if (r_symndx < symtab_hdr->sh_info)
	    {
	      /* A local symbol. */
	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	    }
	  else
	    {
	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	      while (h->root.type == bfd_link_hash_indirect
		     || h->root.type == bfd_link_hash_warning)
		h = (struct elf_link_hash_entry *) h->root.u.i.link;

	      /* PR15323, ref flags aren't set for references in the
		 same object. */
	      h->root.non_ir_ref = 1;
	    }
	}

      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      /* Could be done earlier, if h were already available. */
      r_type = elf32_arm_tls_transition (info, r_type, h);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	case R_ARM_TLS_GOTDESC:
	case R_ARM_TLS_DESCSEQ:
	case R_ARM_THM_TLS_DESCSEQ:
	case R_ARM_TLS_CALL:
	case R_ARM_THM_TLS_CALL:
	  /* This symbol requires a global offset table entry. */
	  {
	    int tls_type, old_tls_type;

	    /* Pick the GOT slot category implied by the reloc kind.  */
	    switch (r_type)
	      {
	      case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;

	      case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;

	      case R_ARM_TLS_GOTDESC:
	      case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
	      case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
		tls_type = GOT_TLS_GDESC; break;

	      default: tls_type = GOT_NORMAL; break;
	      }

	    if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
	      info->flags |= DF_STATIC_TLS;

	    if (h != NULL)
	      {
		h->got.refcount++;
		old_tls_type = elf32_arm_hash_entry (h)->tls_type;
	      }
	    else
	      {
		/* This is a global offset table entry for a local symbol. */
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return FALSE;
		elf_local_got_refcounts (abfd)[r_symndx] += 1;
		old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
	      }

	    /* If a variable is accessed with both tls methods, two
	       slots may be created. */
	    if (GOT_TLS_GD_ANY_P (old_tls_type)
		&& GOT_TLS_GD_ANY_P (tls_type))
	      tls_type |= old_tls_type;

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol
	       type.  So just combine any TLS types needed. */
	    if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
		&& tls_type != GOT_NORMAL)
	      tls_type |= old_tls_type;

	    /* If the symbol is accessed in both IE and GDESC
	       method, we're able to relax.  Turn off the GDESC flag,
	       without messing up with any other kind of tls types
	       that may be involved. */
	    if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
	      tls_type &= ~GOT_TLS_GDESC;

	    if (old_tls_type != tls_type)
	      {
		if (h != NULL)
		  elf32_arm_hash_entry (h)->tls_type = tls_type;
		else
		  elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through. */

	case R_ARM_TLS_LDM32:
	  if (r_type == R_ARM_TLS_LDM32)
	    htab->tls_ldm_got.refcount++;
	  /* Fall through. */

	case R_ARM_GOTOFF32:
	case R_ARM_GOTPC:
	  /* All GOT-relative relocs need the .got section to exist.  */
	  if (htab->root.sgot == NULL
	      && !create_got_section (htab->root.dynobj, info))
	    return FALSE;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* VxWorks uses dynamic R_ARM_ABS12 relocations for
	     ldr __GOTT_INDEX__ offsets. */
	  if (!htab->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* On VxWorks, skip the PIC diagnostic below and go straight
	     to the dynamic-reloc classification.  */
	  else goto jump_over;

	  /* Fall through. */

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	  /* Absolute MOVW/MOVT pairs cannot be fixed up at load time,
	     so reject them outright in position-independent output.  */
	  if (bfd_link_pic (info))
	    {
	      (*_bfd_error_handler)
		(_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
		 abfd, elf32_arm_howto_table_1[r_type].name,
		 (h) ? h->root.root.string : "a local symbol");
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }

	  /* Fall through. */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	jump_over:
	  if (h != NULL && bfd_link_executable (info))
	    {
	      h->pointer_equality_needed = 1;
	    }
	  /* Fall through. */
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:

	  /* Should the interworking branches be listed here? */
	  if ((bfd_link_pic (info) || htab->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* In shared libraries and relocatable executables,
		     we treat local relative references as calls;
		     see the related SYMBOL_CALLS_LOCAL code in
		     allocate_dynrelocs. */
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		/* We are creating a shared library or relocatable
		   executable, and this is a reloc against a global symbol,
		   or a non-PC-relative reloc against a local symbol.
		   We may need to copy the reloc into the output. */
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	/* This relocation describes the C++ object vtable hierarchy.
	   Reconstruct it for later use during GC. */
	case R_ARM_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;

	/* This relocation describes which C++ vtable entries are actually
	   used.  Record for later use during GC. */
	case R_ARM_GNU_VTENTRY:
	  BFD_ASSERT (h != NULL);
	  if (h != NULL
	      && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;
	}

      if (h != NULL)
	{
	  if (call_reloc_p)
	    /* We may need a .plt entry if the function this reloc
	       refers to is in a different object, regardless of the
	       symbol's type.  We can't tell for sure yet, because
	       something later might force the symbol local. */
	    h->needs_plt = 1;
	  else if (may_need_local_target_p)
	    /* If this reloc is in a read-only section, we might
	       need a copy reloc.  We can't check reliably at this
	       stage whether the section is read-only, as input
	       sections have not yet been mapped to output sections.
	       Tentatively set the flag for now, and correct in
	       adjust_dynamic_symbol. */
	    h->non_got_ref = 1;
	}

      /* Take PLT reference counts for global symbols and for local
	 STT_GNU_IFUNC symbols (which get local iplt entries).  */
      if (may_need_local_target_p
	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
	{
	  union gotplt_union *root_plt;
	  struct arm_plt_info *arm_plt;
	  struct arm_local_iplt_info *local_iplt;

	  if (h != NULL)
	    {
	      root_plt = &h->plt;
	      arm_plt = &eh->plt;
	    }
	  else
	    {
	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
	      if (local_iplt == NULL)
		return FALSE;
	      root_plt = &local_iplt->root;
	      arm_plt = &local_iplt->arm;
	    }

	  /* If the symbol is a function that doesn't bind locally,
	     this relocation will need a PLT entry.  A refcount of -1
	     means the count has been finalised; leave it alone.  */
	  if (root_plt->refcount != -1)
	    root_plt->refcount += 1;

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount++;

	  /* It's too early to use htab->use_blx here, so we have to
	     record possible blx references separately from
	     relocs that definitely need a thumb stub. */

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount += 1;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount += 1;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs *p, **head;

	  /* Create a reloc section in dynobj. */
	  if (sreloc == NULL)
	    {
	      sreloc = _bfd_elf_make_dynamic_reloc_section
		(sec, dynobj, 2, abfd, ! htab->use_rel);

	      if (sreloc == NULL)
		return FALSE;

	      /* BPABI objects never have dynamic relocations mapped. */
	      if (htab->symbian_p)
		{
		  flagword flags;

		  flags = bfd_get_section_flags (dynobj, sreloc);
		  flags &= ~(SEC_LOAD | SEC_ALLOC);
		  bfd_set_section_flags (dynobj, sreloc, flags);
		}
	    }

	  /* If this is a global symbol, count the number of
	     relocations we need for this symbol. */
	  if (h != NULL)
	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
	  else
	    {
	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (head == NULL)
		return FALSE;
	    }

	  /* The list is kept with the most recently seen section at
	     the head; start a new record if SEC isn't it.  */
	  p = *head;
	  if (p == NULL || p->sec != sec)
	    {
	      bfd_size_type amt = sizeof *p;

	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
	      if (p == NULL)
		return FALSE;
	      p->next = *head;
	      *head = p;
	      p->sec = sec;
	      p->count = 0;
	      p->pc_count = 0;
	    }

	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
	    p->pc_count += 1;
	  p->count += 1;
	}
    }

  return TRUE;
}
14072
14073 /* Unwinding tables are not referenced directly. This pass marks them as
14074 required if the corresponding code section is marked. */
14075
14076 static bfd_boolean
14077 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
14078 elf_gc_mark_hook_fn gc_mark_hook)
14079 {
14080 bfd *sub;
14081 Elf_Internal_Shdr **elf_shdrp;
14082 bfd_boolean again;
14083
14084 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
14085
14086 /* Marking EH data may cause additional code sections to be marked,
14087 requiring multiple passes. */
14088 again = TRUE;
14089 while (again)
14090 {
14091 again = FALSE;
14092 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
14093 {
14094 asection *o;
14095
14096 if (! is_arm_elf (sub))
14097 continue;
14098
14099 elf_shdrp = elf_elfsections (sub);
14100 for (o = sub->sections; o != NULL; o = o->next)
14101 {
14102 Elf_Internal_Shdr *hdr;
14103
14104 hdr = &elf_section_data (o)->this_hdr;
14105 if (hdr->sh_type == SHT_ARM_EXIDX
14106 && hdr->sh_link
14107 && hdr->sh_link < elf_numsections (sub)
14108 && !o->gc_mark
14109 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
14110 {
14111 again = TRUE;
14112 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
14113 return FALSE;
14114 }
14115 }
14116 }
14117 }
14118
14119 return TRUE;
14120 }
14121
14122 /* Treat mapping symbols as special target symbols. */
14123
14124 static bfd_boolean
14125 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
14126 {
14127 return bfd_is_arm_special_symbol_name (sym->name,
14128 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
14129 }
14130
14131 /* This is a copy of elf_find_function() from elf.c except that
14132 ARM mapping symbols are ignored when looking for function names
14133 and STT_ARM_TFUNC is considered to a function type. */
14134
14135 static bfd_boolean
14136 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
14137 asymbol ** symbols,
14138 asection * section,
14139 bfd_vma offset,
14140 const char ** filename_ptr,
14141 const char ** functionname_ptr)
14142 {
14143 const char * filename = NULL;
14144 asymbol * func = NULL;
14145 bfd_vma low_func = 0;
14146 asymbol ** p;
14147
14148 for (p = symbols; *p != NULL; p++)
14149 {
14150 elf_symbol_type *q;
14151
14152 q = (elf_symbol_type *) *p;
14153
14154 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
14155 {
14156 default:
14157 break;
14158 case STT_FILE:
14159 filename = bfd_asymbol_name (&q->symbol);
14160 break;
14161 case STT_FUNC:
14162 case STT_ARM_TFUNC:
14163 case STT_NOTYPE:
14164 /* Skip mapping symbols. */
14165 if ((q->symbol.flags & BSF_LOCAL)
14166 && bfd_is_arm_special_symbol_name (q->symbol.name,
14167 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
14168 continue;
14169 /* Fall through. */
14170 if (bfd_get_section (&q->symbol) == section
14171 && q->symbol.value >= low_func
14172 && q->symbol.value <= offset)
14173 {
14174 func = (asymbol *) q;
14175 low_func = q->symbol.value;
14176 }
14177 break;
14178 }
14179 }
14180
14181 if (func == NULL)
14182 return FALSE;
14183
14184 if (filename_ptr)
14185 *filename_ptr = filename;
14186 if (functionname_ptr)
14187 *functionname_ptr = bfd_asymbol_name (func);
14188
14189 return TRUE;
14190 }
14191
14192
14193 /* Find the nearest line to a particular section and offset, for error
14194 reporting. This code is a duplicate of the code in elf.c, except
14195 that it uses arm_elf_find_function. */
14196
14197 static bfd_boolean
14198 elf32_arm_find_nearest_line (bfd * abfd,
14199 asymbol ** symbols,
14200 asection * section,
14201 bfd_vma offset,
14202 const char ** filename_ptr,
14203 const char ** functionname_ptr,
14204 unsigned int * line_ptr,
14205 unsigned int * discriminator_ptr)
14206 {
14207 bfd_boolean found = FALSE;
14208
14209 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
14210 filename_ptr, functionname_ptr,
14211 line_ptr, discriminator_ptr,
14212 dwarf_debug_sections, 0,
14213 & elf_tdata (abfd)->dwarf2_find_line_info))
14214 {
14215 if (!*functionname_ptr)
14216 arm_elf_find_function (abfd, symbols, section, offset,
14217 *filename_ptr ? NULL : filename_ptr,
14218 functionname_ptr);
14219
14220 return TRUE;
14221 }
14222
14223 /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
14224 uses DWARF1. */
14225
14226 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
14227 & found, filename_ptr,
14228 functionname_ptr, line_ptr,
14229 & elf_tdata (abfd)->line_info))
14230 return FALSE;
14231
14232 if (found && (*functionname_ptr || *line_ptr))
14233 return TRUE;
14234
14235 if (symbols == NULL)
14236 return FALSE;
14237
14238 if (! arm_elf_find_function (abfd, symbols, section, offset,
14239 filename_ptr, functionname_ptr))
14240 return FALSE;
14241
14242 *line_ptr = 0;
14243 return TRUE;
14244 }
14245
14246 static bfd_boolean
14247 elf32_arm_find_inliner_info (bfd * abfd,
14248 const char ** filename_ptr,
14249 const char ** functionname_ptr,
14250 unsigned int * line_ptr)
14251 {
14252 bfd_boolean found;
14253 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
14254 functionname_ptr, line_ptr,
14255 & elf_tdata (abfd)->dwarf2_find_line_info);
14256 return found;
14257 }
14258
14259 /* Adjust a symbol defined by a dynamic object and referenced by a
14260 regular object. The current definition is in some section of the
14261 dynamic object, but we're not including those sections. We have to
14262 change the definition to something the rest of the link can
14263 understand. */
14264
14265 static bfd_boolean
14266 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
14267 struct elf_link_hash_entry * h)
14268 {
14269 bfd * dynobj;
14270 asection * s;
14271 struct elf32_arm_link_hash_entry * eh;
14272 struct elf32_arm_link_hash_table *globals;
14273
14274 globals = elf32_arm_hash_table (info);
14275 if (globals == NULL)
14276 return FALSE;
14277
14278 dynobj = elf_hash_table (info)->dynobj;
14279
14280 /* Make sure we know what is going on here. */
14281 BFD_ASSERT (dynobj != NULL
14282 && (h->needs_plt
14283 || h->type == STT_GNU_IFUNC
14284 || h->u.weakdef != NULL
14285 || (h->def_dynamic
14286 && h->ref_regular
14287 && !h->def_regular)));
14288
14289 eh = (struct elf32_arm_link_hash_entry *) h;
14290
14291 /* If this is a function, put it in the procedure linkage table. We
14292 will fill in the contents of the procedure linkage table later,
14293 when we know the address of the .got section. */
14294 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
14295 {
14296 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
14297 symbol binds locally. */
14298 if (h->plt.refcount <= 0
14299 || (h->type != STT_GNU_IFUNC
14300 && (SYMBOL_CALLS_LOCAL (info, h)
14301 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
14302 && h->root.type == bfd_link_hash_undefweak))))
14303 {
14304 /* This case can occur if we saw a PLT32 reloc in an input
14305 file, but the symbol was never referred to by a dynamic
14306 object, or if all references were garbage collected. In
14307 such a case, we don't actually need to build a procedure
14308 linkage table, and we can just do a PC24 reloc instead. */
14309 h->plt.offset = (bfd_vma) -1;
14310 eh->plt.thumb_refcount = 0;
14311 eh->plt.maybe_thumb_refcount = 0;
14312 eh->plt.noncall_refcount = 0;
14313 h->needs_plt = 0;
14314 }
14315
14316 return TRUE;
14317 }
14318 else
14319 {
14320 /* It's possible that we incorrectly decided a .plt reloc was
14321 needed for an R_ARM_PC24 or similar reloc to a non-function sym
14322 in check_relocs. We can't decide accurately between function
14323 and non-function syms in check-relocs; Objects loaded later in
14324 the link may change h->type. So fix it now. */
14325 h->plt.offset = (bfd_vma) -1;
14326 eh->plt.thumb_refcount = 0;
14327 eh->plt.maybe_thumb_refcount = 0;
14328 eh->plt.noncall_refcount = 0;
14329 }
14330
14331 /* If this is a weak symbol, and there is a real definition, the
14332 processor independent code will have arranged for us to see the
14333 real definition first, and we can just use the same value. */
14334 if (h->u.weakdef != NULL)
14335 {
14336 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
14337 || h->u.weakdef->root.type == bfd_link_hash_defweak);
14338 h->root.u.def.section = h->u.weakdef->root.u.def.section;
14339 h->root.u.def.value = h->u.weakdef->root.u.def.value;
14340 return TRUE;
14341 }
14342
14343 /* If there are no non-GOT references, we do not need a copy
14344 relocation. */
14345 if (!h->non_got_ref)
14346 return TRUE;
14347
14348 /* This is a reference to a symbol defined by a dynamic object which
14349 is not a function. */
14350
14351 /* If we are creating a shared library, we must presume that the
14352 only references to the symbol are via the global offset table.
14353 For such cases we need not do anything here; the relocations will
14354 be handled correctly by relocate_section. Relocatable executables
14355 can reference data in shared objects directly, so we don't need to
14356 do anything here. */
14357 if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
14358 return TRUE;
14359
14360 /* We must allocate the symbol in our .dynbss section, which will
14361 become part of the .bss section of the executable. There will be
14362 an entry for this symbol in the .dynsym section. The dynamic
14363 object will contain position independent code, so all references
14364 from the dynamic object to this symbol will go through the global
14365 offset table. The dynamic linker will use the .dynsym entry to
14366 determine the address it must put in the global offset table, so
14367 both the dynamic object and the regular object will refer to the
14368 same memory location for the variable. */
14369 s = bfd_get_linker_section (dynobj, ".dynbss");
14370 BFD_ASSERT (s != NULL);
14371
14372 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
14373 linker to copy the initial value out of the dynamic object and into
14374 the runtime process image. We need to remember the offset into the
14375 .rel(a).bss section we are going to use. */
14376 if (info->nocopyreloc == 0
14377 && (h->root.u.def.section->flags & SEC_ALLOC) != 0
14378 && h->size != 0)
14379 {
14380 asection *srel;
14381
14382 srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
14383 elf32_arm_allocate_dynrelocs (info, srel, 1);
14384 h->needs_copy = 1;
14385 }
14386
14387 return _bfd_elf_adjust_dynamic_copy (info, h, s);
14388 }
14389
/* Allocate space in .plt, .got and associated reloc sections for
   dynamic relocs.

   This is an elf_link_hash_traverse callback: H is the global symbol
   being examined, INF is really a "struct bfd_link_info *".  Returns
   FALSE (stopping the traversal) only on a hard error, e.g. failing
   to record a dynamic symbol.  */

static bfd_boolean
allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info *info;
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  /* Indirect symbols are handled via the symbol they point to.  */
  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  info = (struct bfd_link_info *) inf;
  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* STT_GNU_IFUNC symbols may need a PLT entry (in .iplt) even when no
     dynamic sections were created, e.g. in a static executable.  */
  if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
      && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1
	  && !h->forced_local)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* If the call in the PLT entry binds locally, the associated
	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
	 than the .plt section.  */
      if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
	{
	  eh->is_iplt = 1;
	  if (eh->plt.noncall_refcount == 0
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	    /* All non-call references can be resolved directly.
	       This means that they can (and in some cases, must)
	       resolve directly to the run-time target, rather than
	       to the PLT.  That in turns means that any .got entry
	       would be equal to the .igot.plt entry, so there's
	       no point having both.  */
	    h->got.refcount = 0;
	}

      if (bfd_link_pic (info)
	  || eh->is_iplt
	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (! bfd_link_pic (info)
	      && !h->def_regular)
	    {
	      h->root.u.def.section = htab->root.splt;
	      h->root.u.def.value = h->plt.offset;

	      /* Make sure the function is not marked as Thumb, in case
		 it is the target of an ABS32 relocation, which will
		 point to the PLT entry.  */
	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
	    }

	  /* VxWorks executables have a second set of relocations for
	     each PLT entry.  They go in a separate relocation section,
	     which is processed by the kernel loader.  */
	  if (htab->vxworks_p && !bfd_link_pic (info))
	    {
	      /* There is a relocation for the initial PLT entry:
		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
	      if (h->plt.offset == htab->plt_header_size)
		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);

	      /* There are two extra relocations for each subsequent
		 PLT entry: an R_ARM_32 relocation for the GOT entry,
		 and an R_ARM_32 relocation for the PLT entry.  */
	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
	    }
	}
      else
	{
	  /* PLT entry not needed after all; forget it.  */
	  h->plt.offset = (bfd_vma) -1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      h->plt.offset = (bfd_vma) -1;
      h->needs_plt = 0;
    }

  eh = (struct elf32_arm_link_hash_entry *) h;
  /* -1 marks "no TLS descriptor GOT slot allocated".  */
  eh->tlsdesc_got = (bfd_vma) -1;

  if (h->got.refcount > 0)
    {
      asection *s;
      bfd_boolean dyn;
      int tls_type = elf32_arm_hash_entry (h)->tls_type;
      int indx;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1
	  && !h->forced_local)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* Symbian targets do not use the GOT sizing below.  */
      if (!htab->symbian_p)
	{
	  s = htab->root.sgot;
	  h->got.offset = s->size;

	  if (tls_type == GOT_UNKNOWN)
	    abort ();

	  if (tls_type == GOT_NORMAL)
	    /* Non-TLS symbols need one GOT slot.  */
	    s->size += 4;
	  else
	    {
	      if (tls_type & GOT_TLS_GDESC)
		{
		  /* R_ARM_TLS_DESC needs 2 GOT slots.  */
		  eh->tlsdesc_got
		    = (htab->root.sgotplt->size
		       - elf32_arm_compute_jump_table_size (htab));
		  htab->root.sgotplt->size += 8;
		  /* -2 marks "GOT offset lives in tlsdesc_got instead".  */
		  h->got.offset = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}

	      if (tls_type & GOT_TLS_GD)
		{
		  /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots.  If
		     the symbol is both GD and GDESC, got.offset may
		     have been overwritten.  */
		  h->got.offset = s->size;
		  s->size += 8;
		}

	      if (tls_type & GOT_TLS_IE)
		/* R_ARM_TLS_IE32 needs one GOT slot.  */
		s->size += 4;
	    }

	  dyn = htab->root.dynamic_sections_created;

	  /* INDX stays 0 for symbols that resolve locally; then dynamic
	     relocations use the symbol-less forms below.  */
	  indx = 0;
	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
					       bfd_link_pic (info),
					       h)
	      && (!bfd_link_pic (info)
		  || !SYMBOL_REFERENCES_LOCAL (info, h)))
	    indx = h->dynindx;

	  if (tls_type != GOT_NORMAL
	      && (bfd_link_pic (info) || indx != 0)
	      && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		  || h->root.type != bfd_link_hash_undefweak))
	    {
	      if (tls_type & GOT_TLS_IE)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GD)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GDESC)
		{
		  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
		  /* GDESC needs a trampoline to jump to.  */
		  htab->tls_trampoline = -1;
		}

	      /* Only GD needs it.  GDESC just emits one relocation per
		 2 entries.  */
	      if ((tls_type & GOT_TLS_GD) && indx != 0)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
	    {
	      if (htab->root.dynamic_sections_created)
		/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (h->type == STT_GNU_IFUNC
		   && eh->plt.noncall_refcount == 0)
	    /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
	       they all resolve dynamically instead.  Reserve room for the
	       GOT entry's R_ARM_IRELATIVE relocation.  */
	    elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
	  else if (bfd_link_pic (info)
		   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		       || h->root.type != bfd_link_hash_undefweak))
	    /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	}
    }
  else
    h->got.offset = (bfd_vma) -1;

  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (!htab->use_blx && h->dynindx != -1
      && h->def_regular
      && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
      && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
    {
      struct elf_link_hash_entry * th;
      struct bfd_link_hash_entry * bh;
      struct elf_link_hash_entry * myh;
      char name[1024];
      asection *s;
      bh = NULL;
      /* Create a new symbol to register the real location of the function.  */
      s = h->root.u.def.section;
      sprintf (name, "__real_%s", h->root.root.string);
      _bfd_generic_link_add_one_symbol (info, s->owner,
					name, BSF_GLOBAL, s,
					h->root.u.def.value,
					NULL, TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
      myh->forced_local = 1;
      ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
      eh->export_glue = myh;
      th = record_arm_to_thumb_glue (info, h);
      /* Point the symbol at the stub.  */
      h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
      h->root.u.def.section = th->root.u.def.section;
      /* Clear the Thumb bit in the stub's address.  */
      h->root.u.def.value = th->root.u.def.value & ~1;
    }

  if (eh->dyn_relocs == NULL)
    return TRUE;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info) || htab->root.is_relocatable_executable)
    {
      /* Relocs that use pc_count are PC-relative forms, which will appear
	 on something like ".long foo - ." or "movw REG, foo - .".  We want
	 calls to protected symbols to resolve directly to the function
	 rather than going via the plt.  If people want function pointer
	 comparisons to work as expected then they should avoid writing
	 assembly like ".long foo - .".  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  /* Unlink entries whose count drops to zero.  */
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      if (htab->vxworks_p)
	{
	  struct elf_dyn_relocs **pp;

	  /* Relocations in vxworks .tls_vars sections are handled
	     specially by the loader; drop them here.  */
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (eh->dyn_relocs != NULL
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
	    eh->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (h->dynindx == -1
		   && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }
	}

      else if (htab->root.is_relocatable_executable && h->dynindx == -1
	       && h->root.type == bfd_link_hash_new)
	{
	  /* Output absolute symbols so that we can create relocations
	     against them.  For normal symbols we output a relocation
	     against the section that contains them.  */
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

    }
  else
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1
	      && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      eh->dyn_relocs = NULL;

    keep: ;
    }

  /* Finally, allocate space.  */
  for (p = eh->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc = elf_section_data (p->sec)->sreloc;
      if (h->type == STT_GNU_IFUNC
	  && eh->plt.noncall_refcount == 0
	  && SYMBOL_REFERENCES_LOCAL (info, h))
	elf32_arm_allocate_irelocs (info, sreloc, p->count);
      else
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
    }

  return TRUE;
}
14760
14761 /* Find any dynamic relocs that apply to read-only sections. */
14762
14763 static bfd_boolean
14764 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
14765 {
14766 struct elf32_arm_link_hash_entry * eh;
14767 struct elf_dyn_relocs * p;
14768
14769 eh = (struct elf32_arm_link_hash_entry *) h;
14770 for (p = eh->dyn_relocs; p != NULL; p = p->next)
14771 {
14772 asection *s = p->sec;
14773
14774 if (s != NULL && (s->flags & SEC_READONLY) != 0)
14775 {
14776 struct bfd_link_info *info = (struct bfd_link_info *) inf;
14777
14778 info->flags |= DF_TEXTREL;
14779
14780 /* Not an error, just cut short the traversal. */
14781 return FALSE;
14782 }
14783 }
14784 return TRUE;
14785 }
14786
14787 void
14788 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
14789 int byteswap_code)
14790 {
14791 struct elf32_arm_link_hash_table *globals;
14792
14793 globals = elf32_arm_hash_table (info);
14794 if (globals == NULL)
14795 return;
14796
14797 globals->byteswap_code = byteswap_code;
14798 }
14799
/* Set the sizes of the dynamic sections.

   Called by the generic ELF linker once all input sections have been
   mapped.  Walks every input bfd to size the .got/.got.plt slots and
   dynamic relocation sections for local symbols, then traverses the
   global hash table to do the same for global symbols, performs the
   ARM interworking glue scans, strips empty linker-created sections,
   and finally adds the required .dynamic entries.  Returns FALSE on
   error.  */

static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;
  bfd_boolean relocs;
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = htab->vxworks_p;
      unsigned int symndx;

      if (! is_arm_elf (ibfd))
	continue;

      /* First, account for the dynamic relocs recorded per input
	 section during check_relocs.  */
      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      /* The per-bfd arrays below are indexed in parallel by local
	 symbol number.  */
      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx)
	{
	  /* -1 marks "no TLS descriptor GOT slot".  */
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;
	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  /* -2 marks "offset recorded in local_tlsdesc_gotent".  */
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
		      || *local_tls_type & GOT_TLS_GD)
		    elf32_arm_allocate_dynrelocs (info, srel, 1);

		  if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      /* GDESC needs a lazy-resolution trampoline.  */
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32 relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	/* xgettext:c-format */
	_bfd_error_handler (_("Errors encountered processing file %s"),
			    ibfd->filename);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  if (htab->tls_trampoline)
    {
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_get_section_name (dynobj, s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->sdynbss)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;

	  if (htab->dt_tlsdesc_plt &&
		(!add_dynamic_entry (DT_TLSDESC_PLT,0)
		 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
	    return FALSE;
	}

      if (relocs)
	{
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
				info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}
15196
15197 /* Size sections even though they're not dynamic. We use it to setup
15198 _TLS_MODULE_BASE_, if needed. */
15199
15200 static bfd_boolean
15201 elf32_arm_always_size_sections (bfd *output_bfd,
15202 struct bfd_link_info *info)
15203 {
15204 asection *tls_sec;
15205
15206 if (bfd_link_relocatable (info))
15207 return TRUE;
15208
15209 tls_sec = elf_hash_table (info)->tls_sec;
15210
15211 if (tls_sec)
15212 {
15213 struct elf_link_hash_entry *tlsbase;
15214
15215 tlsbase = elf_link_hash_lookup
15216 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
15217
15218 if (tlsbase)
15219 {
15220 struct bfd_link_hash_entry *bh = NULL;
15221 const struct elf_backend_data *bed
15222 = get_elf_backend_data (output_bfd);
15223
15224 if (!(_bfd_generic_link_add_one_symbol
15225 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
15226 tls_sec, 0, NULL, FALSE,
15227 bed->collect, &bh)))
15228 return FALSE;
15229
15230 tlsbase->type = STT_TLS;
15231 tlsbase = (struct elf_link_hash_entry *)bh;
15232 tlsbase->def_regular = 1;
15233 tlsbase->other = STV_HIDDEN;
15234 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
15235 }
15236 }
15237 return TRUE;
15238 }
15239
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.

   Called by the generic ELF linker for each dynamic symbol H, with SYM
   being the ELF symbol about to be written to .dynsym.  Populates H's
   PLT entry (if any), emits an R_ARM_COPY reloc for copy-relocated
   symbols, and forces _DYNAMIC/_GLOBAL_OFFSET_TABLE_ to SHN_ABS where
   appropriate.  Returns FALSE on error.  */

static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  if (h->plt.offset != (bfd_vma) -1)
    {
      if (!eh->is_iplt)
	{
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      s = htab->srelbss;
      BFD_ASSERT (s != NULL);

      /* The reloc targets the symbol's location in .dynbss.  */
      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
     to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
15328
15329 static void
15330 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15331 void *contents,
15332 const unsigned long *template, unsigned count)
15333 {
15334 unsigned ix;
15335
15336 for (ix = 0; ix != count; ix++)
15337 {
15338 unsigned long insn = template[ix];
15339
15340 /* Emit mov pc,rx if bx is not permitted. */
15341 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
15342 insn = (insn & 0xf000000f) | 0x01a0f000;
15343 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
15344 }
15345 }
15346
15347 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
15348 other variants, NaCl needs this entry in a static executable's
15349 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
15350 zero. For .iplt really only the last bundle is useful, and .iplt
15351 could have a shorter first entry, with each individual PLT entry's
15352 relative branch calculated differently so it targets the last
15353 bundle instead of the instruction before it (labelled .Lplt_tail
15354 above). But it's simpler to keep the size and layout of PLT0
15355 consistent with the dynamic case, at the cost of some dead code at
15356 the start of .iplt and the one dead store to the stack at the start
15357 of .Lplt_tail. */
15358 static void
15359 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15360 asection *plt, bfd_vma got_displacement)
15361 {
15362 unsigned int i;
15363
15364 put_arm_insn (htab, output_bfd,
15365 elf32_arm_nacl_plt0_entry[0]
15366 | arm_movw_immediate (got_displacement),
15367 plt->contents + 0);
15368 put_arm_insn (htab, output_bfd,
15369 elf32_arm_nacl_plt0_entry[1]
15370 | arm_movt_immediate (got_displacement),
15371 plt->contents + 4);
15372
15373 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
15374 put_arm_insn (htab, output_bfd,
15375 elf32_arm_nacl_plt0_entry[i],
15376 plt->contents + (i * 4));
15377 }
15378
/* Finish up the dynamic sections: fix up the .dynamic entries whose
   values depend on final section layout, write the PLT header and TLS
   trampolines, and initialize the first words of the GOT.  */

static bfd_boolean
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return FALSE;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (htab->symbian_p || sgot != NULL);

      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      /* Walk every entry in .dynamic, filling in values that could not
	 be known until sections were laid out.  */
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	      unsigned int type;

	    default:
	      if (htab->vxworks_p
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* These tags need rewriting only for Symbian (BPABI),
	       where they must hold file offsets; see get_vma_if_bpabi
	       below.  */
	    case DT_HASH:
	      name = ".hash";
	      goto get_vma_if_bpabi;
	    case DT_STRTAB:
	      name = ".dynstr";
	      goto get_vma_if_bpabi;
	    case DT_SYMTAB:
	      name = ".dynsym";
	      goto get_vma_if_bpabi;
	    case DT_VERSYM:
	      name = ".gnu.version";
	      goto get_vma_if_bpabi;
	    case DT_VERDEF:
	      name = ".gnu.version_d";
	      goto get_vma_if_bpabi;
	    case DT_VERNEED:
	      name = ".gnu.version_r";
	      goto get_vma_if_bpabi;

	    case DT_PLTGOT:
	      name = htab->symbian_p ? ".got" : ".got.plt";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_linker_section (dynobj, name);
	      if (s == NULL)
		{
		  (*_bfd_error_handler)
		    (_("could not find section %s"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return FALSE;
		}
	      if (!htab->symbian_p)
		dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      else
		/* In the BPABI, tags in the PT_DYNAMIC section point
		   at the file offset, not the memory address, for the
		   convenience of the post linker.  */
		dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    get_vma_if_bpabi:
	      if (htab->symbian_p)
		goto get_vma;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	      if (!htab->symbian_p)
		{
		  /* My reading of the SVR4 ABI indicates that the
		     procedure linkage table relocs (DT_JMPREL) should be
		     included in the overall relocs (DT_REL).  This is
		     what Solaris does.  However, UnixWare can not handle
		     that case.  Therefore, we override the DT_RELSZ entry
		     here to make it not include the JMPREL relocs.  Since
		     the linker script arranges for .rel(a).plt to follow all
		     other relocation sections, we don't have to worry
		     about changing the DT_REL entry.  */
		  s = htab->root.srelplt;
		  if (s != NULL)
		    dyn.d_un.d_val -= s->size;
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		  break;
		}
	      /* Fall through.  */

	    case DT_REL:
	    case DT_RELA:
	      /* In the BPABI, the DT_REL tag must point at the file
		 offset, not the VMA, of the first relocation
		 section.  So, we use code similar to that in
		 elflink.c, but do not check for SHF_ALLOC on the
		 relcoation section, since relocations sections are
		 never allocated under the BPABI.  The comments above
		 about Unixware notwithstanding, we include all of the
		 relocations here.  */
	      if (htab->symbian_p)
		{
		  unsigned int i;
		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
			  ? SHT_REL : SHT_RELA);
		  dyn.d_un.d_val = 0;
		  for (i = 1; i < elf_numsections (output_bfd); i++)
		    {
		      Elf_Internal_Shdr *hdr
			= elf_elfsections (output_bfd)[i];
		      if (hdr->sh_type == type)
			{
			  if (dyn.d_tag == DT_RELSZ
			      || dyn.d_tag == DT_RELASZ)
			    dyn.d_un.d_val += hdr->sh_size;
			  /* d_un.d_val starts at 0, so the unsigned
			     wrap of "d_val - 1" makes the first
			     matching section always win; thereafter
			     we keep the smallest file offset.  */
			  else if ((ufile_ptr) hdr->sh_offset
				   <= dyn.d_un.d_val - 1)
			    dyn.d_un.d_val = hdr->sh_offset;
			}
		    }
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		}
	      break;

	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* Set the bottom bit of DT_INIT/FINI if the
	       corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     FALSE, FALSE, TRUE);
		  if (eh != NULL
		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
		      == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->vxworks_p)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->nacl_p)
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      if (htab->dt_tlsdesc_plt)
	{
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  /* Emit the lazy TLS descriptor resolution trampoline, then
	     patch the two relative offsets to the GOT into the data
	     words following its six instructions.  */
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->dt_tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->dt_tlsdesc_got
		      - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->dt_tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->vxworks_p
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry carries a pair of unloaded relocs: the
	     first is rewritten against _GLOBAL_OFFSET_TABLE_ (hgot),
	     the second against the PLT section symbol (hplt).  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* GOT[0] holds the address of .dynamic (or 0 when there is
	     no dynamic section); GOT[1] and GOT[2] are zeroed here.  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  return TRUE;
}
15752
15753 static void
15754 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
15755 {
15756 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
15757 struct elf32_arm_link_hash_table *globals;
15758 struct elf_segment_map *m;
15759
15760 i_ehdrp = elf_elfheader (abfd);
15761
15762 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
15763 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
15764 else
15765 _bfd_elf_post_process_headers (abfd, link_info);
15766 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
15767
15768 if (link_info)
15769 {
15770 globals = elf32_arm_hash_table (link_info);
15771 if (globals != NULL && globals->byteswap_code)
15772 i_ehdrp->e_flags |= EF_ARM_BE8;
15773 }
15774
15775 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
15776 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
15777 {
15778 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
15779 if (abi == AEABI_VFP_args_vfp)
15780 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
15781 else
15782 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
15783 }
15784
15785 /* Scan segment to set p_flags attribute if it contains only sections with
15786 SHF_ARM_NOREAD flag. */
15787 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
15788 {
15789 unsigned int j;
15790
15791 if (m->count == 0)
15792 continue;
15793 for (j = 0; j < m->count; j++)
15794 {
15795 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_NOREAD))
15796 break;
15797 }
15798 if (j == m->count)
15799 {
15800 m->p_flags = PF_X;
15801 m->p_flags_valid = 1;
15802 }
15803 }
15804 }
15805
15806 static enum elf_reloc_type_class
15807 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
15808 const asection *rel_sec ATTRIBUTE_UNUSED,
15809 const Elf_Internal_Rela *rela)
15810 {
15811 switch ((int) ELF32_R_TYPE (rela->r_info))
15812 {
15813 case R_ARM_RELATIVE:
15814 return reloc_class_relative;
15815 case R_ARM_JUMP_SLOT:
15816 return reloc_class_plt;
15817 case R_ARM_COPY:
15818 return reloc_class_copy;
15819 case R_ARM_IRELATIVE:
15820 return reloc_class_ifunc;
15821 default:
15822 return reloc_class_normal;
15823 }
15824 }
15825
/* Final write-processing hook: refresh the contents of the ARM note
   section (ARM_NOTE_SECTION) in ABFD via bfd_arm_update_notes.  */
static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
15831
15832 /* Return TRUE if this is an unwinding table entry. */
15833
15834 static bfd_boolean
15835 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
15836 {
15837 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
15838 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
15839 }
15840
15841
15842 /* Set the type and flags for an ARM section. We do this by
15843 the section name, which is a hack, but ought to work. */
15844
15845 static bfd_boolean
15846 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
15847 {
15848 const char * name;
15849
15850 name = bfd_get_section_name (abfd, sec);
15851
15852 if (is_arm_elf_unwind_section_name (abfd, name))
15853 {
15854 hdr->sh_type = SHT_ARM_EXIDX;
15855 hdr->sh_flags |= SHF_LINK_ORDER;
15856 }
15857
15858 if (sec->flags & SEC_ELF_NOREAD)
15859 hdr->sh_flags |= SHF_ARM_NOREAD;
15860
15861 return TRUE;
15862 }
15863
15864 /* Handle an ARM specific section when reading an object file. This is
15865 called when bfd_section_from_shdr finds a section with an unknown
15866 type. */
15867
15868 static bfd_boolean
15869 elf32_arm_section_from_shdr (bfd *abfd,
15870 Elf_Internal_Shdr * hdr,
15871 const char *name,
15872 int shindex)
15873 {
15874 /* There ought to be a place to keep ELF backend specific flags, but
15875 at the moment there isn't one. We just keep track of the
15876 sections by their name, instead. Fortunately, the ABI gives
15877 names for all the ARM specific sections, so we will probably get
15878 away with this. */
15879 switch (hdr->sh_type)
15880 {
15881 case SHT_ARM_EXIDX:
15882 case SHT_ARM_PREEMPTMAP:
15883 case SHT_ARM_ATTRIBUTES:
15884 break;
15885
15886 default:
15887 return FALSE;
15888 }
15889
15890 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
15891 return FALSE;
15892
15893 return TRUE;
15894 }
15895
15896 static _arm_elf_section_data *
15897 get_arm_elf_section_data (asection * sec)
15898 {
15899 if (sec && sec->owner && is_arm_elf (sec->owner))
15900 return elf32_arm_section_data (sec);
15901 else
15902 return NULL;
15903 }
15904
/* Context threaded through the mapping/stub symbol output routines
   below.  */
typedef struct
{
  /* Opaque cookie passed straight through to FUNC.  */
  void *flaginfo;
  /* The link in progress.  */
  struct bfd_link_info *info;
  /* Section the symbols currently being emitted belong to.  */
  asection *sec;
  /* Output section index corresponding to SEC.  */
  int sec_shndx;
  /* Callback that actually emits each symbol.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
15914
/* The three kinds of ARM mapping symbol, in the same order as the
   names table in elf32_arm_output_map_sym: "$a" (ARM code),
   "$t" (Thumb code) and "$d" (data).  */
enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};
15921
15922
15923 /* Output a single mapping symbol. */
15924
15925 static bfd_boolean
15926 elf32_arm_output_map_sym (output_arch_syminfo *osi,
15927 enum map_symbol_type type,
15928 bfd_vma offset)
15929 {
15930 static const char *names[3] = {"$a", "$t", "$d"};
15931 Elf_Internal_Sym sym;
15932
15933 sym.st_value = osi->sec->output_section->vma
15934 + osi->sec->output_offset
15935 + offset;
15936 sym.st_size = 0;
15937 sym.st_other = 0;
15938 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
15939 sym.st_shndx = osi->sec_shndx;
15940 sym.st_target_internal = 0;
15941 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
15942 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
15943 }
15944
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* No PLT entry was allocated for this symbol: nothing to do.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  /* .iplt has no header, so entries start at offset 0 there.  */
  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Clear the bottom bit of the recorded offset — presumably a flag
     bit set when the entry was allocated; confirm against the
     PLT-populating code.  */
  addr = root_plt->offset & -2;
  /* The per-target offsets below mirror each variant's PLT entry
     layout (code words followed by literal-pool data words) —
     NOTE(review): verify against the corresponding plt entry
     templates if the layouts change.  */
  if (htab->symbian_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
16035
16036 /* Output mapping symbols for PLT entries associated with H. */
16037
16038 static bfd_boolean
16039 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
16040 {
16041 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
16042 struct elf32_arm_link_hash_entry *eh;
16043
16044 if (h->root.type == bfd_link_hash_indirect)
16045 return TRUE;
16046
16047 if (h->root.type == bfd_link_hash_warning)
16048 /* When warning symbols are created, they **replace** the "real"
16049 entry in the hash table, thus we never get to see the real
16050 symbol in a hash traversal. So look at it now. */
16051 h = (struct elf_link_hash_entry *) h->root.u.i.link;
16052
16053 eh = (struct elf32_arm_link_hash_entry *) h;
16054 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
16055 &h->plt, &eh->plt);
16056 }
16057
16058 /* Bind a veneered symbol to its veneer identified by its hash entry
16059 STUB_ENTRY. The veneered location thus loose its symbol. */
16060
16061 static void
16062 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
16063 {
16064 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
16065
16066 BFD_ASSERT (hash);
16067 hash->root.root.u.def.section = stub_entry->stub_sec;
16068 hash->root.root.u.def.value = stub_entry->stub_offset;
16069 hash->root.size = stub_entry->stub_size;
16070 }
16071
16072 /* Output a single local symbol for a generated stub. */
16073
16074 static bfd_boolean
16075 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
16076 bfd_vma offset, bfd_vma size)
16077 {
16078 Elf_Internal_Sym sym;
16079
16080 sym.st_value = osi->sec->output_section->vma
16081 + osi->sec->output_offset
16082 + offset;
16083 sym.st_size = size;
16084 sym.st_other = 0;
16085 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16086 sym.st_shndx = osi->sec_shndx;
16087 sym.st_target_internal = 0;
16088 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
16089 }
16090
16091 static bfd_boolean
16092 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
16093 void * in_arg)
16094 {
16095 struct elf32_arm_stub_hash_entry *stub_entry;
16096 asection *stub_sec;
16097 bfd_vma addr;
16098 char *stub_name;
16099 output_arch_syminfo *osi;
16100 const insn_sequence *template_sequence;
16101 enum stub_insn_type prev_type;
16102 int size;
16103 int i;
16104 enum map_symbol_type sym_type;
16105
16106 /* Massage our args to the form they really have. */
16107 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
16108 osi = (output_arch_syminfo *) in_arg;
16109
16110 stub_sec = stub_entry->stub_sec;
16111
16112 /* Ensure this stub is attached to the current section being
16113 processed. */
16114 if (stub_sec != osi->sec)
16115 return TRUE;
16116
16117 addr = (bfd_vma) stub_entry->stub_offset;
16118 template_sequence = stub_entry->stub_template;
16119
16120 if (arm_stub_sym_claimed (stub_entry->stub_type))
16121 arm_stub_claim_sym (stub_entry);
16122 else
16123 {
16124 stub_name = stub_entry->output_name;
16125 switch (template_sequence[0].type)
16126 {
16127 case ARM_TYPE:
16128 if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
16129 stub_entry->stub_size))
16130 return FALSE;
16131 break;
16132 case THUMB16_TYPE:
16133 case THUMB32_TYPE:
16134 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
16135 stub_entry->stub_size))
16136 return FALSE;
16137 break;
16138 default:
16139 BFD_FAIL ();
16140 return 0;
16141 }
16142 }
16143
16144 prev_type = DATA_TYPE;
16145 size = 0;
16146 for (i = 0; i < stub_entry->stub_template_size; i++)
16147 {
16148 switch (template_sequence[i].type)
16149 {
16150 case ARM_TYPE:
16151 sym_type = ARM_MAP_ARM;
16152 break;
16153
16154 case THUMB16_TYPE:
16155 case THUMB32_TYPE:
16156 sym_type = ARM_MAP_THUMB;
16157 break;
16158
16159 case DATA_TYPE:
16160 sym_type = ARM_MAP_DATA;
16161 break;
16162
16163 default:
16164 BFD_FAIL ();
16165 return FALSE;
16166 }
16167
16168 if (template_sequence[i].type != prev_type)
16169 {
16170 prev_type = template_sequence[i].type;
16171 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
16172 return FALSE;
16173 }
16174
16175 switch (template_sequence[i].type)
16176 {
16177 case ARM_TYPE:
16178 case THUMB32_TYPE:
16179 size += 4;
16180 break;
16181
16182 case THUMB16_TYPE:
16183 size += 2;
16184 break;
16185
16186 case DATA_TYPE:
16187 size += 4;
16188 break;
16189
16190 default:
16191 BFD_FAIL ();
16192 return FALSE;
16193 }
16194 }
16195
16196 return TRUE;
16197 }
16198
16199 /* Output mapping symbols for linker generated sections,
16200 and for those data-only sections that do not have a
16201 $d. */
16202
16203 static bfd_boolean
16204 elf32_arm_output_arch_local_syms (bfd *output_bfd,
16205 struct bfd_link_info *info,
16206 void *flaginfo,
16207 int (*func) (void *, const char *,
16208 Elf_Internal_Sym *,
16209 asection *,
16210 struct elf_link_hash_entry *))
16211 {
16212 output_arch_syminfo osi;
16213 struct elf32_arm_link_hash_table *htab;
16214 bfd_vma offset;
16215 bfd_size_type size;
16216 bfd *input_bfd;
16217
16218 htab = elf32_arm_hash_table (info);
16219 if (htab == NULL)
16220 return FALSE;
16221
16222 check_use_blx (htab);
16223
16224 osi.flaginfo = flaginfo;
16225 osi.info = info;
16226 osi.func = func;
16227
16228 /* Add a $d mapping symbol to data-only sections that
16229 don't have any mapping symbol. This may result in (harmless) redundant
16230 mapping symbols. */
16231 for (input_bfd = info->input_bfds;
16232 input_bfd != NULL;
16233 input_bfd = input_bfd->link.next)
16234 {
16235 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
16236 for (osi.sec = input_bfd->sections;
16237 osi.sec != NULL;
16238 osi.sec = osi.sec->next)
16239 {
16240 if (osi.sec->output_section != NULL
16241 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
16242 != 0)
16243 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
16244 == SEC_HAS_CONTENTS
16245 && get_arm_elf_section_data (osi.sec) != NULL
16246 && get_arm_elf_section_data (osi.sec)->mapcount == 0
16247 && osi.sec->size > 0
16248 && (osi.sec->flags & SEC_EXCLUDE) == 0)
16249 {
16250 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16251 (output_bfd, osi.sec->output_section);
16252 if (osi.sec_shndx != (int)SHN_BAD)
16253 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
16254 }
16255 }
16256 }
16257
16258 /* ARM->Thumb glue. */
16259 if (htab->arm_glue_size > 0)
16260 {
16261 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16262 ARM2THUMB_GLUE_SECTION_NAME);
16263
16264 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16265 (output_bfd, osi.sec->output_section);
16266 if (bfd_link_pic (info) || htab->root.is_relocatable_executable
16267 || htab->pic_veneer)
16268 size = ARM2THUMB_PIC_GLUE_SIZE;
16269 else if (htab->use_blx)
16270 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
16271 else
16272 size = ARM2THUMB_STATIC_GLUE_SIZE;
16273
16274 for (offset = 0; offset < htab->arm_glue_size; offset += size)
16275 {
16276 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
16277 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
16278 }
16279 }
16280
16281 /* Thumb->ARM glue. */
16282 if (htab->thumb_glue_size > 0)
16283 {
16284 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16285 THUMB2ARM_GLUE_SECTION_NAME);
16286
16287 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16288 (output_bfd, osi.sec->output_section);
16289 size = THUMB2ARM_GLUE_SIZE;
16290
16291 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
16292 {
16293 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
16294 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
16295 }
16296 }
16297
16298 /* ARMv4 BX veneers. */
16299 if (htab->bx_glue_size > 0)
16300 {
16301 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16302 ARM_BX_GLUE_SECTION_NAME);
16303
16304 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16305 (output_bfd, osi.sec->output_section);
16306
16307 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
16308 }
16309
16310 /* Long calls stubs. */
16311 if (htab->stub_bfd && htab->stub_bfd->sections)
16312 {
16313 asection* stub_sec;
16314
16315 for (stub_sec = htab->stub_bfd->sections;
16316 stub_sec != NULL;
16317 stub_sec = stub_sec->next)
16318 {
16319 /* Ignore non-stub sections. */
16320 if (!strstr (stub_sec->name, STUB_SUFFIX))
16321 continue;
16322
16323 osi.sec = stub_sec;
16324
16325 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16326 (output_bfd, osi.sec->output_section);
16327
16328 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
16329 }
16330 }
16331
16332 /* Finally, output mapping symbols for the PLT. */
16333 if (htab->root.splt && htab->root.splt->size > 0)
16334 {
16335 osi.sec = htab->root.splt;
16336 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
16337 (output_bfd, osi.sec->output_section));
16338
16339 /* Output mapping symbols for the plt header. SymbianOS does not have a
16340 plt header. */
16341 if (htab->vxworks_p)
16342 {
16343 /* VxWorks shared libraries have no PLT header. */
16344 if (!bfd_link_pic (info))
16345 {
16346 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16347 return FALSE;
16348 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
16349 return FALSE;
16350 }
16351 }
16352 else if (htab->nacl_p)
16353 {
16354 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16355 return FALSE;
16356 }
16357 else if (using_thumb_only (htab))
16358 {
16359 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
16360 return FALSE;
16361 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
16362 return FALSE;
16363 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
16364 return FALSE;
16365 }
16366 else if (!htab->symbian_p)
16367 {
16368 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16369 return FALSE;
16370 #ifndef FOUR_WORD_PLT
16371 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
16372 return FALSE;
16373 #endif
16374 }
16375 }
16376 if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
16377 {
16378 /* NaCl uses a special first entry in .iplt too. */
16379 osi.sec = htab->root.iplt;
16380 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
16381 (output_bfd, osi.sec->output_section));
16382 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16383 return FALSE;
16384 }
16385 if ((htab->root.splt && htab->root.splt->size > 0)
16386 || (htab->root.iplt && htab->root.iplt->size > 0))
16387 {
16388 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
16389 for (input_bfd = info->input_bfds;
16390 input_bfd != NULL;
16391 input_bfd = input_bfd->link.next)
16392 {
16393 struct arm_local_iplt_info **local_iplt;
16394 unsigned int i, num_syms;
16395
16396 local_iplt = elf32_arm_local_iplt (input_bfd);
16397 if (local_iplt != NULL)
16398 {
16399 num_syms = elf_symtab_hdr (input_bfd).sh_info;
16400 for (i = 0; i < num_syms; i++)
16401 if (local_iplt[i] != NULL
16402 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
16403 &local_iplt[i]->root,
16404 &local_iplt[i]->arm))
16405 return FALSE;
16406 }
16407 }
16408 }
16409 if (htab->dt_tlsdesc_plt != 0)
16410 {
16411 /* Mapping symbols for the lazy tls trampoline. */
16412 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
16413 return FALSE;
16414
16415 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
16416 htab->dt_tlsdesc_plt + 24))
16417 return FALSE;
16418 }
16419 if (htab->tls_trampoline != 0)
16420 {
16421 /* Mapping symbols for the tls trampoline. */
16422 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
16423 return FALSE;
16424 #ifdef FOUR_WORD_PLT
16425 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
16426 htab->tls_trampoline + 12))
16427 return FALSE;
16428 #endif
16429 }
16430
16431 return TRUE;
16432 }
16433
16434 /* Allocate target specific section data. */
16435
16436 static bfd_boolean
16437 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
16438 {
16439 if (!sec->used_by_bfd)
16440 {
16441 _arm_elf_section_data *sdata;
16442 bfd_size_type amt = sizeof (*sdata);
16443
16444 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
16445 if (sdata == NULL)
16446 return FALSE;
16447 sec->used_by_bfd = sdata;
16448 }
16449
16450 return _bfd_elf_new_section_hook (abfd, sec);
16451 }
16452
16453
16454 /* Used to order a list of mapping symbols by address. */
16455
16456 static int
16457 elf32_arm_compare_mapping (const void * a, const void * b)
16458 {
16459 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
16460 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
16461
16462 if (amap->vma > bmap->vma)
16463 return 1;
16464 else if (amap->vma < bmap->vma)
16465 return -1;
16466 else if (amap->type > bmap->type)
16467 /* Ensure results do not depend on the host qsort for objects with
16468 multiple mapping symbols at the same address by sorting on type
16469 after vma. */
16470 return 1;
16471 else if (amap->type < bmap->type)
16472 return -1;
16473 else
16474 return 0;
16475 }
16476
16477 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
16478
16479 static unsigned long
16480 offset_prel31 (unsigned long addr, bfd_vma offset)
16481 {
16482 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
16483 }
16484
16485 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
16486 relocations. */
16487
16488 static void
16489 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
16490 {
16491 unsigned long first_word = bfd_get_32 (output_bfd, from);
16492 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
16493
16494 /* High bit of first word is supposed to be zero. */
16495 if ((first_word & 0x80000000ul) == 0)
16496 first_word = offset_prel31 (first_word, offset);
16497
16498 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
16499 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
16500 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
16501 second_word = offset_prel31 (second_word, offset);
16502
16503 bfd_put_32 (output_bfd, first_word, to);
16504 bfd_put_32 (output_bfd, second_word, to + 4);
16505 }
16506
16507 /* Data for make_branch_to_a8_stub(). */
16508
struct a8_branch_to_stub_data
{
  asection *writing_section;	/* Input section whose contents are being
				   patched by make_branch_to_a8_stub.  */
  bfd_byte *contents;		/* Buffer holding that section's contents;
				   branch instructions are written here.  */
};
16514
16515
16516 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
16517 places for a particular section. */
16518
static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Only handle Cortex-A8 veneers (stub_type >= arm_stub_a8_veneer_lwm)
     whose source lies in the section currently being written.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* Thumb BLX computes its target relative to Align(PC, 4), so drop the
     low bits of the source address before taking the difference.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
			       "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Pick the Thumb-2 branch opcode skeleton matching the kind of
     instruction being veneered, then fill in the 24-bit offset.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;		/* B.W (encoding T4).  */
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;		/* BLX (immediate).  */
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;	/* BL (immediate).  */

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
				     "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;	   /* imm11.  */
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16; /* imm10.  */
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Emit the 32-bit Thumb-2 instruction as two halfwords, high halfword
     first in memory order.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
16623
16624 /* Beginning of stm32l4xx work-around. */
16625
16626 /* Functions encoding instructions necessary for the emission of the
16627 fix-stm32l4xx-629360.
16628 Encoding is extracted from the
16629 ARM (C) Architecture Reference Manual
16630 ARMv7-A and ARMv7-R edition
16631 ARM DDI 0406C.b (ID072512). */
16632
16633 static inline bfd_vma
16634 create_instruction_branch_absolute (int branch_offset)
16635 {
16636 /* A8.8.18 B (A8-334)
16637 B target_address (Encoding T4). */
16638 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
16639 /* jump offset is: S:I1:I2:imm10:imm11:0. */
16640 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
16641
16642 int s = ((branch_offset & 0x1000000) >> 24);
16643 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
16644 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
16645
16646 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
16647 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
16648
16649 bfd_vma patched_inst = 0xf0009000
16650 | s << 26 /* S. */
16651 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
16652 | j1 << 13 /* J1. */
16653 | j2 << 11 /* J2. */
16654 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
16655
16656 return patched_inst;
16657 }
16658
16659 static inline bfd_vma
16660 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
16661 {
16662 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
16663 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
16664 bfd_vma patched_inst = 0xe8900000
16665 | (/*W=*/wback << 21)
16666 | (base_reg << 16)
16667 | (reg_mask & 0x0000ffff);
16668
16669 return patched_inst;
16670 }
16671
16672 static inline bfd_vma
16673 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
16674 {
16675 /* A8.8.60 LDMDB/LDMEA (A8-402)
16676 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
16677 bfd_vma patched_inst = 0xe9100000
16678 | (/*W=*/wback << 21)
16679 | (base_reg << 16)
16680 | (reg_mask & 0x0000ffff);
16681
16682 return patched_inst;
16683 }
16684
16685 static inline bfd_vma
16686 create_instruction_mov (int target_reg, int source_reg)
16687 {
16688 /* A8.8.103 MOV (register) (A8-486)
16689 MOV Rd, Rm (Encoding T1). */
16690 bfd_vma patched_inst = 0x4600
16691 | (target_reg & 0x7)
16692 | ((target_reg & 0x8) >> 3) << 7
16693 | (source_reg << 3);
16694
16695 return patched_inst;
16696 }
16697
16698 static inline bfd_vma
16699 create_instruction_sub (int target_reg, int source_reg, int value)
16700 {
16701 /* A8.8.221 SUB (immediate) (A8-708)
16702 SUB Rd, Rn, #value (Encoding T3). */
16703 bfd_vma patched_inst = 0xf1a00000
16704 | (target_reg << 8)
16705 | (source_reg << 16)
16706 | (/*S=*/0 << 20)
16707 | ((value & 0x800) >> 11) << 26
16708 | ((value & 0x700) >> 8) << 12
16709 | (value & 0x0ff);
16710
16711 return patched_inst;
16712 }
16713
16714 static inline bfd_vma
16715 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
16716 int first_reg)
16717 {
16718 /* A8.8.332 VLDM (A8-922)
16719 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
16720 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
16721 | (/*W=*/wback << 21)
16722 | (base_reg << 16)
16723 | (num_words & 0x000000ff)
16724 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
16725 | (first_reg & 0x00000001) << 22;
16726
16727 return patched_inst;
16728 }
16729
16730 static inline bfd_vma
16731 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
16732 int first_reg)
16733 {
16734 /* A8.8.332 VLDM (A8-922)
16735 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
16736 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
16737 | (base_reg << 16)
16738 | (num_words & 0x000000ff)
16739 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
16740 | (first_reg & 0x00000001) << 22;
16741
16742 return patched_inst;
16743 }
16744
16745 static inline bfd_vma
16746 create_instruction_udf_w (int value)
16747 {
16748 /* A8.8.247 UDF (A8-758)
16749 Undefined (Encoding T2). */
16750 bfd_vma patched_inst = 0xf7f0a000
16751 | (value & 0x00000fff)
16752 | (value & 0x000f0000) << 16;
16753
16754 return patched_inst;
16755 }
16756
16757 static inline bfd_vma
16758 create_instruction_udf (int value)
16759 {
16760 /* A8.8.247 UDF (A8-758)
16761 Undefined (Encoding T1). */
16762 bfd_vma patched_inst = 0xde00
16763 | (value & 0xff);
16764
16765 return patched_inst;
16766 }
16767
16768 /* Functions writing an instruction in memory, returning the next
16769 memory position to write to. */
16770
16771 static inline bfd_byte *
16772 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
16773 bfd * output_bfd, bfd_byte *pt, insn32 insn)
16774 {
16775 put_thumb2_insn (htab, output_bfd, insn, pt);
16776 return pt + 4;
16777 }
16778
16779 static inline bfd_byte *
16780 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
16781 bfd * output_bfd, bfd_byte *pt, insn32 insn)
16782 {
16783 put_thumb_insn (htab, output_bfd, insn, pt);
16784 return pt + 2;
16785 }
16786
16787 /* Function filling up a region in memory with T1 and T2 UDFs taking
16788 care of alignment. */
16789
16790 static bfd_byte *
16791 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
16792 bfd * output_bfd,
16793 const bfd_byte * const base_stub_contents,
16794 bfd_byte * const from_stub_contents,
16795 const bfd_byte * const end_stub_contents)
16796 {
16797 bfd_byte *current_stub_contents = from_stub_contents;
16798
16799 /* Fill the remaining of the stub with deterministic contents : UDF
16800 instructions.
16801 Check if realignment is needed on modulo 4 frontier using T1, to
16802 further use T2. */
16803 if ((current_stub_contents < end_stub_contents)
16804 && !((current_stub_contents - base_stub_contents) % 2)
16805 && ((current_stub_contents - base_stub_contents) % 4))
16806 current_stub_contents =
16807 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
16808 create_instruction_udf (0));
16809
16810 for (; current_stub_contents < end_stub_contents;)
16811 current_stub_contents =
16812 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16813 create_instruction_udf_w (0));
16814
16815 return current_stub_contents;
16816 }
16817
16818 /* Functions writing the stream of instructions equivalent to the
16819 derived sequence for ldmia, ldmdb, vldm respectively. */
16820
/* Emit into BASE_STUB_CONTENTS a veneer replacing the wide Thumb-2
   LDMIA INITIAL_INSN found at INITIAL_INSN_ADDR.  Loads of more than 8
   registers are split into two smaller LDMs (working around the
   stm32l4xx erratum — see the fix-stm32l4xx-629360 comment above); the
   veneer then branches back past the original instruction unless PC was
   in the load list, and is padded out with UDFs.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (write-back) bit.  */
  int ri, rn = (initial_insn & 0x000F0000) >> 16;	/* Base register.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Load list mask.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));


      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  /* 0xDF80 omits bit 13 (SP), which the asserts above guarantee clear.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* With write-back Rn is not loaded, so it can be used directly as
	 the base for both halves.  */
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
16965
/* Emit into BASE_STUB_CONTENTS a veneer replacing the wide Thumb-2
   LDMDB INITIAL_INSN found at INITIAL_INSN_ADDR.  Loads of more than 8
   registers are split into two smaller load-multiples; the six cases
   below enumerate the valid combinations of write-back, PC-in-list and
   Rn-in-list.  The veneer branches back past the original instruction
   when PC is not restored, and is padded out with UDFs.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (write-back) bit.  */
  int ri, rn = (initial_insn & 0x000f0000) >> 16;	/* Base register.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Load list mask.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  /* 0xDF80 omits bit 13 (SP), which the asserts above guarantee clear.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB descends, so the high half is loaded first, writing the
	 base back after each half.  */
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* Rebase to the lowest address of the block so ascending LDMIAs
	 can be used; no branch back is needed since PC is loaded.  */
      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* Apply the write-back to Rn up front, then load ascending from a
	 copy in Ri; PC in the high list ends the sequence.  */
      /* SUB Rn, Rn, #(4*nb_registers) */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}
17211
/* Emit, at BASE_STUB_CONTENTS, a stub replacing the Thumb-2 VLDM
   instruction INITIAL_INSN (located at INITIAL_INSN_ADDR) in order to
   work around the STM32L4XX erratum triggered by multi-word loads.
   Loads of more than 8 words are split into several smaller VLDMs.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* Bits 0-7 of the encoding give the number of words transferred.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      /* Decode the addressing mode from the encoding:
	 bits extracted below distinguish IA/DB and writeback.  */
      bfd_boolean is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      /* Base register Rn, bits 16-19.  */
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT (    (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* All chunks but the last load exactly 8 words; the
		 last one loads the remainder.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
17332
17333 static void
17334 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
17335 bfd * output_bfd,
17336 const insn32 wrong_insn,
17337 const bfd_byte *const wrong_insn_addr,
17338 bfd_byte *const stub_contents)
17339 {
17340 if (is_thumb2_ldmia (wrong_insn))
17341 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
17342 wrong_insn, wrong_insn_addr,
17343 stub_contents);
17344 else if (is_thumb2_ldmdb (wrong_insn))
17345 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
17346 wrong_insn, wrong_insn_addr,
17347 stub_contents);
17348 else if (is_thumb2_vldm (wrong_insn))
17349 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
17350 wrong_insn, wrong_insn_addr,
17351 stub_contents);
17352 }
17353
17354 /* End of stm32l4xx work-around. */
17355
17356
17357 static void
17358 elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
17359 asection *output_sec, Elf_Internal_Rela *rel)
17360 {
17361 BFD_ASSERT (output_sec && rel);
17362 struct bfd_elf_section_reloc_data *output_reldata;
17363 struct elf32_arm_link_hash_table *htab;
17364 struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
17365 Elf_Internal_Shdr *rel_hdr;
17366
17367
17368 if (oesd->rel.hdr)
17369 {
17370 rel_hdr = oesd->rel.hdr;
17371 output_reldata = &(oesd->rel);
17372 }
17373 else if (oesd->rela.hdr)
17374 {
17375 rel_hdr = oesd->rela.hdr;
17376 output_reldata = &(oesd->rela);
17377 }
17378 else
17379 {
17380 abort ();
17381 }
17382
17383 bfd_byte *erel = rel_hdr->contents;
17384 erel += output_reldata->count * rel_hdr->sh_entsize;
17385 htab = elf32_arm_hash_table (info);
17386 SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
17387 output_reldata->count++;
17388 }
17389
17390 /* Do code byteswapping. Return FALSE afterwards so that the section is
17391 written out as normal. */
17392
17393 static bfd_boolean
17394 elf32_arm_write_section (bfd *output_bfd,
17395 struct bfd_link_info *link_info,
17396 asection *sec,
17397 bfd_byte *contents)
17398 {
17399 unsigned int mapcount, errcount;
17400 _arm_elf_section_data *arm_data;
17401 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
17402 elf32_arm_section_map *map;
17403 elf32_vfp11_erratum_list *errnode;
17404 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
17405 bfd_vma ptr;
17406 bfd_vma end;
17407 bfd_vma offset = sec->output_section->vma + sec->output_offset;
17408 bfd_byte tmp;
17409 unsigned int i;
17410
17411 if (globals == NULL)
17412 return FALSE;
17413
17414 /* If this section has not been allocated an _arm_elf_section_data
17415 structure then we cannot record anything. */
17416 arm_data = get_arm_elf_section_data (sec);
17417 if (arm_data == NULL)
17418 return FALSE;
17419
17420 mapcount = arm_data->mapcount;
17421 map = arm_data->map;
17422 errcount = arm_data->erratumcount;
17423
17424 if (errcount != 0)
17425 {
17426 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
17427
17428 for (errnode = arm_data->erratumlist; errnode != 0;
17429 errnode = errnode->next)
17430 {
17431 bfd_vma target = errnode->vma - offset;
17432
17433 switch (errnode->type)
17434 {
17435 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
17436 {
17437 bfd_vma branch_to_veneer;
17438 /* Original condition code of instruction, plus bit mask for
17439 ARM B instruction. */
17440 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
17441 | 0x0a000000;
17442
17443 /* The instruction is before the label. */
17444 target -= 4;
17445
17446 /* Above offset included in -4 below. */
17447 branch_to_veneer = errnode->u.b.veneer->vma
17448 - errnode->vma - 4;
17449
17450 if ((signed) branch_to_veneer < -(1 << 25)
17451 || (signed) branch_to_veneer >= (1 << 25))
17452 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
17453 "range"), output_bfd);
17454
17455 insn |= (branch_to_veneer >> 2) & 0xffffff;
17456 contents[endianflip ^ target] = insn & 0xff;
17457 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
17458 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
17459 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
17460 }
17461 break;
17462
17463 case VFP11_ERRATUM_ARM_VENEER:
17464 {
17465 bfd_vma branch_from_veneer;
17466 unsigned int insn;
17467
17468 /* Take size of veneer into account. */
17469 branch_from_veneer = errnode->u.v.branch->vma
17470 - errnode->vma - 12;
17471
17472 if ((signed) branch_from_veneer < -(1 << 25)
17473 || (signed) branch_from_veneer >= (1 << 25))
17474 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
17475 "range"), output_bfd);
17476
17477 /* Original instruction. */
17478 insn = errnode->u.v.branch->u.b.vfp_insn;
17479 contents[endianflip ^ target] = insn & 0xff;
17480 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
17481 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
17482 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
17483
17484 /* Branch back to insn after original insn. */
17485 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
17486 contents[endianflip ^ (target + 4)] = insn & 0xff;
17487 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
17488 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
17489 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
17490 }
17491 break;
17492
17493 default:
17494 abort ();
17495 }
17496 }
17497 }
17498
17499 if (arm_data->stm32l4xx_erratumcount != 0)
17500 {
17501 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
17502 stm32l4xx_errnode != 0;
17503 stm32l4xx_errnode = stm32l4xx_errnode->next)
17504 {
17505 bfd_vma target = stm32l4xx_errnode->vma - offset;
17506
17507 switch (stm32l4xx_errnode->type)
17508 {
17509 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
17510 {
17511 unsigned int insn;
17512 bfd_vma branch_to_veneer =
17513 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
17514
17515 if ((signed) branch_to_veneer < -(1 << 24)
17516 || (signed) branch_to_veneer >= (1 << 24))
17517 {
17518 bfd_vma out_of_range =
17519 ((signed) branch_to_veneer < -(1 << 24)) ?
17520 - branch_to_veneer - (1 << 24) :
17521 ((signed) branch_to_veneer >= (1 << 24)) ?
17522 branch_to_veneer - (1 << 24) : 0;
17523
17524 (*_bfd_error_handler)
17525 (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
17526 "Jump out of range by %ld bytes. "
17527 "Cannot encode branch instruction. "),
17528 output_bfd,
17529 (long) (stm32l4xx_errnode->vma - 4),
17530 out_of_range);
17531 continue;
17532 }
17533
17534 insn = create_instruction_branch_absolute
17535 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
17536
17537 /* The instruction is before the label. */
17538 target -= 4;
17539
17540 put_thumb2_insn (globals, output_bfd,
17541 (bfd_vma) insn, contents + target);
17542 }
17543 break;
17544
17545 case STM32L4XX_ERRATUM_VENEER:
17546 {
17547 bfd_byte * veneer;
17548 bfd_byte * veneer_r;
17549 unsigned int insn;
17550
17551 veneer = contents + target;
17552 veneer_r = veneer
17553 + stm32l4xx_errnode->u.b.veneer->vma
17554 - stm32l4xx_errnode->vma - 4;
17555
17556 if ((signed) (veneer_r - veneer -
17557 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
17558 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
17559 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
17560 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
17561 || (signed) (veneer_r - veneer) >= (1 << 24))
17562 {
17563 (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
17564 "veneer."), output_bfd);
17565 continue;
17566 }
17567
17568 /* Original instruction. */
17569 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
17570
17571 stm32l4xx_create_replacing_stub
17572 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
17573 }
17574 break;
17575
17576 default:
17577 abort ();
17578 }
17579 }
17580 }
17581
17582 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
17583 {
17584 arm_unwind_table_edit *edit_node
17585 = arm_data->u.exidx.unwind_edit_list;
17586 /* Now, sec->size is the size of the section we will write. The original
17587 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
17588 markers) was sec->rawsize. (This isn't the case if we perform no
17589 edits, then rawsize will be zero and we should use size). */
17590 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
17591 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
17592 unsigned int in_index, out_index;
17593 bfd_vma add_to_offsets = 0;
17594
17595 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
17596 {
17597 if (edit_node)
17598 {
17599 unsigned int edit_index = edit_node->index;
17600
17601 if (in_index < edit_index && in_index * 8 < input_size)
17602 {
17603 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
17604 contents + in_index * 8, add_to_offsets);
17605 out_index++;
17606 in_index++;
17607 }
17608 else if (in_index == edit_index
17609 || (in_index * 8 >= input_size
17610 && edit_index == UINT_MAX))
17611 {
17612 switch (edit_node->type)
17613 {
17614 case DELETE_EXIDX_ENTRY:
17615 in_index++;
17616 add_to_offsets += 8;
17617 break;
17618
17619 case INSERT_EXIDX_CANTUNWIND_AT_END:
17620 {
17621 asection *text_sec = edit_node->linked_section;
17622 bfd_vma text_offset = text_sec->output_section->vma
17623 + text_sec->output_offset
17624 + text_sec->size;
17625 bfd_vma exidx_offset = offset + out_index * 8;
17626 unsigned long prel31_offset;
17627
17628 /* Note: this is meant to be equivalent to an
17629 R_ARM_PREL31 relocation. These synthetic
17630 EXIDX_CANTUNWIND markers are not relocated by the
17631 usual BFD method. */
17632 prel31_offset = (text_offset - exidx_offset)
17633 & 0x7ffffffful;
17634 if (bfd_link_relocatable (link_info))
17635 {
17636 /* Here relocation for new EXIDX_CANTUNWIND is
17637 created, so there is no need to
17638 adjust offset by hand. */
17639 prel31_offset = text_sec->output_offset
17640 + text_sec->size;
17641
17642 /* New relocation entity. */
17643 asection *text_out = text_sec->output_section;
17644 Elf_Internal_Rela rel;
17645 rel.r_addend = 0;
17646 rel.r_offset = exidx_offset;
17647 rel.r_info = ELF32_R_INFO (text_out->target_index,
17648 R_ARM_PREL31);
17649
17650 elf32_arm_add_relocation (output_bfd, link_info,
17651 sec->output_section,
17652 &rel);
17653 }
17654
17655 /* First address we can't unwind. */
17656 bfd_put_32 (output_bfd, prel31_offset,
17657 &edited_contents[out_index * 8]);
17658
17659 /* Code for EXIDX_CANTUNWIND. */
17660 bfd_put_32 (output_bfd, 0x1,
17661 &edited_contents[out_index * 8 + 4]);
17662
17663 out_index++;
17664 add_to_offsets -= 8;
17665 }
17666 break;
17667 }
17668
17669 edit_node = edit_node->next;
17670 }
17671 }
17672 else
17673 {
17674 /* No more edits, copy remaining entries verbatim. */
17675 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
17676 contents + in_index * 8, add_to_offsets);
17677 out_index++;
17678 in_index++;
17679 }
17680 }
17681
17682 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
17683 bfd_set_section_contents (output_bfd, sec->output_section,
17684 edited_contents,
17685 (file_ptr) sec->output_offset, sec->size);
17686
17687 return TRUE;
17688 }
17689
17690 /* Fix code to point to Cortex-A8 erratum stubs. */
17691 if (globals->fix_cortex_a8)
17692 {
17693 struct a8_branch_to_stub_data data;
17694
17695 data.writing_section = sec;
17696 data.contents = contents;
17697
17698 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
17699 & data);
17700 }
17701
17702 if (mapcount == 0)
17703 return FALSE;
17704
17705 if (globals->byteswap_code)
17706 {
17707 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
17708
17709 ptr = map[0].vma;
17710 for (i = 0; i < mapcount; i++)
17711 {
17712 if (i == mapcount - 1)
17713 end = sec->size;
17714 else
17715 end = map[i + 1].vma;
17716
17717 switch (map[i].type)
17718 {
17719 case 'a':
17720 /* Byte swap code words. */
17721 while (ptr + 3 < end)
17722 {
17723 tmp = contents[ptr];
17724 contents[ptr] = contents[ptr + 3];
17725 contents[ptr + 3] = tmp;
17726 tmp = contents[ptr + 1];
17727 contents[ptr + 1] = contents[ptr + 2];
17728 contents[ptr + 2] = tmp;
17729 ptr += 4;
17730 }
17731 break;
17732
17733 case 't':
17734 /* Byte swap code halfwords. */
17735 while (ptr + 1 < end)
17736 {
17737 tmp = contents[ptr];
17738 contents[ptr] = contents[ptr + 1];
17739 contents[ptr + 1] = tmp;
17740 ptr += 2;
17741 }
17742 break;
17743
17744 case 'd':
17745 /* Leave data alone. */
17746 break;
17747 }
17748 ptr = end;
17749 }
17750 }
17751
17752 free (map);
17753 arm_data->mapcount = -1;
17754 arm_data->mapsize = 0;
17755 arm_data->map = NULL;
17756
17757 return FALSE;
17758 }
17759
17760 /* Mangle thumb function symbols as we read them in. */
17761
17762 static bfd_boolean
17763 elf32_arm_swap_symbol_in (bfd * abfd,
17764 const void *psrc,
17765 const void *pshn,
17766 Elf_Internal_Sym *dst)
17767 {
17768 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
17769 return FALSE;
17770 dst->st_target_internal = 0;
17771
17772 /* New EABI objects mark thumb function symbols by setting the low bit of
17773 the address. */
17774 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
17775 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
17776 {
17777 if (dst->st_value & 1)
17778 {
17779 dst->st_value &= ~(bfd_vma) 1;
17780 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
17781 ST_BRANCH_TO_THUMB);
17782 }
17783 else
17784 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
17785 }
17786 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
17787 {
17788 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
17789 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
17790 }
17791 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
17792 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
17793 else
17794 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
17795
17796 return TRUE;
17797 }
17798
17799
17800 /* Mangle thumb function symbols as we write them out. */
17801
17802 static void
17803 elf32_arm_swap_symbol_out (bfd *abfd,
17804 const Elf_Internal_Sym *src,
17805 void *cdst,
17806 void *shndx)
17807 {
17808 Elf_Internal_Sym newsym;
17809
17810 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
17811 of the address set, as per the new EABI. We do this unconditionally
17812 because objcopy does not set the elf header flags until after
17813 it writes out the symbol table. */
17814 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
17815 {
17816 newsym = *src;
17817 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
17818 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
17819 if (newsym.st_shndx != SHN_UNDEF)
17820 {
17821 /* Do this only for defined symbols. At link type, the static
17822 linker will simulate the work of dynamic linker of resolving
17823 symbols and will carry over the thumbness of found symbols to
17824 the output symbol table. It's not clear how it happens, but
17825 the thumbness of undefined symbols can well be different at
17826 runtime, and writing '1' for them will be confusing for users
17827 and possibly for dynamic linker itself.
17828 */
17829 newsym.st_value |= 1;
17830 }
17831
17832 src = &newsym;
17833 }
17834 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
17835 }
17836
17837 /* Add the PT_ARM_EXIDX program header. */
17838
17839 static bfd_boolean
17840 elf32_arm_modify_segment_map (bfd *abfd,
17841 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17842 {
17843 struct elf_segment_map *m;
17844 asection *sec;
17845
17846 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17847 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17848 {
17849 /* If there is already a PT_ARM_EXIDX header, then we do not
17850 want to add another one. This situation arises when running
17851 "strip"; the input binary already has the header. */
17852 m = elf_seg_map (abfd);
17853 while (m && m->p_type != PT_ARM_EXIDX)
17854 m = m->next;
17855 if (!m)
17856 {
17857 m = (struct elf_segment_map *)
17858 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
17859 if (m == NULL)
17860 return FALSE;
17861 m->p_type = PT_ARM_EXIDX;
17862 m->count = 1;
17863 m->sections[0] = sec;
17864
17865 m->next = elf_seg_map (abfd);
17866 elf_seg_map (abfd) = m;
17867 }
17868 }
17869
17870 return TRUE;
17871 }
17872
17873 /* We may add a PT_ARM_EXIDX program header. */
17874
17875 static int
17876 elf32_arm_additional_program_headers (bfd *abfd,
17877 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17878 {
17879 asection *sec;
17880
17881 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17882 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17883 return 1;
17884 else
17885 return 0;
17886 }
17887
17888 /* Hook called by the linker routine which adds symbols from an object
17889 file. */
17890
17891 static bfd_boolean
17892 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
17893 Elf_Internal_Sym *sym, const char **namep,
17894 flagword *flagsp, asection **secp, bfd_vma *valp)
17895 {
17896 if (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
17897 && (abfd->flags & DYNAMIC) == 0
17898 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
17899 elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc;
17900
17901 if (elf32_arm_hash_table (info) == NULL)
17902 return FALSE;
17903
17904 if (elf32_arm_hash_table (info)->vxworks_p
17905 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
17906 flagsp, secp, valp))
17907 return FALSE;
17908
17909 return TRUE;
17910 }
17911
/* We use this to override swap_symbol_in and swap_symbol_out.
   Field order follows struct elf_size_info in elf-bfd.h.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* hash table entry size.  */
  1,		/* internal relocs per external reloc.  */
  32, 2,	/* arch_size, log_file_align — TODO confirm against elf-bfd.h.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* Overridden: see above.  */
  elf32_arm_swap_symbol_out,	/* Overridden: see above.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
17942
17943 static bfd_vma
17944 read_code32 (const bfd *abfd, const bfd_byte *addr)
17945 {
17946 /* V7 BE8 code is always little endian. */
17947 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17948 return bfd_getl32 (addr);
17949
17950 return bfd_get_32 (abfd, addr);
17951 }
17952
17953 static bfd_vma
17954 read_code16 (const bfd *abfd, const bfd_byte *addr)
17955 {
17956 /* V7 BE8 code is always little endian. */
17957 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17958 return bfd_getl16 (addr);
17959
17960 return bfd_get_16 (abfd, addr);
17961 }
17962
17963 /* Return size of plt0 entry starting at ADDR
17964 or (bfd_vma) -1 if size can not be determined. */
17965
17966 static bfd_vma
17967 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
17968 {
17969 bfd_vma first_word;
17970 bfd_vma plt0_size;
17971
17972 first_word = read_code32 (abfd, addr);
17973
17974 if (first_word == elf32_arm_plt0_entry[0])
17975 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
17976 else if (first_word == elf32_thumb2_plt0_entry[0])
17977 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
17978 else
17979 /* We don't yet handle this PLT format. */
17980 return (bfd_vma) -1;
17981
17982 return plt0_size;
17983 }
17984
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

/* Note: the `else' below pairs with whichever `if'/`else if' chain the
   preprocessor selects, so both configurations fall through to the
   "unhandled format" return.  */
#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
18024
/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.
   Synthesize "<name>@plt" symbols, one per .rel.plt entry, so that
   tools such as objdump can label PLT stubs.  Returns the number of
   symbols created, 0 if there is nothing to do, or -1 on error.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
			       long symcount ATTRIBUTE_UNUSED,
			       asymbol **syms ATTRIBUTE_UNUSED,
			       long dynsymcount,
			       asymbol **dynsyms,
			       asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* The reloc section must reference the dynamic symbol table.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      /* Cache the contents so they are released with the BFD.  */
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* First pass: compute the total size needed for the symbols plus
     their "name@plt[+0xADDEND]" strings, stored in one allocation.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    /* NOTE(review): *ret is not freed on this path — the caller
       apparently owns it; confirm before changing.  */
    return -1;

  /* Second pass: fill in one synthetic symbol per PLT entry, walking
     the PLT by decoding each entry's size.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Skip leading zeroes in the printed addend.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
18141
18142 static bfd_boolean
18143 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
18144 {
18145 if (hdr->sh_flags & SHF_ARM_NOREAD)
18146 *flags |= SEC_ELF_NOREAD;
18147 return TRUE;
18148 }
18149
18150 static flagword
18151 elf32_arm_lookup_section_flags (char *flag_name)
18152 {
18153 if (!strcmp (flag_name, "SHF_ARM_NOREAD"))
18154 return SHF_ARM_NOREAD;
18155
18156 return SEC_NO_FLAGS;
18157 }
18158
18159 static unsigned int
18160 elf32_arm_count_additional_relocs (asection *sec)
18161 {
18162 struct _arm_elf_section_data *arm_data;
18163 arm_data = get_arm_elf_section_data (sec);
18164 return arm_data->additional_reloc_count;
18165 }
18166
18167 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
18168 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
18169 FALSE otherwise. ISECTION is the best guess matching section from the
18170 input bfd IBFD, but it might be NULL. */
18171
18172 static bfd_boolean
18173 elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
18174 bfd *obfd ATTRIBUTE_UNUSED,
18175 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
18176 Elf_Internal_Shdr *osection)
18177 {
18178 switch (osection->sh_type)
18179 {
18180 case SHT_ARM_EXIDX:
18181 {
18182 Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
18183 Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
18184 unsigned i = 0;
18185
18186 osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
18187 osection->sh_info = 0;
18188
18189 /* The sh_link field must be set to the text section associated with
18190 this index section. Unfortunately the ARM EHABI does not specify
18191 exactly how to determine this association. Our caller does try
18192 to match up OSECTION with its corresponding input section however
18193 so that is a good first guess. */
18194 if (isection != NULL
18195 && osection->bfd_section != NULL
18196 && isection->bfd_section != NULL
18197 && isection->bfd_section->output_section != NULL
18198 && isection->bfd_section->output_section == osection->bfd_section
18199 && iheaders != NULL
18200 && isection->sh_link > 0
18201 && isection->sh_link < elf_numsections (ibfd)
18202 && iheaders[isection->sh_link]->bfd_section != NULL
18203 && iheaders[isection->sh_link]->bfd_section->output_section != NULL
18204 )
18205 {
18206 for (i = elf_numsections (obfd); i-- > 0;)
18207 if (oheaders[i]->bfd_section
18208 == iheaders[isection->sh_link]->bfd_section->output_section)
18209 break;
18210 }
18211
18212 if (i == 0)
18213 {
18214 /* Failing that we have to find a matching section ourselves. If
18215 we had the output section name available we could compare that
18216 with input section names. Unfortunately we don't. So instead
18217 we use a simple heuristic and look for the nearest executable
18218 section before this one. */
18219 for (i = elf_numsections (obfd); i-- > 0;)
18220 if (oheaders[i] == osection)
18221 break;
18222 if (i == 0)
18223 break;
18224
18225 while (i-- > 0)
18226 if (oheaders[i]->sh_type == SHT_PROGBITS
18227 && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
18228 == (SHF_ALLOC | SHF_EXECINSTR))
18229 break;
18230 }
18231
18232 if (i)
18233 {
18234 osection->sh_link = i;
18235 /* If the text section was part of a group
18236 then the index section should be too. */
18237 if (oheaders[i]->sh_flags & SHF_GROUP)
18238 osection->sh_flags |= SHF_GROUP;
18239 return TRUE;
18240 }
18241 }
18242 break;
18243
18244 case SHT_ARM_PREEMPTMAP:
18245 osection->sh_flags = SHF_ALLOC;
18246 break;
18247
18248 case SHT_ARM_ATTRIBUTES:
18249 case SHT_ARM_DEBUGOVERLAY:
18250 case SHT_ARM_OVERLAYSECTION:
18251 default:
18252 break;
18253 }
18254
18255 return FALSE;
18256 }
18257
#undef elf_backend_copy_special_section_fields
#define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields

/* Core parameters of the stock 32-bit ARM ELF target.  */
#define ELF_ARCH			bfd_arch_arm
#define ELF_TARGET_ID			ARM_ELF_DATA
#define ELF_MACHINE_CODE		EM_ARM
/* QNX uses a smaller maximum page size than the generic ARM default.  */
#ifdef __QNXTARGET__
#define ELF_MAXPAGESIZE			0x1000
#else
#define ELF_MAXPAGESIZE			0x10000
#endif
#define ELF_MINPAGESIZE			0x1000
#define ELF_COMMONPAGESIZE		0x1000

#define bfd_elf32_mkobject		        elf32_arm_mkobject

/* Hooks overriding the generic bfd_elf32_* entry points with the
   ARM-specific implementations defined earlier in this file.  */
#define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
#define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
#define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
#define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
#define bfd_elf32_bfd_link_hash_table_create    elf32_arm_link_hash_table_create
#define bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
#define bfd_elf32_find_nearest_line	        elf32_arm_find_nearest_line
#define bfd_elf32_find_inliner_info	        elf32_arm_find_inliner_info
#define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
#define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
#define bfd_elf32_bfd_final_link		elf32_arm_final_link
#define bfd_elf32_get_synthetic_symtab	elf32_arm_get_synthetic_symtab

/* ELF backend hooks (see struct elf_backend_data in elf-bfd.h).  */
#define elf_backend_get_symbol_type             elf32_arm_get_symbol_type
#define elf_backend_gc_mark_hook                elf32_arm_gc_mark_hook
#define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
#define elf_backend_gc_sweep_hook               elf32_arm_gc_sweep_hook
#define elf_backend_check_relocs                elf32_arm_check_relocs
#define elf_backend_relocate_section		elf32_arm_relocate_section
#define elf_backend_write_section		elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections     elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
#define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
#define elf_backend_always_size_sections	elf32_arm_always_size_sections
#define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
#define elf_backend_post_process_headers	elf32_arm_post_process_headers
#define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
#define elf_backend_object_p			elf32_arm_object_p
#define elf_backend_fake_sections  		elf32_arm_fake_sections
#define elf_backend_section_from_shdr  		elf32_arm_section_from_shdr
#define elf_backend_final_write_processing      elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol        elf32_arm_copy_indirect_symbol
#define elf_backend_size_info			elf32_arm_size_info
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers  elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms      elf32_arm_output_arch_local_syms
#define elf_backend_begin_write_processing      elf32_arm_begin_write_processing
#define elf_backend_add_symbol_hook		elf32_arm_add_symbol_hook
#define elf_backend_count_additional_relocs	elf32_arm_count_additional_relocs

/* Backend capability flags: the stock ARM target links with REL
   relocations (no addend in the relocation entry).  */
#define elf_backend_can_refcount       1
#define elf_backend_can_gc_sections    1
#define elf_backend_plt_readonly       1
#define elf_backend_want_got_plt       1
#define elf_backend_want_plt_sym       0
#define elf_backend_may_use_rel_p      1
#define elf_backend_may_use_rela_p     0
#define elf_backend_default_use_rela_p 0

#define elf_backend_got_header_size	12
#define elf_backend_extern_protected_data 1

/* EABI build-attribute handling: attributes live in .ARM.attributes
   with vendor "aeabi".  */
#undef  elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor		"aeabi"
#undef  elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section		".ARM.attributes"
#undef  elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
#undef  elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order
#define elf_backend_obj_attrs_handle_unknown	elf32_arm_obj_attrs_handle_unknown

#undef  elf_backend_section_flags
#define elf_backend_section_flags		elf32_arm_section_flags
#undef  elf_backend_lookup_section_flags_hook
#define elf_backend_lookup_section_flags_hook   elf32_arm_lookup_section_flags

/* Instantiate the stock little/big-endian ARM target vectors.  */
#include "elf32-target.h"

/* Native Client targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-nacl"
18357
18358 /* Like elf32_arm_link_hash_table_create -- but overrides
18359 appropriately for NaCl. */
18360
18361 static struct bfd_link_hash_table *
18362 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
18363 {
18364 struct bfd_link_hash_table *ret;
18365
18366 ret = elf32_arm_link_hash_table_create (abfd);
18367 if (ret)
18368 {
18369 struct elf32_arm_link_hash_table *htab
18370 = (struct elf32_arm_link_hash_table *) ret;
18371
18372 htab->nacl_p = 1;
18373
18374 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
18375 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
18376 }
18377 return ret;
18378 }
18379
18380 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
18381 really need to use elf32_arm_modify_segment_map. But we do it
18382 anyway just to reduce gratuitous differences with the stock ARM backend. */
18383
18384 static bfd_boolean
18385 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
18386 {
18387 return (elf32_arm_modify_segment_map (abfd, info)
18388 && nacl_modify_segment_map (abfd, info));
18389 }
18390
/* Final-write hook for NaCl: run the ordinary ARM finalisation first,
   then the NaCl-specific post-processing on top of it.  */

static void
elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  nacl_final_write_processing (abfd, linker);
}
18397
18398 static bfd_vma
18399 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
18400 const arelent *rel ATTRIBUTE_UNUSED)
18401 {
18402 return plt->vma
18403 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
18404 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
18405 }
18406
/* Override the hooks that differ for NaCl, then instantiate the NaCl
   target vectors.  */
#undef	elf32_bed
#define elf32_bed				elf32_arm_nacl_bed
#undef  bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	\
  elf32_arm_nacl_link_hash_table_create
#undef	elf_backend_plt_alignment
#define elf_backend_plt_alignment		4
#undef	elf_backend_modify_segment_map
#define	elf_backend_modify_segment_map		elf32_arm_nacl_modify_segment_map
#undef	elf_backend_modify_program_headers
#define	elf_backend_modify_program_headers	nacl_modify_program_headers
#undef  elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_nacl_final_write_processing
#undef bfd_elf32_get_synthetic_symtab
#undef  elf_backend_plt_sym_val
#define elf_backend_plt_sym_val			elf32_arm_nacl_plt_sym_val
#undef  elf_backend_copy_special_section_fields

#undef	ELF_MINPAGESIZE
#undef	ELF_COMMONPAGESIZE


#include "elf32-target.h"

/* Reset to defaults.  */
#undef	elf_backend_plt_alignment
#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#undef	elf_backend_modify_program_headers
#undef  elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#undef	ELF_MINPAGESIZE
#define ELF_MINPAGESIZE			0x1000
#undef	ELF_COMMONPAGESIZE
#define ELF_COMMONPAGESIZE		0x1000


/* VxWorks Targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_vxworks_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_vxworks_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-vxworks"
18454
18455 /* Like elf32_arm_link_hash_table_create -- but overrides
18456 appropriately for VxWorks. */
18457
18458 static struct bfd_link_hash_table *
18459 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
18460 {
18461 struct bfd_link_hash_table *ret;
18462
18463 ret = elf32_arm_link_hash_table_create (abfd);
18464 if (ret)
18465 {
18466 struct elf32_arm_link_hash_table *htab
18467 = (struct elf32_arm_link_hash_table *) ret;
18468 htab->use_rel = 0;
18469 htab->vxworks_p = 1;
18470 }
18471 return ret;
18472 }
18473
/* Final-write hook for VxWorks: run the ordinary ARM finalisation
   first, then the VxWorks-specific post-processing on top of it.  */

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}
18480
/* Override the hooks that differ for VxWorks, then instantiate the
   VxWorks target vectors.  */
#undef	elf32_bed
#define elf32_bed elf32_arm_vxworks_bed

#undef  bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
#undef  elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
#undef  elf_backend_emit_relocs
#define elf_backend_emit_relocs			elf_vxworks_emit_relocs

/* VxWorks links with RELA relocations and wants PLT symbols.  */
#undef  elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	0
#undef  elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	1
#undef  elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	1
#undef  elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	1
#undef  ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x1000

#include "elf32-target.h"
18503
18504
/* Merge backend specific data from an object file to the output
   object file when linking.  Returns FALSE (and sets a BFD error via
   the error handler) on a hard incompatibility; flag *mismatches*
   below EABI version 1 may also downgrade the result to FALSE while
   still reporting every individual conflict first.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
{
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  /* Non-ARM inputs have nothing to merge here; treat as compatible.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  /* Merge the EABI build attributes before looking at e_flags.  */
  if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %B is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      /* First real input: adopt its flags wholesale.  */
      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatiblity.  Do not short-circuit
     dynamic objects; their section list may be emptied by
    elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatability
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      /* NOTE(review): the loop breaks after the first non-glue
		 section, so only that section's flags decide
		 only_data_sections -- confirm this is intentional.  */
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
	 ibfd, obfd,
	 /* The EABI version occupies the top byte of e_flags, hence
	    the shift by 24 to print it as a small integer.  */
	 (in_flags & EF_ARM_EABIMASK) >> 24,
	 (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
	     ibfd, obfd,
	     in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses VFP instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B uses FPA instructions, whereas %B does not"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses Maverick instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B does not use Maverick instructions, whereas %B does"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %B uses software FP, whereas %B uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %B uses hardware FP, whereas %B uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("Warning: %B supports interworking, whereas %B does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("Warning: %B does not support interworking, whereas %B does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
18728
18729
/* Symbian OS Targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_symbian_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-symbian"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_symbian_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-symbian"
18740
18741 /* Like elf32_arm_link_hash_table_create -- but overrides
18742 appropriately for Symbian OS. */
18743
18744 static struct bfd_link_hash_table *
18745 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
18746 {
18747 struct bfd_link_hash_table *ret;
18748
18749 ret = elf32_arm_link_hash_table_create (abfd);
18750 if (ret)
18751 {
18752 struct elf32_arm_link_hash_table *htab
18753 = (struct elf32_arm_link_hash_table *)ret;
18754 /* There is no PLT header for Symbian OS. */
18755 htab->plt_header_size = 0;
18756 /* The PLT entries are each one instruction and one word. */
18757 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
18758 htab->symbian_p = 1;
18759 /* Symbian uses armv5t or above, so use_blx is always true. */
18760 htab->use_blx = 1;
18761 htab->root.is_relocatable_executable = 1;
18762 }
18763 return ret;
18764 }
18765
/* Special-section table installed as elf_backend_special_sections for
   the Symbian targets; each entry is { name, name-length, suffix
   length, section type, attributes }.  */
static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),       0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),        0, SHT_STRTAB,   0 },
  { STRING_COMMA_LEN (".dynsym"),        0, SHT_DYNSYM,   0 },
  { STRING_COMMA_LEN (".got"),           0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),          0, SHT_HASH,     0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  /* Sentinel terminating the table.  */
  { NULL,                             0, 0, 0,                 0 }
};
18786
/* Begin-write hook for Symbian (BPABI) objects: clear D_PAGED before
   deferring to the generic ARM hook.  */

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
18802
18803 static bfd_boolean
18804 elf32_arm_symbian_modify_segment_map (bfd *abfd,
18805 struct bfd_link_info *info)
18806 {
18807 struct elf_segment_map *m;
18808 asection *dynsec;
18809
18810 /* BPABI shared libraries and executables should have a PT_DYNAMIC
18811 segment. However, because the .dynamic section is not marked
18812 with SEC_LOAD, the generic ELF code will not create such a
18813 segment. */
18814 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
18815 if (dynsec)
18816 {
18817 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
18818 if (m->p_type == PT_DYNAMIC)
18819 break;
18820
18821 if (m == NULL)
18822 {
18823 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
18824 m->next = elf_seg_map (abfd);
18825 elf_seg_map (abfd) = m;
18826 }
18827 }
18828
18829 /* Also call the generic arm routine. */
18830 return elf32_arm_modify_segment_map (abfd, info);
18831 }
18832
18833 /* Return address for Ith PLT stub in section PLT, for relocation REL
18834 or (bfd_vma) -1 if it should not be included. */
18835
18836 static bfd_vma
18837 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
18838 const arelent *rel ATTRIBUTE_UNUSED)
18839 {
18840 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
18841 }
18842
/* Override the hooks that differ for Symbian OS, then instantiate the
   Symbian target vectors.  */
#undef  elf32_bed
#define elf32_bed elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef  ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

#undef elf_backend_emit_relocs

#undef  bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_symbian_link_hash_table_create
#undef  elf_backend_special_sections
#define elf_backend_special_sections		elf32_arm_symbian_special_sections
#undef  elf_backend_begin_write_processing
#define elf_backend_begin_write_processing	elf32_arm_symbian_begin_write_processing
#undef  elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing

#undef  elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef  elf_backend_got_header_size
#define elf_backend_got_header_size 0

/* Similarly, there is no .got.plt section.  */
#undef  elf_backend_want_got_plt
#define elf_backend_want_got_plt 0

#undef  elf_backend_plt_sym_val
#define elf_backend_plt_sym_val		elf32_arm_symbian_plt_sym_val

/* Symbian links with REL relocations, like the stock ARM target.  */
#undef  elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	1
#undef  elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	0
#undef  elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	0
#undef  elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	0
#undef  ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x8000

#include "elf32-target.h"